reponame: string (length 2-39)
files: list
median_score: float64 (0-11.5)
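Each row below pairs a repository name with its files, stored as a JSON-encoded list of objects carrying `content`, `id`, `language`, `matching_score`, `max_stars_count`, and `path` fields, followed by the row's median score. A minimal sketch of how one row could be parsed, assuming the `files` cell arrives as a JSON string (the loading path and this helper are assumptions, not part of the dump):

```python
import json

# Hypothetical helper: decode the "files" cell of one row, assuming it is a
# JSON-encoded list of objects with the fields visible in the rows below
# ("content", "id", "language", "matching_score", "max_stars_count", "path").
def parse_row(reponame: str, files_json: str, median_score: float) -> dict:
    files = json.loads(files_json)
    return {
        "reponame": reponame,
        "median_score": median_score,
        # Paths of the Python files contained in this row.
        "python_paths": [f["path"] for f in files if f.get("language") == "Python"],
        # Total size of all file contents, in characters.
        "total_content_chars": sum(len(f.get("content", "")) for f in files),
    }
```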
caosbad
[ { "content": "import logging\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Optional, Tuple, Union\n\nfrom web3 import Web3\n\nfrom gnosis.eth import EthereumClient\nfrom gnosis.eth.contracts import (get_cpk_factory_contract,\n get_proxy_factory_contract)\nfrom gnosis.safe import Safe\nfrom gnosis.safe.exceptions import CannotRetrieveSafeInfoException\nfrom gnosis.safe.safe import SafeInfo\n\nfrom ..exceptions import NodeConnectionException\nfrom ..models import InternalTx\n\nlogger = logging.getLogger(__name__)\n\n\nclass SafeServiceException(Exception):\n pass\n\n\nclass CannotGetSafeInfo(SafeServiceException):\n pass\n\n\nEthereumAddress = str\n\n\n@dataclass\nclass SafeCreationInfo:\n created: datetime\n creator: EthereumAddress\n factory_address: EthereumAddress\n master_copy: Optional[EthereumAddress]\n setup_data: Optional[bytes]\n transaction_hash: str\n\n\nclass SafeServiceProvider:\n def __new__(cls):\n if not hasattr(cls, 'instance'):\n from django.conf import settings\n tracing_enabled = bool(settings.ETHEREUM_TRACING_NODE_URL)\n node_url = settings.ETHEREUM_TRACING_NODE_URL if tracing_enabled else settings.ETHEREUM_NODE_URL\n cls.instance = SafeService(EthereumClient(node_url), tracing_enabled)\n return cls.instance\n\n @classmethod\n def del_singleton(cls):\n if hasattr(cls, 'instance'):\n del cls.instance\n\n\nclass SafeService:\n def __init__(self, ethereum_client: EthereumClient, tracing_enabled: bool):\n self.ethereum_client = ethereum_client\n self.tracing_enabled = tracing_enabled\n dummy_w3 = Web3() # Not needed, just used to decode contracts\n self.proxy_factory_contract = get_proxy_factory_contract(dummy_w3)\n self.cpk_proxy_factory_contract = get_cpk_factory_contract(dummy_w3)\n\n def get_safe_creation_info(self, safe_address: str) -> Optional[SafeCreationInfo]:\n try:\n creation_internal_tx = InternalTx.objects.filter(\n ethereum_tx__status=1 # Ignore Internal Transactions for failed Transactions\n ).select_related('ethereum_tx__block').get(contract_address=safe_address)\n creation_ethereum_tx = creation_internal_tx.ethereum_tx\n\n created_time = creation_ethereum_tx.block.timestamp\n\n parent_internal_tx = self._get_parent_internal_tx(\n creation_internal_tx\n )\n\n creator = (parent_internal_tx or creation_ethereum_tx)._from\n proxy_factory = creation_internal_tx._from\n\n master_copy: Optional[str] = None\n setup_data: Optional[bytes] = None\n data = bytes(parent_internal_tx.data) if parent_internal_tx else bytes(creation_ethereum_tx.data)\n result = self._decode_proxy_factory(data) or self._decode_cpk_proxy_factory(data)\n if result:\n master_copy, setup_data = result\n if not (master_copy and setup_data):\n if setup_internal_tx := self._get_next_internal_tx(creation_internal_tx):\n master_copy = setup_internal_tx.to\n setup_data = setup_internal_tx.data\n except InternalTx.DoesNotExist:\n return None\n except IOError as exc:\n raise NodeConnectionException from exc\n\n return SafeCreationInfo(created_time, creator, proxy_factory, master_copy, setup_data,\n creation_internal_tx.ethereum_tx_id)\n\n def get_safe_info(self, safe_address: str) -> SafeInfo:\n try:\n safe = Safe(safe_address, self.ethereum_client)\n return safe.retrieve_all_info()\n except IOError as exc:\n raise NodeConnectionException from exc\n except CannotRetrieveSafeInfoException as e:\n raise CannotGetSafeInfo from e\n\n def _decode_proxy_factory(self, data: Union[bytes, str]) -> Optional[Tuple[str, bytes]]:\n try:\n _, data_decoded = 
self.proxy_factory_contract.decode_function_input(data)\n master_copy = (data_decoded.get('masterCopy') or data_decoded.get('_mastercopy')\n or data_decoded.get('_singleton') or data_decoded.get('singleton'))\n setup_data = data_decoded.get('data') or data_decoded.get('initializer')\n if master_copy and setup_data:\n return master_copy, setup_data\n else:\n logger.error('Problem decoding proxy factory, data_decoded=%s', data_decoded)\n return None\n except ValueError:\n return None\n\n def _decode_cpk_proxy_factory(self, data: Union[bytes, str]) -> Optional[Tuple[str, bytes]]:\n try:\n _, data_decoded = self.cpk_proxy_factory_contract.decode_function_input(data)\n master_copy = data_decoded.get('masterCopy')\n setup_data = data_decoded.get('data')\n return master_copy, setup_data\n except ValueError:\n return None\n\n def _get_next_internal_tx(self, internal_tx: InternalTx) -> Optional[InternalTx]:\n if child_trace := internal_tx.get_child(0):\n return child_trace\n if not self.tracing_enabled:\n return None\n try:\n next_traces = self.ethereum_client.parity.get_next_traces(internal_tx.ethereum_tx_id,\n internal_tx.trace_address_as_list,\n remove_calls=True)\n return next_traces and InternalTx.objects.build_from_trace(next_traces[0],\n internal_tx.ethereum_tx)\n except ValueError:\n return None\n\n def _get_parent_internal_tx(self, internal_tx: InternalTx) -> InternalTx:\n if parent_trace := internal_tx.get_parent():\n return parent_trace\n if not self.tracing_enabled:\n return None\n try:\n previous_trace = self.ethereum_client.parity.get_previous_trace(internal_tx.ethereum_tx_id,\n internal_tx.trace_address_as_list,\n skip_delegate_calls=True)\n return previous_trace and InternalTx.objects.build_from_trace(previous_trace,\n internal_tx.ethereum_tx)\n except ValueError:\n return None\n", "id": "6184411", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "safe_transaction_service/history/services/safe_service.py" } ]
0
erathaowl
[ { "content": "import re\nimport subprocess\nfrom pytransform import get_expired_days, get_user_data\n\n\ndef get_expiration_info():\n try:\n # license_info = get_license_info()\n left_days = get_expired_days()\n if left_days == -1:\n print('This license for %s is never expired')\n else:\n print(f'This license for will be expired in {left_days} days')\n except Exception as e:\n print(e)\n\n\ndef check_gpu():\n gpu_uuids = get_gpu_list()\n assert len(gpu_uuids) == 1 # 1 GPU per instance\n if gpu_uuids:\n if len(gpu_uuids) > 1:\n raise RuntimeError(f'This license is issued for one particular GPU, {len(gpu_uuids)} GPUs detected')\n else:\n assert len(gpu_uuids) == 1\n gpu_uuid = gpu_uuids[0]\n if gpu_uuid != get_user_data().decode('utf-8').lower():\n print(f'User data {get_user_data()}')\n raise RuntimeError('A GPU matching the license is not found')\n else:\n print('A GPU license check is passed')\n get_expiration_info()\n else:\n raise RuntimeError('No GPUs detected, this license is issued to particular GPU')\n\n\ndef get_gpu_list():\n try:\n out = subprocess.run([\"nvidia-smi\", \"-L\"], check=True, capture_output=True)\n out.check_returncode()\n out = out.stdout.decode('utf-8').lower()\n # output example\n # gpu 0: geforce gtx 1080 ti (uuid: gpu-70ef1701-4072-9722-cc0b-7c7e75ff76db)\n # gpu 1: geforce gtx 1080 ti (uuid: gpu-5b8df9cc-3b3c-d07a-8bd1-e2a51af4cfa9)\n # ...\n uuid_list = re.findall(r'\\(uuid: \\S+\\)', out)\n uuid_list = [uuid.replace('(uuid: ', '').replace(')', '')\n for uuid in uuid_list]\n return uuid_list\n except:\n # any error means no GPUs available\n return None\n", "id": "8500746", "language": "Python", "matching_score": 0, "max_stars_count": 1463, "path": "plugins/check_gpu.py" }, { "content": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#############################################################\n# #\n# Copyright @ 2018 - Dashingsoft corp. #\n# All rights reserved. 
#\n# #\n# pyarmor #\n# #\n# Version: 3.4.0 - #\n# #\n#############################################################\n#\n#\n# @File: utils.py\n#\n# @Author: <NAME>(<EMAIL>)\n#\n# @Create Date: 2018/01/17\n#\n# @Description:\n#\n# All the routines of pytransform.\n#\nimport hashlib\nimport logging\nimport os\nimport re\nimport shutil\nimport struct\nimport sys\nimport tempfile\nfrom base64 import b64encode\nfrom codecs import BOM_UTF8\nfrom glob import glob\nfrom json import dumps as json_dumps, loads as json_loads\nfrom subprocess import Popen\nfrom time import gmtime, strftime\nfrom zipfile import ZipFile\n\ntry:\n from urllib.request import urlopen, Request\nexcept ImportError:\n from urllib2 import urlopen, Request\n\nimport pytransform\nfrom config import dll_ext, dll_name, entry_lines, protect_code_template, \\\n platform_url, platform_config, key_url, reg_url, \\\n core_version, capsule_filename, platform_old_urls\n\nPYARMOR_PATH = os.getenv('PYARMOR_PATH', os.path.dirname(__file__))\nPYARMOR_HOME = os.getenv('PYARMOR_HOME', os.path.join('~', '.pyarmor'))\nPYARMOR_TIMEOUT = float(os.getenv('PYARMOR_TIMEOUT', '6.0'))\nPLATFORM_PATH = os.path.join(PYARMOR_PATH, pytransform.plat_path)\n\nHOME_PATH = os.path.abspath(os.path.expanduser(PYARMOR_HOME))\nCROSS_PLATFORM_PATH = os.path.join(HOME_PATH, pytransform.plat_path)\n\nDEFAULT_CAPSULE = os.path.join(HOME_PATH, capsule_filename)\n# From v6.2.0, change the location of default capsule to ~/.pyarmor/\nOLD_CAPSULE = os.path.join(HOME_PATH, '..', capsule_filename)\n\nFEATURE_ANTI = 1\nFEATURE_JIT = 2\nFEATURE_ADV = 4\nFEATURE_MAPOP = 8\nFEATURE_VM = 16\n\n\ndef _format_platid(platid=None):\n if platid is None:\n platid = pytransform.format_platform()\n if os.path.isabs(platid) or os.path.isfile(platid):\n return os.path.normpath(platid)\n return platid.replace('\\\\', '/').replace('/', '.')\n\n\ndef _search_downloaded_files(path, platid, libname):\n libpath = os.path.join(path, platid)\n if os.path.exists(libpath):\n for x in os.listdir(libpath):\n if os.path.exists(os.path.join(libpath, x, libname)):\n return os.path.join(platid, x)\n\n\ndef pytransform_bootstrap(capsule=None, force=False):\n if pytransform._pytransform is not None and not force:\n logging.debug('No bootstrap, pytransform has been loaded')\n return\n logging.debug('PyArmor installation path: %s', PYARMOR_PATH)\n logging.debug('PyArmor home path: %s', HOME_PATH)\n path = PYARMOR_PATH\n licfile = os.path.join(path, 'license.lic')\n if not os.path.exists(licfile):\n if not os.getenv('PYARMOR_HOME',\n os.getenv('HOME', os.getenv('USERPROFILE'))):\n logging.info('Create trial license file: %s', licfile)\n shutil.copy(os.path.join(path, 'license.tri'), licfile)\n else:\n licfile = os.path.join(HOME_PATH, 'license.lic')\n if not os.path.exists(licfile):\n if not os.path.exists(HOME_PATH):\n logging.info('Create pyarmor home path: %s', HOME_PATH)\n os.makedirs(HOME_PATH)\n old_license = os.path.join(HOME_PATH, '..', 'license.lic')\n if os.path.exists(old_license):\n logging.info('Create license file %s from old license %s',\n licfile, old_license)\n shutil.move(old_license, licfile)\n else:\n logging.info('Create trial license file: %s', licfile)\n shutil.copy(os.path.join(path, 'license.tri'), licfile)\n\n libname = dll_name + dll_ext\n platid = pytransform.format_platform()\n logging.debug('Native platform is %s', _format_platid(platid))\n\n if os.getenv('PYARMOR_PLATFORM'):\n p = os.getenv('PYARMOR_PLATFORM')\n logging.info('PYARMOR_PLATFORM is %s', p)\n platid = 
os.path.normpath(p) if os.path.isabs(p) or os.path.isfile(p) \\\n else os.path.join(*os.path.normpath(p).split('.'))\n logging.debug('Build platform is %s', _format_platid(platid))\n\n if os.path.isabs(platid):\n if not os.path.exists(os.path.join(platid, libname)):\n raise RuntimeError('No dynamic library found at %s', platid)\n elif not os.path.isfile(platid):\n libpath = PLATFORM_PATH\n logging.debug('Search dynamic library in the path: %s', libpath)\n if not os.path.exists(os.path.join(libpath, platid, libname)):\n libpath = CROSS_PLATFORM_PATH\n logging.debug('Search dynamic library in the path: %s', libpath)\n if not os.path.exists(os.path.join(libpath, platid, libname)):\n found = _search_downloaded_files(libpath, platid, libname)\n if found:\n logging.debug('Found available dynamic library %s', found)\n platid = found\n else:\n if not os.path.exists(libpath):\n logging.info('Create cross platform libraries path %s',\n libpath)\n os.makedirs(libpath)\n rid = download_pytransform(platid, libpath, firstonly=1)[0]\n platid = os.path.join(*rid.split('.'))\n if libpath == CROSS_PLATFORM_PATH:\n platid = os.path.abspath(os.path.join(libpath, platid))\n\n pytransform.pyarmor_init(platid=platid)\n logging.debug('Loaded dynamic library: %s', pytransform._pytransform._name)\n\n ver = pytransform.version_info()\n logging.debug('The version of core library is %s', ver)\n if ver[0] < 32:\n raise RuntimeError('PyArmor does not work with this core library '\n '(r%d), which reversion < r32, please remove '\n '\"%s\" then run command again' % (platid, ver[0]))\n\n if capsule is not None and not os.path.exists(capsule):\n logging.info('Generating public capsule ...')\n make_capsule(capsule)\n\n\ndef _get_old_remote_file(path, timeout=6.0):\n for prefix in platform_old_urls:\n url = '/'.join([prefix, path])\n logging.info('Getting remote file: %s', url)\n try:\n return _urlopen(url, timeout=timeout)\n except Exception as e:\n logging.info('Could not get file from %s: %s', prefix, e)\n\n\ndef _get_user_secret(data):\n secret = []\n data = bytearray(data)\n for i in range(0, len(data), 10):\n secret.append(sum(data[i:i+10]) & 0xFF)\n return b64encode(bytearray(secret)).decode()\n\n\ndef _get_remote_file(path, timeout=6.0, prefix=None):\n rcode = get_registration_code()\n if not rcode:\n logging.warning('The trial version could not download '\n 'the latest platform library')\n return _get_old_remote_file(path, timeout=PYARMOR_TIMEOUT)\n\n rcode = rcode.replace('-sn-1.txt', '')\n\n licfile = os.path.join(PYARMOR_PATH, 'license.lic')\n if not os.path.exists(licfile):\n licfile = os.path.join(HOME_PATH, 'license.lic')\n logging.debug('Got license data from %s', licfile)\n with open(licfile, 'rb') as f:\n licdata = f.read()\n secret = _get_user_secret(licdata)\n\n url = platform_url if prefix is None else prefix\n url = '/'.join([url.format(version=core_version), path])\n logging.info('Getting remote file: %s', url)\n\n req = Request(url)\n auth = b64encode(('%s:%s' % (rcode, secret)).encode())\n req.add_header('Authorization', 'Basic ' + auth.decode())\n return _urlopen(req, None, timeout)\n\n\ndef _get_platform_list(platid=None):\n filename = os.path.join(CROSS_PLATFORM_PATH, platform_config)\n logging.debug('Load platform list from %s', filename)\n\n cached = os.path.exists(filename)\n if not cached:\n res = _get_remote_file(platform_config, timeout=PYARMOR_TIMEOUT)\n if res is None:\n raise RuntimeError('No platform list file %s found' % filename)\n if not os.path.exists(CROSS_PLATFORM_PATH):\n 
logging.info('Create platform path: %s' % CROSS_PLATFORM_PATH)\n os.makedirs(CROSS_PLATFORM_PATH)\n logging.info('Write cached platform list file %s', filename)\n with open(filename, 'wb') as f:\n f.write(res.read())\n\n with open(filename) as f:\n cfg = json_loads(f.read())\n\n ver = cfg.get('version')\n if not ver.split('.')[0] == core_version.split('.')[0]:\n if not get_registration_code():\n logging.warning('The trial version could not download the latest'\n ' core libraries, tag r41.15a is always used')\n elif cached:\n logging.info('Remove cached platform list file %s', filename)\n os.remove(filename)\n return _get_platform_list(platid)\n\n logging.warning('The core library excepted version is %s, '\n 'but got %s from platform list file %s',\n core_version, ver, filename)\n\n return cfg.get('platforms', []) if platid is None \\\n else [x for x in cfg.get('platforms', [])\n if (platid is None\n or (x['id'] == platid)\n or (x['id'].find(platid + '.') == 0)\n or (x['path'] == platid))]\n\n\ndef get_platform_list(platid=None):\n return _get_platform_list(platid=platid)\n\n\ndef download_pytransform(platid, output=None, url=None, firstonly=False):\n platid = _format_platid(platid)\n\n logging.info('Search library for platform: %s', platid)\n plist = _get_platform_list(platid=platid)\n if not plist:\n logging.error('Unsupport platform %s', platid)\n raise RuntimeError('No available library for this platform')\n\n if firstonly:\n plist = plist[:1]\n\n result = [p['id'] for p in plist]\n logging.info('Found available libraries: %s', result)\n\n if output is None:\n output = CROSS_PLATFORM_PATH\n\n if not os.path.exists(output):\n logging.info('Create cross platforms path: %s', output)\n os.makedirs(output)\n\n if not os.access(output, os.W_OK):\n logging.error('Cound not download library file to %s', output)\n raise RuntimeError('No write permission for target path')\n\n for p in plist:\n libname = p['filename']\n path = '/'.join([p['path'], libname])\n\n dest = os.path.join(output, *p['id'].split('.'))\n logging.info('Target path for %s: %s', p['id'], dest)\n makedirs(dest, exist_ok=True)\n\n logging.info('Downloading library file for %s ...', p['id'])\n res = _get_remote_file(path, timeout=PYARMOR_TIMEOUT, prefix=url)\n\n if res is None:\n raise RuntimeError('Download library file failed')\n\n data = res.read()\n if hashlib.sha256(data).hexdigest() != p['sha256']:\n raise RuntimeError('Verify dynamic library failed, try to '\n 'reinstall the latest pyarmor and run '\n '\"pyarmor download -u\" to fix it')\n\n target = os.path.join(dest, libname)\n logging.info('Writing target file: %s', target)\n with open(target, 'wb') as f:\n f.write(data)\n\n logging.info('Download dynamic library %s OK', p['id'])\n\n return result\n\n\ndef update_pytransform(pattern):\n platfile = os.path.join(CROSS_PLATFORM_PATH, platform_config)\n if os.path.exists(platfile):\n logging.info('Removed cached platform index file %s', platfile)\n os.remove(platfile)\n\n platforms = dict([(p['id'], p) for p in _get_platform_list()])\n path = os.path.join(CROSS_PLATFORM_PATH, '*', '*', '*')\n flist = glob(os.path.join(path, '_pytransform.*')) + \\\n glob(os.path.join(path, 'py*', 'pytransform.*'))\n\n plist = []\n n = len(CROSS_PLATFORM_PATH) + 1\n for filename in flist:\n platid = _format_platid(os.path.dirname(filename)[n:])\n if not ((pattern == '*') or platid.startswith(pattern)):\n continue\n p = platforms.get(platid)\n if p is None:\n logging.warning('No %s found in supported platforms', platid)\n else:\n with 
open(filename, 'rb') as f:\n data = f.read()\n if hashlib.sha256(data).hexdigest() == p['sha256']:\n logging.info('The platform %s has been the latest', platid)\n else:\n plist.append(p['id'])\n\n if not plist:\n logging.info('Nothing updated')\n return\n\n for platid in plist:\n download_pytransform(platid)\n logging.info('Update library successfully')\n\n\ndef make_capsule(filename):\n if os.path.exists(OLD_CAPSULE):\n logging.info('Move old capsule %s to %s', OLD_CAPSULE, filename)\n shutil.move(OLD_CAPSULE, filename)\n return\n\n if get_registration_code():\n logging.error('The registered version would use private capsule.'\n '\\n\\t Please run `pyarmor register KEYFILE` '\n 'to restore your private capsule.')\n raise RuntimeError('Could not generate private capsule.')\n public_capsule = os.path.join(PYARMOR_PATH, 'public_capsule.zip')\n logging.debug('Copy %s to %s', public_capsule, filename)\n shutil.copy(public_capsule, filename)\n logging.debug('Generate public capsule %s OK.', filename)\n\n\ndef check_capsule(capsule):\n if os.path.getmtime(capsule) < os.path.getmtime(\n os.path.join(PYARMOR_PATH, 'license.lic')):\n logging.info('Capsule %s has been out of date', capsule)\n\n suffix = strftime('%Y%m%d%H%M%S', gmtime())\n logging.info('Rename it as %s.%s', capsule, suffix)\n os.rename(capsule, capsule + '.' + suffix)\n return False\n return True\n\n\ndef _make_entry(filename, rpath=None, relative=None, shell=None, suffix='',\n advanced=0):\n pkg = os.path.basename(filename) == '__init__.py'\n entry_code = entry_lines[0] % (\n '.' if (relative is True) or ((relative is None) and pkg) else '',\n suffix)\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n # Fix empty file issue\n n = 0\n for n in range(len(lines)):\n if lines[n].strip() == '' or lines[n].find('__future__') > 0:\n continue\n if not lines[n][0] == '#':\n break\n for line in lines[n:]:\n if line.strip() == entry_code.strip():\n return\n\n with open(filename, 'w') as f:\n f.write(''.join(lines[:n]))\n if shell:\n f.write(shell)\n f.write(entry_code)\n paras = []\n if rpath is not None:\n paras.append(repr(rpath))\n if suffix:\n paras.append('suffix=%s' % repr(suffix))\n if advanced:\n paras.append('advanced=1')\n f.write(entry_lines[1] % ', '.join(paras))\n f.write(''.join(lines[n:]))\n\n\ndef _get_script_shell(script):\n with open(script, 'r') as f:\n try:\n line = f.read(60)\n if len(line) > 2 and line[:2] == '#!':\n i = line.find('\\n') + 1\n if i > 0:\n return line[:i]\n except Exception:\n pass\n\n\ndef make_entry(entris, path, output, rpath=None, relative=None, suffix='',\n advanced=0):\n for entry in entris.split(','):\n entry = entry.strip()\n filename = build_path(entry, output)\n src = build_path(entry, path)\n if os.path.exists(filename):\n shell = _get_script_shell(src)\n else:\n shell = None\n logging.info('Copy entry script %s to %s', src, relpath(filename))\n shutil.copy(src, filename)\n if shell:\n logging.info('Insert shell line: %s', shell.strip())\n logging.info('Insert bootstrap code to entry script %s',\n relpath(filename))\n _make_entry(filename, rpath, relative=relative, shell=shell,\n suffix=suffix, advanced=advanced)\n\n\ndef obfuscate_scripts(filepairs, mode, capsule, output):\n makedirs(output, exist_ok=True)\n\n prokey = os.path.join(output, 'product.key')\n if not os.path.exists(prokey):\n ZipFile(capsule).extract('product.key', path=output)\n\n dirs = []\n for x in filepairs:\n dirs.append(os.path.dirname(x[1]))\n\n for d in set(dirs):\n makedirs(d, exist_ok=True)\n\n if filepairs:\n 
pytransform.encrypt_project_files(prokey, tuple(filepairs), mode)\n\n os.remove(prokey)\n return filepairs\n\n\ndef _get_library_filename(platid, checksums=None):\n if os.path.isabs(platid) or os.path.isfile(platid):\n if not os.path.exists(platid):\n raise RuntimeError('No platform library %s found' % platid)\n return platid\n\n xlist = [str(x) for x in platid.split('.')]\n n = len(xlist)\n\n if n < 3:\n raise RuntimeError('Missing features in platform name %s' % platid)\n\n # Always download core libraries\n # if (xlist[2] == '7') and xlist[1] in ('x86', 'x86_64') and \\\n # xlist[0] in ('windows', 'darwin', 'linux'):\n # path = os.path.join(PLATFORM_PATH, *xlist[:2])\n # names = [x for x in os.listdir(path) if x.startswith('_pytransform.')]\n # if names:\n # return os.path.join(path, names[0])\n\n names = None\n path = os.path.join(CROSS_PLATFORM_PATH, *xlist)\n if os.path.exists(path):\n names = [x for x in os.listdir(path) if x.find('pytransform.') > -1]\n if len(names) > 1:\n raise RuntimeError('Invalid platform data, there is more than '\n '1 file in the path %s', path)\n if not names:\n download_pytransform(platid)\n return _get_library_filename(platid, checksums)\n\n filename = os.path.join(path, names[0])\n if checksums is not None and platid in checksums:\n with open(filename, 'rb') as f:\n data = f.read()\n if hashlib.sha256(data).hexdigest() != checksums[platid]:\n if hasattr(sys, '_debug_pyarmor'):\n logging.warning('Found library %s for platform %s, but it does'\n ' not match this pyarmor', filename, platid)\n return filename\n logging.info('The platform %s is out of date', platid)\n download_pytransform(platid)\n return _get_library_filename(platid, checksums)\n\n return filename\n\n\ndef _build_platforms(platforms):\n results = []\n checksums = dict([(p['id'], p['sha256']) for p in _get_platform_list()])\n n = len(platforms)\n\n if not os.path.exists(CROSS_PLATFORM_PATH):\n logging.info('Create cross platforms path: %s', CROSS_PLATFORM_PATH)\n os.makedirs(CROSS_PLATFORM_PATH)\n\n for platid in platforms:\n if (n > 1) and (os.path.isabs(platid) or os.path.isfile(platid)):\n raise RuntimeError('Invalid platform `%s`, for multiple platforms '\n 'it must be `platform.machine`' % platid)\n if (n > 1) and platid.startswith('vs2015.'):\n raise RuntimeError('The platform `%s` does not work '\n 'in multiple platforms target' % platid)\n filename = _get_library_filename(platid, checksums)\n results.append(filename)\n\n logging.debug('Target dynamic library: %s', results)\n return results\n\n\ndef _build_license_file(capsule, licfile, output=None):\n if licfile is None:\n myzip = ZipFile(capsule, 'r')\n try:\n if 'default.lic' in myzip.namelist():\n logging.info('Read default license from capsule')\n lickey = myzip.read('default.lic')\n else:\n logging.info('Generate default license file')\n lickey = make_license_key(capsule, 'PyArmor-Project')\n finally:\n myzip.close()\n elif licfile == 'no-restrict':\n logging.info('Generate no restrict mode license file')\n licode = '*FLAGS:%c*CODE:PyArmor-Project' % chr(1)\n lickey = make_license_key(capsule, licode)\n elif licfile in ('no', 'outer'):\n logging.info('Use outer license file')\n lickey = b''\n else:\n logging.info('Generate license file from %s', relpath(licfile))\n with open(licfile, 'rb') as f:\n lickey = f.read()\n if output is not None and lickey:\n logging.info('Write license file: %s', output)\n with open(output, 'wb') as f:\n f.write(lickey)\n return lickey\n\n\ndef make_runtime(capsule, output, licfile=None, 
platforms=None, package=False,\n suffix='', supermode=False):\n if supermode:\n return _make_super_runtime(capsule, output, platforms, licfile=licfile,\n suffix=suffix)\n\n if package:\n output = os.path.join(output, 'pytransform' + suffix)\n makedirs(output, exist_ok=True)\n logging.info('Generating runtime files to %s', relpath(output))\n\n checklist = []\n keylist = _build_keylist(capsule, licfile)\n\n def copy3(src, dst, onlycopy=False):\n x = os.path.basename(src)\n if suffix:\n x = x.replace('.', ''.join([suffix, '.']))\n logging.info('Rename it to %s', x)\n target = os.path.join(dst, x)\n shutil.copy2(src, target)\n\n if onlycopy:\n return\n\n logging.info('Patch library %s', target)\n data = _patch_extension(target, keylist, suffix)\n with open(target, 'wb') as f:\n f.write(data)\n checklist.append(sum(bytearray(data)))\n\n if not platforms:\n libfile = pytransform._pytransform._name\n if not os.path.exists(libfile):\n libname = dll_name + dll_ext\n libfile = os.path.join(PYARMOR_PATH, libname)\n if not os.path.exists(libfile):\n pname = pytransform.format_platform()\n libpath = os.path.join(PYARMOR_PATH, 'platforms')\n libfile = os.path.join(libpath, pname, libname)\n logging.info('Copying %s', libfile)\n copy3(libfile, output)\n\n elif len(platforms) == 1:\n filename = _build_platforms(platforms)[0]\n logging.info('Copying %s', filename)\n copy3(filename, output)\n else:\n libpath = os.path.join(output, pytransform.plat_path)\n logging.info('Create library path to support multiple platforms: %s',\n libpath)\n if not os.path.exists(libpath):\n os.mkdir(libpath)\n\n filenames = _build_platforms(platforms)\n for platid, filename in list(zip(platforms, filenames)):\n logging.info('Copying %s', filename)\n path = os.path.join(libpath, *platid.split('.')[:2])\n logging.info('To %s', path)\n makedirs(path, exist_ok=True)\n copy3(filename, path)\n\n filename = os.path.join(PYARMOR_PATH, 'pytransform.py')\n if package:\n logging.info('Copying %s', filename)\n logging.info('Rename it to %s/__init__.py', os.path.basename(output))\n shutil.copy2(filename, os.path.join(output, '__init__.py'))\n else:\n logging.info('Copying %s', filename)\n copy3(filename, output, onlycopy=True)\n\n logging.info('Generate runtime files OK')\n return checklist\n\n\ndef copy_runtime(path, output, licfile=None, dryrun=False):\n logging.info('Copying runtime files from %s', path)\n logging.info('To %s', output)\n makedirs(output, exist_ok=True)\n\n def copy3(src, dst):\n if dryrun:\n return\n if os.path.isdir(src):\n if os.path.exists(dst):\n logging.info('Remove old path %s', dst)\n shutil.rmtree(dst)\n logging.info('Copying directory %s', os.path.basename(src))\n shutil.copytree(src, dst)\n else:\n logging.info('Copying file %s', os.path.basename(src))\n shutil.copy2(src, dst)\n\n name = None\n tlist = []\n for x in os.listdir(path):\n root, ext = os.path.splitext(x)\n if root in ('pytransform_protection', 'pytransform_bootstrap'):\n continue\n src = os.path.join(path, x)\n dst = os.path.join(output, x)\n if x.startswith('pytransform'):\n copy3(src, dst)\n name = x\n tlist.append(ext)\n elif x.startswith('_pytransform') or x == 'platforms':\n copy3(src, dst)\n\n if name is None:\n raise RuntimeError('No module \"pytransform\" found in runtime package')\n\n if (('' in tlist or '.py' in tlist) and len(tlist) > 1):\n raise RuntimeError('Multiple runtime modules found')\n\n if licfile and not dryrun:\n if not os.path.exists(licfile):\n raise RuntimeError('No found license file \"%s\"' % licfile)\n 
logging.info('Copying outer license %s', licfile)\n dst = os.path.join(output, '' if name.find('.') > 0 else name)\n logging.info('To %s/license.lic', dst)\n shutil.copy2(licfile, os.path.join(dst, 'license.lic'))\n\n\ndef make_project_license(capsule, code, output):\n myzip = ZipFile(capsule, 'r')\n myzip.extract('private.key', tempfile.gettempdir())\n prikey = os.path.join(tempfile.tempdir, 'private.key')\n try:\n pytransform.generate_license_file(output, prikey, code)\n finally:\n os.remove(prikey)\n\n\ndef make_license_key(capsule, code, output=None, key=None):\n prikey = ZipFile(capsule, 'r').read('private.key') \\\n if key is None else key\n size = len(prikey)\n lickey = pytransform.generate_license_key(prikey, size, code)\n if output is None:\n return lickey\n elif output in ('stdout', 'stderr'):\n getattr(sys, output).write(\n lickey.decode() if hasattr(lickey, 'decode') else lickey)\n else:\n with open(output, 'wb') as f:\n f.write(lickey)\n\n\ndef show_hd_info(name=None):\n if name is None:\n pytransform.show_hd_info()\n else:\n t, sep = (0, ':') if name.startswith('/') else (1, '/')\n info = pytransform.get_hd_info(t, name)\n print('Query hardware information: \"%s%s%s\"' % (name, sep, info))\n\n\ndef build_path(path, start):\n return path if os.path.isabs(path) else os.path.join(start, path)\n\n\ndef make_project_command(platform, python, pyarmor, output):\n script = os.path.abspath(pyarmor)\n if platform.startswith('win'):\n filename = os.path.join(output, 'pyarmor.bat')\n with open(filename, 'w') as f:\n f.write('%s %s %%*' % (python, script))\n else:\n filename = os.path.join(output, 'pyarmor')\n with open(filename, 'w') as f:\n f.write('%s %s \"$@\"' % (python, script))\n os.chmod(filename, 0o755)\n return filename\n\n\ndef get_registration_code():\n try:\n code = pytransform.get_license_info()['CODE']\n except Exception:\n code = None\n return code\n\n\ndef search_plugins(plugins):\n if plugins:\n result = []\n for name in plugins:\n if name == 'on':\n logging.info('Enable inline plugin')\n result.append(['<inline>', '<plugin>', 0])\n continue\n i = 1 if name[0] == '@' else 0\n filename = name[i:] + ('' if name.endswith('.py') else '.py')\n key = os.path.basename(name[i:])\n if not os.path.exists(filename):\n if os.path.isabs(filename):\n raise RuntimeError('No script found for plugin %s' % name)\n for path in [os.path.join(x, 'plugins')\n for x in (HOME_PATH, PYARMOR_PATH)]:\n testname = build_path(filename, path)\n if os.path.exists(testname):\n filename = testname\n break\n else:\n raise RuntimeError('No script found for plugin %s' % name)\n logging.info('Found plugin %s at: %s', key, filename)\n result.append([key, filename, not i])\n return result\n\n\ndef _patch_plugins(plugins):\n result = []\n for key, filename, x in plugins:\n if x:\n logging.info('Apply plugin %s', key)\n lines = _readlines(filename)\n result.append(''.join(lines))\n return ['\\n'.join(result)]\n\n\ndef _filter_call_marker(plugins, name):\n for plugin in plugins:\n if plugin[0] == name:\n plugin[-1] = True\n return True\n\n\ndef _build_source_keylist(source, code, closure):\n result = []\n flist = ('dllmethod', 'init_pytransform', 'init_runtime', '_load_library',\n 'get_registration_code', 'get_expired_days', 'get_hd_info',\n 'get_license_info', 'get_license_code', 'format_platform',\n 'pyarmor_init', 'pyarmor_runtime', 'assert_armored')\n\n def _make_value(co):\n return len(co.co_names), len(co.co_consts), len(co.co_code)\n\n def _make_code_key(co):\n v1 = _make_value(co)\n v2 = 
_make_value(co.co_consts[1]) if co.co_name == 'dllmethod' else None\n co_closure = getattr(co, closure, None)\n v3 = _make_value(getattr(co_closure[0].cell_contents, code)) \\\n if co_closure else None\n return v1, v2, v3\n\n mod_co = compile(source, 'pytransform', 'exec')\n result.append((-1, _make_code_key(mod_co)))\n mod_consts = mod_co.co_consts\n for i in range(len(mod_consts)):\n co_const = mod_consts[i]\n co = getattr(co_const, code, None)\n if co and co.co_name in flist:\n result.append((i, _make_code_key(co)))\n return result\n\n\ndef _build_pytransform_keylist(mod, code, closure):\n result = []\n flist = ('dllmethod', 'init_pytransform', 'init_runtime', '_load_library',\n 'get_registration_code', 'get_expired_days', 'get_hd_info',\n 'get_license_info', 'get_license_code', 'format_platform',\n 'pyarmor_init', 'pyarmor_runtime', '_match_features')\n\n def _make_value(co):\n return len(co.co_names), len(co.co_consts), len(co.co_code)\n\n def _make_code_key(co):\n v1 = _make_value(co)\n v2 = _make_value(co.co_consts[1]) if co.co_name == 'dllmethod'else None\n co_closure = getattr(co, closure, None)\n v3 = _make_value(getattr(co_closure[0].cell_contents, code)) \\\n if co_closure else None\n return v1, v2, v3\n\n for name in flist:\n co = getattr(getattr(mod, name), code)\n result.append((name, _make_code_key(co)))\n return result\n\n\ndef _get_checksum(filename):\n size = os.path.getsize(filename) & 0xFFFFFFF0\n n = size >> 2\n with open(filename, 'rb') as f:\n buf = f.read(size)\n fmt = 'I' * n\n return sum(struct.unpack(fmt, buf)) & 0xFFFFFFFF\n\n\ndef _make_protection_code(relative, checksums, suffix='', multiple=False):\n template = os.path.join(PYARMOR_PATH, protect_code_template % '')\n with open(template) as f:\n buf = f.read()\n\n code = '__code__' if sys.version_info[0] == 3 else 'func_code'\n closure = '__closure__' if sys.version_info[0] == 3 else 'func_closure'\n keylist = _build_pytransform_keylist(pytransform, code, closure)\n rpath = '{0}.os.path.dirname({0}.__file__)'.format('pytransform')\n spath = '{0}.os.path.join({0}.plat_path, {0}.format_platform())'.format(\n 'pytransform') if multiple else repr('')\n return buf.format(code=code, closure=closure, rpath=rpath, spath=spath,\n checksum=str(checksums), keylist=keylist, suffix=suffix,\n relative='from . 
' if relative else '')\n\n\ndef _frozen_modname(filename, filename2):\n names = os.path.normpath(filename).split(os.sep)\n names2 = os.path.normpath(filename2).split(os.sep)\n k = -1\n while True:\n try:\n if names[k] != names2[k]:\n break\n except IndexError:\n break\n k -= 1\n if names[-1] == '__init__.py':\n dotnames = names[k if k == -2 else k + 1:-1]\n else:\n names[-1] = names[-1][:-3]\n dotnames = names[k+1:]\n return \"<frozen %s>\" % '.'.join(dotnames)\n\n\ndef _guess_encoding(filename):\n with open(filename, 'rb') as f:\n line = f.read(80)\n if line and line[:3] == BOM_UTF8:\n return 'utf-8'\n if line and line[0] == 35:\n n = line.find(b'\\n')\n m = re.search(r'coding[=:]\\s*([-\\w.]+)', line[:n].decode())\n if m:\n return m.group(1)\n if n > -1 and len(line) > (n+1) and line[n+1] == 35:\n k = n + 1\n n = line.find(b'\\n', k)\n m = re.search(r'coding[=:]\\s*([-\\w.]+)', line[k:n].decode())\n return m and m.group(1)\n\n\ndef _readlines(filename):\n if sys.version_info[0] == 2:\n with open(filename, 'r') as f:\n lines = f.readlines()\n else:\n encoding = _guess_encoding(filename)\n try:\n with open(filename, 'r', encoding=encoding) as f:\n lines = f.readlines()\n except UnicodeDecodeError:\n encoding = 'utf-8'\n with open(filename, 'r', encoding=encoding) as f:\n lines = f.readlines()\n # Try to remove any UTF BOM bytes\n if encoding == 'utf-8' and lines:\n i = 0\n for c in lines[0]:\n if ord(c) < 128:\n break\n i += 1\n if i:\n lines[0] = lines[0][i:]\n return lines\n\n\ndef encrypt_script(pubkey, filename, destname, wrap_mode=1, obf_code=1,\n obf_mod=1, adv_mode=0, rest_mode=1, entry=0, protection=0,\n platforms=None, plugins=None, rpath=None, suffix=''):\n lines = _readlines(filename)\n if plugins:\n n = 0\n k = -1\n plist = []\n stub_marker = '# {PyArmor Plugins}'\n inline_marker = '# PyArmor Plugin: '\n call_markers = '# pyarmor_', '# @pyarmor_'\n for line in lines:\n if line.startswith(stub_marker):\n k = n + 1\n else:\n i = line.find(inline_marker)\n if i > -1:\n plist.append((n if k == -1 else n+1, i, inline_marker))\n else:\n for marker in call_markers:\n i = line.find(marker)\n if i == -1:\n continue\n name = line[i+len(marker):line.find('(')].strip()\n if _filter_call_marker(plugins, name):\n plist.append((n if k == -1 else n+1, i, marker))\n n += 1\n if k > -1:\n logging.info('Patch this script with plugins')\n lines[k:k] = _patch_plugins(plugins)\n for n, i, m in plist:\n c = '@' if m[2] == '@' else ''\n lines[n] = lines[n][:i] + c + lines[n][i+len(m):]\n\n if protection:\n n = 0\n for line in lines:\n if line.startswith('# No PyArmor Protection Code') or \\\n line.startswith('# {No PyArmor Protection Code}'):\n break\n elif (line.startswith('# {PyArmor Protection Code}')\n or line.startswith(\"if __name__ == '__main__':\")\n or line.startswith('if __name__ == \"__main__\":')):\n logging.info('Patch this entry script with protection code')\n if os.path.exists(protection):\n logging.info('Use template: %s', protection)\n with open(protection) as f:\n lines[n:n] = [f.read()]\n else:\n lines[n:n] = [protection]\n break\n n += 1\n\n if hasattr(sys, '_debug_pyarmor') and (protection or plugins):\n patched_script = filename + '.pyarmor-patched'\n logging.info('Write patched script for debugging: %s', patched_script)\n with open(patched_script, 'w') as f:\n f.write(''.join(lines))\n\n modname = _frozen_modname(filename, destname)\n co = compile(''.join(lines), modname, 'exec')\n\n if (adv_mode & 0x7) > 1 and sys.version_info[0] > 2:\n co = 
_check_code_object_for_super_mode(co, lines, modname)\n\n flags = obf_code | obf_mod << 8 | (wrap_mode | (adv_mode << 4)) << 16 | \\\n ((0x34 if rest_mode == 5 else 0xB0 if rest_mode == 4\n else 0xF0 if rest_mode == 3 else 0x70 if rest_mode == 2\n else 0x10 if rest_mode else 0) | (8 if entry else 0)) << 24\n s = pytransform.encrypt_code_object(pubkey, co, flags, suffix=suffix)\n\n with open(destname, 'w') as f:\n f.write(s.decode())\n\n\ndef get_product_key(capsule):\n return ZipFile(capsule).read('product.key')\n\n\ndef upgrade_capsule(capsule):\n myzip = ZipFile(capsule, 'r')\n try:\n if 'pytransform.key' in myzip.namelist():\n logging.info('The capsule is latest, nothing to do')\n return\n logging.info('Read product key from old capsule')\n pubkey = myzip.read('product.key')\n finally:\n myzip.close()\n\n myzip = ZipFile(capsule, 'a')\n try:\n logging.info('Generate new key')\n licfile = os.path.join(PYARMOR_PATH, 'license.lic')\n _, newkey = pytransform._generate_pytransform_key(licfile, pubkey)\n logging.info('Write new key pytransform.key to the capsule')\n myzip.writestr('pytransform.key', newkey)\n finally:\n myzip.close()\n\n logging.info('Upgrade capsule OK.')\n\n\ndef load_config(filename):\n if os.path.exists(filename):\n with open(filename, 'r') as f:\n cfg = json_loads(f.read())\n else:\n cfg = {}\n return cfg\n\n\ndef save_config(cfg, filename=None):\n s = json_dumps(cfg, indent=2)\n with open(filename, 'w') as f:\n f.write(s)\n\n\ndef query_keyinfo(key):\n try:\n from urllib.parse import urlencode\n except ImportError:\n from urllib import urlencode\n\n licfile = os.path.join(PYARMOR_PATH, 'license.lic')\n if not os.path.exists(licfile):\n licfile = os.path.join(HOME_PATH, 'license.lic')\n logging.debug('Got license data from %s', licfile)\n with open(licfile) as f:\n licdata = urlencode({'rcode': f.read()}).encode('utf-8')\n\n try:\n logging.debug('Query url: %s', key_url % key)\n res = _urlopen(key_url % key, licdata, timeout=6.0)\n data = json_loads(res.read().decode())\n except Exception as e:\n return '\\nError: %s' % str(e)\n\n name = data['name']\n email = data['email']\n if name and email:\n return 'This code is authorized to \"%s <%s>\"\\n\\n' \\\n 'Note: the registration name and email are got from ' \\\n 'remote server and shown here only, they will not be used ' \\\n 'anywhere else. But the code \"%s\" will be distributed ' \\\n 'with obfusated scripts.' % (name, email, key)\n\n if 'error' in data:\n return '\\nError: %s' % data['error']\n\n return '\\nError: this code may NOT be issued by PyArmor officially.' 
\\\n '\\nPlease contact the author <NAME> <<EMAIL>>'\n\n\ndef activate_regcode(ucode):\n res = _urlopen(reg_url % ucode, timeout=6.0)\n if res is None:\n raise RuntimeError('Activate registration code failed, '\n 'got nothing from server')\n\n if res.code != 200:\n data = res.read().decode()\n raise RuntimeError('Activate registration code failed: %s' % data)\n\n data = res.read()\n dis = res.headers.get('Content-Disposition')\n filename = dis.split('\"')[1] if dis else 'pyarmor-regfile-1.zip'\n with open(filename, 'wb') as f:\n f.write(data)\n\n return filename\n\n\ndef register_keyfile(filename, legency=False):\n if (not legency) and \\\n not os.getenv('PYARMOR_HOME',\n os.getenv('HOME', os.getenv('USERPROFILE'))):\n logging.debug('Force traditional way because no HOME set')\n legency = True\n old_path = HOME_PATH if legency else PYARMOR_PATH\n old_license = os.path.join(old_path, 'license.lic')\n if os.path.exists(old_license):\n logging.info('Remove old license file `%s`', old_license)\n os.remove(old_license)\n\n path = PYARMOR_PATH if legency else HOME_PATH\n if not os.path.exists(path):\n logging.info('Create path: %s', path)\n os.makedirs(path)\n logging.info('Save registration data to: %s', path)\n f = ZipFile(filename, 'r')\n try:\n for item in ('license.lic', '.pyarmor_capsule.zip'):\n logging.info('Extracting %s' % item)\n f.extract(item, path=path)\n finally:\n f.close()\n\n\ndef relpath(path, start=os.curdir):\n try:\n r = os.path.relpath(path, start)\n return path if r.count('..') > 1 else r\n except Exception:\n return path\n\n\ndef _reboot_pytransform(platid):\n os.putenv('PYARMOR_PLATFORM', platid)\n if sys.platform == 'win32' and sys.argv[0].endswith('pyarmor'):\n p = Popen(sys.argv)\n else:\n p = Popen([sys.executable] + sys.argv)\n p.wait()\n return p.returncode\n\n\ndef _get_preferred_platid(platname, features=None):\n if os.path.isabs(platname) or os.path.isfile(platname):\n return platname\n\n nlist = platname.split('.')\n name = '.'.join(nlist[:2])\n\n if name in ('linux.arm', 'linux.ppc64', 'linux.mips64',\n 'linux.mips64el', 'musl.arm', 'musl.mips32',\n 'freebsd.x86_64', 'android.aarch64',\n 'android.x86', 'android.x86_64',\n 'poky.x86', 'vs2015.x86_64', 'vs2015.x86'):\n if features and '0' not in features:\n raise RuntimeError('No feature %s for platform %s', features, name)\n features = ['0']\n\n elif len(nlist) > 2:\n if features and nlist[2] not in features:\n raise RuntimeError('Feature conflicts for platname %s', name)\n features = nlist[2:3]\n\n elif features is None:\n features = ['7', '3', '0']\n\n pyver = None\n if '8' in features or '11' in features or '25' in features:\n pyver = 'py%d%d' % sys.version_info[:2]\n\n plist = [x['id'] for x in _get_platform_list() if x['name'] == name]\n for platid in plist:\n ns = [str(x) for x in platid.split('.')]\n if (features is None or str(ns[2]) in features) \\\n and (pyver is None or pyver in ns[3:]):\n return platid\n\n\ndef check_cross_platform(platforms, supermode=False, vmode=False):\n if not platforms:\n platforms = []\n fn1 = pytransform.version_info()[2]\n\n features = None\n if vmode:\n features = ['25' if supermode else '21']\n if sys.platform not in ('win32',):\n raise RuntimeError('VM Protect mode only works for Windows')\n for platid in platforms:\n if not platid.startswith('windows'):\n raise RuntimeError('VM Protect mode only works for Windows')\n nlist = platid.split('.')\n if len(nlist) > 2:\n raise RuntimeError('Invalid platform name for VM mode')\n if not len(platforms):\n platforms = 
[_format_platid()]\n elif supermode:\n features = ['11' if (fn1 & FEATURE_JIT) else '8']\n if not len(platforms):\n v = 'py%d%d' % sys.version_info[:2]\n platforms = ['.'.join([_format_platid(), features[0], v])]\n\n result = []\n for name in platforms:\n platid = _get_preferred_platid(name, features=features)\n if platid is None:\n msg = 'default' if features is None else features\n raise RuntimeError('No available dynamic library for %s '\n 'with features %s' % (name, msg))\n result.append(platid)\n\n reboot = None\n if result and not (os.path.isabs(result[0]) or os.path.isfile(result[0])):\n platid = result[0]\n nlist = platid.split('.')\n fn2 = int(nlist[2])\n if fn2 in (21, 25):\n n = 21\n elif fn2 in (0, 8):\n n = 0\n else:\n n = 7\n if (n != fn1) and not (n & fn1 & 0x12):\n if n == 7 and _format_platid().split('.')[1] in (\n 'armv6', 'armv7', 'aarch32', 'aarch64'):\n n = 3\n reboot = '.'.join([_format_platid(), str(n)])\n os.environ['PYARMOR_PLATFORM'] = reboot\n\n logging.info('Update target platforms to: %s', result)\n for p in result[1:]:\n fn3 = int(p.split('.')[2])\n if (n != fn3) and not (n & fn3):\n raise RuntimeError('Multi platforms conflict, platform %s'\n ' could not mixed with %s' % (p, platid))\n\n if reboot:\n logging.info('====================================================')\n logging.info('Reload PyArmor with platform: %s', reboot)\n logging.info('====================================================')\n pytransform_bootstrap(force=True)\n # _reboot_pytransform(reboot)\n # return False\n\n return result\n\n\ndef compatible_platform_names(platforms):\n '''Only for compatibility, it may be removed in next major version.'''\n if not platforms:\n return platforms\n\n old_forms = {\n 'armv5': 'linux.arm',\n 'ppc64le': 'linux.ppc64',\n 'ios.arm64': 'ios.aarch64',\n 'darwin.arm64': 'darwin.aarch64',\n 'freebsd': 'freebsd.x86_64',\n 'alpine': 'musl.x86_64',\n 'alpine.arm': 'musl.arm',\n 'alpine.x86_64': 'musl.x86_64',\n 'poky-i586': 'poky.x86',\n }\n\n result = []\n for names in platforms:\n for name in names.split(','):\n name = name.strip()\n if name in old_forms:\n logging.warning(\n 'This platform name `%s` has been deprecated, '\n 'use `%s` instead. 
Display all standard platform '\n 'names by `pyarmor download --help-platform`',\n name, old_forms[name])\n result.append(old_forms[name])\n else:\n result.append(name)\n return result\n\n\ndef make_bootstrap_script(output, capsule=None, relative=None, suffix=''):\n filename = os.path.basename(output)\n co = compile('', filename, 'exec')\n flags = 0x18000000\n prokey = get_product_key(capsule)\n buf = pytransform.encrypt_code_object(prokey, co, flags, suffix=suffix)\n with open(output, 'w') as f:\n f.write(buf.decode())\n _make_entry(output, relative=relative, suffix=suffix)\n\n\ndef get_name_suffix():\n rcode = get_registration_code()\n if rcode is None:\n return ''\n\n m, n = rcode.replace('-sn-1.txt', '').split('-')[-2:]\n d = {\n 'vax': 'vax',\n 'clickbank': 'vac',\n 'shareit': 'vas',\n 'regnow': 'var',\n 'Pyarmor': 'vad',\n }\n if len(n) > 6:\n n = n[-6:]\n pad = '0' * (6 - len(n))\n return '_'.join(['', d.get(m, 'unk'), pad + n])\n\n\ndef get_bind_key(filename):\n if not os.path.exists(filename):\n raise RuntimeError('Bind file %s not found' % filename)\n\n with open(filename, 'rb') as f:\n buf = f.read()\n size = len(buf) >> 2\n fmt = 'I' * size\n return sum(struct.unpack(fmt, buf[:size*4]))\n\n\ndef make_super_bootstrap(source, filename, output, relative=None, suffix=''):\n pkg = os.path.basename(filename) == '__init__.py'\n level = ''\n if (relative is True) or ((relative is None) and pkg):\n n = len(filename[len(output)+1:].replace('\\\\', '/').split('/'))\n level = '.' * n\n bootstrap = 'from %spytransform%s import pyarmor\\n' % (level, suffix)\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n for line in lines:\n if line.startswith(bootstrap):\n return\n\n lines.insert(0, bootstrap)\n\n shell = _get_script_shell(source)\n if shell:\n lines.insert(0, shell)\n\n with open(filename, 'w') as f:\n f.write(''.join(lines))\n\n\ndef _patch_extension(filename, keylist, suffix=''):\n logging.debug('Patching %s', relpath(filename))\n patkey = b'\\<KEY>'\n patlen = len(patkey)\n sizelist = [len(x) for x in keylist]\n big_endian = False\n\n def write_integer(data, offset, value):\n if big_endian:\n offset += 3\n step = -1\n else:\n step = 1\n for i in range(4):\n data[offset] = value & 0xFF\n offset += step\n value >>= 8\n\n with open(filename, 'rb') as f:\n data = bytearray(f.read())\n\n n = len(data)\n for i in range(n):\n if data[i:i+patlen] == patkey:\n fmt = 'I' * 8\n header = struct.unpack(fmt, bytes(data[i:i+32]))\n if sum(header[2:]) not in (912, 1452):\n continue\n logging.debug('Found pattern at %x', i)\n max_size = header[1]\n if sum(sizelist) > max_size:\n raise RuntimeError('Too much license data')\n\n break\n else:\n # Maybe big endian\n patkey = b'\\<KEY>'\n for i in range(n):\n if data[i:i+patlen] == patkey:\n fmt = 'I' * 8\n header = struct.unpack('>' + fmt, bytes(data[i:i+32]))\n if sum(header[2:]) not in (912, 1452):\n continue\n logging.debug('Found pattern at %x', i)\n max_size = header[1]\n if sum(sizelist) > max_size:\n raise RuntimeError('Too much license data')\n big_endian = True\n break\n else:\n raise RuntimeError('Invalid extension, no data found')\n\n write_integer(data, i + 12, sizelist[0])\n write_integer(data, i + 16, sizelist[0])\n write_integer(data, i + 20, sizelist[1])\n write_integer(data, i + 24, sizelist[0] + sizelist[1])\n write_integer(data, i + 28, sizelist[2])\n\n offset = i + 32\n for j in range(3):\n size = sizelist[j]\n if size:\n logging.debug('Patch %d bytes from %x', size, offset)\n data[offset:offset+size] = keylist[j]\n offset 
+= size\n\n logging.info('Patch library file OK')\n\n if suffix:\n marker = bytes(b'_vax_000000')\n k = len(marker)\n for i in range(n):\n if data[i:i+k] == marker:\n logging.debug('Found marker at %x', i)\n data[i:i+k] = bytes(suffix.encode())\n\n if filename.endswith('.so'):\n _fix_up_gnu_hash(data, suffix)\n\n return data\n\n\ndef _build_keylist(capsule, licfile):\n myzip = ZipFile(capsule, 'r')\n if 'pytransform.key' not in myzip.namelist():\n raise RuntimeError('No pytransform.key found in capsule')\n logging.info('Extract pytransform.key')\n keydata = myzip.read('pytransform.key')\n myzip.close()\n\n lickey = _build_license_file(capsule, licfile)\n\n if sys.version_info[0] == 2:\n size1 = ord(keydata[0]) + ord(keydata[1]) * 256\n size2 = ord(keydata[2]) + ord(keydata[3]) * 256\n else:\n size1 = keydata[0] + keydata[1] * 256\n size2 = keydata[2] + keydata[3] * 256\n\n k1 = 16\n k2 = k1 + size1\n\n return keydata[k1:k2], keydata[k2:k2+size2], lickey\n\n\ndef _make_super_runtime(capsule, output, platforms, licfile=None, suffix=''):\n logging.info('Generating super runtime library to \"%s\"', relpath(output))\n makedirs(output, exist_ok=True)\n\n if not platforms:\n raise RuntimeError('No platform specified in Super mode')\n elif len(platforms) == 1:\n filelist = _build_platforms(platforms)[:1]\n else:\n filelist = _build_platforms(platforms)\n\n keylist = _build_keylist(capsule, licfile)\n namelist = []\n for filename in filelist:\n name = os.path.basename(filename)\n if name in namelist:\n return _package_super_runtime(output, platforms, filelist, keylist,\n suffix)\n namelist.append(name)\n\n checklist = []\n for filename in filelist:\n logging.info('Copying %s', filename)\n\n name = os.path.basename(filename)\n if suffix:\n k = name.rfind('pytransform') + len('pytransform')\n name = name[:k] + suffix + name[k:]\n logging.info('Rename extension to %s', name)\n\n target = os.path.join(output, name)\n shutil.copy2(filename, target)\n\n logging.info('Patch extension %s', target)\n data = _patch_extension(target, keylist, suffix)\n\n with open(target, 'wb') as f:\n f.write(data)\n checklist.append(sum(bytearray(data)))\n\n logging.info('Generate runtime files OK')\n return checklist\n\n\ndef _package_super_runtime(output, platforms, filelist, keylist, suffix):\n output = os.path.join(output, 'pytransform' + suffix)\n logging.info('Make package path %s', os.path.basename(output))\n makedirs(output, exist_ok=True)\n\n src = os.path.join(PYARMOR_PATH, 'helper', 'superuntime.py')\n dst = os.path.join(output, '__init__.py')\n logging.info('Copying %s', src)\n logging.info('To %s', dst)\n shutil.copy2(src, dst)\n\n checklist = []\n for platname, filename in zip(platforms, filelist):\n logging.info('Copying %s', filename)\n if os.path.isfile(platname):\n raise RuntimeError('Unknown standard platform \"%s\"' % platname)\n path = '_'.join(platname.split('.')[:2])\n name = os.path.basename(filename)\n target = os.path.join(output, path, name)\n makedirs(os.path.dirname(target), exist_ok=True)\n shutil.copy2(filename, target)\n\n logging.info('Patch extension %s', target)\n data = _patch_extension(target, keylist, suffix)\n\n with open(target, 'wb') as f:\n f.write(data)\n checklist.append(sum(bytearray(data)))\n\n logging.info('Generate super runtime package OK')\n return checklist\n\n\ndef _make_protection_code2(relative, checklist, suffix=''):\n template = os.path.join(PYARMOR_PATH, protect_code_template % '2')\n logging.info('Use protection template: %s', relpath(template))\n with open(template) 
as f:\n buf = f.read()\n\n return buf.format(relative='from . ' if relative else '',\n checklist=checklist, suffix=suffix)\n\n\ndef make_protection_code(args, multiple=False, supermode=False):\n return _make_protection_code2(*args) if supermode \\\n else _make_protection_code(*args, multiple=multiple)\n\n\ndef _check_code_object_for_super_mode(co, lines, name):\n from dis import hasjabs, hasjrel, get_instructions\n HEADER_SIZE = 8\n hasjins = hasjabs + hasjrel\n\n def is_special_code_object(co):\n has_special_jabs = False\n has_header_label = True if co.co_code[6:7] == b'\\x90' else False\n for ins in get_instructions(co):\n if ins.opcode in hasjabs and \\\n (ins.arg & ~0xF) in (0xF0, 0xFFF0, 0xFFFFF0):\n has_special_jabs = True\n if has_header_label:\n if has_special_jabs:\n return True\n continue\n if ins.offset < HEADER_SIZE:\n if ins.is_jump_target or ins.opcode in hasjins:\n has_header_label = True\n elif not has_header_label:\n break\n\n def check_code_object(co):\n co_list = [co] if is_special_code_object(co) else []\n for obj in [x for x in co.co_consts if hasattr(x, 'co_code')]:\n co_list.extend(check_code_object(obj))\n return co_list\n\n co_list = check_code_object(co)\n if co_list:\n pat = re.compile(r'^\\s*')\n for c in co_list:\n # In some cases, co_lnotab[1] is not the first statement\n i = c.co_firstlineno - 1\n k = i + c.co_lnotab[1]\n while i < k:\n s = lines[i].strip()\n j = s.find('#')\n if j > 0 and s[j:].find('\"') == -1 and s[j:].find(\"'\") == -1:\n s = s[:j].strip()\n if s.endswith('):') or (s.endswith(':') and s.find('->') > -1):\n break\n i += 1\n else:\n logging.error('Function does not end with \"):\"')\n raise RuntimeError('Patch function \"%s\" failed' % c.co_name)\n i += 1\n docs = c.co_consts[0]\n n_docs = len(docs.splitlines()) if isinstance(docs, str) else 0\n while i < k:\n if lines[i].strip():\n if n_docs:\n i += n_docs\n n_docs = 0\n continue\n break\n i += 1\n logging.info('\\tPatch function \"%s\" at line %s', c.co_name, i + 1)\n s = lines[i]\n indent = pat.match(s).group(0)\n lines[i] = '%s[None, None]\\n%s' % (indent, s)\n co = compile(''.join(lines), name, 'exec')\n\n return co\n\n\ndef _urlopen(*args, **kwargs):\n try:\n return urlopen(*args, **kwargs)\n except Exception:\n from ssl import _create_unverified_context\n kwargs['context'] = _create_unverified_context()\n return urlopen(*args, **kwargs)\n\n\ndef makedirs(path, exist_ok=False):\n if not (exist_ok and os.path.exists(path)):\n os.makedirs(path)\n\n\ndef _fix_up_gnu_hash(data, suffix):\n n = 0x200\n fmt = 'I' * n\n arr = struct.unpack(fmt, bytes(data[:n*4]))\n\n # ix, kx, key, prefix = (0, 0, 0xb4239787, 'PyInit_')\n # if sys.version_info[0] == 3 else (2, 0, 0xe746a6ab, 'init')\n ix, kx, key, prefix = (0, 2, 0xb4270e0b, 'PyInit_') \\\n if sys.version_info[0] == 3 else (2, 0, 0xe746a6ab, 'init')\n\n symhash = 5381\n for c in ''.join([prefix, 'pytransform', suffix]):\n symhash = symhash * 33 + ord(c)\n symhash &= 0xffffffff\n\n nx = symhash % 3\n i = 0\n\n def write_integer(buf, offset, value):\n for j in range(offset, offset + 4):\n buf[j] = value & 0xFF\n value >>= 8\n\n while True:\n try:\n i = arr.index(key, i)\n except Exception:\n return\n\n k = i + kx\n if (arr[k-12] == 3 and arr[k-10] == 1 and arr[k-9] == 6) \\\n or (arr[k-11] == 3 and arr[k-9] == 1 and arr[k-8] == 5):\n logging.debug('Fix suffix symbol hash at %s', k)\n write_integer(data, (k if ix else (k-3))*4, symhash)\n write_integer(data, (k-6+nx)*4, arr[k-6+ix])\n\n write_integer(data, (k-7)*4, 0xffffffff)\n if arr[k-9] == 
6:\n write_integer(data, (k-8)*4, 0xffffffff)\n\n i += 1\n", "id": "2390613", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/utils.py" } ]
0
brandiweekes
[ { "content": "from os import chdir, mkdir, getcwd\nfrom os.path import isfile, isdir\nfrom sys import argv\n\n# Add argument to clone existing Calculator if they are similar in function.\ncalcName = argv[1]\n\ncontrollerString = '''using System;\nusing Microsoft.AspNetCore.Mvc;\nusing CalculatorCode.Models;\n\nnamespace CalculatorCode.Controllers\n{{\n public class {0}CalculatorController : Controller\n {{\n\n public IActionResult Index()\n {{\n SetViewBagValues();\n return View();\n }}\n\n [HttpPost]\n [ValidateAntiForgeryToken]\n public IActionResult Index({0}Calculator model)\n {{\n SetViewBagValues();\n if (!ModelState.IsValid)\n {{\n return View();\n }}\n\n String selectedValue = model.SelectedOperation;\n\n switch(selectedValue)\n {{\n case \"Add\" : \n break;\n case \"Subtract\" : \n break;\n case \"Multiply\" : \n break;\n case \"Divide\": \n break;\n }}\n\n return View(model);\n }}\n\n public void SetViewBagValues()\n {{\n ViewBag.Code = AlgorithmCode;\n ViewBag.Instructions = \"INSERT INSTRUCTIONS HERE\";\n }}\n\n public const String AlgorithmCode =\n@\"INSERT CODE HERE\";\n }}\n}}'''.format(calcName)\n\nviewString = '''@model CalculatorCode.Models.{0}Calculator\n\n@{{\n ViewData[\"Title\"] = \"{0} Calculator\";\n\n String algorithmCode = ViewBag.Code;\n}}\n\n\n<body>\n\n @using (Html.BeginForm())\n {{\n\t\t@Html.DisplayNameFor(model => model.InputOne)\n\t\t<span> <br /> </span>\n @Html.EditorFor(model => model.InputOne)\n <span> <br /> </span>\n\t\t@Html.DisplayNameFor(model => model.InputTwo)\n\t\t<span> <br /> </span>\n @Html.EditorFor(model => model.InputTwo)\n <br />\n @Html.DropDownListFor(model => model.SelectedOperation,\n new SelectList(Enum.GetValues(typeof(Operations))), \"Select Operation\")\n <input type=\"submit\" class=\"btn btn-success\" />\n <span> <br /> </span>\n <h4>Result: </h4>\n @Html.DisplayFor(model => model.Result)\n\n }}\n</body>\n\n'''.format(calcName)\n\nmodelString = '''using System;\nusing System.ComponentModel.DataAnnotations;\nusing Microsoft.AspNetCore.Mvc.Rendering;\n\nnamespace CalculatorCode.Models\n{{\n public class {0}Calculator : Calculator\n {{\n [RegularExpression(@\"^$\", ErrorMessage = \"Please enter a valid number\")]\n new public int InputOne {{ get; set; }}\n\n [RegularExpression(@\"^$\", ErrorMessage = \"Please enter a valid number\")]\n new public int InputTwo {{ get; set; }}\n new public int Result {{ get; set; }}\n\n public String SelectedOperation {{ get; set; }}\n \n public int CalculateAdditionResult()\n {{\n return InputOne + InputTwo;\n }}\n\n public int CalculateSubtractionResult()\n {{\n return InputOne - InputTwo;\n }}\n\n public int CalculateMultiplyResult()\n {{\n return InputOne * InputTwo;\n }}\n\n public int CalculateDivideResult()\n {{\n return InputOne / InputTwo;\n }}\n\n }}\n\n}}'''.format(calcName)\n\n# Enter visual studio project\nprint(\"Creating new MVC with name: {0}\".format(calcName))\nchdir(\".\\Controllers\")\nif (not isfile(calcName + \"CalculatorController.cs\")):\n print(\"Generating Controller in \" + getcwd())\n controllerFile = open(calcName + \"CalculatorController.cs\", 'w')\n controllerFile.write(controllerString)\n controllerFile.close()\nelse:\n print(\"Controller already exists\")\n\nchdir(\"..\")\n\nif (not isfile(calcName + \"Calculator.cs\")):\n chdir(\".\\Models\")\n print(\"Generating Model in \" + getcwd())\n modelFile = open(calcName + \"Calculator.cs\", 'w')\n modelFile.write(modelString)\n modelFile.close()\nelse:\n print(\"Model file already 
exists\")\n\nchdir(\"..\")\n\nchdir(\".\\Views\")\nif (not isdir(calcName + \"Calculator\")):\n mkdir(calcName + \"Calculator\" )\n chdir(\".\\\\{0}\".format(calcName + \"Calculator\"))\n print(\"Generating View in \" + getcwd())\n viewFile = open(\"Index.cshtml\", 'w')\n viewFile.write(viewString)\n viewFile.close()\nelse:\n print(\"Views directory already exists\")\n\nprint(\"COMPLETE\")", "id": "10570473", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "TimeCalculator/newCalculator.py" } ]
0
zurdi15
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom .lib_log_nbz import Logging\n\nlogger = Logging()\n\n\nclass LibB:\n\t\"\"\"Basic library of native functions.\n\n\tThis class contains all the basic functions to interact with the web browser.\n\n\tAttributes:\n\t\tTIME: internal time to wait between some kind of actions\n\t\tSPECIALS: dict of special characters of the keyboard\n\t\turl_retries_set: number of maximum retries to call an url\n\t\turl_retries: counter of retries to call an url\n\t\turl_retries_wait_time: time to wait between url retries\n\t\turl_retries_continute: continue if url fails\n\n\tMethods:\n\t\tset_url_retries\n\t\tget_url\n\t\tfill_field\n\t\tclear_field\n\t\tclick_element\n\t\tselect_option\n\t\twait_time\n\t\tback\n\t\tforward\n\t\trefresh\n\t\tget_text\n\t\tcurrent_url\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Init LibB class with its attributes\"\"\"\n\n\t\tself.TIME = 0.5\n\t\tself.SPECIALS = {\n\t\t\t'ENTER': Keys.ENTER,\n\t\t\t'ESC': Keys.ESCAPE,\n\t\t\t'RETURN': Keys.RETURN,\n\t\t\t'TAB': Keys.TAB,\n\t\t}\n\t\tself.url_retries_set = 1\n\t\tself.url_retries = 1\n\t\tself.url_retries_wait_time = 0\n\t\tself.url_retries_continue = False\n\n\tdef set_url_retries(self, browser, params):\n\t\t\"\"\"Defines url retries options\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: number of maximum url retries\n\t\t\t\t-1: time to wait between url retries\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tself.url_retries = params[0]\n\t\t\tself.url_retries_set = params[0]\n\t\t\tself.url_retries_wait_time = params[1]\n\t\t\tself.url_retries_continue = params[2]\n\t\texcept Exception:\n\t\t\traise Exception('Error setting get url retries: 3 arguments needed')\n\n\tdef get_url(self, browser, params):\n\t\t\"\"\"Open an url\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: url to open\n\t\t\"\"\"\n\n\t\turl = str(params[0])\n\t\tif self.url_retries > 0:\n\t\t\ttry:\n\t\t\t\tlogger.log('NOTE', 'Loading: {url}'.format(url=url))\n\t\t\t\tbrowser.get(url)\n\t\t\t\tself.url_retries = self.url_retries_set\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.log('ERROR', 'Error loading url: {exception}'.format(exception=e))\n\t\t\t\tself.url_retries -= 1\n\t\t\t\tlogger.log('ERROR', 'Error loading url, retries left: {url_retries}, '\n\t\t\t\t\t\t\t\t\t'waiting {time} seconds'.format(url_retries=self.url_retries,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttime=self.url_retries_wait_time))\n\t\t\t\ttime.sleep(self.url_retries_wait_time)\n\t\t\t\tself.get_url(browser, params)\n\t\telse:\n\t\t\tif self.url_retries_continue:\n\t\t\t\tself.url_retries = self.url_retries_set\n\t\t\telse:\n\t\t\t\traise Exception('Get url retries limit exceeded')\n\n\tdef fill_field(self, browser, params):\n\t\t\"\"\"Fill a field with value and/or special-key\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: field xpath\n\t\t\t\t-1: string to fill with\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\txpath = params[0]\n\t\t\tkeys = params[1:]\n\t\texcept LookupError:\n\t\t\traise Exception('Function fill(): at least 2 arguments needed')\n\n\t\ttry:\n\t\t\tfield = browser.find_element_by_xpath(xpath)\n\t\t\tfor key in keys:\n\t\t\t\tkey = str(key)\n\t\t\t\tif key in self.SPECIALS:\n\t\t\t\t\tfield.send_keys(self.SPECIALS[key])\n\t\t\t\t\tif key == 'TAB':\n\t\t\t\t\t\tfield = 
browser.switch_to.active_element\n\t\t\t\t\tlogger.log('NOTE', 'Value: {key}'.format(key=key))\n\t\t\t\telse:\n\t\t\t\t\tfield.send_keys(key)\n\t\t\t\t\tlogger.log('NOTE', 'Value: {key}'.format(key=key))\n\t\t\t\ttime.sleep(self.TIME)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error with field {xpath}: {exception}'.format(xpath=xpath, exception=e))\n\n\t@staticmethod\n\tdef clear_field(browser, params):\n\t\t\"\"\"Clear a field\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: field xpath\n\t\t\"\"\"\n\n\t\txpath = params[0]\n\t\ttry:\n\t\t\tfield = browser.find_element_by_xpath(xpath)\n\t\t\tfield.clear()\n\t\t\tlogger.log('NOTE', 'Field cleared')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error with field {xpath}: {exception}'.format(xpath=xpath,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\tdef click_element(self, browser, params):\n\t\t\"\"\"Click an element\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: element xpath\n\t\t\"\"\"\n\n\t\txpath = params[0]\n\t\ttry:\n\t\t\telement = browser.find_element_by_xpath(xpath)\n\t\t\tif element.text:\n\t\t\t\tlogger.log('NOTE', 'Button clicked {text}'.format(text=element.text))\n\t\t\telif element.get_attribute('value'):\n\t\t\t\tlogger.log('NOTE', 'Button clicked {text}'.format(text=element.get_attribute('value')))\n\t\t\telse:\n\t\t\t\tlogger.log('NOTE', 'Button clicked')\n\t\t\telement.click()\n\t\t\ttime.sleep(self.TIME)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error with button {xpath}: {exception}'.format(xpath=xpath,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\tdef select_option(self, browser, params):\n\t\t\"\"\"Select an option from a selector\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: selector xpath\n\t\t\t\t-1: option xpath\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tselector_xpath = params[0]\n\t\t\toption_xpath = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function select(): 2 arguments needed')\n\n\t\ttry:\n\t\t\tselect = browser.find_element_by_xpath(selector_xpath)\n\t\t\tselect.click()\n\t\t\ttime.sleep(self.TIME)\n\t\t\toption = browser.find_element_by_xpath(option_xpath)\n\t\t\tlogger.log('NOTE', 'Option selected {option}'.format(option=option.text))\n\t\t\toption.click()\n\t\t\ttime.sleep(self.TIME)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error with selector {selector_xpath}: '\n\t\t\t\t\t\t\t'{exception}'.format(selector_xpath=selector_xpath,\n\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef wait_time(browser, params):\n\t\t\"\"\"Just wait given seconds\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: seconds to wait\n\t\t\"\"\"\n\n\t\twait_time = params[0]\n\t\ttry:\n\t\t\tlogger.log('NOTE', 'Waiting {wait_time} seconds'.format(wait_time=wait_time))\n\t\t\ttime.sleep(float(wait_time))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error in explicit waiting: {exception}'.format(exception=e))\n\n\tdef back(self, browser, params):\n\t\t\"\"\"Go back in browser history\n\n\t\t Args:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.back()\n\t\t\tlogger.log('NOTE', 'Going back')\n\t\t\ttime.sleep(self.TIME)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error going back: {exception}'.format(exception=e))\n\n\tdef forward(self, browser, params):\n\t\t\"\"\"Go forward in 
browser history\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.forward()\n\t\t\tlogger.log('NOTE', 'Going forward')\n\t\t\ttime.sleep(self.TIME)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error going forward: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef refresh(browser, params):\n\t\t\"\"\"Refresh the page\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.refresh()\n\t\t\tlogger.log('NOTE', 'Refreshing...')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error refreshing the web page: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_text(browser, params):\n\t\t\"\"\"Returns text from selected element\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: web element of which we want the text\n\t\tReturns:\n\t\t\tPlain text of the web element\n\t\t\"\"\"\n\n\t\tweb_element = params[0]\n\t\ttry:\n\t\t\telement = browser.find_element_by_xpath(web_element)\n\t\t\tlogger.log('NOTE', 'Getting element: {web_element}'.format(web_element=web_element))\n\t\t\treturn element.text\n\t\texcept Exception as e:\n\t\t\traise Exception('Error with element {web_element}: {exception}'.format(web_element=web_element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef current_url(browser, params):\n\t\t\"\"\"Returns current url\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\tReturns:\n\t\t\tCurrent url\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\treturn browser.current_url\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting current url: {exception}'.format(exception=e))\n", "id": "3007311", "language": "Python", "matching_score": 2.7680513858795166, "max_stars_count": 4, "path": "src/lib/lib_b_nbz.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport os\nimport time\nimport datetime\nfrom datetime import datetime\nfrom random import randint\nimport re\nfrom .lib_log_nbz import Logging\n\nlogger = Logging()\n\n\nclass LibA:\n\t\"\"\"Advanced library of native functions.\n\n\tThis class contains all the advanced functions to interact with the web browser.\n\n\tAttributes:\n\t\tscroll: an integer where the actual scroll position of the web browser is stored\n\n\tMethods:\n\t\tprint_\n\t\trandom\n\t\tget_timestamp\n\t\ttimestamp_diff\n\t\topen_file\n\t\twrite_file\n\t\twrite_table_as_csv\n\t\tclose_file\n\t\tget_local_storage\n\t\tset_local_storage\n\t\tget_cookie\n\t\tset_cookie\n\t\tclear_cookies\n\t\tget_element\n\t\tchildren_num\n\t\tpage_load_time\n\t\tscroll_down\n\t\tscroll_to_bottom\n\t\tscroll_up\n\t\tscroll_to_top\n\t\texecute_js\n\t\tset_timeout\n\t\tget_source_html\n\t\texport_source_html\n\t\tget_all_html_links\n\t\tget_element_html\n\t\ttake_screenshot\n\t\tget_parameter\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Init LibA class with scroll at top of the web page\"\"\"\n\n\t\tself.scroll = 0\n\n\t@staticmethod\n\tdef change_tab(browser, params):\n\t\t\"\"\"\n\n\t\t:param browser:\n\t\t:param params:\n\t\t:return:\n\t\t\"\"\"\n\n\t\tbrowser.switch_to.window(browser.window_handles[params[0]])\n\n\t@staticmethod\n\tdef print_(browser, params):\n\t\t\"\"\"Print string in terminal\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: string to be printed\n\t\t\"\"\"\n\n\t\tstring = 
params[0]\n\t\ttry:\n\t\t\tif string is None:\n\t\t\t\tlogger.log('NOTE', '')\n\t\t\telse:\n\t\t\t\tlogger.log('NOTE', string)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error printing {string}: {exception}'.format(string=string, exception=e))\n\n\t@staticmethod\n\tdef random(browser, params):\n\t\t\"\"\"Generate a random number between two numbers.\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: lower limit\n\t\t\t\t-1: higher limit\n\t\tReturns:\n\t\t\tRandom number\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tlower = params[0]\n\t\t\thigher = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function random(): 2 arguments needed')\n\t\ttry:\n\t\t\treturn randint(lower, higher)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting random number: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_timestamp(browser, params):\n\t\t\"\"\"Get actual system timestamp\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: string time format (optional)\n\t\tReturns:\n\t\t\tActual system timestamp string\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tdate_format = params[0]\n\t\texcept:\n\t\t\tdate_format = False\n\t\t\tlogger.log('NOTE', 'Using default timestamp format')\n\t\ttry:\n\t\t\tif not date_format:\n\t\t\t\treturn str(datetime.now())[:-3]\n\t\t\telse:\n\t\t\t\treturn time.strftime(date_format)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting actual timestamp: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef timestamp_diff(browser, params):\n\t\t\"\"\"Return two timestamps difference\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: first timestamp\n\t\t\t\t-1: second timestamp\n\t\tReturns:\n\t\t\tDifference between first timestamp and second timestamp\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tdatetime_1 = params[0]\n\t\t\tdatetime_2 = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function timestamp_diff(): 2 arguments needed')\n\n\t\ttry:\n\t\t\td1 = datetime.strptime(datetime_1, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\td2 = datetime.strptime(datetime_2, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\treturn (d1 - d2).total_seconds()\n\t\texcept Exception as e:\n\t\t\traise Exception('Error calculating date: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef open_file(browser, params):\n\t\t\"\"\"Open selected file in a variable\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file name\n\t\t\t\t-1: open mode (read | write | append)\n\t\tReturns:\n\t\t\tFile open in desired mode\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tfile_name = params[0]\n\t\t\tmode = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function open(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn open(file_name, mode)\n\t\texcept IOError:\n\t\t\treturn open(file_name, \"{}+\".format(mode))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error opening {file_name}: {exception}'.format(file_name=file_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef write_file(browser, params):\n\t\t\"\"\"Write into selected file\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file name\n\t\t\t\t-1: text to write into file\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tfile_name = params[0]\n\t\t\ttext = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function write(): 2 arguments needed')\n\n\t\ttry:\n\t\t\tsentences = 
text.split('\\\\n')\n\t\t\tfor sent in sentences:\n\t\t\t\tfile_name.write(sent + '\\n')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error writing {file_name}: {exception}'.format(file_name=file_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef write_table_as_csv(browser, params):\n\t\t\"\"\"Write table as csv format\n\n\t\tWrite a table from a web page into a csv file, adding some columns into the table if needed\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: table as web element (see getElement())\n\t\t\t\t-1: file to write in\n\t\t\t\t-2: csv delimiter\n\t\t\t\t-3: columns to add to the left\n\t\t\t\t-4: columns to add to the right\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\ttable = params[0]\n\t\t\tfile_ = params[1]\n\t\t\tdelimiter = params[2]\n\t\t\trow_ = ''\n\t\t\tadd_left = params[3]\n\t\t\tadd_right = params[4]\n\t\texcept LookupError:\n\t\t\traise Exception('Function write_table_as_csv(): at least 3 arguments needed')\n\n\t\ttry:\n\t\t\tfor row in table.find_elements_by_tag_name('tr'):\n\t\t\t\tfor cell in row.find_elements_by_tag_name('td'):\n\t\t\t\t\trow_ += cell.text + delimiter\n\t\t\t\tfile_.write(add_left + row_ + add_right + '\\n')\n\t\t\t\trow_ = ''\n\t\texcept Exception as e:\n\t\t\traise Exception('Error writing \"' + str(params[1]) + '\": {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef close_file(browser, params):\n\t\t\"\"\"Close file\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file name\n\t\t\"\"\"\n\n\t\tfile_name = params[0]\n\t\ttry:\n\t\t\tfile_name.close()\n\t\texcept Exception as e:\n\t\t\traise Exception('Error closing {file_name}: {exception}'.format(file_name=file_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef get_local_storage(browser, params):\n\t\t\"\"\"Returns selected item from local storage\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: item name\n\t\tReturns:\n\t\t\tThe value of the item\n\t\t\"\"\"\n\n\t\titem = params[0]\n\t\ttry:\n\t\t\treturn browser.execute_script(\"return localStorage.getItem('{item}');\".format(item=item))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting {item} from local storage: {exception}'.format(item=item,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef set_local_storage(browser, params):\n\t\t\"\"\"Set selected value in selected item from local storage\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: item name\n\t\t\t\t-1: item value\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\titem = params[0]\n\t\t\tvalue = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function set_local_storage(): 2 arguments needed')\n\n\t\ttry:\n\t\t\tbrowser.execute_script(\"localStorage.setItem('{item}', '{value}');\".format(item=item,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t value=value))\n\t\t\tlogger.log('NOTE', 'Setting local storage: {item}={value}'.format(item=item,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t value=value))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error setting {value} in {item} of local storage: {exception}'.format(item=item,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t value=value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef get_cookie(browser, params):\n\t\t\"\"\"Returns selected cookie\n\n\t\tArgs:\n\t\t\tbrowser: web browser 
instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: cookie name\n\t\tReturns:\n\t\t\tThe value of the cookie\n\t\t\"\"\"\n\n\t\tcookie = params[0]\n\t\ttry:\n\t\t\treturn browser.get_cookie(cookie)['value']\n\t\texcept LookupError:\n\t\t\traise Exception('Error getting cookie {cookie}: Cookie not found'.format(cookie=cookie))\n\n\t@staticmethod\n\tdef set_cookie(browser, params):\n\t\t\"\"\"Set value of cookie\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: cookie name\n\t\t\t\t-1: cookie value\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tcookie = params[0]\n\t\t\tvalue = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function set_cookie(): 2 arguments needed')\n\n\t\ttry:\n\t\t\tbrowser.add_cookie({'name': cookie, 'value': value})\n\t\t\tlogger.log('NOTE', 'Setting cookie: {cookie}={value}'.format(cookie=cookie,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t value=value))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error setting cookie {cookie} with {value}: {exception}'.format(cookie=cookie,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t value=value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef clear_cookies(browser, params):\n\t\t\"\"\"Clear all cookies\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: cookie name\n\t\t\t\t-1: cookie value\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.delete_all_cookies()\n\t\t\tlogger.log('NOTE', 'Deleting all cookies...')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error deleting cookies: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_element(browser, params):\n\t\t\"\"\"Get element from web page as web-element\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: web element xpath\n\t\tReturns:\n\t\t\tWeb element\n\t\t\"\"\"\n\n\t\tweb_element = params[0]\n\t\ttry:\n\t\t\treturn browser.find_element_by_xpath(web_element)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error searching element {web_element}: {exception}'.format(web_element=web_element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef children_num(browser, params):\n\t\t\"\"\"Returns the number of child elements of one element\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: web element\n\t\tReturns:\n\t\t\tChildren of web element as integer number\n\t\t\"\"\"\n\n\t\tweb_element = params[0]\n\t\ttry:\n\t\t\tchildren = web_element.find_elements_by_xpath(\".//*\")\n\t\t\treturn len(children)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting element children: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef page_load_time(browser, params):\n\t\t\"\"\"Returns the load time of a web page\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\treturn browser.execute_script('return performance.timing.loadEventEnd - '\n\t\t\t\t\t\t\t\t\t\t 'performance.timing.navigationStart;') / 1000.0\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting load page time: {exception}'.format(exception=e))\n\n\tdef scroll_down(self, browser, params):\n\t\t\"\"\"Scroll down just screen height\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tself.scroll += 1\n\t\t\tbrowser.execute_script(\"window.scrollTo(0, \" + str(700 * self.scroll) + 
\");\")\n\t\t\tlogger.log('NOTE', 'Scrolling down')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error scrolling down: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef scroll_to_bottom(browser, params):\n\t\t\"\"\"Scroll to the bottom of the web page\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t\tlogger.log('NOTE', 'Scrolling down to bottom')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error scrolling down: {exception}'.format(exception=e))\n\n\tdef scroll_up(self, browser, params):\n\t\t\"\"\"Scroll up just screen height\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tself.scroll -= 1\n\t\t\tbrowser.execute_script(\"window.scrollTo(0, \" + str(700 * self.scroll) + \");\")\n\t\t\tlogger.log('NOTE', 'Scrolling up')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error scrolling up: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef scroll_to_top(browser, params):\n\t\t\"\"\"Scroll to the top of the web page\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tbrowser.execute_script(\"window.scrollTo(0, 0);\")\n\t\t\tlogger.log('NOTE', 'Scrolling up to top')\n\t\texcept Exception as e:\n\t\t\traise Exception('Error scrolling top: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef execute_js(browser, params):\n\t\t\"\"\"Execute any javascript instruction on the browser\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: javascript sentence\n\t\t\"\"\"\n\n\t\tscript = params[0]\n\t\ttry:\n\t\t\tlogger.log('NOTE', 'Executing js: {script}'.format(script=script))\n\t\t\treturn browser.execute_script(script)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error executing js: {script}: {exception}'.format(script=script,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef set_timeout(browser, params):\n\t\t\"\"\"Set timeout at loading websites\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: time to wait until timeout (ms)\n\t\t\"\"\"\n\n\t\ttimeout = params[0]\n\t\ttry:\n\t\t\tbrowser.set_page_load_timeout(timeout)\n\t\t\tlogger.log('NOTE', 'Timeout set to: {timeout}'.format(timeout=timeout))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error setting timeout {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef export_source_html(browser, params):\n\t\t\"\"\"Export html webpage into a file\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file path where will be stored\n\t\t\"\"\"\n\n\t\thtml_path = params[0]\n\t\ttry:\n\t\t\thtml = open('{html_path}'.format(html_path=html_path), 'w')\n\t\t\thtml_text = browser.page_source\n\t\t\thtml.write(html_text)\n\t\t\thtml.close()\n\t\t\tlogger.log('NOTE', 'HTML from {current_url} saved on: {html_path}'.format(current_url=browser.current_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t html_path=html_path))\n\t\texcept Exception as e:\n\t\t\traise Exception('Saving html source code: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_all_html_links(browser, params):\n\t\t\"\"\"Get all links from the page html\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\tReturns:\n\t\t\tA list 
of strings (links)\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\thtml = browser.page_source\n\t\t\tlinks = re.findall('\"((http)s?://.*?)\"', html)\n\t\t\tall_links = []\n\t\t\tfor link in links:\n\t\t\t\tif not link[0] in all_links:\n\t\t\t\t\tall_links.append(link[0])\n\t\t\treturn all_links\n\t\texcept Exception as e:\n\t\t\traise Exception('Getting all html links: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_element_html(browser, params):\n\t\t\"\"\"Get html code from web element\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: web element xpath\n\t\tReturns:\n\t\t\tHtml from web element\n\t\t\"\"\"\n\n\t\tweb_element = params[0]\n\t\ttry:\n\t\t\telement = browser.find_element_by_xpath(web_element)\n\t\t\thtml = element.get_attribute('outerHTML')\n\t\t\treturn html\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting html from {web_element}: {exception}'.format(web_element=web_element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef take_screenshot(browser, params):\n\t\t\"\"\"Takes a screenshot of the browser as .png\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file path\n\t\t\"\"\"\n\n\t\tss_path = params[0]\n\t\ttry:\n\t\t\tbrowser.save_screenshot('{ss_path}'.format(ss_path=ss_path))\n\t\t\tlogger.log('NOTE', 'Screenshot from {url} saved on: {ss_path}'.format(url=browser.current_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ss_path=ss_path))\n\t\texcept Exception as e:\n\t\t\traise Exception('Error taking screenshot: {exception}'.format(exception=e))\n\n\n\t@staticmethod\n\tdef wait_for_downloads(browser, params):\n\t\t\"\"\"Wait to all downloads to complete\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters (empty)\n\t\t\"\"\"\n\n\t\tdownloaded = False\n\t\tbrowser_name = browser.capabilities['browserName']\n\n\t\tdef chrome_downloader(downloaded):\n\t\t\tbrowser.get('chrome://downloads')\n\t\t\tlogger.log('NOTE', 'Waiting for downloads...')\n\t\t\twhile not downloaded:\n\t\t\t\tfor item in browser.find_elements_by_css_selector('body/deep/downloads-item'):\n\t\t\t\t\tif 'pause' in item.text.lower():\n\t\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdownloaded = True\n\n\t\tdef firefox_downloader(downloaded):\n\t\t\tlogger.log('ERROR', 'Waiting for download not implemented with firefox')\n\n\t\tdef phantomjs_downloader(downloaded):\n\t\t\tlogger.log('ERROR', 'Waiting for download not implemented with phantomjs')\n\n\t\tdownloader = {\n\t\t\t'chrome': chrome_downloader,\n\t\t\t'firefox': firefox_downloader,\n\t\t\t'phantomjs': phantomjs_downloader\n\t\t}\n\n\t\ttry:\n\t\t\tdownloader[browser_name](downloaded)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error waiting for downloads: {exception}'.format(exception=e))\n\n\n\t@staticmethod\n\tdef get_environment_variable(browser, params):\n\t\t\"\"\"Get an environment variable\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: environment variable name\n\t\tReturns:\n\t\t\tValue of the environment variable\n\t\t\"\"\"\n\n\t\tenvironment_variable = params[0]\n\t\ttry:\n\t\t\treturn os.environ.get(environment_variable)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting environment variable: {exception}'.format(exception=e))\n\n\t@staticmethod\n\tdef get_parameter(script_parameters, params):\n\t\t\"\"\"Get an environment variable\n\n\t\tArgs:\n\t\t\tparams: list of 
parameters\n\t\t\t\t-0: script parameter index\n\t\t\tscript_parameters: list of script parameters\n\t\tReturns:\n\t\t\tValue of the script parameter\n\t\t\"\"\"\n\n\t\tscript_parameter_index = params[0]\n\n\t\ttry:\n\t\t\treturn script_parameters[script_parameter_index]\n\t\texcept IndexError:\n\t\t\traise Exception('Error getting script parameter [{}]'.format(script_parameter_index))\n", "id": "9911804", "language": "Python", "matching_score": 5.787330150604248, "max_stars_count": 4, "path": "src/lib/lib_a_nbz.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n#\n# File where the dict of built-in functions\n# is generated, to be imported by other modules.\n\n\nfrom lib.lib_snf_nbz import LibSnf\nfrom lib.lib_a_nbz import LibA\nfrom lib.lib_b_nbz import LibB\nfrom lib.lib_d_nbz import LibD\n\nlib_snf_nbz = LibSnf()\nlib_a_nbz = LibA()\nlib_b_nbz = LibB()\nlib_d_nbz = LibD()\n\nNATIVES = {\n\n\t# System functions\n\t'browser': '',\n\t'exit': '',\n\n\t# Basic functions\n\t'get_url': lib_b_nbz.get_url,\n\t'set_url_retries': lib_b_nbz.set_url_retries,\n\t'fill': lib_b_nbz.fill_field,\n\t'clear': lib_b_nbz.clear_field,\n\t'click': lib_b_nbz.click_element,\n\t'select': lib_b_nbz.select_option,\n\t'wait': lib_b_nbz.wait_time,\n\t'back': lib_b_nbz.back,\n\t'forward': lib_b_nbz.forward,\n\t'refresh': lib_b_nbz.refresh,\n\t'get_text': lib_b_nbz.get_text,\n\t'current_url': lib_b_nbz.current_url,\n\n\t# Sniffing functions\n\t'check_net': lib_snf_nbz.check_net,\n\t'reset_har': lib_snf_nbz.reset_har,\n\t'export_net_report': lib_snf_nbz.net_report,\n\n\t# Advanced functions\n\t'change_tab': lib_a_nbz.change_tab,\n\t'print': lib_a_nbz.print_,\n\t'random': lib_a_nbz.random,\n\t'get_timestamp': lib_a_nbz.get_timestamp,\n\t'timestamp_diff': lib_a_nbz.timestamp_diff,\n\t'open': lib_a_nbz.open_file,\n\t'write': lib_a_nbz.write_file,\n\t'write_table_as_csv': lib_a_nbz.write_table_as_csv,\n\t'close': lib_a_nbz.close_file,\n\t'get_local_storage': lib_a_nbz.get_local_storage,\n\t'set_local_storage': lib_a_nbz.set_local_storage,\n\t'get_cookie': lib_a_nbz.get_cookie,\n\t'set_cookie': lib_a_nbz.set_cookie,\n\t'clear_cookies': lib_a_nbz.clear_cookies,\n\t'get_element': lib_a_nbz.get_element,\n\t'children_num': lib_a_nbz.children_num,\n\t'page_load_time': lib_a_nbz.page_load_time,\n\t'scroll_down': lib_a_nbz.scroll_down,\n\t'scroll_to_bottom': lib_a_nbz.scroll_to_bottom,\n\t'scroll_up': lib_a_nbz.scroll_up,\n\t'scroll_to_top': lib_a_nbz.scroll_to_top,\n\t'execute_js': lib_a_nbz.execute_js,\n\t'set_timeout': lib_a_nbz.set_timeout,\n\t'export_source_html': lib_a_nbz.export_source_html,\n\t'get_all_html_links': lib_a_nbz.get_all_html_links,\n\t'get_element_html': lib_a_nbz.get_element_html,\n\t'screenshot': lib_a_nbz.take_screenshot,\n\t'wait_for_downloads': lib_a_nbz.wait_for_downloads,\n\t'get_environment_variable': lib_a_nbz.get_environment_variable,\n\t'get_parameter': lib_a_nbz.get_parameter,\n\n\t# Data types functions\n\t'type': lib_d_nbz.var_type,\n\t'int': lib_d_nbz.cast_int,\n\t'float': lib_d_nbz.cast_float,\n\t'str': lib_d_nbz.cast_str,\n\t'sub_str': lib_d_nbz.sub_str,\n\t'len': lib_d_nbz.length,\n\t'find': lib_d_nbz.find,\n\t'find_regex': lib_d_nbz.find_regex,\n\t'replace': lib_d_nbz.replace,\n\t'split': lib_d_nbz.split,\n\t'append_list': lib_d_nbz.append_list,\n\t'update_list': lib_d_nbz.update_list,\n\t'remove_list': lib_d_nbz.remove_list,\n\t'get_element_list': lib_d_nbz.get_element_list,\n\n}\n", "id": "1335796", "language": "Python", "matching_score": 
2.1311798095703125, "max_stars_count": 4, "path": "src/data/natives.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport sys\nfrom io import IOBase\nfrom pprint import pprint\nfrom lib.lib_wb_nbz import LibWb\nfrom lib.lib_log_nbz import Logging\n\nlib_wb_nbz = LibWb()\nlogger = Logging()\n\n\nclass NBZCore:\n \"\"\"Core of the NBZ. This is the module where all the nbz-scripts instructions are executed.\n\n\tIn the executeInstructions() method, are explained all the logical processes to parse\n\tand execute properly the list structure generated by the NBZParser module.\n\n\tAttributes:\n\t\tattributes: dictionary of multiple parameters, paths and structures needed to run the nbz-script\n\t\tstatements: dictionary of multiple nbz-script statements to execute each when needed\n\n\tMethods:\n\t\texecute_instructions\n\t\t_assign\n\t\t_def\n\t\t_func\n\t\t_if\n\t\t_for\n\t\t_while\n\t\tget_values\n\t\"\"\"\n\n def __init__(self, attributes):\n \"\"\"Init NBZCore class with his attributes\"\"\"\n\n self.attributes = attributes\n self.statements = {\n 'assign': self._assign,\n 'def': self._def,\n 'func': self._func,\n 'if': self._if,\n 'for': self._for,\n 'while': self._while\n }\n\n def execute_instructions(self, instruction_set=None):\n \"\"\"Execute each instruction from instruction_set (recursively on flow control sentences)\n\n\t\tThe following structure is how parser converts nbz-script to be executed by this method:\n\t\t\tinstruction[0] -> type:\n\t\t\t\t- assign: instruction[1] -> id\n\t\t\t\t\t\t\t\tinstruction[2] -> value | expression\n\t\t\t\t- def\n\t\t\t\t\t\t\t\tinstruction[1] -> id\n\t\t\t\t\t\t\t\tinstruction[2] -> block of sentences\n\n\t\t\t\t- func: instruction[1] -> id\n\t\t\t\t\t\t\t\tinstruction[2] -> parameters list\n\n\t\t\t\t- if: instruction[1] -> condition\n\t\t\t\t\t\t\t\tinstruction[2] -> block of sentences (if)\n\t\t\t\t\t\t\t\tinstruction[3] -> block of sentences (elif or else)\n\t\t\t\t\t\t\t\t(instruction[4]) -> block of sentences (else)\n\n\t\t\t\t- for(normal): instruction[1] -> start index\n\t\t\t\t\t\t\t\tinstruction[2] -> end index\n\t\t\t\t\t\t\t\tinstruction[3] -> mode (+ | ++ | - | --)\n\t\t\t\t\t\t\t\tinstruction[4] -> block of sentences\n\n\t\t\t\t- for(foreach): instruction[1] -> temporal variable\n\t\t\t\t\t\t\t\tinstruction[2] -> iterable structure\n\t\t\t\t\t\t\t\tinstruction[3] -> block of sentences\n\n\t\t\t\t- while: instruction[1] -> condition\n\t\t\t\t\t\t\t\tinstruction[2] -> block of sentences\n\n\t\tArgs:\n\t\t\tinstruction_set: list of instructions to be executed\n\t\t\"\"\"\n\n # We need to check if this method is called from main script\n # or if it is called from a loop inside the script (like a for loop or a while loop)\n if instruction_set is None:\n instructions = self.attributes['instruction_set']\n else:\n instructions = instruction_set\n for instruction in instructions:\n self.statements[instruction[0]](instruction)\n\n def _assign(self, instruction):\n var_name = instruction[1]\n var_value = instruction[2]\n self.attributes['variables'][var_name] = self.get_value(var_value)\n\n def _def(self, instruction):\n func_name = instruction[1]\n func_instructions = instruction[2]\n self.attributes['USER_FUNC'][func_name] = func_instructions\n\n def _func(self, instruction):\n func_name = instruction[1]\n func_parameters = instruction[2]\n params = []\n for param in func_parameters:\n params.append(self.get_value(param))\n if func_name == 'exit':\n sys.exit(params[0])\n elif func_name == 
'browser':\n try:\n self.attributes['browser'] = lib_wb_nbz.instance_browser(self.attributes['proxy_enabled'], params)\n except Exception as e:\n logger.log('ERROR', 'Error with browser: {exception}'.format(exception=e))\n sys.exit()\n elif func_name == 'export_net_report':\n if self.attributes['proxy_enabled']:\n self.attributes['complete_csv'] \\\n = self.attributes['NATIVES']['export_net_report'](params, self.attributes['script_name'])\n self.attributes['set_net_report'] = True\n else:\n logger.log('ERROR', 'Can\\'t get net report. Proxy not enabled.')\n elif func_name == 'reset_har':\n if self.attributes['proxy_enabled']:\n self.attributes['NATIVES']['reset_har'](self.attributes['set_net_report'],\n self.attributes['complete_csv'],\n self.attributes['browser'].current_url,\n self.attributes['proxy'])\n else:\n logger.log('ERROR', 'Can\\'t reset HAR. Proxy not enabled.')\n elif func_name == 'check_net':\n pass\n elif func_name == 'get_parameter':\n pass\n else:\n try:\n try:\n self.attributes['NATIVES'][func_name](self.attributes['browser'], params)\n except Exception as e:\n logger.log('ERROR', 'Error with function {function}: {exception}'.format(function=func_name,\n exception=e))\n raise Exception(str(e))\n except LookupError:\n try:\n self.execute_instructions(self.attributes['USER_FUNC'][func_name])\n except LookupError:\n logger.log('ERROR', '{func_name} function not defined.'.format(func_name=func_name))\n raise Exception(str(e))\n\n def _if(self, instruction):\n if_condition = self.get_value(instruction[1])\n if_instructions = instruction[2]\n try:\n elif_else_statements = instruction[3]\n else_instructions = instruction[4][0][1]\n except LookupError:\n pass\n if if_condition:\n self.execute_instructions(if_instructions)\n else:\n if len(instruction) == 4: # If statement have elif OR else\n if elif_else_statements[0][0] == 'elif':\n for elif_ in elif_else_statements:\n elif_condition = self.get_value(elif_[1])\n elif_instructions = elif_[2]\n if elif_condition:\n self.execute_instructions(elif_instructions)\n break\n elif elif_else_statements[0][0] == 'else':\n else_instructions = elif_else_statements[0][1]\n self.execute_instructions(else_instructions)\n elif len(instruction) == 5: # If statement have elif AND else\n elif_done = False\n for elif_ in elif_else_statements:\n elif_condition = self.get_value(elif_[1])\n elif_instructions = elif_[2]\n if elif_condition:\n elif_done = True\n self.execute_instructions(elif_instructions)\n break\n if not elif_done:\n self.execute_instructions(else_instructions)\n\n def _for(self, instruction):\n if len(instruction) == 4: # Foreach\n element = self.get_value(instruction[1])\n structure = self.attributes['variables'][self.get_value(instruction[2])]\n foreach_instructions = instruction[3]\n for iterator_element in structure:\n try:\n if isinstance(structure, file):\n self.attributes['variables'][element] = iterator_element[0:-1] # Avoiding newline character\n else:\n self.attributes['variables'][element] = iterator_element # All other structure types\n except NameError:\n if isinstance(structure, IOBase):\n self.attributes['variables'][element] = iterator_element[0:-1] # Avoiding newline character\n else:\n self.attributes['variables'][element] = iterator_element # All other structure types\n self.execute_instructions(foreach_instructions)\n else: # Standard For\n init_index = self.get_value(instruction[1])\n fin_index = self.get_value(instruction[2])\n op_counters = {'+': 1, '++': 2, '-': -1, '--': -2}\n counter = 
op_counters[instruction[3]]\n for_instructions = instruction[4]\n for i in range(init_index, fin_index, counter):\n self.execute_instructions(for_instructions)\n\n def _while(self, instruction):\n while_condition = instruction[1]\n while_instructions = instruction[2]\n while self.get_value(while_condition):\n self.execute_instructions(while_instructions)\n\n def get_value(self, sub_instruction):\n \"\"\"Local function inside executeInstructions() method, that is just used for it.\n\n\t\tGet the value from some distinct structures:\n\t\t\t- direct value or variable value of a parameter\n\t\t\t- resolve arithmetic expressions\n\t\t\t- resolve boolean expressions\n\t\t\t- resolve function return value\n\n\t\tArgs:\n\t\t\tsub_instruction: expression that can be one of the previous described structures.\n\t\tReturns:\n\t\t\tThe value of the expression\n\t\t\"\"\"\n\n try:\n if isinstance(sub_instruction, list):\n if len(sub_instruction) > 0:\n if sub_instruction[0] == 'var':\n return self.attributes['variables'][sub_instruction[1]]\n elif sub_instruction[0] == 'value':\n return sub_instruction[1]\n elif sub_instruction[0] == 'arithm':\n op_1 = self.get_value(sub_instruction[1])\n op_2 = self.get_value(sub_instruction[2])\n if isinstance(op_1, str) or isinstance(op_2, str):\n return eval(\n '\\'{op_1}\\' {operand} \\'{op_2}\\''.format(op_1=self.get_value(sub_instruction[1]),\n operand=sub_instruction[3],\n op_2=self.get_value(sub_instruction[2])))\n else:\n return eval('{op_1} {operand} {op_2}'.format(op_1=self.get_value(sub_instruction[1]),\n operand=sub_instruction[3],\n op_2=self.get_value(sub_instruction[2])))\n elif sub_instruction[0] == 'boolean':\n if sub_instruction[3] != 'not':\n op_1 = self.get_value(sub_instruction[1])\n op_2 = self.get_value(sub_instruction[2])\n if isinstance(op_1, str):\n op_1 = \"'{op_1}'\".format(op_1=op_1)\n if isinstance(op_2, str):\n op_2 = \"'{op_2}'\".format(op_2=op_2)\n return eval('{op_1} {operand} {op_2}'.format(op_1=self.get_value(op_1),\n operand=sub_instruction[3],\n op_2=self.get_value(op_2)))\n else:\n return not self.get_value(sub_instruction[1])\n elif sub_instruction[0] == 'func':\n sub_params = []\n for sub_param in sub_instruction[2]:\n sub_params.append(self.get_value(sub_param))\n try:\n if sub_instruction[1] == 'check_net':\n return self.attributes['NATIVES']['check_net'](self.attributes['proxy'].har,\n sub_params)\n elif sub_instruction[1] == 'get_parameter':\n return self.attributes['NATIVES']['get_parameter'](self.attributes['script_parameters'],\n sub_params)\n else:\n return self.attributes['NATIVES'][sub_instruction[1]](self.attributes['browser'],\n sub_params)\n except Exception as e:\n raise Exception(str(e))\n else:\n return sub_instruction\n else:\n return sub_instruction\n else:\n return sub_instruction\n except Exception as e:\n raise Exception(str(e))\n\n def export_har_log(self):\n \"\"\"Export har log\"\"\"\n\n if self.attributes['browser'] is not None:\n if self.attributes['set_net_report']:\n self.attributes['complete_csv'].write(\n 'URL: {url}\\n\\n'.format(url=self.attributes['browser'].current_url))\n pprint(self.attributes['proxy'].har['log']['entries'], self.attributes['complete_csv'])\n self.attributes['complete_csv'].close()\n logger.log('NOTE', 'Net report csv file exported to: '\n '{net_report_csv}'.format(net_report_csv=self.attributes['complete_csv'].name))\n", "id": "11339217", "language": "Python", "matching_score": 3.8812639713287354, "max_stars_count": 4, "path": "src/nbz_core.py" }, { "content": 
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport sys\nimport os\nimport psutil\nimport argparse\nfrom pyvirtualdisplay import Display\nfrom nbz_core import NBZCore\nfrom parser.nbz_parser import NBZParser\nfrom data.natives import NATIVES\nfrom lib.lib_log_nbz import Logging\n\nlogger = Logging()\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef close_all():\n \"\"\"Close all connections\"\"\"\n\n logs_dir = os.path.join(BASE_DIR, \"logs\")\n if not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n logs = ['server.log', 'bmp.log', 'geckodriver.log', 'ghostdriver.log']\n for log in logs:\n if os.path.isfile(os.path.join(os.getcwd(), log)):\n os.rename(os.path.join(os.getcwd(), log), os.path.join(logs_dir, log))\n root_process = psutil.Process(os.getppid())\n root_children = root_process.children(recursive=True)[1:]\n for child in reversed(root_children):\n os.kill(child.pid, 9)\n\n\nclass NBZInterface:\n \"\"\"Interface between all modules of the nbz.\n\n This class provides all the attributes needed to the core module, using the parser module\n to parse the nbz-script previously. After all script is executed, this class ends all connections.\n\n Attributes:\n core_attributes: dictionary of attributes needed for the core module\n\n Methods:\n compile_script\n \"\"\"\n\n def __init__(self, script, script_parameters, proxy_enabled, debug):\n \"\"\"Init NBZInterface class with some attributes\"\"\"\n\n self.core_attributes = {\n 'instruction_set': [],\n 'variables': {},\n 'NATIVES': NATIVES,\n 'USER_FUNC': {},\n\n 'script': script,\n 'script_name': os.path.basename(script)[0:-4],\n 'script_parameters': script_parameters,\n\n 'browser': [],\n 'proxy_enabled': proxy_enabled,\n\n 'set_net_report': False,\n 'net_reports_path': '',\n 'complete_csv_path': '',\n 'complete_csv': None,\n\n 'debug': debug,\n }\n try:\n logger.log_header()\n self.compile_script()\n nbz_core = NBZCore(self.core_attributes)\n nbz_core.execute_instructions()\n nbz_core.export_har_log()\n logger.log_footer()\n except Exception as e:\n logger.log('ERROR', str(e))\n logger.log_error()\n finally:\n close_all()\n\n def compile_script(self):\n \"\"\"Compile script to be executed.\n\n Returns:\n A lists structure with all the nbz-script converted\n A dict mapping variables of the script and their values\n \"\"\"\n\n try:\n z_code, z_code_vars = NBZParser(self.core_attributes['script'])\n self.core_attributes['instruction_set'] = z_code\n self.core_attributes['variables'] = z_code_vars\n if self.core_attributes['debug']:\n logger.log('NOTE',\n 'Instructions: {instructions}'.format(instructions=self.core_attributes['instruction_set']))\n logger.log('NOTE', 'Variables: {variables}'.format(variables=self.core_attributes['variables']))\n except Exception as e:\n logger.log('ERROR',\n 'Script not compiled ({script}): {exception}'.format(script=self.core_attributes['script'],\n exception=e))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-script\", help=\"script file\", required=False)\n parser.add_argument(\"-script_parameters\", help=\"script parameters\", required=False, nargs='+')\n parser.add_argument(\"-display\", help=\"enable display emulation\", required=False)\n parser.add_argument(\"-resolution\", help=\"set the screen emulator resolution\", required=False)\n parser.add_argument(\"-proxy\", help=\"enable proxy\", required=False)\n parser.add_argument(\"-debug\", help=\"debug mode\", required=False)\n args = parser.parse_args()\n script = 
args.script\n script_parameters = args.script_parameters\n display = args.display\n resolution = args.resolution\n if display == 'true':\n if resolution != 'default':\n resolution = resolution.split('x')\n try:\n display = Display(visible=0, size=(resolution[0], resolution[1]))\n except IndexError:\n logger.log('ERROR', 'Error in resolution parameter. Must be like 1920x1080.')\n sys.exit(4)\n else:\n display = Display(visible=0, size=(2920, 1080))\n display.start()\n proxy_enabled = True if args.proxy == 'true' else False\n debug = True if args.debug == 'true' else False\n NBZInterface(script, script_parameters, proxy_enabled, debug)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "2165065", "language": "Python", "matching_score": 2.8410727977752686, "max_stars_count": 4, "path": "src/nbz_interface.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport os\nfrom pprint import pprint\nfrom .lib_log_nbz import Logging\n\nlogger = Logging()\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nclass LibSnf:\n\t\"\"\"Library of native sniffer functions\n\n\tThis class contains all the sniffer functions to interact with the proxy\n\n\tAttributes:\n\t\tsniffer_attr: dict with all the values that can be get of a http call\n\n\tMethods:\n\t\tcheck_net\n\t\tcheck_net_parameters\n\t\tcheck_net_keywords\n\t\tnet_report\n\t\treset_har\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Init LibSnf class with it attributes\"\"\"\n\n\t\tself.reset_attributes()\n\n\tdef reset_attributes(self):\n\t\tself.sniffer_attr = {\n\t\t\t'request_ok': False,\n\t\t\t'url': '',\n\t\t\t'status_code': '404',\n\t\t\t'timestamp': '',\n\t\t\t'times': 0\n\t\t}\n\n\tdef check_net(self, har, request):\n\t\t\"\"\"General method to select the way to check the HAR file\n\n\t\tArgs:\n\t\t\thar: har proxy file\n\t\t\trequest: some parameters which configure the check of a request\n\t\t\t\t-0: mode of checking (by parameters or by keyword)\n\t\tReturns:\n\t\t\tThe value of the selected parameter of the request to check\n\t\t\"\"\"\n\n\t\tcheck_type = request[0]\n\n\t\tif check_type == 'params':\n\t\t\treturn self.check_net_parameters(har, request)\n\t\telif check_type == 'keyword':\n\t\t\treturn self.check_net_keywords(har, request)\n\t\telse:\n\t\t\traise Exception('Not admitted request type: {type}'.format(type=check_type))\n\n\tdef check_net_parameters(self, har, request):\n\t\t\"\"\"Check if any request had the chosen parameters\n\n\t\tArgs:\n\t\t\thar: har proxy file\n\t\t\trequest: some parameters which configure the check of a request\n\t\t\t\t-1: parameter to return\n\t\t\t\t-2, -n: parameters of the url request\n\t\tReturns:\n\t\t\tValue of the parameter of the selected request\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tattribute = request[1]\n\t\t\tparams = request[2:]\n\t\texcept LookupError:\n\t\t\traise Exception('Function check_net(): at least 3 argument needed')\n\n\t\tfor entry in har['log']['entries']:\n\t\t\tparam_list_aux = entry['request']['url'].split('?')\n\t\t\tif len(param_list_aux) > 1:\n\t\t\t\tparam_list = param_list_aux[1].split('&')\n\t\t\t\tif set(params).issubset(set(param_list)):\n\t\t\t\t\tif attribute == 'times':\n\t\t\t\t\t\tself.sniffer_attr['times'] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.sniffer_attr['request_ok'] = True\n\t\t\t\t\t\tself.sniffer_attr['status_code'] = int(entry['response']['status'])\n\t\t\t\t\t\tself.sniffer_attr['url'] = entry['request']['url']\n\t\t\t\t\t\tself.sniffer_attr['timestamp'] = 
entry['startedDateTime'].replace('T', ' ')[:-6]\n\t\t\t\t\t\tbreak\n\t\ttry:\n\t\t\tattribute = self.sniffer_attr[attribute]\n\t\t\tself.reset_attributes()\n\t\t\treturn attribute\n\t\texcept LookupError:\n\t\t\traise Exception('Check_net() error: can\\'t find {attribute} - '\n\t\t\t\t\t\t\t'invalid parameter to return'.format(attribute=attribute))\n\n\tdef check_net_keywords(self, har, request):\n\t\t\"\"\"Check if any request had the chosen keyword\n\n\t\tArgs:\n\t\t\thar: har proxy file\n\t\t\trequest: some parameters which configure the check of a request\n\t\t\t\t-1: parameter to return\n\t\t\t\t-2: keyword to search\n\t\tReturns:\n\t\t\tValue of the parameter of the selected request\n\t\t\"\"\"\n\n\t\tattribute = request[1]\n\t\tkeyword = request[2]\n\n\t\tfor entry in har['log']['entries']:\n\t\t\tif entry['request']['url'].find(keyword) != -1:\n\t\t\t\tself.sniffer_attr['request_ok'] = True\n\t\t\t\tself.sniffer_attr['status_code'] = int(entry['response']['status'])\n\t\t\t\tself.sniffer_attr['url'] = entry['request']['url']\n\t\t\t\tself.sniffer_attr['timestamp'] = entry['startedDateTime'].replace('T', ' ')[:-6]\n\t\t\t\tbreak\n\t\ttry:\n\t\t\tattribute = self.sniffer_attr[attribute]\n\t\t\tself.reset_attributes()\n\t\t\treturn attribute\n\t\texcept LookupError:\n\t\t\traise Exception('Check_net() error: can\\'t find {attribute} - '\n\t\t\t\t\t\t\t'invalid parameter to return'.format(attribute=attribute))\n\n\t@staticmethod\n\tdef net_report(params, script_name):\n\t\t\"\"\"Create net report csv\n\n\t\tArgs:\n\t\t\tparams: list of parameters\n\t\t\t\t-0: file name\n\t\t\tscript_name: name of the nbz script\n\t\tReturns:\n\t\t\tThe report file opened in write mode\n\t\t\"\"\"\n\t\tfile_name = params[0]\n\n\t\tnet_reports_path = '{base_dir}/net_reports/{script_name}'.format(base_dir=os.path.abspath(os.path.join(BASE_DIR, os.pardir)), script_name=script_name)\n\t\tcomplete_csv_path = '{net_reports_path}/complete_net_log_{report_name}.csv'.format(\n\t\t\tnet_reports_path=net_reports_path,\n\t\t\treport_name=file_name)\n\t\tif not os.path.exists(net_reports_path):\n\t\t\tos.makedirs(net_reports_path)\n\t\treturn open(complete_csv_path, 'w')\n\n\t@staticmethod\n\tdef reset_har(set_net_report, complete_csv, current_url, proxy):\n\t\t\"\"\"Reset proxy's HAR to check new requests\n\n\t\tArgs:\n\t\t\tset_net_report: flag to know if the net report was requested by the user in the nbz-script\n\t\t\tcomplete_csv: file with the complete net report of the script\n\t\t\tcurrent_url: current url of the browser\n\t\t\tproxy: instance of the proxy\n\t\tReturns:\n\t\t\tNew instance of the har file of the proxy\n\t\t\"\"\"\n\n\t\tif set_net_report:\n\t\t\tcomplete_csv.write('URL: {url}\\n\\n'.format(url=current_url))\n\t\t\tpprint(proxy.har['log']['entries'], complete_csv)\n\t\treturn proxy.new_har()\n", "id": "11388026", "language": "Python", "matching_score": 1.321445345878601, "max_stars_count": 4, "path": "src/lib/lib_snf_nbz.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nfrom lib.lib_log_nbz import Logging\nfrom data.natives import NATIVES\nfrom parser.nbz_lexer import tokens\nimport ply.yacc as yacc\n\nlogger = Logging()\n\n\ndef NBZParser(script: str, interactive: bool = False):\n \"\"\"Parser of the nbz-script\n\n\tThis module converts the nbz-script into a structure of lists,\n\tready to be executed by the core module. Each function of this module uses the docstring\n\tto define the parser rules. 
The rules are documented themselves.\n\n\t:param script: path of the nbz-script\n\t:param interactive: flag to call this module into manual user mode or file mode (you can write sentences directly,\n\tor you can pass a nbz-script)\n\t:return A lists structure with all the nbz-script converted; A dict mapping variables of the script and their values:\n\t\"\"\"\n\n # code structure\n code = []\n\n # variables dictionary\n variables = {}\n\n # Functions dictionary\n functions = NATIVES\n\n # Initial state\n def p_code(p):\n \"\"\"code : code sent\n\t\t\t\t| sent\n\t\t\t\t| empty\"\"\"\n if len(p) == 2:\n if p[1] is None:\n p[0] = []\n else:\n p[0] = [p[1]]\n else:\n p[0] = p[1]\n p[0].append(p[2])\n\n def p_sent(p):\n \"\"\"sent : statement\n | instruction SEMI\"\"\"\n p[0] = p[1]\n\n def p_instruction(p):\n \"\"\"instruction : assign\n | function\"\"\"\n p[0] = p[1]\n\n def p_statement(p):\n \"\"\"statement : function_definition\n | for_statement\n | if_statement\n | while_statement\"\"\"\n p[0] = p[1]\n\n def p_sent_for_flow_int(p):\n \"\"\"for_statement : FOR LPAREN for_valid_expr COMMA for_valid_expr COMMA for_valid_iter RPAREN LBRACE code RBRACE\n | FOR LPAREN ID IN ID RPAREN LBRACE code RBRACE\"\"\"\n if len(p) == 10:\n p[0] = ['for', p[3], p[5], p[8]]\n for i in range(0, len(p[8])):\n code.pop()\n code.append(p[0])\n else:\n p[0] = ['for', p[3], p[5], p[7], p[10]]\n for i in range(0, len(p[10])):\n code.pop()\n code.append(p[0])\n\n def p_for_valid_expressions_num(p):\n \"\"\"for_valid_expr : expr_num\n | expr_arithm\"\"\"\n p[0] = p[1]\n\n def p_for_valid_iterators(p):\n \"\"\"for_valid_iter : PLUS\n | PLUSPLUS\n | MINUS\n | MINUSMINUS\"\"\"\n p[0] = p[1]\n\n def p_sent_if_flow(p):\n \"\"\"if_statement : IF LPAREN logic_list RPAREN LBRACE code RBRACE\n | IF LPAREN logic_list RPAREN LBRACE code RBRACE elif_sent\n | IF LPAREN logic_list RPAREN LBRACE code RBRACE ELSE LBRACE code RBRACE\n | IF LPAREN logic_list RPAREN LBRACE code RBRACE elif_sent ELSE LBRACE code RBRACE\"\"\"\n if len(p) == 8: # Only if\n p[0] = ['if', p[3], p[6]]\n for i in range(0, len(p[6])):\n code.pop()\n code.append(p[0])\n elif len(p) == 9: # If + elif\n p[0] = ['if', p[3], p[6], p[8]]\n for i in range(0, len(p[6])):\n code.pop()\n code.append(p[0])\n elif len(p) == 12: # If + else\n p[0] = ['if', p[3], p[6], [['else'] + [p[10]]]]\n for i in range(0, len(p[6])):\n code.pop()\n for i in range(0, len(p[10])):\n code.pop()\n code.append(p[0])\n elif len(p) == 13: # If + elif + else\n if p[8]:\n p[0] = ['if', p[3], p[6], p[8], [['else'] + [p[11]]]]\n else:\n p[0] = ['if', p[3], p[6], [['else'] + [p[11]]]]\n for i in range(0, len(p[6])):\n code.pop()\n for i in range(0, len(p[11])):\n code.pop()\n code.append(p[0])\n\n def p_sent_elif_flow(p):\n \"\"\"elif_sent : ELIF LPAREN logic_list RPAREN LBRACE code RBRACE elif_sent\n | empty\"\"\"\n if len(p) > 2:\n for i in range(0, len(p[6])):\n code.pop()\n if p[8]:\n p[0] = [['elif', p[3], p[6]], p[8][0]]\n else:\n p[0] = [['elif', p[3], p[6]]]\n\n def p_sent_while_flow(p):\n \"\"\"while_statement : WHILE LPAREN logic_list RPAREN LBRACE code RBRACE\"\"\"\n p[0] = ['while', p[3], p[6]]\n for i in range(0, len(p[6])):\n code.pop()\n code.append(p[0])\n\n def p_function_definition(p):\n \"\"\"function_definition : DEF ID LPAREN RPAREN LBRACE code RBRACE\"\"\"\n functions[p[2]] = ''\n p[0] = ['def', p[2], p[6]]\n for sent in p[6]:\n code.pop()\n code.append(p[0])\n\n def p_assign_expr(p):\n \"\"\"assign : ID ASSIGN expr_type\n | ID ASSIGN expr_arithm\n | ID ASSIGN logic_list\n | 
ID ASSIGN expr_list\"\"\"\n p[0] = ['assign', p[1], p[3]]\n variables[p[1]] = ''\n code.append(p[0])\n\n def p_assign_func(p):\n \"\"\"assign : ID ASSIGN function\"\"\"\n code.pop()\n p[0] = ['assign', p[1], p[3]]\n variables[p[1]] = ''\n code.append(p[0])\n\n def p_expr_funcs(p):\n \"\"\"function : ID LPAREN list RPAREN\"\"\"\n try:\n check = functions[p[1]]\n p[0] = ['func', p[1], p[3]]\n code.append(p[0])\n except LookupError:\n raise Exception(f\"Undefined function '{p[1]}' line {p.lineno(1)}\")\n\n def p_list_var(p):\n \"\"\"list : list COMMA ID\n | list COMMA function\n | function\n | ID\"\"\"\n if len(p) == 2:\n if isinstance(p[1], str):\n try:\n check = variables[p[1]]\n p[0] = [['var', p[1]]]\n except LookupError:\n raise Exception(f'Undefined variable \"{p[1]}\" line {p.lineno(1)}')\n\n elif isinstance(p[1], list):\n try:\n check = functions[p[1][1]]\n p[0] = [p[1]]\n code.pop()\n except LookupError:\n raise Exception(f'Undefined function \"{p[1][1]}\" line {p.lineno(1)}')\n\n else:\n p[0] = p[1]\n if isinstance(p[3], str):\n try:\n check = variables[p[3]]\n p[0].append(['var', p[3]])\n except LookupError:\n raise Exception(f'Undefined variable \"{p[3]}\" line {p.lineno(1)}')\n\n elif isinstance(p[3], list):\n try:\n check = functions[p[3][1]]\n p[0].append([p[3]])\n code.pop()\n except LookupError:\n raise Exception(f'Undefined function \"{p[3][1]}\" line {p.lineno(1)}')\n\n def p_list_value(p):\n \"\"\"list : list COMMA expr_type\n | expr_type\n | empty\"\"\"\n if len(p) == 2:\n if p[1] is None:\n p[0] = []\n else:\n p[0] = [['value', p[1]]]\n else:\n p[0] = p[1]\n p[0].append(['value', p[3]])\n\n def p_list_expression(p):\n \"\"\"list : list COMMA expr_arithm\n | list COMMA logic_list\n | expr_arithm\n | logic_list\"\"\"\n if len(p) == 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1]\n p[0].append(p[3])\n\n def p_group_logic_list(p):\n \"\"\"logic_list : LPAREN logic_list RPAREN\"\"\"\n p[0] = p[2]\n\n def p_logic_list(p):\n \"\"\"logic_list : logic_list AND logic_list\n | logic_list OR logic_list\n | expr_bool\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n else:\n if p[2] == 'and':\n p[0] = ['boolean', p[1], p[3], 'and']\n elif p[2] == 'or':\n p[0] = ['boolean', p[1], p[3], 'or']\n\n def p_expr_logical(p):\n \"\"\"expr_bool : expr_bool EQ expr_bool\n | expr_bool LT expr_bool\n | expr_bool LET expr_bool\n | expr_bool GT expr_bool\n | expr_bool GET expr_bool\n | expr_bool DIFF expr_bool\n | NOT expr_bool\"\"\"\n if p[2] == '==':\n p[0] = ['boolean', p[1], p[3], '==']\n elif p[2] == '<':\n p[0] = ['boolean', p[1], p[3], '<']\n elif p[2] == '<=':\n p[0] = ['boolean', p[1], p[3], '<=']\n elif p[2] == '>':\n p[0] = ['boolean', p[1], p[3], '>']\n elif p[2] == '>=':\n p[0] = ['boolean', p[1], p[3], '>=']\n elif p[2] == '!=':\n p[0] = ['boolean', p[1], p[3], '!=']\n else:\n p[0] = ['boolean', p[2], p[2], 'not']\n\n def p_logic_valid_var(p):\n \"\"\"expr_bool : function\"\"\"\n try:\n check = functions[p[1][1]]\n code.pop()\n p[0] = p[1]\n except LookupError:\n raise Exception(f'Undefined function \"{p[1][1]}\" line {p.lineno(1)}')\n\n def p_logic_valid_type(p):\n \"\"\"expr_bool : expr_type\n | expr_arithm\"\"\"\n p[0] = p[1]\n\n def p_group_expr_arithmethic(p):\n \"\"\"expr_arithm : LPAREN expr_arithm RPAREN\"\"\"\n p[0] = p[2]\n\n def p_expr_aritmethic(p):\n \"\"\"expr_arithm : expr_arithm PLUS expr_arithm\n | expr_arithm MINUS expr_arithm\n | expr_arithm MULTIPLY expr_arithm\n | expr_arithm DIVIDE expr_arithm\n | MINUS expr_arithm\"\"\"\n if p[2] == '+':\n p[0] = ['arithm', p[1], p[3], '+']\n 
elif p[2] == '-':\n p[0] = ['arithm', p[1], p[3], '-']\n elif p[2] == '*':\n p[0] = ['arithm', p[1], p[3], '*']\n elif p[2] == '/':\n p[0] = ['arithm', p[1], p[3], '/']\n elif p[1] == '-':\n p[0] = ['arithm', p[2], -1, '*']\n\n def p_arithm_valid_var(p):\n \"\"\"expr_arithm : ID\n | function\"\"\"\n if isinstance(p[1], str):\n try:\n check = variables[p[1]]\n p[0] = ['var', p[1]]\n except LookupError:\n raise Exception(f'Undefined variable \"{p[1]}\" line {p.lineno(1)}')\n\n elif isinstance(p[1], list):\n try:\n check = functions[p[1][1]]\n code.pop()\n p[0] = p[1]\n except LookupError:\n raise Exception(f'Undefined function \"{p[1][1]}\" line {p.lineno(1)}')\n\n def p_arithm_valid_num(p):\n \"\"\"expr_arithm : expr_type\"\"\"\n p[0] = p[1]\n\n def p_sent_index_list(p):\n \"\"\"function : sent_index_list\"\"\"\n p[0] = p[1]\n\n def p_index_list_var(p):\n \"\"\"sent_index_list : sent_index_list LBRACKET ID RBRACKET\n | ID LBRACKET ID RBRACKET\"\"\"\n if not isinstance(p[1], list):\n try:\n check = variables[p[1]]\n except LookupError:\n raise Exception(f'Undefined list \"{p[1]}\" line {p.lineno(1)}')\n\n try:\n check = variables[p[3]]\n except LookupError:\n raise Exception(f'Undefined variable \"{p[3]}\" line {p.lineno(1)}')\n\n p[0] = ['func', 'get_element_list', [['var', p[1]], ['var', p[3]]]]\n code.append(p[0])\n else:\n try:\n check = variables[p[3]]\n except LookupError:\n raise Exception(f'Undefined variable \"{p[3]}\" line {p.lineno(1)}')\n\n p[0] = ['func', 'get_element_list', [p[1], ['var', p[3]]]]\n\n def p_index_list_value(p):\n \"\"\"sent_index_list : sent_index_list LBRACKET INTEGER RBRACKET\n | ID LBRACKET INTEGER RBRACKET\"\"\"\n if not isinstance(p[1], list):\n try:\n check = variables[p[1]]\n p[0] = ['func', 'get_element_list', [['var', p[1]], p[3]]]\n code.append(p[0])\n except LookupError:\n raise Exception(f'Undefined list \"{p[1]}\" line {p.lineno(1)}')\n\n else:\n p[0] = ['func', 'get_element_list', [p[1], p[3]]]\n\n def p_expr_list(p):\n \"\"\"expr_list : LBRACKET expr_inside_list RBRACKET\"\"\"\n p[0] = p[2]\n\n def p_list_expr_inside_list(p):\n \"\"\"expr_inside_list : expr_inside_list COMMA expr_type\n | expr_inside_list COMMA expr_bool\n | expr_type\n | expr_bool\n | empty\"\"\"\n\n if len(p) == 2:\n if p[1] is None:\n p[0] = []\n else:\n p[0] = [p[1]]\n else:\n p[0] = p[1]\n p[0].append(p[3])\n\n def p_expr_type(p):\n \"\"\"expr_type : expr_num\n | expr_string\"\"\"\n p[0] = p[1]\n\n def p_expr_bool_true(p):\n \"\"\"expr_bool : TRUE\"\"\"\n p[0] = True\n\n def p_expr_bool_false(p):\n \"\"\"expr_bool : FALSE\"\"\"\n p[0] = False\n\n def p_expr_number(p):\n \"\"\"expr_num : FLOAT\n | INTEGER\"\"\"\n p[0] = p[1]\n\n def p_expr_string(p):\n \"\"\"expr_string : STRING\"\"\"\n p[0] = p[1]\n\n # Empty rule\n def p_empty(p):\n \"\"\"empty :\"\"\"\n p[0] = None\n\n # Error rule for syntax errors\n def p_error(p):\n if p is not None:\n raise Exception(f'Illegal token: \"{p.value}\" at line: {p.lineno}')\n\n else:\n raise Exception('General error: error at the end of the script.\\n'\n 'Probably one structure is not built properly.')\n\n # Build the parser\n try:\n parser = yacc.yacc(debug=1)\n except Exception as e:\n raise Exception(e)\n if not interactive:\n data = ''\n with open(script, 'r') as s:\n for line in s:\n data += line\n if not line: continue\n try:\n parser.parse(data)\n except EOFError:\n raise Exception('General error parsing {script}'.format(script=script))\n return code, variables\n else:\n while True:\n try:\n s = input('input(sentence) > ')\n except 
NotImplementedError:\n s = input('input(sentence) > ')\n if not s:\n continue\n result = parser.parse(s)\n print(result)\n\n\n# Interactive mode\nif __name__ == \"__main__\":\n NBZParser('interactive', True)\n", "id": "6689184", "language": "Python", "matching_score": 4.747869491577148, "max_stars_count": 0, "path": "src/parser/nbz_parser.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n#\n# This file contains all tokens and lexical rules\n# to parse the nbz-scripts. Some functions are more complex rules\n# that use the docstring to define themselves.\n\nimport ply.lex as lex\n\n# -- Reserved words token list --\nreserved = {\n\n\t# Logical operators\n\t'true': 'TRUE',\n\t'false': 'FALSE',\n\t'or': 'OR',\n\t'and': 'AND',\n\t'not': 'NOT',\n\n\t# Flow control\n\t'if': 'IF',\n\t'elif': 'ELIF',\n\t'else': 'ELSE',\n\t'for': 'FOR',\n\t'in': 'IN',\n\t'while': 'WHILE',\n\n\t# Statements\n\t'def': 'DEF',\n\n}\n\n# --- TOKENS LIST ---\ntokens = [\n\n\t # Types\n\t 'INTEGER',\n\t 'FLOAT',\n\t 'STRING',\n\n\t # Aritmethic operators\n\t 'PLUS',\n\t 'MINUS',\n\t 'MULTIPLY',\n\t 'DIVIDE',\n\t 'PLUSPLUS',\n\t 'MINUSMINUS',\n\n\t # Logical operators\n\t 'EQ',\n\t 'LT',\n\t 'LET',\n\t 'GT',\n\t 'GET',\n\t 'DIFF',\n\n\t # Lexical tokens\n\t 'ASSIGN',\n\t 'LPAREN',\n\t 'RPAREN',\n\t 'COMMA',\n\t 'SEMI',\n\t 'LBRACE',\n\t 'RBRACE',\n\t 'LBRACKET',\n\t 'RBRACKET',\n\t 'ID'\n\n ] + list(reserved.values())\n\n# --- REGULAR EXPRESSION RULES FOR TOKENS ---\n\n# Types\ndef t_INTEGER(t):\n\tr'\\d+'\n\tt.value = int(t.value)\n\treturn t\n\ndef t_FLOAT(t):\n\tr'\\d+[\\.]\\d*'\n\tt.value = float(t.value)\n\treturn t\n\ndef t_STRING(t): # Trimming strings rule (avoiding \" in the string token)\n\tr\"(?P<quote>['\\\"])(?P<string>.*?)(?<!\\\\)(?P=quote)\"\n\tt.value = str(t.value)[1:-1]\n\treturn t\n\n# Arithmetic operators\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_MULTIPLY = r'\\*'\nt_DIVIDE = r'/'\nt_PLUSPLUS = r'\\+\\+'\nt_MINUSMINUS = r'--'\n\n# Logical operators\nt_EQ = r'=='\nt_LT = r'<'\nt_GT = r'>'\nt_LET = r'<='\nt_GET = r'>='\nt_DIFF = r'!='\n\n# Lexical tokens\nt_ASSIGN = r'='\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_COMMA = r'\\,'\nt_SEMI = r'\\;'\nt_LBRACE = r'\\{'\nt_RBRACE = r'\\}'\nt_LBRACKET = r'\\['\nt_RBRACKET = r'\\]'\n\ndef t_ID(t):\n\tr'[a-z_A-Z]([a-z_A-Z0-9])*'\n\tt.type = reserved.get(t.value.lower(), 'ID') # Check for reserved words (lower() to avoid case-sensitive)\n\treturn t\n\n# --- MISC ---\n\n# - Ignored characters\n\n# Spaces and tabs\nt_ignore = ' \\t'\n\n# Comments\ndef t_comment(t):\n\tr'\\#.*'\n\n# Newlines\ndef t_newline(t):\n\tr'\\n+'\n\tt.lexer.lineno += len(t.value)\n\n# Error handling rule\ndef t_error(t):\n\traise Exception(\"Illegal character '{value}' line: {line} column: {column}\".format(value=t.value[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t line=t.lineno,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t column=t.lexpos))\n\n# Build the lexer\nlexer = lex.lex()\n\n# Interactive mode\nif __name__ == \"__main__\":\n\tlexer = lex.lex()\n\tprint('Starting nbz token parser... 
Press Ctrl+C to exit.')\n\twhile True:\n\t\tlex.input(raw_input('token > '))\n\t\ttry:\n\t\t\ttok = lex.token()\n\t\texcept Exception:\n\t\t\tprint('Illegal token')\n\t\t\tlex.input(input('token > '))\n\t\tprint(tok)\n", "id": "8245617", "language": "Python", "matching_score": 0.249282106757164, "max_stars_count": 4, "path": "src/parser/nbz_lexer.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n#\n# File where the dict of some user-agents\n# is generated, to be imported by other modules.\n\n\nUSER_AGENTS = {\n\n\t# CHROME\n\t'Chrome on Android Mobile': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19',\n\t'Chrome on Android Tablet': 'Mozilla/5.0 (Linux; Android 4.1.2; Nexus 7 Build/JZ054K) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',\n\t'Chrome on Mac': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',\n\t'Chrome on Ubuntu': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36',\n\t'Chrome on Windows': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36',\n\t'Chrome on iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_4 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) CriOS/27.0.1453.10 Mobile/10B350 Safari/8536.25',\n\t'Chrome on iPad': 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/63.0.3239.84 Mobile/13B143 Safari/601.1.46',\n\n\t# FIREFOX\n\t'Firefox on Android Mobile': 'Mozilla/5.0 (Android; Mobile; rv:14.0) Gecko/14.0 Firefox/14.0',\n\t'Firefox on Android Tablet': 'Mozilla/5.0 (Android; Tablet; rv:14.0) Gecko/14.0 Firefox/14.0',\n\t'Firefox on Mac': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0',\n\t'Firefox on Ubuntu': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0',\n\t'Firefox on Windows': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0',\n\t'Firefox on iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) FxiOS/1.0 Mobile/12F69 Safari/600.1.4',\n\t'Firefox on iPad': 'Mozilla/5.0 (iPad; CPU iPhone OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) FxiOS/1.0 Mobile/12F69 Safari/600.1.4',\n\n\t# IE\n\t'Internet Explorer 11': 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv 11.0) like Gecko',\n\t'Internet Explorer 10': 'Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)',\n\t'Internet Explorer 9': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',\n\t'Internet Explorer 8': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',\n\t'Internet Explorer 7': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',\n\t'Internet Explorer 6': 'Mozilla/4.0 (Windows; MSIE 6.0; Windows NT 5.2)',\n\n\t# EDGE\n\t'Edge 12': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',\n\t'Edge 13': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',\n\t'Edge - Windows': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240',\n\t'Edge - Mobile': 'Mozilla/5.0 (Windows 
Phone 10.0; Android 4.2.1; Microsoft; Lumia 640 XL LTE) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10166',\n\t'Edge - XBox': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; Xbox; Xbox One) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/13.10586',\n\n\t# OPERA\n\t'Opera on Mac': 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.9.168 Version/11.52',\n\t'Opera on Windows': 'Opera/9.80 (Windows NT 6.1; WOW64; U; en) Presto/2.10.229 Version/11.62',\n\n\t# SAFARI\n\t'Safari on Mac': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',\n\t'Safari on Windows': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',\n\t'Safari on iPad': 'Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3',\n\t'Safari on iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3',\n\n\t# SPIDERS/BOTS\n\t'BingBot (Bing spider)': 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',\n\t'Googlebot (Google spider)': 'Googlebot/2.1 (+http://www.googlebot.com/bot.html)',\n\t'Googlebot': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n\t'Googlebot Smartphone': 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n\t'Slurp! (Yahoo spider)': 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',\n\n\t# ANDROID\n\t'Nexus 7 (Tablet)': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',\n\t'Samsung Galaxy Tab (Tablet)': 'Mozilla/5.0 (Linux; U; Android 2.2; en-gb; GT-P1000 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',\n\t'Samsung Galaxy S3 (Handset)': 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',\n\t'Android (4.0.2) Galaxy Nexus': 'Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',\n\t'Android (2.3)': 'Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',\n\n\t# IOS\n\t'iPad': 'Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4',\n\t'iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A366 Safari/600.1.4',\n\t'iPod': 'Mozilla/5.0 (iPod; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3',\n\n\t# WINDOWS PHONE\n\t'Windows Phone 7': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; LG; GW910)',\n\t'Windows Phone 7.5': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; SAMSUNG; SGH-i917)',\n\t'Windows Phone 8': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 920)',\n\n\t# 
BLACKBERRY\n\t'BlackBerry - BB10': 'Mozilla/5.0 (BB10; Touch) AppleWebKit/537.1+ (KHTML, like Gecko) Version/10.0.0.1337 Mobile Safari/537.1+',\n\t'BlackBerry - Playbook 2.1': 'Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML, like Gecko) Version/7.2.1.0 Safari/536.2+',\n\t'BlackBerry - 9900': 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.187 Mobile Safari/534.11+',\n\n\t# OTHERS\n\t'MeeGo - Nokia N9': 'Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13',\n\n}\n", "id": "10438339", "language": "Python", "matching_score": 1.7271497249603271, "max_stars_count": 4, "path": "src/data/user_agents.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport os\nimport platform\nimport time\nfrom urllib.parse import urlparse\nfrom data.user_agents import USER_AGENTS\nfrom .lib_log_nbz import Logging\nfrom selenium import webdriver\nfrom browsermobproxy import Server\nlogger = Logging()\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nproxy_path = os.path.join(BASE_DIR, '..', '..', 'proxy', 'bin', 'browsermob-proxy')\n\n\nclass LibWb:\n \"\"\"Browser and proxy library.\n\n This class contains the methods to start the proxy and the native function to start the web browser.\n\n Methods:\n instance_browser\n get_driver_path\n \"\"\"\n\n def __init__(self):\n \"\"\"Init LibWb class\"\"\"\n\n pass\n\n def instance_browser(self, proxy_enabled, params):\n \"\"\"Start web browser and proxy server\n\n Args:\n proxy_enabled: flag to set proxy\n params: list of parameters\n -0: browser engine\n -1: user-agent\n Returns:\n Instance of the server, the proxy and the web browser\n \"\"\"\n\n if proxy_enabled:\n try:\n server = Server(proxy_path)\n server.start()\n except Exception as e:\n raise Exception('Error launching server: {exception}'.format(exception=e))\n try:\n proxy = server.create_proxy()\n except RuntimeError:\n time.sleep(5)\n try:\n proxy = server.create_proxy()\n except Exception as e:\n raise Exception('Error configuring proxy: {exception}'.format(exception=e))\n proxy.new_har()\n try:\n proxy_url = urlparse.urlparse(proxy.proxy).path\n except AttributeError:\n proxy_url = urlparse(proxy.proxy).path\n else:\n server = None\n proxy = None\n try:\n engine = params[0]\n try:\n user_agent = USER_AGENTS[params[1]]\n except LookupError:\n user_agent = params[1]\n headless = params[2]\n except LookupError:\n raise Exception('Function browser(): 3 arguments needed')\n try:\n logger.log('NOTE', 'Engine: {engine} | User-agent: {user_agent} | Headless: {headless}'.format(engine=engine,\n user_agent=user_agent,\n headless=headless))\n if engine == 'chrome':\n driver_path = self.get_driver_path(engine)\n ch_opt = webdriver.ChromeOptions()\n if proxy_enabled:\n ch_opt.add_argument(\"--proxy-server=\" + proxy_url)\n if user_agent != 'default':\n ch_opt.add_argument(\"--user-agent=\" + user_agent)\n if headless:\n ch_opt.headless = True\n try:\n browser = webdriver.Chrome(executable_path=driver_path,\n chrome_options=ch_opt)\n except LookupError:\n time.sleep(5)\n browser = webdriver.Chrome(executable_path=driver_path,\n chrome_options=ch_opt)\n elif engine == 'firefox':\n driver_path = self.get_driver_path(engine)\n ff_prf = webdriver.FirefoxProfile()\n ff_opt = webdriver.FirefoxOptions()\n if user_agent != 'default':\n ff_prf.set_preference(\"general.useragent.override\", user_agent)\n if headless:\n 
ff_opt.headless = True\n try:\n browser = webdriver.Firefox(executable_path=driver_path, firefox_profile=ff_prf, proxy=proxy.selenium_proxy(), options=ff_opt) if proxy_enabled \\\n else webdriver.Firefox(executable_path=driver_path, firefox_profile=ff_prf, options=ff_opt)\n except LookupError:\n time.sleep(5)\n browser = webdriver.Firefox(executable_path=driver_path, firefox_profile=ff_prf, proxy=proxy.selenium_proxy(), options=ff_opt) if proxy_enabled \\\n else webdriver.Firefox(executable_path=driver_path, firefox_profile=ff_prf, options=ff_opt)\n else:\n raise Exception('Not supported engine: {engine}'.format(engine=engine))\n except Exception as e:\n raise Exception('Error launching {engine} ({user_agent}): {exception}'.format(engine=engine,\n user_agent=user_agent,\n exception=e))\n return browser\n\n @staticmethod\n def get_driver_path(engine):\n \"\"\"Method to get the driver path for each engine and each operative system\n\n Args:\n engine: web browser to execute the nbz-script\n Returns:\n The driver path of the selected engine\n \"\"\"\n\n if engine == 'chrome':\n driver_path = os.path.join(BASE_DIR, 'drivers', 'chromedriver')\n elif engine == 'firefox':\n driver_path = os.path.join(BASE_DIR, 'drivers', 'geckodriver')\n else:\n raise Exception('Not supported engine {engine}'.format(engine=engine))\n return driver_path\n", "id": "9739048", "language": "Python", "matching_score": 2.0137417316436768, "max_stars_count": 4, "path": "src/lib/lib_wb_nbz.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport re\nfrom .lib_log_nbz import Logging\n\nlogger = Logging()\n\n\nclass LibD:\n\t\"\"\"Data types library of native functions.\n\n\tThis class contains all the data types functions to handle types into nbz-scripts.\n\n\tMethods:\n\t\tvar_type\n\t\tcast_int\n\t\tcast_float\n\t\tcast_str\n\t\tsub_str\n\t\tlength\n\t\tfind\n\t\tfind_regex\n\t\treplace\n\t\tsplit\n\t\tappend_list\n\t\tupdate_list\n\t\tremove_list\n\t\tget_element_list\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Init LibD class\"\"\"\n\n\t\tpass\n\n\t@staticmethod\n\tdef var_type(browser, params):\n\t\t\"\"\"Print the variable type\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: variable to print type\n\n\t\tReturns:\n\t\t\tString with the type of the variable\n\t\t\"\"\"\n\n\t\tvariable = params[0]\n\t\treturn type(variable)\n\n\t@staticmethod\n\tdef cast_int(browser, params):\n\t\t\"\"\"Cast numeric string | float value to integer\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: value to convert to string (string | float)\n\t\tReturns:\n\t\t\tInteger of the converted data\n\t\t\"\"\"\n\n\t\tvalue = params[0]\n\t\ttry:\n\t\t\treturn int(value)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error casting {value} to integer: {exception}'.format(value=value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef cast_float(browser, params):\n\t\t\"\"\"Cast numeric string | integer value to float\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: value to convert to float (integer | string)\n\t\tReturns:\n\t\t\tFloat of the converted data\n\t\t\"\"\"\n\n\t\tvalue = params[0]\n\t\ttry:\n\t\t\treturn float(value)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error casting {value} to float: {exception}'.format(value=value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef 
cast_str(browser, params):\n\t\t\"\"\"Cast numeric value to string\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: value to convert to string (integer | float)\n\t\tReturns:\n\t\t\tString of the converted number\n\t\t\"\"\"\n\n\t\tvalue = params[0]\n\t\ttry:\n\t\t\treturn str(value)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error casting {value} to str: {exception}'.format(value=value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef sub_str(browser, params):\n\t\t\"\"\"Returns substring from bigger string\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: string from extracting\n\t\t\t\t-1: start character index of substring (to the end if -2 is empty)\n\t\t\t\t-2: end character index of substring (optional)\n\t\tReturns:\n\t\t\tSubstring from main string\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tstring = params[0]\n\t\t\tsubstring_index_start = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function sub_str(): at least 2 arguments needed')\n\n\t\ttry:\n\t\t\tif len(params) == 2:\n\t\t\t\treturn string[substring_index_start:]\n\t\t\telse:\n\t\t\t\tsubstring_index_end = params[2]\n\t\t\t\treturn string[substring_index_start:substring_index_end]\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting substring from {string}: {exception}'.format(string=string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef length(browser, params):\n\t\t\"\"\"Returns length from any compatible data (string, list, dict)\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: data which can be measured (string, list, dict)\n\t\tReturns:\n\t\t\tInteger number of the length of the data\n\t\t\"\"\"\n\n\t\tdata = params[0]\n\t\ttry:\n\t\t\treturn len(data)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting length from {data}: {exception}'.format(data=data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef find(browser, params):\n\t\t\"\"\"Search one string into another string.\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: main string\n\t\t\t\t-1: substring to search\n\t\tReturns:\n\t\t\t-1 if substring is not found\n\t\t\tInteger of the character index where the substring starts on the main string\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tstring = params[0]\n\t\t\tsubstring = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function find(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn string.find(substring)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error searching substring into {string}: {exception}'.format(string=string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef find_regex(browser, params):\n\t\t\"\"\"Search a regex pattern into string.\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: main string\n\t\t\t\t-1: regex\n\t\tReturns:\n\t\t\tString found with that pattern\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tstring = params[0]\n\t\t\tpattern = params[1]\n\t\t\tresult = re.search(pattern, string)\n\t\t\tif result:\n\t\t\t\treturn result.group()\n\t\t\telse:\n\t\t\t\treturn \"\"\n\t\texcept Exception as e:\n\t\t\traise Exception('Error searching pattern into {pattern}: {exception}'.format(pattern=pattern,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef replace(browser, 
params):\n\t\t\"\"\"Replace substring into string\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: main string\n\t\t\t\t-1: old substring to replace\n\t\t\t\t-2: new substring to put\n\t\tReturns:\n\t\t\tString with the new substring replaced on old substring\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tstring = params[0]\n\t\t\tsubstring_old = params[1]\n\t\t\tsubstring_new = params[2]\n\t\texcept LookupError:\n\t\t\traise Exception('Function replace(): 3 arguments needed')\n\n\t\ttry:\n\t\t\treturn string.replace(substring_old, substring_new)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error replacing: {string}({old}, {new}): {exception}'.format(string=string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t old=substring_old,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new=substring_new,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef split(browser, params):\n\t\t\"\"\"Split string into some strings with a delimiter\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: string to split\n\t\t\t\t-1: delimiter to split with\n\t\tReturns:\n\t\t\tA list of substrings from main string between delimiters\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tstring = params[0]\n\t\t\tdelimiter = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function split(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn string.split(delimiter)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error splitting: {string} with {delimiter}: {exception}'.format(string=string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t delimiter=delimiter,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef append_list(browser, params):\n\t\t\"\"\"Append an element into a list\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: list\n\t\t\t\t-1: element to append\n\t\tReturns:\n\t\t\tList with the new element at the end\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tlist_ = params[0]\n\t\t\telement = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function append(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn list_.append(element)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error appending {element} into {list}: {exception}'.format(element=element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlist=list_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef update_list(browser, params):\n\t\t\"\"\"Update an element on a list\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: list\n\t\t\t\t-1: index of the element to upadte\n\t\t\t\t-2: new element\n\t\tReturns:\n\t\t\tA list with the new element updated\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tlist_ = params[0]\n\t\t\tindex = params[1]\n\t\t\telement = params[2]\n\t\texcept LookupError:\n\t\t\traise Exception('Function update(): 3 arguments needed')\n\n\t\ttry:\n\t\t\tlist_[index] = element\n\t\t\treturn list_\n\t\texcept Exception as e:\n\t\t\traise Exception('Error updating {index} into {list} with {element}: {exception}'.format(index=index,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlist=list_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telement=element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texception=e))\n\n\t@staticmethod\n\tdef remove_list(browser, params):\n\t\t\"\"\"Remove and element from a list\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of 
parameters\n\t\t\t\t-0: list\n\t\t\t\t-1: element to remove\n\t\tReturns:\n\t\t\tA list with the element removed\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tlist_ = params[0]\n\t\t\telement = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function remove(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn list_.remove(element)\n\t\texcept Exception as e:\n\t\t\traise Exception('Error removing {element} from {list}: {exception}'.format(element=element,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t list=list_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n\n\t@staticmethod\n\tdef get_element_list(browser, params):\n\t\t\"\"\"Return element from list with index\n\n\t\tArgs:\n\t\t\tbrowser: web browser instance\n\t\t\tparams: list of parameters\n\t\t\t\t-0: list\n\t\t\t\t-1: index of the wanted element\n\t\tReturns:\n\t\t\tElement of the list\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tlist_ = params[0]\n\t\t\tindex = params[1]\n\t\texcept LookupError:\n\t\t\traise Exception('Function get_element_list(): 2 arguments needed')\n\n\t\ttry:\n\t\t\treturn list_[index]\n\t\texcept Exception as e:\n\t\t\traise Exception('Error getting element {index} from list {list}: {exception}'.format(index=index,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t list=list_,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t exception=e))\n", "id": "621478", "language": "Python", "matching_score": 0.8513109683990479, "max_stars_count": 4, "path": "src/lib/lib_d_nbz.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: <Zurdi>\n\n\nimport os\nimport time\nimport datetime\n\n\nclass Logging:\n\t\"\"\"Logger library to show the output of the script\n\n\tThis library provides a way to log each step of an nbz-script, showing if it is going right,\n\tor if an error occurs.\n\n\tMethods:\n\t\tlog\n\t\tlog_header\n\t\tlog_footer\n\t\tlog_error\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"Init Logging class\"\"\"\n\n\t\tself.GREEN = '\\033[92m'\n\t\tself.YELLOW = '\\033[93m'\n\t\tself.RED = '\\033[91m'\n\t\tself.NC = '\\033[0m'\n\n\tdef log(self, level, msg):\n\t\t\"\"\"Print the log in terminal\n\n\t\tArgs:\n\t\t\tlevel: this parameter indicates if the message to print is a log message or an error message\n\t\t\tmsg: message to print in terminal\n\t\t\"\"\"\n\n\t\tts = time.time()\n\t\tst = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n\t\tif level == 'NOTE':\n\t\t\tprint(\"{GREEN} - NBZ Log{YELLOW}[{st}]: {NC}{msg}\".format(GREEN=self.GREEN,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tYELLOW=self.YELLOW,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tNC=self.NC,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tst=st,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmsg=msg))\n\t\telif level == 'ERROR':\n\t\t\tprint(\"{RED} - NBZ Log{YELLOW}[{st}]: {NC}{msg}\".format(RED=self.RED,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t YELLOW=self.YELLOW,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t NC=self.NC,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t st=st,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t msg=msg))\n\t\telse:\n\t\t\tprint('Not defined logger level: {level}'.format(level=level))\n\n\tdef log_header(self):\n\t\tprint(\"{YELLOW} ############################ START NBZ ############################{NC}\\n\".format(YELLOW=self.YELLOW,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tNC=self.NC))\n\n\tdef log_footer(self):\n\t\tprint(\"\\n{YELLOW} ############################ END NBZ ############################{NC}\\n\".format(YELLOW=self.YELLOW,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tNC=self.NC))\n\n\tdef 
log_error(self):\n\t\tprint(\"\\n{RED} ************************ ERROR ENDING NBZ ************************{NC}\\n\".format(RED=self.RED,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t NC=self.NC))\n", "id": "7317045", "language": "Python", "matching_score": 0.33536607027053833, "max_stars_count": 4, "path": "src/lib/lib_log_nbz.py" } ]
2.072461
ZenoABC
[ { "content": "import asyncio\nimport contextlib\nimport difflib\nimport inspect\nimport os\nimport platform\nimport random\nimport time\nimport typing\n\nimport discord\nimport psutil\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands.cooldowns import BucketType\n\nimport utils\n\n\nclass Bot(commands.Cog):\n \"Basic info about the bot and quick commands to get you access to support\"\n\n def __init__(self, bot):\n self.bot = bot\n self.status_task.start()\n\n @tasks.loop(seconds=40)\n async def status_task(self):\n await self.bot.change_presence(\n status=discord.Status.online,\n activity=discord.Activity(type=discord.ActivityType.listening, name=f\"the return of {self.bot.user.name}\"),\n )\n await asyncio.sleep(40)\n await self.bot.change_presence(\n status=discord.Status.online,\n activity=discord.Activity(\n type=discord.ActivityType.watching, name=f\"{len(self.bot.guilds)} servers | {len(self.bot.users)} users\"\n ),\n )\n await asyncio.sleep(40)\n await self.bot.change_presence(\n status=discord.Status.online,\n activity=discord.Activity(type=discord.ActivityType.watching, name=\"the new updates coming soon...\"),\n )\n await asyncio.sleep(40)\n\n @status_task.before_loop\n async def before_status_task(self):\n await self.bot.wait_until_ready()\n\n def cog_unload(self):\n self.status_task.stop()\n\n @commands.command(brief=\"sends pong and the time it took to do so.\")\n async def ping(self, ctx):\n start = time.perf_counter()\n message = await ctx.send(\"Ping\")\n end = time.perf_counter()\n\n embed = discord.Embed(title=\"Bot Ping Data\", color=15428885, timestamp=ctx.message.created_at)\n\n embed.add_field(name=\"Bot Latency:\", value=f\"{round((end - start)*1000)} MS\", inline=False)\n\n embed.add_field(name=\"Websocket Response time:\", value=f\"{round(self.bot.latency*1000)} MS\", inline=False)\n\n await message.edit(content=f\"Pong\", embed=embed)\n\n @commands.command(brief=\"gives you an invite to invite the bot.\", aliases=[\"inv\"])\n async def invite(self, ctx):\n normal_inv = discord.utils.oauth_url(\n self.bot.user.id, permissions=discord.Permissions(permissions=8), scopes=(\"bot\",)\n )\n minimial_invite = discord.utils.oauth_url(\n self.bot.user.id, permissions=discord.Permissions(permissions=70634561), scopes=(\"bot\",)\n )\n\n normal_inv_slash = discord.utils.oauth_url(\n self.bot.user.id,\n permissions=discord.Permissions(permissions=8),\n )\n minimial_invite_slash = discord.utils.oauth_url(\n self.bot.user.id,\n permissions=discord.Permissions(permissions=70634561),\n )\n\n embed = discord.Embed(title=\"Invite link:\", color=random.randint(0, 16777215))\n embed.add_field(\n name=f\"{self.bot.user.name} invite:\",\n value=f\"[{self.bot.user.name} invite url]({normal_inv}) \\nNon Markdowned invite : {normal_inv}\",\n )\n embed.add_field(name=\"Minimial permisions\", value=f\"{ minimial_invite}\")\n\n embed.set_thumbnail(url=self.bot.user.display_avatar.url)\n embed.set_footer(\n text=f\"not all features may work if you invite with minimal perms, if you invite with 0 make sure these permissions are in a Bots/Bot role.\"\n )\n\n view = discord.ui.View()\n\n view.add_item(\n discord.ui.Button(\n label=f\"{self.bot.user.name}'s Normal invite\", url=normal_inv, style=discord.ButtonStyle.link\n )\n )\n view.add_item(\n discord.ui.Button(\n label=f\"{self.bot.user.name}'s Minimial Permisions Invite\",\n url=minimial_invite,\n style=discord.ButtonStyle.link,\n )\n )\n\n view.add_item(\n discord.ui.Button(\n label=f\"{self.bot.user.name}'s Normal 
Invite(Slash)\",\n url=normal_inv_slash,\n style=discord.ButtonStyle.link,\n row=2,\n )\n )\n view.add_item(\n discord.ui.Button(\n label=f\"{self.bot.user.name}'s Minimial Permisions(Slash)\",\n url=minimial_invite_slash,\n style=discord.ButtonStyle.link,\n row=2,\n )\n )\n\n await ctx.send(embed=embed, view=view)\n\n @commands.command(brief=\"gives you who the owner is.\")\n async def owner(self, ctx):\n\n info = await self.bot.application_info()\n owner_id = info.team.owner_id if info.team else info.owner.id\n\n support_guild = self.bot.get_guild(736422329399246990)\n\n owner = await self.bot.try_member(support_guild, owner_id) or await self.bot.try_user(owner_id)\n\n embed = discord.Embed(\n title=f\"Bot Owner: {owner}\", color=random.randint(0, 16777215), timestamp=ctx.message.created_at\n )\n embed.set_image(url=owner.display_avatar.url)\n\n view = utils.OwnerInfoSuper(ctx, owner, support_guild)\n\n await ctx.send(\n \"Pick a way for Mutual Guilds to be sent to you or not if you really don't the mutualguilds\",\n embed=embed,\n view=view,\n )\n\n @commands.command(\n help=\"a command to give information about the team\",\n brief=\"this command works if you are in team otherwise it will just give the owner.\",\n )\n async def team(self, ctx):\n information = await self.bot.application_info()\n if information.team == None:\n true_owner = information.owner\n team_members = []\n\n if information.team != None:\n true_owner = information.team.owner\n team_members = information.team.members\n embed = discord.Embed(title=information.name, color=random.randint(0, 16777215))\n embed.add_field(name=\"Owner\", value=true_owner)\n embed.set_footer(text=f\"ID: {true_owner.id}\")\n\n embed.set_image(url=information.icon.url if information.icon else self.bot.display_avatar.url)\n # I don't gunatree this works, but I hope it does.\n\n for x in team_members:\n embed.add_field(name=x, value=x.id)\n await ctx.send(embed=embed)\n\n @commands.command(\n help=\"get the stats of users and members in the bot\",\n brief=\"this is an alternative that just looking at the custom status time to time.\",\n )\n async def stats(self, ctx):\n embed = discord.Embed(title=\"Bot stats\", color=random.randint(0, 16777215))\n embed.add_field(name=\"Guild count\", value=len(self.bot.guilds))\n embed.add_field(name=\"User Count:\", value=len(self.bot.users))\n embed.add_field(name=\"True Command Count:\", value=f\"{len(list(self.bot.walk_commands()))}\")\n embed.add_field(name=\"Command Count:\", value=f\"{len(self.bot.commands)}\")\n embed.add_field(\n name=\"Usable Command Count:\", value=f\"{len(await utils.filter_commands(ctx, self.bot.commands))}\"\n )\n embed.add_field(name=\"Approximate Member Count:\", value=f\"{sum(g.member_count for g in self.bot.guilds)}\")\n embed.set_footer(\n text=f\"if you at all don't get what this means, you can ask our support team, if you do understand you can ask for clarification\"\n )\n await ctx.send(embed=embed)\n\n @commands.command(\n brief=\"finds out where the location of the command on my github repo(so people can learn from my commands)\"\n )\n async def source(self, ctx, *, command=None):\n github_url = \"https://github.com/JDJGInc/JDBot\"\n branch = \"master\"\n\n embed = discord.Embed(\n title=\"Github link\", description=f\"{github_url}\", color=15428885, timestamp=ctx.message.created_at\n )\n\n embed.set_footer(\n text=\"This Bot's License is MIT, you must credit if you use my code, but please just make your own, if you don't know something works ask me, or try to learn 
how mine works.\"\n )\n\n if command is None:\n return await ctx.send(\"Here's the github link:\", embed=embed)\n\n command_wanted = self.bot.get_command(command)\n if not command_wanted:\n return await ctx.send(f\"Couldn't find {command}. Here's source anyway:\", embed=embed)\n\n src = command_wanted.callback.__code__\n filename = src.co_filename\n\n module = command_wanted.callback.__module__\n\n if command == \"help\":\n src = type(self.bot.help_command)\n module = src.__module__\n filename = inspect.getsourcefile(src)\n\n lines, firstline = inspect.getsourcelines(src)\n\n check_path = filename.startswith(os.getcwd())\n filename = module.replace(\".\", \"/\") + \".py\"\n\n if not check_path:\n\n if module.startswith(\"jishaku\"):\n github_url = \"https://github.com/Gorialis/jishaku\"\n branch = \"master\"\n\n elif module.startswith(\"discord\"):\n github_url = \"https://github.com/Rapptz/discord.py\"\n branch = \"master\"\n\n else:\n module = module.split(\".\")[0]\n return await ctx.send(\n f\"We don't support getting the source of {module}. Here's my bot's source:\", embed=embed\n )\n\n embed.title = f\"Source for {command_wanted}:\"\n embed.description = (\n f\"[**Click Here**]({github_url}/blob/{branch}/{filename}#L{firstline}-L{firstline + len(lines)-1})\"\n )\n\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"a set of rules we will follow\")\n async def promise(self, ctx):\n embed = discord.Embed(title=\"Promises we will follow:\", color=random.randint(0, 16777215))\n embed.add_field(\n name=\"Rule 1:\",\n value=\"if you are worried about what the bot may collect, please send a DM to the bot, and we will try to compile the data the bot may have on you.\",\n )\n embed.add_field(\n name=\"Rule 2:\",\n value=\"in order to make sure our bot is safe, we will be making sure the token is secure and making sure anyone who works on the project is very trustworthy.\",\n )\n embed.add_field(\n name=\"Rule 3:\",\n value=\"we will not nuke your servers, as this happened to us before and we absolutely hated it.\",\n )\n embed.add_field(name=\"Rule 4:\", value=\"We will also give you a list of suspicious people\")\n embed.add_field(name=\"Rule 5:\", value=\"we also made sure our code is open source so you can see what it does.\")\n embed.add_field(\n name=\"Rule 6:\",\n value=\"We will also let you ask us questions directly, just DM me directly(the owner is listed in the owner command(and anyone should be able to friend me)\",\n )\n embed.add_field(\n name=\"Rule 7:\",\n value=\"Using our bot to attempt to break TOS, will cause us to ban you from using the bot, then upgrade our security\",\n )\n embed.add_field(\n name=\"Rule 8:\",\n value=\"Attempting to break discord TOS like having a giveaway but having people require to join an external guild(will eventually be reportable to us, if they owner counties, then the reporter should report to discord.(essentially breaking TOS near the functionalties with our bot)\",\n )\n embed.add_field(\n name=\"Rule 9:\",\n value=\"If our bot doesn't do the giveaway requirements we're actually safe, as we don't require it, however please report this to us, so we can contact them to get them to stop, Thanks. 
if they don't listen we'll tell you, then you can report them.\",\n )\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Privacy Policy\", aliases=[\"privacy\"])\n async def promises(self, ctx):\n embed = discord.Embed(title=\"Privacy Policy\", color=random.randint(0, 16777215))\n embed.add_field(\n name=\"1:\", value=\"We have a logging channel that notifies us when the bot joins or leaves a guild\"\n )\n\n embed.add_field(\n name=\"2:\",\n value=\"We log errors that occured in the bot(although it might contain private information). This is only visible temporarily in console, and is not stored in any of our DBs.\",\n )\n embed.add_field(name=\"3:\", value=\"We store user ids for economy commands (Opt In)\")\n embed.add_field(\n name=\"4:\",\n value=\"We store user id , and support channel id, as well as last time it was used(for archive reasons), for allowing ticket based support. (Opt In)\",\n )\n embed.add_field(name=\"5:\", value=\"We store inputted invalid commands\")\n embed.add_field(\n name=\"6:\",\n value=\"We may temporarily look at your mutual guilds with our bot or the list of servers our bot is in, with some information, just to check if any problems occured(like if a command is going hayware) or to prevent abuse. If you want to look at what is sent in embeds, just ask, We will show you.\",\n )\n embed.add_field(\n name=\"6.1:\",\n value=\"This is a temp command, which is stored no where else, and we also delete the embed when done :D. If you have a problem with this contact me.\",\n )\n embed.add_field(\n name=\"7:\",\n value=\"Any message content in global chat is logged to a private channel on discord with included channel ids, and guild ids, but otherwise, message content is not stored, except in moderation channels, and user ids are used to blacklist users per guild or globaly, or tell who is mod or staff or just a normal user.\",\n )\n embed.add_field(\n name=\"8:\",\n value=\"In the future we will store the guild id and channel id for member joining/leaving messages, and member changes\",\n )\n embed.add_field(\n name=\"Final:\",\n value=\"There should be no more except the sus list(list of ids I put together by hand of people who probaly shouldn't hang out with). We also now use the built in discord.py guild.members list with cache_member to only use memory to store you, we only use this to limit api calls(it's only opt in anyway)\",\n )\n embed.add_field(\n name=\"Who Gets this info:\",\n value=\"Only us, and our DB provider MongoDB(but they are unlikely to use our data. Sus users do show up if they exist in the same guild though and the reason why.\",\n )\n embed.add_field(name=\"More Info:\", value=\"Contact me at JDJG Inc. 
Official#3493\")\n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Sends you an invite to the official Bot support guild\", aliases=[\"guild_invite\"])\n async def support_invite(self, ctx):\n\n view = discord.ui.View()\n view.add_item(\n discord.ui.Button(\n label=f\"Support Guild Invite\", url=\"https://discord.gg/sHUQCch\", style=discord.ButtonStyle.link, row=1\n )\n )\n await ctx.send(\n \"If you press the button you will be invited to our guild :D, you can also manually use discord.gg/sHUQCch\",\n view=view,\n )\n\n @commands.command(brief=\"This command gives you an alt bot to use\", aliases=[\"alt_invite\", \"alt_bot\"])\n async def verify_issue(self, ctx):\n await ctx.send(\n \"You can invite the bot 831162894447804426(this will be an alt bot with almost the same code but with javascript though some report functionalies will have their guild swapped :D\"\n )\n\n @commands.command()\n async def whyprefixtest(self, ctx):\n await ctx.send(\n \"Because I don't have any alternative suggestions, and I don't feel like changing it to jd! or something. I can confirm this isn't a test bot :D\"\n )\n\n @commands.command()\n async def find_command(self, ctx, *, command=None):\n if command is None:\n await ctx.send(\"Please provide an arg.\")\n\n if command:\n\n all_commands = list(self.bot.walk_commands())\n\n command_names = [f\"{x}\" for x in await utils.filter_commands(ctx, all_commands)]\n\n # only reason why it's like this is uh, it's a bit in variable.\n\n matches = difflib.get_close_matches(command, command_names)\n\n if matches:\n await ctx.send(f\"Did you mean... `{matches[0]}`?\")\n\n else:\n await ctx.send(\"got nothing sorry.\")\n\n @commands.cooldown(1, 30, BucketType.user)\n @commands.command(\n brief=\"a command to automatically summon by creating an invite and having jdjg look at something if it's there something wrong\"\n )\n async def jdjgsummon(self, ctx):\n\n view = utils.BasicButtons(ctx)\n msg = await ctx.send(\n \"React with \\N{WHITE HEAVY CHECK MARK} if you want me to be summoned if not use \\N{CROSS MARK}. \\nPlease don't use jdjgsummon to suggest something use suggest to suggest something, alright? If you want to contact me directly, you can find my tag using owner(please don't use jdjgsummon for suggest stuff thanks)\",\n view=view,\n )\n\n await view.wait()\n\n if view.value is None:\n return await ctx.reply(\"You let me time out :(\")\n\n if view.value is True:\n message = await ctx.send(\n content=f\"Summoning JDJG now a.k.a the Bot Owner to the guild make sure invite permissions are open!\"\n )\n await msg.delete()\n\n if isinstance(ctx.channel, discord.threads.Thread):\n\n channel = self.bot.get_channel(ctx.channel.parent_id)\n\n ctx.channel = channel if channel else ctx.channel\n\n if isinstance(ctx.channel, discord.TextChannel):\n await asyncio.sleep(1)\n await message.edit(content=\"This is attempting to make an invite\")\n\n invite = None\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n invite = await ctx.channel.create_invite(max_uses=0)\n\n if not invite:\n await asyncio.sleep(1)\n return await message.edit(\n content=\"Failed making an invite. 
You likely didn't give it proper permissions(a.k.a create invite permissions) or it errored for not being found.\"\n )\n\n else:\n await asyncio.sleep(1)\n await message.edit(content=\"Contacting JDJG...\")\n\n jdjg = await self.bot.try_user(168422909482762240)\n\n embed = discord.Embed(\n title=f\"{ctx.author} wants your help\",\n description=f\"Invite: {invite.url} \\nChannel : {ctx.channel.mention} \\nName : {ctx.channel}\",\n color=random.randint(0, 16777215),\n )\n\n embed.set_footer(text=f\"Guild: {ctx.guild} \\nGuild ID: {ctx.guild.id}\")\n\n await jdjg.send(embed=embed)\n\n if isinstance(ctx.channel, discord.DMChannel):\n await asyncio.sleep(1)\n return await message.edit(\n content=\"This is meant for guilds not Dm channel if you want support in DM channel contact the owner, By DMS at JDJG Inc. Official#3493.\"\n )\n\n if view.value is False:\n await ctx.send(content=f\" You didn't agree to summoning me. So I will not be invited.\")\n await msg.delete()\n\n @commands.command(brief=\"this command tells you to how to report ex issues to owner\")\n async def report_issue(self, ctx):\n await ctx.send(\n \"if you have an issue please join the support server, create a ticket, or Dm the owner at JDJG Inc. Official#3493. Thanks :D!\"\n )\n\n @commands.command(brief=\"apply for tester\")\n async def apply_tester(self, ctx, *, reason=None):\n if not reason:\n return await ctx.send(\"Give us a reason why please.\")\n\n embed = discord.Embed(\n title=f\"{ctx.author} requested to be a tester.\",\n description=f\"For the reason of {reason}\",\n color=random.randint(0, 16777215),\n )\n embed.set_footer(text=f\"User ID: {ctx.author.id}\")\n\n shadi = await self.bot.try_user(717822288375971900)\n jdjg = await self.bot.try_user(168422909482762240)\n\n await jdjg.send(embed=embed)\n await shadi.send(embed=embed)\n\n await ctx.send(\n \"the application went through to JDJG, please make your DMs open to JDJG so we can talk to you. 
Don't send it again.\"\n )\n\n @commands.command(brief=\"Lists the current prefixes that could be used.\")\n async def prefixes(self, ctx):\n prefixes = await self.bot.get_prefix(ctx.message)\n pag = commands.Paginator(prefix=\"\", suffix=\"\")\n for p in prefixes:\n pag.add_line(f\"{p}\")\n\n menu = utils.PrefixesEmbed(pag.pages, ctx=ctx, delete_after=True)\n\n await menu.send()\n\n @commands.command(brief=\"Lists the current used prefix\", aliases=[\"prefix\"])\n async def currentprefix(self, ctx):\n embed = discord.Embed(title=\"Current Prefix:\", description=f\"{ctx.prefix}\", color=random.randint(0, 16777215))\n await ctx.send(\n content=f\"Current Prefix: {ctx.prefix}\", embed=embed, allowed_mentions=discord.AllowedMentions.none()\n )\n\n @commands.command(brief=\"Gives the bot's uptime\")\n async def uptime(self, ctx):\n delta_uptime = discord.utils.utcnow() - self.bot.launch_time\n hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n\n embed = discord.Embed(\n title=f\"Up Since:\\n{discord.utils.format_dt(self.bot.launch_time, style = 'd')}\\n{discord.utils.format_dt(self.bot.launch_time, style = 'T')}\",\n description=f\"Days: {days}d, \\nHours: {hours}h, \\nMinutes: {minutes}m, \\nSeconds: {seconds}s\",\n color=random.randint(0, 16777215),\n )\n\n embed.set_author(name=f\"{self.bot.user}'s Uptime:\", icon_url=self.bot.user.display_avatar.url)\n\n await ctx.send(embed=embed)\n\n @commands.cooldown(1, 30, BucketType.user)\n @commands.command(brief=\"make a suggestion to the bot owner of a command to add\", aliases=[\"suggestion\"])\n async def suggest(self, ctx, *, args=None):\n if not args:\n return await ctx.send(\"You didn't give me a command to add to the suggestion.\")\n ctx.command.reset_cooldown(ctx)\n\n embed = discord.Embed(\n title=f\"New Suggestion requested by {ctx.author}\",\n description=f\"Suggestion: {args}\",\n timestamp=ctx.message.created_at,\n color=random.randint(0, 16777215),\n )\n\n embed.set_footer(text=f\"User ID: {ctx.author.id}\")\n embed.set_image(url=ctx.author.display_avatar.url)\n\n jdjg = await self.bot.try_user(168422909482762240)\n await jdjg.send(f\"New suggestion from {ctx.author}\", embed=embed)\n\n await ctx.send(\n \"Sent suggestion to JDJG! You agree to being Dmed about this suggestion or somehow contacted(it makes some things easier lol)\"\n )\n\n @commands.group(name=\"support\", invoke_without_command=True)\n async def support(self, ctx):\n\n await ctx.send_help(ctx.command)\n\n @support.command(brief=\"a command that Dms support help to JDJG\", name=\"dm\")\n async def support_dm(self, ctx, *, args=None):\n\n if not args:\n return await ctx.send(\"You need a reason why you want support.\")\n\n await ctx.send(\"sending support to JDJG, the message will be deleted when support is done\")\n\n embed = discord.Embed(title=f\"{args}\", timestamp=ctx.message.created_at, color=random.randint(0, 16777215))\n\n embed.set_author(name=f\"Help Needed from {ctx.author}:\", icon_url=ctx.author.display_avatar.url)\n embed.set_footer(text=f\"{ctx.author.id} \\nSupport Mode: DM\")\n embed.set_thumbnail(url=\"https://i.imgur.com/lcND9Z2.png\")\n\n jdjg = await self.bot.try_user(168422909482762240)\n\n await jdjg.send(content=\"someone needs help! 
Remeber to delete when done with support.\", embed=embed)\n\n await ctx.send(f\"successfully sent to {jdjg}\")\n\n @support.command(brief=\"a command that sends support help to our log channel\", name=\"channel\")\n async def support_channel(self, ctx, *, args=None):\n\n if not args:\n return await ctx.send(\"You didn't give a reason that you need support\")\n\n embed = discord.Embed(title=f\"{args}\", timestamp=ctx.message.created_at, color=random.randint(0, 16777215))\n\n embed.set_author(name=f\"Help Needed from {ctx.author}:\", icon_url=ctx.author.display_avatar.url)\n\n embed.set_footer(text=f\"{ctx.author.id} \\nSupport Mode: Channel\")\n embed.set_thumbnail(url=\"https://i.imgur.com/lcND9Z2.png\")\n\n await self.bot.get_channel(855217084710912050).send(\n content=\"someone needs help! Remeber to delete when done with support.\", embed=embed\n )\n\n await ctx.send(\"successfully sent to the support channel!\")\n\n @commands.command(brief=\"information about donating\")\n async def donate(self, ctx):\n await ctx.send(\n \"JDBot is completly free, but if you would love to donate you should run owner to see the owner of the bot to contact them about suggesting an idea, or I guess donating stuff. Though this is my lobby of mine. Just please don't steal my hard work.\"\n )\n\n @commands.command(brief=\"Gives Information about JDBot\")\n async def about(self, ctx):\n\n embed = discord.Embed(\n title=\"About Bot\",\n description=\"Here you can view bot and author information\",\n timestamp=ctx.message.created_at,\n color=0xEB6D15,\n )\n\n embed.add_field(\n name=\"Author Information\",\n value=\"```This Bot is made by JDJG Inc. Official#3493(you can find out more about owners from the owner command).```\",\n inline=False,\n )\n\n embed.add_field(name=\"Bot Version\", value=\"```1.0.0```\")\n\n embed.add_field(name=\"Python Version:\", value=f\"```{platform.python_version()}```\")\n\n embed.add_field(name=\"Library\", value=\"```discord.py```\")\n\n embed.add_field(name=\"Discord.Py Version\", value=f\"```{discord.__version__}```\")\n\n embed.add_field(\n name=\"RAM Usage\", value=f\"```{(psutil.Process(os.getpid()).memory_full_info().rss / 1024**2):.2f} MB```\"\n )\n\n embed.add_field(name=\"Servers\", value=f\"```{len(self.bot.guilds)}```\")\n\n embed.add_field(name=\"Contributers\", value=\"```Shadi#9492 \\nMiddlle#8801 \\nDutchy#6127```\")\n\n embed.add_field(name=\"Sponsors\", value=\"```No current sponsors :(```\")\n\n embed.add_field(name=\"Source code Info:\", value=f\"```yaml\\n{utils.linecount()}```\", inline=False)\n\n embed.set_author(name=f\"{self.bot.user}\", icon_url=self.bot.user.display_avatar.url)\n\n embed.set_footer(\n text=\"Learn More from: \\nStats \\nOr Any Other Bot Commands \\nYou can Even Sponsor the Bot \\nIf you want to sponsor the bot DM me. 
\\nI hope I am not missing any contibutors or sponsors\"\n )\n\n embed.set_image(url=\"https://discord.c99.nl/widget/theme-4/347265035971854337.png\")\n await ctx.send(embed=embed)\n\n @commands.cooldown(1, 90, BucketType.user)\n @commands.command(\n brief=\"sends a emoji to me to review(a.k.a reviewed in the review channel, you will be Dmed if you failed or not)\"\n )\n async def emoji_save(self, ctx, *, emoji: typing.Optional[discord.PartialEmoji] = None):\n\n if not emoji:\n await ctx.send(\"That's not a valid emoji or isn't a custom emoji\")\n ctx.command.reset_cooldown(ctx)\n\n else:\n already_exists = False\n if emoji.id in [e.id for e in self.bot.emojis]:\n await ctx.send(\"That emoji was already added to the bot's emojis(sent it anyway)\")\n already_exists = True\n\n if not emoji.id in [e.id for e in self.bot.emojis]:\n await ctx.send(\"The emoji is now in the bot's emoji review channel\")\n\n embed = discord.Embed(title=\"Emoji Submission\", color=5565960)\n\n embed.add_field(\n name=\"Emoji\",\n value=f\"Regex: {emoji}\\nName:{emoji.name} \\nAnimated: {emoji.animated} \\nEmoji Exists in bot's emoji: {already_exists}\",\n )\n\n embed.set_author(name=f\"{ctx.author}\", icon_url=ctx.author.display_avatar.url)\n\n embed.set_image(url=f\"{emoji.url}\")\n embed.set_footer(text=f\"ID: {emoji.id}\")\n\n await self.bot.get_channel(855217084710912050).send(embed=embed)\n\n @commands.command(brief=\"gives you info if someone is a tester of the bot or not\")\n async def is_tester(self, ctx, *, user: typing.Optional[discord.User] = None):\n user = user or ctx.author\n truth = user.id in self.bot.testers\n\n if user.bot:\n return await ctx.send(f\"A bot can't be a tester. So {user} is not a tester\")\n\n if not truth:\n await ctx.send(f\"{user} is not in a tester.\")\n\n else:\n await ctx.send(f\"{user} is a tester\")\n\n @commands.command(brief=\"bug about massive Test\")\n async def test_bug(self, ctx):\n await ctx.send(\n f\"If you are a moderator please contact JDJG Inc. Official#3493, I made a mistake when doing the checks for just doing {self.bot.user.mention}, if you get a massive error or something wrong please contact me, thanks :D\"\n )\n\n @commands.command(brief=\"bot contribution credits\")\n async def credits(self, ctx):\n\n embed = discord.Embed(\n color=14352639, description=f\"```dartmern#7563 \\nDutchy#6127 \\nMiddlle#8801 \\nShadi#9492 \\nSoheab_#6240```\"\n )\n embed.set_author(name=f\"{self.bot.user} Bot Credits:\", icon_url=self.bot.user.display_avatar.url)\n embed.set_footer(\n text=\"Credits are done in abc order. \\nPlease don't randomly contact them unless they allow you to.\"\n )\n await ctx.send(embed=embed)\n\n\nasync def setup(bot):\n await bot.add_cog(Bot(bot))\n", "id": "2462256", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "cogs/Bot.py" } ]
0
cinaljess
[ { "content": "\"\"\"Read GAF file and allow ND Evidence codes.\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport sys\nfrom goatools.associations import read_gaf\nfrom goatools.base import dnld_gaf\n\ndef test_gaf_read(log=sys.stdout):\n \"\"\"Return GO associations from a GAF file. Download if necessary.\"\"\"\n # On 2017/04/10, there were 3 GO IDs with ND Evidence Codes:\n #\n # $ cut -f5,7 goa_human.gaf | grep ND | sort | uniq -c\n # 739 GO:0003674 ND\n # 484 GO:0005575 ND\n # 639 GO:0008150 ND\n\n # Example species_ids: goa_human mgi fb\n fin_gaf = dnld_gaf('goa_human', loading_bar=None)\n\n # Example 1: Read GAF\n go2ids = read_gaf(fin_gaf, go2geneids=True)\n num_gos_dflt = len(go2ids)\n log.write(\"Read {N} GOs with all default values\\n\\n\".format(N=num_gos_dflt))\n\n # Example 2: Read GAF using defaults (No NOT Qualifiers and no ND Evidence Codes)\n go2ids = read_gaf(fin_gaf, go2geneids=True, keep_ND=False, keep_NOT=False)\n log.write(\"Read {N} GOs; keepif is default in goatools.associations.read_gaf\\n\\n\".format(\n N=len(go2ids)))\n\n # Example 3: Read GAF allowing GOs with ND Evidence Codes\n go2ids = read_gaf(fin_gaf, go2geneids=True, keep_ND=True)\n log.write(\"Read {N} GOs; Allow ND Evidence codes\\n\\n\".format(N=len(go2ids)))\n\n # Example 4: Read GAF allowing all GOs, even those with NOT Qualifiers or ND Evidence Codes\n go2ids = read_gaf(fin_gaf, go2geneids=True, keep_ND=True, keep_NOT=True)\n log.write(\"Read {N} GOs; Allow ND Evidence codes and NOT Qualifiers\\n\\n\".format(N=len(go2ids)))\n\n\nif __name__ == '__main__':\n test_gaf_read()\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\n", "id": "7335084", "language": "Python", "matching_score": 2.379838466644287, "max_stars_count": 0, "path": "tests/test_read_gaf_allow_nd.py" }, { "content": "#!/usr/bin/env python3\n\"\"\"Tests that all evidence codes seen in NCBI's gene2go have description.\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2019, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport os\n\nfrom goatools.associations import dnld_ncbi_gene_file\nfrom goatools.evidence_codes import EvidenceCodes\nREPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../\")\n\ndef test_ev():\n \"\"\"Return GO associations from a GAF file. Download if necessary.\"\"\"\n evs = _get_evidencecodes('gene2go')\n obj = EvidenceCodes()\n missing = evs.difference(obj.code2name)\n assert not missing, 'MISSING({EV})'.format(EV=missing)\n\ndef _get_evidencecodes(fin_gene2go):\n \"\"\"Get all evidence codes and qualifiers.\"\"\"\n evs = set()\n fin_gene2go = os.path.join(REPO, 'gene2go')\n dnld_ncbi_gene_file(fin_gene2go, force_dnld=False, loading_bar=False)\n with open(fin_gene2go) as ifstrm:\n for line in ifstrm:\n if line[0] != '#': # Line contains data. Not a comment\n line = line.rstrip() # chomp\n flds = line.split('\\t')\n if len(flds) >= 5:\n # taxid_curr, geneid, go_id, evidence, qualifier = flds[:5]\n evidence = flds[3]\n assert len(evidence) >= 2, flds\n evs.add(evidence)\n print('{N} evidence codes in {FIN}'.format(N=len(evs), FIN=fin_gene2go))\n return evs\n\n\nif __name__ == '__main__':\n test_ev()\n\n# Copyright (C) 2016-2019, <NAME>, <NAME>. 
All rights reserved.\n", "id": "6136932", "language": "Python", "matching_score": 1.8989077806472778, "max_stars_count": 0, "path": "tests/test_rpt_gene2go_evidencecodes.py" }, { "content": "\"\"\"Read a GO Association File (GAF) and store the data in a Python object.\n\n Annotations available from the Gene Ontology Consortium:\n http://geneontology.org/page/download-annotations\n\n GAF format:\n http://geneontology.org/page/go-annotation-file-formats\n\"\"\"\n\nimport sys\nimport os\nimport re\nimport collections as cx\nfrom goatools.evidence_codes import EvidenceCodes\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\n# pylint: disable=broad-except,too-few-public-methods,line-too-long\nclass GafReader(object):\n \"\"\"Reads a Gene Annotation File (GAF). Returns a Python object.\"\"\"\n\n exp_kwdct = set(['allow_missing_symbol'])\n\n def __init__(self, filename=None, hdr_only=False, prt=sys.stdout, **kws):\n # kws: allow_missing_symbol\n self.kws = {k:v for k, v in kws.items() if k in self.exp_kwdct}\n self.filename = filename\n self.evobj = EvidenceCodes()\n # Initialize associations and header information\n self.hdr = None\n self.datobj = None\n self.associations = self._init_assn(filename, hdr_only, prt) if filename is not None else []\n\n def read_gaf(self, **kws):\n \"\"\"Read Gene Association File (GAF). Return data.\"\"\"\n # Simple associations\n id2gos = cx.defaultdict(set)\n # keyword arguments for choosing which GO IDs to keep\n # Optional detailed associations split by taxid and having both ID2GOs & GO2IDs\n taxid2asscs = kws.get('taxid2asscs', None)\n b_geneid2gos = not kws.get('go2geneids', False)\n evs = kws.get('evidence_set', None)\n eval_nd = self._get_nd(kws.get('keep_ND', False))\n eval_not = self._get_not(kws.get('keep_NOT', False))\n # Optionally specify a subset of GOs based on their evidence.\n # By default, return id2gos. User can cause go2geneids to be returned by:\n # >>> read_ncbi_gene2go(..., go2geneids=True\n for ntgaf in self.associations:\n if eval_nd(ntgaf) and eval_not(ntgaf):\n if evs is None or ntgaf.Evidence_Code in evs:\n geneid = ntgaf.DB_ID\n go_id = ntgaf.GO_ID\n if b_geneid2gos:\n id2gos[geneid].add(go_id)\n else:\n id2gos[go_id].add(geneid)\n if taxid2asscs is not None:\n if ntgaf.Taxon:\n taxid = ntgaf.Taxon[0]\n taxid2asscs[taxid]['ID2GOs'][geneid].add(go_id)\n taxid2asscs[taxid]['GO2IDs'][go_id].add(geneid)\n return id2gos # return simple associations\n\n @staticmethod\n def _get_nd(keep_nd):\n \"\"\"Allow GAF values always or never.\"\"\"\n if keep_nd:\n return lambda nt: True\n return lambda nt: nt.Evidence_Code != 'ND'\n\n @staticmethod\n def _get_not(keep_not):\n \"\"\"Allow GAF values always or never.\"\"\"\n if keep_not:\n return lambda nt: True\n return lambda nt: 'NOT' not in nt.Qualifier\n\n def _init_assn(self, fin_gaf, hdr_only, prt):\n \"\"\"Read GAF file. Store annotation data in a list of namedtuples.\"\"\"\n nts = self._read_gaf_nts(fin_gaf, hdr_only)\n # GAF file has been read\n if prt:\n prt.write(\" READ {N:9,} associations: {FIN}\\n\".format(N=len(nts), FIN=fin_gaf))\n # If there are illegal GAF lines ...\n if self.datobj:\n if self.datobj.ignored or self.datobj.illegal_lines:\n self.datobj.prt_error_summary(fin_gaf)\n return self.evobj.sort_nts(nts, 'Evidence_Code')\n\n def _read_gaf_nts(self, fin_gaf, hdr_only):\n \"\"\"Read GAF file. 
Store annotation data in a list of namedtuples.\"\"\"\n nts = []\n ver = None\n hdrobj = GafHdr()\n datobj = None\n lnum = line = -1\n try:\n with open(fin_gaf) as ifstrm:\n for lnum, line in enumerate(ifstrm, 1):\n # Read header\n if datobj is None:\n if line[0] == '!':\n if ver is None and line[1:13] == 'gaf-version:':\n ver = line[13:].strip()\n hdrobj.chkaddhdr(line)\n else:\n self.hdr = hdrobj.get_hdr()\n if hdr_only:\n return nts\n datobj = GafData(ver, **self.kws)\n # Read data\n if datobj is not None and line[0] != '!':\n # print(lnum, line)\n ntgaf = datobj.get_ntgaf(line, lnum)\n if ntgaf is not None:\n nts.append(ntgaf)\n else:\n datobj.ignored.append((lnum, line))\n except Exception as inst:\n import traceback\n traceback.print_exc()\n sys.stderr.write(\"\\n **FATAL: {MSG}\\n\\n\".format(MSG=str(inst)))\n sys.stderr.write(\"**FATAL: {FIN}[{LNUM}]:\\n{L}\".format(FIN=fin_gaf, L=line, LNUM=lnum))\n if datobj is not None:\n datobj.prt_line_detail(sys.stdout, line)\n sys.exit(1)\n self.datobj = datobj\n return nts\n\n def prt_summary_anno2ev(self, prt=sys.stdout):\n \"\"\"Print annotation/evidence code summary.\"\"\"\n ctr = cx.Counter()\n for ntgaf in self.associations:\n evidence_code = ntgaf.Evidence_Code\n if 'NOT' not in ntgaf.Qualifier:\n ctr[evidence_code] += 1\n elif 'NOT' in ntgaf.Qualifier:\n ctr[\"NOT {EV}\".format(EV=ntgaf.Evidence_Code)] += 1\n else:\n raise Exception(\"UNEXPECTED INFO\")\n self.evobj.prt_ev_cnts(ctr, prt)\n\n\nclass GafData(object):\n \"\"\"Extracts GAF fields from a GAF line.\"\"\"\n\n spec_req1 = [0, 1, 2, 4, 6, 8, 11, 13, 14]\n\n req_str = [\"REQ\", \"REQ\", \"REQ\", \"\", \"REQ\", \"REQ\", \"REQ\", \"\", \"REQ\", \"\", \"\",\n \"REQ\", \"REQ\", \"REQ\", \"REQ\", \"\", \"\"]\n\n gafhdr = [ # Col Req? Cardinality Example\n # --- -------- -------------- -----------------\n 'DB', # 0 required 1 UniProtKB\n 'DB_ID', # 1 required 1 P12345\n 'DB_Symbol', # 2 required 1 PHO3\n 'Qualifier', # 3 optional 0 or greater NOT\n 'GO_ID', # 4 required 1 GO:0003993\n 'DB_Reference', # 5 required 1 or greater PMID:2676709\n 'Evidence_Code', # 6 required 1 IMP\n 'With_From', # 7 optional 0 or greater GO:0000346\n 'Aspect', # 8 required 1 F\n 'DB_Name', # 9 optional 0 or 1 Toll-like receptor 4\n 'DB_Synonym', # 10 optional 0 or greater hToll|Tollbooth\n 'DB_Type', # 11 required 1 protein\n 'Taxon', # 12 required 1 or 2 taxon:9606\n 'Date', # 13 required 1 20090118\n 'Assigned_By', # 14 required 1 SGD\n ]\n\n # Col Required Cardinality Example\n gafhdr2 = [ # --- -------- ------------ -------------------\n 'Annotation_Extension', # 15 optional 0 or greater part_of(CL:0000576)\n 'Gene_Product_Form_ID', # 16 optional 0 or 1 UniProtKB:P12345-2\n ]\n\n gaf_columns = {\n \"2.1\" : gafhdr + gafhdr2, # !gaf-version: 2.1\n \"2.0\" : gafhdr + gafhdr2, # !gaf-version: 2.0\n \"1.0\" : gafhdr} # !gaf-version: 1.0\n\n # Expected numbers of columns for various versions\n gaf_numcol = {\n \"2.1\" : 17,\n \"2.0\" : 17,\n \"1.0\" : 15}\n\n # Expected values for a Qualifier\n exp_qualifiers = set([\n 'not', 'contributes_to', 'colocalizes_with',\n # Although thee go not appear in:\n # http://geneontology.org/page/go-annotation-conventions#qual\n # they do appear in more than one July 2018 GAFs:\n # 'enables', 'involved_in', 'part_of', \n ])\n\n def __init__(self, ver, allow_missing_symbol=False):\n self.ver = ver\n self.ntgafobj = cx.namedtuple(\"ntgafobj\", \" \".join(self.gaf_columns[ver]))\n self.req1 = self.spec_req1 if not allow_missing_symbol else [i for i in self.spec_req1 if i != 
2]\n self.exp_mincol = 15 # Last required field is at the 15th column\n # Store information about illegal lines seen in a GAF file from the field\n self.ignored = [] # Illegal GAF lines that are ignored (e.g., missing an ID)\n self.illegal_lines = cx.defaultdict(list) # GAF lines that are missing information (missing taxon)\n\n def get_ntgaf(self, line, lnum):\n \"\"\"Return namedtuple filled with data.\"\"\"\n flds = self.split_line(line)\n num_flds = len(flds)\n if num_flds >= self.exp_mincol:\n return self._get_ntgaf(flds, num_flds, lnum)\n\n @staticmethod\n def split_line(line):\n \"\"\"Split line into field values.\"\"\"\n line = re.split('\\t', line)\n line[-1] = line[-1].rstrip('\\r\\n')\n return line\n\n def _get_ntgaf(self, flds, num_flds, lnum):\n \"\"\"Convert fields from string to preferred format for GAF ver 2.1 and 2.0.\"\"\"\n # Cardinality\n is_set = False\n is_list = True\n qualifiers = [t.lower() for t in self._rd_fld_vals(\"Qualifier\", flds[3], is_set)]\n db_reference = self._rd_fld_vals(\"DB_Reference\", flds[5], is_set, 1)\n with_from = self._rd_fld_vals(\"With_From\", flds[7], is_set)\n db_name = self._rd_fld_vals(\"DB_Name\", flds[9], is_set, 0) # , 1)\n db_synonym = self._rd_fld_vals(\"DB_Synonym\", flds[10], is_set)\n taxons = self._rd_fld_vals(\"Taxon\", flds[12], is_list, 1, 2)\n if not self._chk_qty_eq_1(flds):\n return None\n # Additional Formatting\n taxons = self._do_taxons(taxons, flds, lnum)\n self._chk_qualifier(qualifiers, flds, lnum)\n # Create list of values\n gafvals = [\n flds[0], # 0 DB\n flds[1], # 1 DB_ID\n flds[2], # 2 DB_Symbol\n qualifiers, # 3 Qualifier\n flds[4], # 4 GO_ID\n db_reference, # 5 DB_Reference\n flds[6], # 6 Evidence_Code\n with_from, # 7 With_From\n flds[8], # 8 Aspect\n db_name, # 9 DB_Name\n db_synonym, # 10 DB_Synonym\n flds[11], # 11 DB_Type\n taxons, # 12 Taxon\n flds[12], # 13 Date\n flds[13]] # 14 Assigned_By\n # Version 2.x has these additional fields not found in v1.0\n if self.ver[0] == '2':\n # i=15) Annotation_Extension: optional 0 or greater; Ex: part_of(CL:0000576)\n if num_flds > 15:\n gafvals.append(self._rd_fld_vals(\"Annotation_Extension\", flds[15], is_set))\n else:\n gafvals.append(None)\n # i=16) Gene_Product_Form_ID: optional 0 or 1; Ex: UniProtKB:P12345-2\n if num_flds > 16:\n #self._prt_line_detail(sys.stdout, flds, lnum)\n gafvals.append(self._rd_fld_vals(\"Gene_Product_Form_ID\", flds[16], is_set))\n else:\n gafvals.append(None)\n return self.ntgafobj._make(gafvals)\n\n @staticmethod\n def _rd_fld_vals(name, val, set_list_ft=True, qty_min=0, qty_max=None):\n \"\"\"Further split a GAF value within a single field.\"\"\"\n if not val and qty_min == 0:\n return [] if set_list_ft else set()\n vals = val.split('|') # Use a pipe to separate entries\n num_vals = len(vals)\n assert num_vals >= qty_min, \\\n \"FIELD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}\".format(F=name, Q=qty_min, V=vals)\n if qty_max is not None:\n assert num_vals <= qty_max, \\\n \"FIELD({F}): MAX QUANTITY({Q}) EXCEEDED: {V}\".format(F=name, Q=qty_max, V=vals)\n return vals if set_list_ft else set(vals)\n\n def _chk_qualifier(self, qualifiers, flds, lnum):\n \"\"\"Check that qualifiers are expected values.\"\"\"\n # http://geneontology.org/page/go-annotation-conventions#qual\n for qual in qualifiers:\n if qual not in self.exp_qualifiers:\n errname = 'UNEXPECTED QUALIFIER({QUAL})'.format(QUAL=qual)\n self.illegal_lines[errname].append((lnum, \"\\t\".join(flds)))\n\n def prt_line_detail(self, prt, line):\n \"\"\"Print line header and values in a 
readable format.\"\"\"\n values = self.split_line(line)\n self._prt_line_detail(prt, values)\n\n def _prt_line_detail(self, prt, values, lnum=\"\"):\n \"\"\"Print header and field values in a readable format.\"\"\"\n data = zip(self.req_str, self.ntgafobj._fields, values)\n txt = [\"{:2}) {:3} {:13} {}\".format(i, req, hdr, val) for i, (req, hdr, val) in enumerate(data)]\n prt.write(\"{LNUM}\\n{TXT}\\n\".format(LNUM=lnum, TXT=\"\\n\".join(txt)))\n\n def _chk_qty_eq_1(self, flds):\n \"\"\"Check that these fields have only one value: required 1.\"\"\"\n for col in self.req1:\n if not flds[col]:\n # sys.stderr.write(\"**ERROR: UNEXPECTED REQUIRED VAL({V}) FOR COL({R}):{H}: \".format(\n # V=flds[col], H=self.gafhdr[col], R=col))\n # sys.stderr.write(\"{H0}({DB}) {H1}({ID})\\n\".format(\n # H0=self.gafhdr[0], DB=flds[0], H1=self.gafhdr[1], ID=flds[1]))\n return False # Check failed\n return True # Check passed\n\n def _do_taxons(self, taxons, flds, lnum):\n \"\"\"Taxon\"\"\"\n taxons_str = [v.split(':')[1] for v in taxons] # strip \"taxon:\"\n taxons_int = [int(s) for s in taxons_str if s]\n # taxons = [int(v[6:]) for v in taxons] # strip \"taxon:\"\n num_taxons = len(taxons_int)\n if taxons_int:\n assert num_taxons == 1 or num_taxons == 2\n else:\n self.illegal_lines['ILLEGAL TAXON'].append((lnum, \"\\t\".join(flds)))\n return taxons_int\n\n def prt_error_summary(self, fin_gaf):\n \"\"\"Print a summary about the GAF file that was read.\"\"\"\n # Get summary of error types and their counts\n errcnts = []\n if self.ignored:\n errcnts.append(\" {N:9,} IGNORED associations\\n\".format(N=len(self.ignored)))\n if self.illegal_lines:\n for err_name, errors in self.illegal_lines.items():\n errcnts.append(\" {N:9,} {ERROR}\\n\".format(N=len(errors), ERROR=err_name))\n # Save error details into a log file\n fout_log = self._wrlog_details_illegal_gaf(fin_gaf, errcnts)\n sys.stdout.write(\" WROTE GAF ERROR LOG: {LOG}:\\n\".format(LOG=fout_log))\n for err_cnt in errcnts:\n sys.stdout.write(err_cnt)\n\n def _wrlog_details_illegal_gaf(self, fin_gaf, err_cnts):\n \"\"\"Print details regarding illegal GAF lines seen to a log file.\"\"\"\n fout_log = \"{}.log\".format(fin_gaf)\n gaf_base = os.path.basename(fin_gaf)\n with open(fout_log, 'w') as prt:\n prt.write(\"ILLEGAL GAF ERROR SUMMARY:\\n\\n\")\n for err_cnt in err_cnts:\n prt.write(err_cnt)\n prt.write(\"\\n\\nILLEGAL GAF ERROR DETAILS:\\n\\n\")\n for lnum, line in self.ignored:\n prt.write(\"**WARNING: GAF LINE IGNORED: {FIN}[{LNUM}]:\\n{L}\\n\".format(\n FIN=gaf_base, L=line, LNUM=lnum))\n self.prt_line_detail(prt, line)\n prt.write(\"\\n\\n\")\n for error, lines in self.illegal_lines.items():\n for lnum, line in lines:\n prt.write(\"**WARNING: GAF LINE ILLEGAL({ERR}): {FIN}[{LNUM}]:\\n{L}\\n\".format(\n ERR=error, FIN=gaf_base, L=line, LNUM=lnum))\n self.prt_line_detail(prt, line)\n prt.write(\"\\n\\n\")\n return fout_log\n\n\nclass GafHdr(object):\n \"\"\"Used to build a GAF header.\"\"\"\n\n cmpline = re.compile(r'^!(\\w[\\w\\s-]+:.*)$')\n\n def __init__(self):\n self.gafhdr = []\n\n def get_hdr(self):\n \"\"\"Return GAF header data as a string paragragh.\"\"\"\n return \"\\n\".join(self.gafhdr)\n\n def chkaddhdr(self, line):\n \"\"\"If this line contains desired header info, save it.\"\"\"\n mtch = self.cmpline.search(line)\n if mtch:\n self.gafhdr.append(mtch.group(1))\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. 
All rights reserved.\"\n", "id": "3146074", "language": "Python", "matching_score": 3.017911911010742, "max_stars_count": 0, "path": "goatools/anno/gaf_reader.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Tests read a GAF with missing (required) DB_Symbol text.\"\"\"\n\nimport os\nfrom goatools.associations import read_gaf\n\nREPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")\n\n\ndef test_missingsym():\n \"\"\"Tests read a GAF with missing (required) DB_Symbol text.\"\"\"\n # Original gaf file (mgi.gaf) was reduced\n fin_gaf = \"tests/data/gaf_missingsym.mgi\"\n # Test that gene products that are missing the required DB_Symbol are ignored\n gene2gos = read_gaf(os.path.join(REPO, fin_gaf))\n assert len(gene2gos) == 16, len(gene2gos)\n assert 'MGI:3643263' not in gene2gos\n assert 'P84751' not in gene2gos\n # Tests saving annotation, even if missing required DB_Symbol\n gene2gos = read_gaf(os.path.join(REPO, fin_gaf), allow_missing_symbol=True)\n assert len(gene2gos) == 18\n assert 'MGI:3643263' in gene2gos\n assert 'P84751' in gene2gos\n\n\nif __name__ == '__main__':\n test_missingsym()\n", "id": "1425610", "language": "Python", "matching_score": 0.7014433741569519, "max_stars_count": 0, "path": "tests/test_gaf_missingsym.py" }, { "content": "\"\"\"Product gene lists with ASCII art sections and GO IDs for each gene product.\"\"\"\n\nimport sys\nimport collections as cx\nfrom goatools.associations import get_b2aset\nfrom goatools.rpt.goea_nt_xfrm import MgrNtGOEAs\nfrom goatools.grouper.grprobj import Grouper\nfrom goatools.grouper.sorter import Sorter\nfrom goatools.grouper.wrxlsx import WrXlsxSortedGos\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\n# pylint: disable=too-many-instance-attributes\nclass AArtGeneProductSetsOne(object):\n \"\"\"Product gene lists with ASCII art sections and GO IDs for each gene product.\"\"\"\n # nts need: nt.GO and nt.study_items\n\n def __init__(self, name, goea_results, obj):\n self.name = name\n self.datobj = obj # AArtGeneProductSetsAll\n _ini = _Init(obj)\n self.go2nt = _ini.get_go2nt(goea_results)\n _grprobj = Grouper(\"grp\", self.go2nt, obj.hdrobj, obj.grprdflt.gosubdag, go2nt=self.go2nt)\n self.sortobj = Sorter(_grprobj)\n self.sec2gos = _ini.get_sec2gos(self.sortobj)\n self.sec2chr = cx.OrderedDict([(s, obj.sec2chr[s]) for s in self.sec2gos.keys()])\n self.go2chrs = _ini.get_go2chrs(self.sec2gos, self.sec2chr)\n self.gene2gos = _ini.get_gene2gos(self.go2nt)\n self.gene2section2gos = _ini.get_gene2section2gos(self.gene2gos, self.sec2gos)\n self.gene2aart = _ini.get_gene2aart(self.gene2section2gos, self.sec2chr)\n\n def prt_report_grp0(self, prt=sys.stdout):\n \"\"\"Print full GO/gene report without grouping.\"\"\"\n summaryline = self.str_summaryline()\n kws_grp = {'use_sections':False,\n 'hdrgo_prt':False,\n 'sortby':lambda nt: [-1*nt.dcnt, nt.depth]}\n # Print grouped GO IDs\n prt.write(\"{SUMMARY}\\n\".format(SUMMARY=summaryline))\n self.prt_gos_grouped(sys.stdout, **kws_grp)\n # genes\n genes = sorted(self.gene2gos.keys())\n prt.write(\"\\n\\n{SUMMARY}\\n\\n\".format(SUMMARY=summaryline))\n self.prt_gene_aart(genes, prt)\n # Sort genes\n prt.write(\"\\n\\n{SUMMARY}\\n\\n\".format(SUMMARY=summaryline))\n self.prt_gene_aart_details(genes, prt)\n return (self.name, self.get_section_marks())\n\n def prt_report_grp1(self, prt=sys.stdout, **kws_grp):\n \"\"\"Print full GO/gene report with grouping.\"\"\"\n summaryline = self.str_summaryline()\n # Print grouped 
GO IDs\n prt.write(\"{SUMMARY}\\n\".format(SUMMARY=summaryline))\n self.prt_gos_grouped(prt, **kws_grp)\n # genes\n genes = sorted(self.gene2gos.keys())\n prt.write(\"\\n\\n{SUMMARY}\\n\\n\".format(SUMMARY=summaryline))\n self.prt_section_key(prt)\n self.prt_gene_aart(genes, prt)\n # Sort genes\n prt.write(\"\\n\\n{SUMMARY}\\n\\n\".format(SUMMARY=summaryline))\n self.prt_gene_aart_details(genes, prt)\n return (self.name, self.get_section_marks())\n\n def str_summaryline(self):\n \"\"\"Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase.\"\"\"\n return \"{N} GOs, {M} genes described by {X} of {Y} sections {NM}\".format(\n N=len(self.go2nt), M=len(self.gene2gos),\n X=len(self.sec2chr), Y=len(self.datobj.sec2chr), NM=self.name)\n\n def prt_gos_grouped(self, prt, **kws_grp):\n \"\"\"Print grouped GO list.\"\"\"\n prtfmt = self.datobj.kws['fmtgo']\n wrobj = WrXlsxSortedGos(self.name, self.sortobj)\n # Keyword arguments: control content: hdrgo_prt section_prt top_n use_sections\n desc2nts = self.sortobj.get_desc2nts(**kws_grp)\n wrobj.prt_txt_desc2nts(prt, desc2nts, prtfmt)\n\n def prt_gos_flat(self, prt):\n \"\"\"Print flat GO list.\"\"\"\n prtfmt = self.datobj.kws['fmtgo']\n _go2nt = self.sortobj.grprobj.go2nt\n go2nt = {go:_go2nt[go] for go in self.go2nt}\n prt.write(\"\\n{N} GO IDs:\\n\".format(N=len(go2nt)))\n _sortby = self._get_sortgo()\n for ntgo in sorted(go2nt.values(), key=_sortby):\n prt.write(prtfmt.format(**ntgo._asdict()))\n #print(\"FFFMMMTTT\", prtfmt)\n\n def _get_sortgo(self):\n \"\"\"Get function for sorting GO terms in a list of namedtuples.\"\"\"\n if 'sortgo' in self.datobj.kws:\n return self.datobj.kws['sortgo']\n return self.datobj.grprdflt.gosubdag.prt_attr['sort'] + \"\\n\"\n\n def prt_gene_aart(self, geneids, prt=sys.stdout):\n \"\"\"For each gene, print ASCII art which represents its associated GO IDs.\"\"\"\n patgene = self.datobj.kws[\"fmtgene\"]\n itemid2name = self.datobj.kws.get(\"itemid2name\")\n prt.write(\"\\n{HDR}\\n\".format(HDR=self.str_hdr()))\n for geneid in geneids:\n symbol = \"\" if itemid2name is None else itemid2name.get(geneid, \"\")\n prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol))\n\n def prt_gene_aart_details(self, geneids, prt=sys.stdout):\n \"\"\"For each gene, print ASCII art which represents its associated GO IDs.\"\"\"\n _go2nt = self.sortobj.grprobj.go2nt\n patgene = self.datobj.kws[\"fmtgene2\"]\n patgo = self.datobj.kws[\"fmtgo2\"]\n itemid2name = self.datobj.kws.get(\"itemid2name\")\n chr2i = self.datobj.get_chr2idx()\n for geneid in geneids:\n gos_gene = self.gene2gos[geneid]\n symbol = \"\" if itemid2name is None else itemid2name.get(geneid, \"\")\n prt.write(\"\\n\")\n prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol))\n go2nt = {go:(_go2nt[go], \"\".join(self.go2chrs[go])) for go in gos_gene}\n for ntgo, abc in sorted(go2nt.values(),\n key=lambda t: [chr2i[t[1][:1]], t[0].NS, -1*t[0].dcnt]):\n prt.write(\"{ABC} \".format(ABC=abc))\n prt.write(patgo.format(**ntgo._asdict()))\n\n def prt_section_key(self, prt=sys.stdout):\n \"\"\"Print the section name and its alias.\"\"\"\n for section_name, letter in self.datobj.sec2chr.items():\n mrk = '*' if section_name in self.sec2chr else \"\"\n prt.write(\"{M:1} {ABC} {SECT}\\n\".format(M=mrk, ABC=letter, SECT=section_name))\n\n def str_hdr(self):\n \"\"\"Return a string representing the section headers: \"\"\"\n return \"\".join([c for _, c in self.sec2chr.items()])\n\n def get_section_marks(self):\n \"\"\"For 
each section in AArtGeneProducts, return '*' or \"\" .\"\"\"\n return [abc if s in self.sec2chr else \".\" for s, abc in self.datobj.sec2chr.items()]\n\n def get_gene2binvec(self):\n \"\"\"Return a boolean vector for each gene representing GO section membership.\"\"\"\n _sec2chr = self.sec2chr\n return {g:[s in s2gos for s in _sec2chr] for g, s2gos in self.gene2section2gos.items()}\n\n\nclass _Init(object):\n\n def __init__(self, objaartall):\n self.objaartall = objaartall # AArtGeneProductSetsAll\n\n def get_go2nt(self, goea_results):\n \"\"\"Return go2nt with added formatted string versions of the P-values.\"\"\"\n go2obj = self.objaartall.grprdflt.gosubdag.go2obj\n # Add string version of P-values\n goea_nts = MgrNtGOEAs(goea_results).get_nts_strpval()\n return {go2obj[nt.GO].id:nt for nt in goea_nts if nt.GO in go2obj}\n\n @staticmethod\n def get_sec2gos(sortobj):\n \"\"\"Initialize section_name2goids.\"\"\"\n sec_gos = []\n for section_name, nts in sortobj.get_desc2nts_fnc(hdrgo_prt=True)['sections']:\n sec_gos.append((section_name, set(nt.GO for nt in nts)))\n return cx.OrderedDict(sec_gos)\n\n @staticmethod\n def get_gene2gos(go2nt):\n \"\"\"Create a gene product to GO set dict.\"\"\"\n gene2gos = cx.defaultdict(set)\n nt0 = next(iter(go2nt.values()))\n b_str = isinstance(nt0.study_items, str)\n # print(\"NNNNTTTT\", nt0)\n for goid, ntgo in go2nt.items():\n study_items = ntgo.study_items.split(', ') if b_str else ntgo.study_items\n for geneid in study_items:\n gene2gos[geneid].add(goid)\n if b_str:\n b_set = set(isinstance(g.isdigit(), int) for g in nt0.study_items.split(', '))\n if b_set == set([True]):\n return {int(g):gos for g, gos in gene2gos.items()}\n return {g:gos for g, gos in gene2gos.items()}\n\n @staticmethod\n def get_go2chrs(sec2gos, sec2chr):\n \"\"\"Dict: given a GO return a set of letters representing it's section membership(s).\"\"\"\n go2chrs = {}\n for goid, sections in get_b2aset(sec2gos).items():\n go2chrs[goid] = set(sec2chr[s] for s in sections)\n return go2chrs\n\n @staticmethod\n def get_gene2aart(gene2section2gos, sec2chr):\n \"\"\"Return a string for each gene representing GO section membership.\"\"\"\n geneid2str = {}\n for geneid, section2gos_gene in gene2section2gos.items():\n letters = [abc if s in section2gos_gene else \".\" for s, abc in sec2chr.items()]\n geneid2str[geneid] = \"\".join(letters)\n return geneid2str\n\n @staticmethod\n def get_gene2section2gos(gene2gos, sec2gos):\n \"\"\"Get a list of section aliases for each gene product ID.\"\"\"\n gene2section2gos = {}\n for geneid, gos_gene in gene2gos.items():\n section2gos = {}\n for section_name, gos_sec in sec2gos.items():\n gos_secgene = gos_gene.intersection(gos_sec)\n if gos_secgene:\n section2gos[section_name] = gos_secgene\n gene2section2gos[geneid] = section2gos\n return gene2section2gos\n\n\n# Copyright (C) 2016-2018, DV Klopfenstein, <NAME>, All rights reserved.\n", "id": "4763082", "language": "Python", "matching_score": 4.395522117614746, "max_stars_count": 1, "path": "goatools/grouper/aart_geneproducts_one.py" }, { "content": "\"\"\"Compare two or more sets of GO IDs. Best done using sections.\n\nUsage:\n compare_gos.py [GO_FILE] ...\n compare_gos.py [GO_FILE] ... 
[options]\n\nOptions:\n -h --help show this help message and exit\n\n -s <sections.txt> --sections=<sections.txt> Sections file for grouping\n -S <sections module str> Python module with SECTIONS variable\n\n -o <file.txt>, --ofile=<file.txt> write comparison of GO IDs into ASCII file\n --xlsx=<file.xlsx> write comparison of GO IDs into an xlsx file\n -v --verbose Print sections as GO headers followed by each header's user GOs\n\n --obo=<file.obo> Ontologies in obo file [default: go-basic.obo].\n --slims=<file.obo> GO slims in obo file [default: goslim_generic.obo].\n\n --gaf=<file.gaf> Annotations from a gaf file\n --gene2go=<gene2go> Annotations from a gene2go file downloaded from NCBI\n\n\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2019, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\nimport os\nimport sys\nfrom collections import namedtuple\n# from collections import OrderedDict\n\nfrom goatools.base import get_godag\nfrom goatools.associations import get_tcntobj\nfrom goatools.godag.relationship_str import RelationshipStr\n\nfrom goatools.cli.docopt_parse import DocOptParse\nfrom goatools.cli.gos_get import GetGOs\nfrom goatools.cli.grouped import Grouped\n\nfrom goatools.gosubdag.gosubdag import GoSubDag\nfrom goatools.gosubdag.rpt.wr_xlsx import GoDepth1LettersWr\nfrom goatools.grouper.sorter import Sorter\nfrom goatools.grouper.wrxlsx import WrXlsxSortedGos\n\n\n# pylint: disable=too-few-public-methods\nclass CompareGOsCli(object):\n \"\"\"Class for command-line interface for creating GO term diagrams\"\"\"\n\n kws_dict = set(['GO_FILE',\n 'sections', 'S',\n 'obo', 'slims',\n 'ofile', 'xlsx',\n 'gaf', 'gene2go', 'taxid',\n ])\n kws_set = set(['verbose'])\n\n # Print fields to exclude, unless verbose is used\n excl_flds = {'level', 'reldepth', 'alt', 'D1', 'childcnt',\n 'format_txt', 'num_usrgos', 'is_hdrgo', 'is_usrgo', 'hdr_idx', 'hdr1usr01',\n 'REL', 'REL_short', 'rel', 'id'}\n\n def __init__(self, **kws):\n _objdoc = DocOptParse(__doc__, self.kws_dict, self.kws_set)\n self.kws = _objdoc.get_docargs(prt=None) if not kws else kws\n self.godag = get_godag(self.kws.get('obo'), prt=sys.stdout,\n loading_bar=False, optional_attrs=['relationship'])\n _ini = _Init(self.godag)\n self.go_ntsets = _ini.get_go_ntsets(self.kws.get('GO_FILE'))\n self.go_all = set.union(*[nt.go_set for nt in self.go_ntsets])\n _tcntobj = _ini.get_tcntobj(self.go_all, **self.kws) # Gets TermCounts or None\n self.gosubdag = GoSubDag(self.go_all, self.godag, True, tcntobj=_tcntobj, prt=sys.stdout)\n self.objgrpd = _ini.get_grouped(self.go_ntsets, self.go_all, self.gosubdag, **self.kws)\n # KWS: sortby hdrgo_sortby section_sortby\n\n def write(self, fout_xlsx=None, fout_txt=None, verbose=False):\n \"\"\"Command-line interface for go_draw script.\"\"\"\n sortby = self._get_fncsortnt(self.objgrpd.grprobj.gosubdag.prt_attr['flds'])\n kws_sort = {'sortby' if verbose else 'section_sortby': sortby}\n sortobj = Sorter(self.objgrpd.grprobj, **kws_sort)\n # KWS: hdrgo_prt=True section_prt=None top_n=None use_sections=True\n # RET: {sortobj, sections, hdrgo_prt} or {sortobj flat hdrgo_prt}\n desc2nts = sortobj.get_desc2nts_fnc(\n hdrgo_prt=verbose,\n section_prt=True,\n top_n=None,\n use_sections=True)\n # print('FFFF', desc2nts['flds'])\n # Write user GO IDs in sections\n objgowr = WrXlsxSortedGos(\"init\", sortobj, self.objgrpd.ver_list)\n if fout_xlsx is not None:\n kws_xlsx = {'shade_hdrgos':verbose}\n if not verbose:\n kws_xlsx['prt_flds'] = [f for f in 
desc2nts['flds'] if f not in self.excl_flds]\n objgowr.wr_xlsx_nts(fout_xlsx, desc2nts, **kws_xlsx)\n fout_desc = '{BASE}_desc.txt'.format(BASE=os.path.splitext(fout_xlsx)[0])\n self._wr_ver_n_key(fout_desc, verbose)\n if fout_txt is not None:\n self._wr_txt_nts(fout_txt, desc2nts, objgowr, verbose)\n if fout_xlsx is None and fout_txt is None:\n self._prt_ver_n_key(sys.stdout, verbose)\n prtfmt = self._get_prtfmt(objgowr, verbose)\n summary_dct = objgowr.prt_txt_desc2nts(sys.stdout, desc2nts, prtfmt)\n self._prt_ver_n_key(sys.stdout, verbose)\n if summary_dct:\n print(\"\\n{N} GO IDs in {S} sections\".format(\n N=desc2nts['num_items'], S=desc2nts['num_sections']))\n\n def _get_prtfmt(self, objgowr, verbose):\n \"\"\"Get print format containing markers.\"\"\"\n prtfmt = objgowr.get_prtfmt('fmt')\n prtfmt = prtfmt.replace('# ', '')\n # print('PPPPPPPPPPP', prtfmt)\n if not verbose:\n prtfmt = prtfmt.replace('{hdr1usr01:2}', '')\n prtfmt = prtfmt.replace('{childcnt:3} L{level:02} ', '')\n prtfmt = prtfmt.replace('{num_usrgos:>4} uGOs ', '')\n prtfmt = prtfmt.replace('{D1:5} {REL} {rel}', '')\n prtfmt = prtfmt.replace('R{reldepth:02} ', '')\n # print('PPPPPPPPPPP', prtfmt)\n marks = ''.join(['{{{}}}'.format(nt.hdr) for nt in self.go_ntsets])\n return '{MARKS} {PRTFMT}'.format(MARKS=marks, PRTFMT=prtfmt)\n\n @staticmethod\n def _get_fncsortnt(flds):\n \"\"\"Return a sort function for sorting header GO IDs found in sections.\"\"\"\n if 'tinfo' in flds:\n return lambda ntgo: [ntgo.NS, -1*ntgo.tinfo, ntgo.depth, ntgo.alt]\n if 'dcnt' in flds:\n return lambda ntgo: [ntgo.NS, -1*ntgo.dcnt, ntgo.depth, ntgo.alt]\n return lambda ntgo: [ntgo.NS, -1*ntgo.depth, ntgo.alt]\n\n def _wr_txt_nts(self, fout_txt, desc2nts, objgowr, verbose):\n \"\"\"Write grouped and sorted GO IDs to GOs.\"\"\"\n with open(fout_txt, 'w') as prt:\n self._prt_ver_n_key(prt, verbose)\n prt.write('\\n\\n')\n prt.write('# ----------------------------------------------------------------\\n')\n prt.write('# - Sections and GO IDs\\n')\n prt.write('# ----------------------------------------------------------------\\n')\n prtfmt = self._get_prtfmt(objgowr, verbose)\n summary_dct = objgowr.prt_txt_desc2nts(prt, desc2nts, prtfmt)\n if summary_dct:\n print(\" {N:>5} GO IDs WROTE: {FOUT} ({S} sections)\".format(\n N=desc2nts['num_items'], FOUT=fout_txt, S=desc2nts['num_sections']))\n else:\n print(\" WROTE: {TXT}\".format(TXT=fout_txt))\n\n def _wr_ver_n_key(self, fout_txt, verbose):\n \"\"\"Write GO DAG version and key indicating presence of GO ID in a list.\"\"\"\n with open(fout_txt, 'w') as prt:\n self._prt_ver_n_key(prt, verbose)\n print(' WROTE: {TXT}'.format(TXT=fout_txt))\n\n\n def _prt_ver_n_key(self, prt, verbose):\n \"\"\"Print GO DAG version and key indicating presence of GO ID in a list.\"\"\"\n pre = '# '\n prt.write('# ----------------------------------------------------------------\\n')\n prt.write('# - Description of GO ID fields\\n')\n prt.write('# ----------------------------------------------------------------\\n')\n prt.write(\"# Versions:\\n# {VER}\\n\".format(VER=\"\\n# \".join(self.objgrpd.ver_list)))\n prt.write('\\n# Marker keys:\\n')\n for ntgos in self.go_ntsets:\n prt.write('# X -> GO is present in {HDR}\\n'.format(HDR=ntgos.hdr))\n if verbose:\n prt.write('\\n# Markers for header GO IDs and user GO IDs:\\n')\n prt.write(\"# '**' -> GO term is both a header and a user GO ID\\n\")\n prt.write(\"# '* ' -> GO term is a header, but not a user GO ID\\n\")\n prt.write(\"# ' ' -> GO term is a user GO ID\\n\")\n 
prt.write('\\n# GO Namspaces:\\n')\n prt.write('# BP -> Biological Process\\n')\n prt.write('# MF -> Molecular Function\\n')\n prt.write('# CC -> Cellualr Component\\n')\n if verbose:\n prt.write('\\n# Example fields: 5 uGOs 362 47 L04 D04 R04\\n')\n prt.write('# N uGOs -> number of user GO IDs under this GO header\\n')\n prt.write('# First integer -> number of GO descendants\\n')\n prt.write('# Second integer -> number of GO children for the current GO ID\\n')\n prt.write('\\n# Depth information:\\n')\n if not verbose:\n prt.write('# int -> number of GO descendants\\n')\n if verbose:\n prt.write('# Lnn -> level (minimum distance from root to node)\\n')\n prt.write('# Dnn -> depth (maximum distance from root to node)\\n')\n if verbose:\n prt.write('# Rnn -> depth accounting for relationships\\n\\n')\n RelationshipStr().prt_keys(prt, pre)\n if verbose:\n prt.write('\\n')\n objd1 = GoDepth1LettersWr(self.gosubdag.rcntobj)\n objd1.prt_header(prt, 'DEPTH-01 GO terms and their aliases', pre)\n objd1.prt_txt(prt, pre)\n\n\nclass _Init(object):\n \"\"\"Initialize object.\"\"\"\n\n def __init__(self, godag):\n self.godag = godag\n\n def get_tcntobj(self, go_all, **kws):\n \"\"\"Get a TermCounts object if the user provides an annotation file, otherwise None.\"\"\"\n # kws: gaf (gene2go taxid)\n if 'gaf' in kws or 'gene2go' in kws:\n # Get a reduced go2obj set for TermCounts\n _gosubdag = GoSubDag(go_all, self.godag, rcntobj=False, prt=None)\n return get_tcntobj(_gosubdag.go2obj, **kws) # TermCounts\n\n def get_grouped(self, go_ntsets, go_all, gosubdag, **kws):\n \"\"\"Get Grouped object.\"\"\"\n kws_grpd = {k:v for k, v in kws.items() if k in Grouped.kws_dict}\n kws_grpd['go2nt'] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag)\n return Grouped(gosubdag, self.godag.version, **kws_grpd)\n\n @staticmethod\n def _init_go2ntpresent(go_ntsets, go_all, gosubdag):\n \"\"\"Mark all GO IDs with an X if present in the user GO list.\"\"\"\n go2ntpresent = {}\n ntobj = namedtuple('NtPresent', \" \".join(nt.hdr for nt in go_ntsets))\n # Get present marks for GO sources\n for goid_all in go_all:\n present_true = [goid_all in nt.go_set for nt in go_ntsets]\n present_str = ['X' if tf else '.' for tf in present_true]\n go2ntpresent[goid_all] = ntobj._make(present_str)\n # Get present marks for all other GO ancestors\n goids_ancestors = set(gosubdag.go2obj).difference(go2ntpresent)\n assert not goids_ancestors.intersection(go_all)\n strmark = ['.' for _ in range(len(go_ntsets))]\n for goid in goids_ancestors:\n go2ntpresent[goid] = ntobj._make(strmark)\n return go2ntpresent\n\n def get_go_ntsets(self, go_fins):\n \"\"\"For each file containing GOs, extract GO IDs, store filename and header.\"\"\"\n nts = []\n ntobj = namedtuple('NtGOFiles', 'hdr go_set, go_fin')\n go_sets = self._init_go_sets(go_fins)\n hdrs = [os.path.splitext(os.path.basename(f))[0] for f in go_fins]\n assert len(go_fins) == len(go_sets)\n assert len(go_fins) == len(hdrs)\n for hdr, go_set, go_fin in zip(hdrs, go_sets, go_fins):\n nts.append(ntobj(hdr=hdr, go_set=go_set, go_fin=go_fin))\n return nts\n\n def _init_go_sets(self, go_fins):\n \"\"\"Get lists of GO IDs.\"\"\"\n go_sets = []\n assert go_fins, \"EXPECTED FILES CONTAINING GO IDs\"\n assert len(go_fins) >= 2, \"EXPECTED 2+ GO LISTS. 
FOUND: {L}\".format(\n L=' '.join(go_fins))\n obj = GetGOs(self.godag)\n for fin in go_fins:\n assert os.path.exists(fin), \"GO FILE({F}) DOES NOT EXIST\".format(F=fin)\n go_sets.append(obj.get_usrgos(fin, sys.stdout))\n return go_sets\n\n\n# Copyright (C) 2016-2019, <NAME>, <NAME>. All rights reserved.\n", "id": "9671763", "language": "Python", "matching_score": 2.8132193088531494, "max_stars_count": 1, "path": "goatools/cli/compare_gos.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test the function, get_parents, in GoSubDag.\"\"\"\n\nfrom goatools.base import get_godag\nfrom goatools.gosubdag.gosubdag import GoSubDag\nfrom goatools.gosubdag.plot.plot import plt_goids\n\ndef test_go_parents():\n \"\"\"Run GO parent tests\"\"\"\n gosubdag_all = GoSubDag(None, get_godag(\"go-basic.obo\", prt=None), rcntobj=True)\n run_1(gosubdag_all)\n run_2(gosubdag_all)\n\ndef run_1(gosubdag_all):\n \"\"\"Test that using an Alt ID to color a parent in the middle does color.\"\"\"\n # Main ID: GO:0035556 135 L04 D04 AB intracellular signal transduction\n # Alt ID: GO:0007242 135 L04 D04 AB intracellular signal transduction\n alt_goid_middle = 'GO:0007242' # Alt GO ID. Key GO (GO:0035556) NOT in go2color\n alt_goid_color = '#f6cefc' # very light purple (Alt GO ID)\n goid_key = gosubdag_all.go2obj[alt_goid_middle].id\n\n # dcnt lev dep D1 Description\n goids = [ # ---- --- --- ---- ----------------------------------------------\n \"GO:0007165\", # 716 L03 D03 AB signal transduction (Header)\n \"GO:0007166\", # 336 L04 D04 AB cell surface receptor signaling pathway\n \"GO:0007186\", # 99 L04 D04 AB G-protein coupled receptor signaling pathway\n \"GO:0097527\", # 0 L04 D04 AB necroptotic signaling pathway\n \"GO:0007167\", # 120 L05 D05 AB enzyme linked receptor protein signaling pathway\n \"GO:0042770\", # 11 L05 D05 ABCG signal transduction in response to DNA damage\n \"GO:0007229\", # 0 L05 D05 AB integrin-mediated signaling pathway\n \"GO:0007205\", # 0 L05 D05 AB protein kinase C-activating GPCR signaling pathway\n \"GO:0008630\", # 1 L05 D06 ABCG intrinsic apoptotic signaling pw in rsp to DNA damage\n \"GO:0070059\", # 1 L05 D06 ABCG intrinsic apoptotic signaling pw in rsp to ER stress\n \"GO:0035590\", # 1 L06 D06 AB purinergic nucleotide receptor signaling pathway\n \"GO:0038063\", # 0 L06 D07 AB collagen-activated tyrosine kinase receptor signaling pw\n ]\n\n # If Alt ID is colored, then all equivalent GO IDs should be colored the same (unless overrode)\n go2color = {go:'#d6fffa' for go in goids} # klash ice\n go2color[alt_goid_middle] = alt_goid_color # very light purple (Alt GO ID)\n # Check that middle parent was NOT colored by user, even tho alt GO ID's color was set\n assert goid_key not in go2color\n # Plot\n godagplot = plt_goids(gosubdag_all, \"test_get_parents1.png\", goids, go2color=go2color)\n # Check that middle parent is colored properly, even if alt GO ID was used to set color\n assert godagplot.pydotnodego.go2color[alt_goid_middle] == alt_goid_color\n assert godagplot.pydotnodego.go2color[goid_key] == alt_goid_color\n # Check that original user data is NOT modified (User not expecting their data modified)\n assert goid_key not in go2color\n\n\ndef run_2(gosubdag):\n \"\"\"Test GO colors at high and low levels of hierarchy.\"\"\"\n goids = [\n 'GO:0002682', # GO:0002682 1,127 D03 A regulation of immune system process\n 'GO:0002726'] # GO:0002726 2 D09 A +reg of T cell cytokine production\n gosubdag.prt_goids(goids)\n go2color = {\n 'GO:0002682': '#b1fc99', # pale light green\n 
'GO:0002726': '#f6cefc'} # very light purple\n plt_goids(gosubdag, \"test_get_parents2.png\", goids, go2color=go2color, mark_alt_id=True)\n assert 'GO:0002682' in gosubdag.rcntobj.go2parents\n\n\n\nif __name__ == '__main__':\n test_go_parents()\n", "id": "4765858", "language": "Python", "matching_score": 2.539051055908203, "max_stars_count": 1, "path": "tests/test_plot_get_parents.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Ancestors/Descendants.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport timeit\nimport numpy as np\nfrom numpy.random import shuffle\nfrom scipy import stats\n\nfrom goatools.base import download_go_basic_obo\nfrom goatools.obo_parser import GODag\nfrom goatools.test_data.godag_timed import prt_hms\nfrom goatools.gosubdag.gosubdag import GoSubDag\n\n\ndef test_go_pools():\n \"\"\"Print a comparison of GO terms from different species in two different comparisons.\"\"\"\n objr = _Run()\n # Check that subset GoSubDags have the same ancestors/descendants as Full GoSubDag\n # pylint: disable=no-member\n for qty in np.random.randint(10, 100, size=10):\n print(\"\")\n goids = objr.get_goids_rand(qty)\n # No relationships loaded; GoSubDag subset equivalent to Full subset?\n gosubdag_r0 = objr.get_gosubdag_r0(goids)\n for goid in gosubdag_r0.go2obj:\n r0_u = gosubdag_r0.rcntobj.go2parents[goid]\n r0_d = gosubdag_r0.rcntobj.go2descendants[goid]\n assert r0_u == objr.gosubdag_r0.rcntobj.go2parents[goid]\n assert r0_d == objr.gosubdag_r0.rcntobj.go2descendants[goid]\n # All relationships loaded; GoSubDag(r0) vs. GoSubDag(r1)\n gosubdag_r1 = objr.get_gosubdag_r1(goids)\n assert gosubdag_r0.go_sources == gosubdag_r1.go_sources\n assert set(gosubdag_r0.go2obj).issubset(gosubdag_r1.go2obj)\n cnts = {'r0_u':[], 'r1_u':[], 'r0_d':[], 'r1_d':[]}\n for goid in gosubdag_r0.go2obj:\n r0_u = gosubdag_r0.rcntobj.go2parents[goid]\n r0_d = gosubdag_r0.rcntobj.go2descendants[goid]\n r1_u = gosubdag_r1.rcntobj.go2parents[goid]\n r1_d = gosubdag_r1.rcntobj.go2descendants[goid]\n assert r0_d.issubset(r1_d), \"R1({}) R0({})\".format(len(r1_d), len(r0_d))\n assert r0_u.issubset(r1_u), \"R1({}) R0({})\".format(len(r1_u), len(r0_u))\n cnts['r0_u'].append(len(r0_u))\n cnts['r1_u'].append(len(r1_u))\n cnts['r0_d'].append(len(r0_d))\n cnts['r1_d'].append(len(r1_d))\n objr.prt_cnts(cnts)\n\n\nclass _Run(object):\n \"\"\"Group entire go-basic.obo\"\"\"\n\n obo = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../go-basic.obo\")\n\n def __init__(self):\n download_go_basic_obo(self.obo, sys.stdout, loading_bar=None)\n self.godag_r0 = GODag(self.obo)\n self.godag_r1 = GODag(self.obo, optional_attrs=set(['relationship']))\n self.goids = list(set(o.id for o in self.godag_r0.values()))\n # GoSubDag (plain)\n tic = timeit.default_timer()\n self.gosubdag_r0 = GoSubDag(self.goids, self.godag_r0, prt=None)\n prt_hms(tic, \"GoSubDag r0 {N:4} GOs {S:3} srcs\".format(\n N=len(self.gosubdag_r0.go2obj), S=len(self.gosubdag_r0.go_sources)))\n # GoSubDag with relationships\n self.gosubdag_r1 = GoSubDag(self.goids, self.godag_r1, prt=None, relationships=True)\n prt_hms(tic, \"GoSubDag r1 {N:4} GOs {S:3} srcs\".format(\n N=len(self.gosubdag_r1.go2obj), S=len(self.gosubdag_r1.go_sources)))\n\n def prt_cnts(self, cnts):\n \"\"\"Compare ancestor/descendant counts with relatives=False/True.\"\"\"\n k2v = {k:self.str_stats(v) for k, v in cnts.items()}\n print(k2v)\n\n @staticmethod\n def str_stats(vals):\n \"\"\"Print statistics on values.\"\"\"\n ntd = stats.describe(vals)\n std = 
int(round(np.sqrt(ntd.variance)))\n return \"({m} {M}) STD={STD:,}\".format(m=ntd.minmax[0], M=ntd.minmax[1], STD=std)\n\n def get_gosubdag_r0(self, goids):\n \"\"\"Return a GoSubDag with N randomly chosen GO sources.\"\"\"\n tic = timeit.default_timer()\n gosubdag = GoSubDag(goids, self.godag_r0, relationships=None,\n #rcntobj=self.gosubdag_r0.rcntobj,\n prt=None)\n prt_hms(tic, \"GoSubDag r0 {N:4} GOs {S:3} srcs\".format(\n N=len(gosubdag.go2obj), S=len(gosubdag.go_sources)))\n return gosubdag\n\n def get_gosubdag_r1(self, goids):\n \"\"\"Return a GoSubDag with N randomly chosen GO sources.\"\"\"\n tic = timeit.default_timer()\n gosubdag = GoSubDag(goids, self.godag_r1, relationships=True,\n #rcntobj=self.gosubdag_r1.rcntobj,\n prt=None)\n prt_hms(tic, \"GoSubDag r1 {N:4} GOs {S:3} srcs\".format(\n N=len(gosubdag.go2obj), S=len(gosubdag.go_sources)))\n return gosubdag\n\n def get_goids_rand(self, qty):\n \"\"\"Return N randomly chosen GO IDs.\"\"\"\n shuffle(self.goids)\n return self.goids[:qty]\n\n\nif __name__ == '__main__':\n test_go_pools()\n", "id": "9595622", "language": "Python", "matching_score": 1.2009071111679077, "max_stars_count": 1, "path": "tests/test_dcnt_r01.py" }, { "content": "\"\"\"Tasks for go2obj dicts.\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport sys\nimport collections as cx\nfrom goatools.godag.go_tasks import get_id2parents\nfrom goatools.godag.go_tasks import get_id2children\n\n\n# ------------------------------------------------------------------------------------\ndef get_sorted_relationship(goterms):\n \"\"\"Topological sort of GO Terms w/'relationship's loaded.\"\"\"\n return TopologicalSortRelationships(goterms).goterms_sorted\n\nclass TopologicalSortRelationships(object):\n \"\"\"Topological sort of GO Terms w/'relationship's loaded.\"\"\"\n\n # pylint: disable=too-few-public-methods\n def __init__(self, goterms):\n self.goterms_sorted = []\n self.goids_seen = set()\n self._init_sorted_relationship(goterms)\n\n def _init_sorted_relationship(self, goterms):\n \"\"\"Topologically sort GO Terms using 'is_a' parents and 'relationship' GO IDs.\"\"\"\n # NOTE: GODag must be loaded with 'relationship' to use this function\n for goterm in goterms:\n self._get_sorted_relationships(goterm)\n\n def _get_sorted_relationships(self, goterm):\n \"\"\"Traverse GO Terms above the current GO Term. 
Then add current GO Term to sorted.\"\"\"\n if goterm.id in self.goids_seen:\n return\n self.goids_seen.add(goterm.id)\n for goterm_upper in goterm.get_goterms_upper():\n self._get_sorted_relationships(goterm_upper)\n self.goterms_sorted.append(goterm)\n\n\n# ------------------------------------------------------------------------------------\ndef update_association(assc_gene2gos, go2obj):\n \"\"\"Add the GO parents of a gene's associated GO IDs to the gene's association.\"\"\"\n # Replaces update_association in GODag\n goids_avail = set(go2obj)\n # Get all assc GO IDs that are current\n goid_sets = assc_gene2gos.values()\n goids_assoc_all = set.union(*goid_sets)\n goids_assoc_cur = goids_assoc_all.intersection(goids_avail)\n # Get the subset of GO objects in the association\n go2obj_assc = {go:go2obj[go] for go in goids_assoc_cur}\n go2parents = get_go2parents_go2obj(go2obj_assc)\n # Update the association: update the GO set for each gene\n for goids_cur in goid_sets:\n parents = set()\n for goid in goids_cur.intersection(goids_avail):\n parents.update(go2parents[goid])\n goids_cur.update(parents)\n goids_bad = goids_assoc_all.difference(goids_avail)\n if goids_bad:\n sys.stderr.write(\"{N} GO IDs NOT FOUND IN ASSOCIATION: {GOs}\\n\".format(\n N=len(goids_bad), GOs=\" \".join(goids_bad)))\n\n# ------------------------------------------------------------------------------------\ndef get_go2obj_unique(go2obj):\n \"\"\"If GO keys point to the same GOTerm, return new go2obj w/no duplicates.\"\"\"\n # Find the unique GO Terms that are represented for each GO in go2obj\n goid2gokeys = cx.defaultdict(set)\n for goid, goobj in go2obj.items():\n goid2gokeys[goobj.id].add(goid)\n go_unique = set()\n for goid, gos_seen in goid2gokeys.items():\n # Return main GO ID, if it is present in the go2obj keys\n if goid in gos_seen:\n go_unique.add(goid)\n # Otherwise return an alternate GO ID\n else:\n go_unique.add(next(iter(gos_seen)))\n return go_unique\n\n# ------------------------------------------------------------------------------------\ndef get_go2parents_go2obj(go2obj):\n \"\"\"Return go2parents (set of parent GO IDs) for all GO ID keys in go2obj.\"\"\"\n goobjs, altgo2goobj = get_goobjs_altgo2goobj(go2obj)\n go2parents = get_id2parents(goobjs)\n add_alt_goids(go2parents, altgo2goobj)\n return go2parents\n\n# ------------------------------------------------------------------------------------\ndef get_go2children_go2obj(go2obj):\n \"\"\"Return go2children (set of child GO IDs) for all GO ID keys in go2obj.\"\"\"\n goobjs, altgo2goobj = get_goobjs_altgo2goobj(go2obj)\n go2children = get_id2children(goobjs)\n add_alt_goids(go2children, altgo2goobj)\n return go2children\n\n# ------------------------------------------------------------------------------------\ndef get_goobjs_altgo2goobj(go2obj):\n \"\"\"Separate alt GO IDs and key GO IDs.\"\"\"\n goobjs = set()\n altgo2goobj = {}\n for goid, goobj in go2obj.items():\n goobjs.add(goobj)\n if goid != goobj.id:\n altgo2goobj[goid] = goobj\n return goobjs, altgo2goobj\n\ndef add_alt_goids(go2values, altgo2goobj):\n \"\"\"Add alternate source GO IDs.\"\"\"\n for goobj_key in altgo2goobj.values():\n values_curr = go2values[goobj_key.id]\n for goid_alt in goobj_key.alt_ids:\n go2values[goid_alt] = values_curr\n return go2values\n\n# ------------------------------------------------------------------------------------\ndef fill_main_goids(go2obj, goids):\n \"\"\"Ensure main GO IDs are included in go2obj.\"\"\"\n # User GO IDs (goids) may be either main GO IDs 
or alternate GO IDs.\n for goid in goids:\n goobj = go2obj[goid]\n # If a user specified an ALT GO ID and main GO ID not in go2obj:\n if goid != goobj.id and goobj.id not in go2obj:\n # Add main GO ID to go2obj\n go2obj[goobj.id] = goobj\n\ndef fill_altgoids(go2obj):\n \"\"\"Given a go2obj containing key GO IDs, fill with all alternate GO IDs.\"\"\"\n alt2obj = {altgo:goobj for goobj in go2obj.values() for altgo in goobj.alt_ids}\n for goid, goobj in alt2obj.items():\n go2obj[goid] = goobj\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\ndef fill_relationshipobjs(go2obj, relationships):\n \"\"\"Add GO IDs to go2obj that are involved in relationships.\"\"\"\n # Get all GO Term record objects that have relationships\n obj = RelationshipFill(go2obj, relationships)\n for goobj in go2obj.values():\n if goobj.relationship:\n obj.fill_relationshipgo2obj(goobj)\n if goobj.relationship_rev:\n obj.fill_relationshiprevgo2obj(goobj)\n\nclass RelationshipFill(object):\n \"\"\"Fill go2obj with GO IDs in relatinships.\"\"\"\n\n def __init__(self, go2obj, relationships):\n # This dict shall be augmented with higher parent/relationship GO IDs\n self.go2obj = go2obj\n # A set of relationships we would like to keep\n self.relationships = relationships\n\n def fill_relationshipgo2obj(self, goobj):\n \"\"\"Fill go2obj with all relationship key GO IDs and their objects.\"\"\"\n for reltyp, relgoobjs in goobj.relationship.items():\n if reltyp in self.relationships:\n for relgoobj in relgoobjs:\n if relgoobj.id not in self.go2obj:\n self.go2obj[relgoobj.id] = relgoobj\n self.fill_relationshipgo2obj(relgoobj)\n\n def fill_relationshiprevgo2obj(self, goobj):\n \"\"\"Fill go2obj with all relationship key GO IDs and their objects.\"\"\"\n for reltyp, relgoobjs in goobj.relationship_rev.items():\n if reltyp in self.relationships:\n for relgoobj in relgoobjs:\n if relgoobj.id not in self.go2obj:\n self.go2obj[relgoobj.id] = relgoobj\n self.fill_relationshiprevgo2obj(relgoobj)\n\n# ------------------------------------------------------------------------------------\ndef get_child_objs(parent_obj):\n \"\"\"Fill child2obj with all child key and alt GO IDs and their objects.\"\"\"\n child2obj = {}\n fill_childgoid2obj(child2obj, parent_obj)\n fill_altgoids(child2obj)\n return child2obj\n\ndef fill_childgoid2obj(childgoid2obj, parent_obj):\n \"\"\"Fill childgoid2obj with all child key GO IDs and their objects.\"\"\"\n for child_obj in parent_obj.children:\n if child_obj.id not in childgoid2obj:\n childgoid2obj[child_obj.id] = child_obj\n fill_childgoid2obj(childgoid2obj, child_obj)\n\n# ------------------------------------------------------------------------------------\ndef get_leaf_children(gos_user, go2obj_arg):\n \"\"\"Find all the GO descendants under all user GO IDs. 
Return leaf-level GO IDs.\"\"\"\n childgoid2obj = {}\n for goid_usr in gos_user:\n goobj_usr = go2obj_arg[goid_usr]\n fill_childgoid2obj(childgoid2obj, goobj_usr)\n return set(go for go, o in childgoid2obj.items() if not o.children)\n\n# ------------------------------------------------------------------------------------\ndef goid_is_valid(goid):\n \"\"\"Check format of user-provided GO IDs\"\"\"\n return goid[:3] == \"GO:\" and len(goid) == 10 and goid[3:].isdigit()\n\ndef goids_valid(goids):\n \"\"\"Check format of user-provided GO IDs\"\"\"\n for goid in goids:\n if not goid_is_valid(goid):\n return False\n return True\n\ndef chk_goids(goids, msg=None, raise_except=True):\n \"\"\"check that all GO IDs have the proper format.\"\"\"\n for goid in goids:\n if not goid_is_valid(goid):\n if raise_except:\n raise RuntimeError(\"BAD GO({GO}): {MSG}\".format(GO=goid, MSG=msg))\n else:\n return goid\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "6075425", "language": "Python", "matching_score": 2.6760518550872803, "max_stars_count": 1, "path": "goatools/gosubdag/go_tasks.py" }, { "content": "\"\"\"Get descendant/parent counts for all GO terms in a GODag and broad L0 and L1 terms.\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport collections as cx\nfrom itertools import chain\nfrom goatools.godag.go_tasks import get_id2parents\nfrom goatools.godag.go_tasks import get_id2upper\nfrom goatools.godag.go_tasks import get_id2children\nfrom goatools.godag.go_tasks import get_id2lower\nfrom goatools.gosubdag.go_tasks import get_goobjs_altgo2goobj\nfrom goatools.gosubdag.go_tasks import add_alt_goids\n\n\nclass CountRelativesInit(object):\n \"\"\"Get descendant/parent counts for all GO terms in a GODag and broad L0 and L1 terms.\"\"\"\n\n def __init__(self, go2obj, relationships, dcnt, go2letter):\n # Subset go2obj contains only items needed by go_sources\n self.go2obj = go2obj\n self.relationships = relationships\n self.dcnt = dcnt\n self.go2letter = go2letter\n # Ex: set(['part_of', 'regulates', 'negatively_regulates', 'positively_regulates'])\n _goobjs, _altgo2goobj = get_goobjs_altgo2goobj(self.go2obj)\n _r0 = not relationships # True if not using relationships\n self.go2descendants = get_id2children(_goobjs) if _r0 else get_id2lower(_goobjs)\n self.go2parents = get_id2parents(_goobjs) if _r0 else get_id2upper(_goobjs)\n self.go2dcnt = {go: len(p) for go, p in self.go2descendants.items()}\n add_alt_goids(self.go2parents, _altgo2goobj)\n add_alt_goids(self.go2descendants, _altgo2goobj)\n add_alt_goids(self.go2dcnt, _altgo2goobj)\n # print('INIT CountRelativesInit', self.relationships)\n\n def get_relationship_dicts(self):\n \"\"\"Given GO DAG relationships, return summaries per GO ID.\"\"\"\n if not self.relationships:\n return None\n for goid, goobj in self.go2obj.items():\n for reltyp, relset in goobj.relationship.items():\n relfwd_goids = set(o.id for o in relset)\n # for relfwd_goid in relfwd_goids:\n # assert relfwd_goid in self.go2obj, \"{GO} {REL} NOT FOUND {GO_R}\".format(\n # GO=goid, REL=reltyp, GO_R=relfwd_goid)\n print(\"CountRelativesInit RELLLLS\", goid, goobj.id, reltyp, relfwd_goids)\n\n def get_goone2ntletter(self, go2dcnt, depth2goobjs):\n \"\"\"Assign letters to depth-01 GO terms ordered using descendants cnt.\"\"\"\n # 1. 
Group level-01/depth-01 GO terms by namespace\n ns2dcntgoobj = cx.defaultdict(list)\n for goobj in depth2goobjs[1]:\n dcnt = go2dcnt[goobj.id]\n ns2dcntgoobj[goobj.namespace].append((dcnt, goobj))\n # 2. Assign letters to level-01/depth-01 GO terms\n go2nt = {}\n ntobj = cx.namedtuple(\"NtGoLetters\", \"D1 dcnt goobj\")\n _go2abc = self.go2letter\n letters = list(chain(range(ord('A'), ord('Z') + 1), range(ord('a'), ord('z') + 1)))\n for list_dcnt_goobj in ns2dcntgoobj.values():\n letter_idx = 0\n for dcnt, goobj in sorted(list_dcnt_goobj, key=lambda t: t[0], reverse=True):\n letter = chr(letters[letter_idx]) if _go2abc is None else _go2abc.get(goobj.id, '')\n go2nt[goobj.id] = ntobj._make([letter, dcnt, goobj])\n letter_idx += 1\n return go2nt\n\n @staticmethod\n def get_depth2goobjs(go2obj, max_depth=2):\n \"\"\"Init depth2goobjs using list sorted by depth, get level-00/01 GO terms.\"\"\"\n depth2goobjs = {d:list() for d in range(max_depth+1)}\n goid_seen = set()\n for _, goobj in sorted(go2obj.items(), key=lambda t: t[1].depth):\n # Save depth-00, depth-01, depth-02\n if goobj.depth > max_depth:\n break\n goid = goobj.id\n if not goobj.is_obsolete and goid not in goid_seen:\n depth2goobjs[goobj.depth].append(goobj)\n goid_seen.add(goid)\n return depth2goobjs\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "1694051", "language": "Python", "matching_score": 0.4492361843585968, "max_stars_count": 1, "path": "goatools/gosubdag/godag_rcnt_init.py" }, { "content": "\"\"\"Annotation Extension for relational expressions.\n\n https://link.springer.com/protocol/10.1007/978-1-4939-3743-1_17\n\n\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\n# pylint: disable=too-few-public-methods\nclass AnnotationExtension(object):\n \"\"\"Annotation Extension for relational expressions.\"\"\"\n\n def __init__(self, relation, entity):\n # Relationship between GO term and the entity\n self.relation = relation\n # An identifier for a database object or ontology term\n self.entity = entity\n\n def __str__(self):\n return \"{R}({E})\".format(R=self.relation, E=self.entity)\n\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n", "id": "6499766", "language": "Python", "matching_score": 0.8142827153205872, "max_stars_count": 0, "path": "goatools/anno/extensions/extension.py" }, { "content": "\"\"\"Create strings representing relationships on GO Terms.\n\n +------- has 'part_of' relationship(s)\n | +-- pointed to by a GO ID with a 'part_of' relationship\n | |\n V V\nGO:0008150 L00 D00 .... .rdu biological_process\nGO:0050896 L01 D01 .... .rdu response to stimulus\nGO:0042221 L02 D02 .... p... response to chemical\nGO:0032501 L01 D01 .... .rdu multicellular organismal process\nGO:0003008 L02 D02 .... .r.. system process\nGO:0051606 L02 D02 .... .... detection of stimulus\nGO:0050877 L03 D03 .... .rdu nervous system process\nGO:0009593 L03 D03 P... .... detection of chemical stimulus\nGO:0007600 L04 D04 .... pr.. sensory perception\nGO:0050906 L03 D03 P... .... detection of stimulus involved in sensory perception\nGO:0050890 L04 D04 .... .... cognition\nGO:0050907 L04 D04 P... .... detection of chemical stimulus involved in sensory perception\nGO:0007606 L05 D05 .... p... sensory perception of chemical stimulus\nGO:0050893 L05 D05 P... .... sensory processing\nGO:0050911 L05 D05 P... .... detection of chemical stimulus involved in sensory perception of smell\nGO:0007608 L06 D06 .... p... 
sensory perception of smell\n\n\"\"\"\n\n__copyright__ = \"Copyright (C) 2010-2019, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\nfrom collections import OrderedDict\nfrom goatools.godag.consts import Consts\n\n# pylint: disable=too-few-public-methods,bad-whitespace\nclass RelationshipStr(object):\n \"\"\"Create strings representing relationships on GO Terms.\"\"\"\n\n # go-basic.obo: fmt(1.2) rel(2019-02-20) 47,177 GO Terms; optional_attrs(relationship)\n # relationship:\n # 6,882 part_of\n # 3,230 regulates\n # 2,804 negatively_regulates\n # 2,785 positively_regulates\n\n rel2chr = OrderedDict([\n ('part_of', 'P'),\n ('regulates', 'R'),\n ('negatively_regulates', 'D'),\n ('positively_regulates', 'U')])\n\n rev2chr = OrderedDict([\n ('part_of', 'p'),\n ('regulates', 'r'),\n ('negatively_regulates', 'd'),\n ('positively_regulates', 'u')])\n\n def __init__(self, relationships=None):\n self.consts = Consts()\n assert set(self.rel2chr.keys()) == self.consts.relationships\n # Ordered relationships\n _rels = relationships if relationships else set()\n self.rels = [r for r in self.consts.RELATIONSHIP_LIST if r in _rels]\n\n def str_relationships(self, goobj):\n \"\"\"Get a string representing the presence of absence of relationships. Ex: P...\"\"\"\n rel_cur = goobj.relationship\n return \"\".join([self.rel2chr.get(r, '?') if r in rel_cur else '.' for r in self.rels])\n\n def str_rel_short(self, goobj):\n \"\"\"Get a string representing the presence of absence of relationships. Ex: P\"\"\"\n if not goobj.relationship:\n return ''\n rel_cur = goobj.relationship\n return \"\".join([self.rel2chr.get(r, '?') for r in self.rels if r in rel_cur])\n\n def str_relationships_rev(self, goobj):\n \"\"\"Get a string representing the presence of absence of relationships. Ex: pr..\"\"\"\n rel_cur = goobj.relationship_rev\n return \"\".join([self.rev2chr[r] if r in rel_cur else '.' 
for r in self.rels])\n\n def prt_keys(self, prt, pre):\n \"\"\"Print the alias for a relationship and its alias.\"\"\"\n prt.write('{PRE}Relationship to parent: {ABC}\\n'.format(\n PRE=pre, ABC=''.join(self.rel2chr.values())))\n for rel, alias in self.rel2chr.items():\n prt.write('{PRE} {A} {DESC}\\n'.format(PRE=pre, A=alias, DESC=rel))\n prt.write('\\n{PRE}Relationship to child: {ABC}\\n'.format(\n PRE=pre, ABC=''.join(self.rev2chr.values())))\n for rel, alias in self.rev2chr.items():\n prt.write('{PRE} {A} {DESC}\\n'.format(PRE=pre, A=alias, DESC=rel))\n\n\n# Copyright (C) 2010-2019, <NAME>, <NAME>, All rights reserved.\n", "id": "4504925", "language": "Python", "matching_score": 1.84687340259552, "max_stars_count": 1, "path": "goatools/godag/relationship_str.py" }, { "content": "\"\"\"Manage optional GO-DAG attributes.\"\"\"\n\n__copyright__ = \"Copyright (C) 2015-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport re\nimport collections as cx\n\n\nclass OboOptionalAttrs(object):\n \"\"\"Manage optional GO-DAG attributes.\"\"\"\n\n attributes = set(['def', 'defn', 'synonym', 'relationship', 'xref', 'subset', 'comment'])\n\n def __init__(self, optional_attrs):\n assert optional_attrs\n self.optional_attrs = optional_attrs\n self.attr2cmp = self._init_compile_patterns(optional_attrs)\n\n def update_rec(self, rec, line):\n \"\"\"Update current GOTerm with optional record.\"\"\"\n if 'def' in self.optional_attrs and line[:5] == \"def: \":\n assert not hasattr(rec, 'defn'), \"ATTR(defn) ALREADY SET({VAL})\".format(VAL=rec.defn)\n # Use 'defn' because 'def' is a reserved word in python\n rec.defn = line[5:]\n elif 'synonym' in self.optional_attrs and line[:9] == \"synonym: \":\n rec.synonym.append(self._get_synonym(line[9:]))\n # http://geneontology.org/page/ontology-relations\n elif 'relationship' in self.optional_attrs and line[:14] == \"relationship: \":\n # relationships are stored in a dict of sets, mirroring\n # the structure implied in the GO DAG. 
Example:\n #\n # relationship = {\n # 'part_of': set(['GO:0021513', 'GO:0006310']),\n # 'regulates': set(['GO:0006313']),\n # 'negatively_regulates': set(['GO:0021910']),\n # 'positively_regulates': set(['GO:0006313']),\n # }\n rel, goid = line[14:].split()[:2]\n if rel not in rec.relationship:\n rec.relationship[rel] = set([goid])\n else:\n rec.relationship[rel].add(goid)\n elif 'xref' in self.optional_attrs and line[:6] == \"xref: \":\n rec.xref.add(self._get_xref(line[6:]))\n elif 'subset' in self.optional_attrs and line[:8] == \"subset: \":\n rec.subset.add(line[8:])\n elif 'comment' in self.optional_attrs and line[:9] == \"comment: \":\n rec.comment = line[9:]\n\n def init_datamembers(self, rec):\n \"\"\"Initialize current GOTerm with data members for storing optional attributes.\"\"\"\n # pylint: disable=multiple-statements\n if 'synonym' in self.optional_attrs: rec.synonym = []\n if 'xref' in self.optional_attrs: rec.xref = set()\n if 'subset' in self.optional_attrs: rec.subset = set()\n if 'comment' in self.optional_attrs: rec.comment = \"\"\n if 'relationship' in self.optional_attrs:\n rec.relationship = {}\n rec.relationship_rev = {}\n\n def _get_synonym(self, line):\n \"\"\"Given line, return optional attribute synonym value in a namedtuple.\n\n Example synonym and its storage in a namedtuple:\n synonym: \"The other white meat\" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]\n text: \"The other white meat\"\n scope: EXACT\n typename: MARKETING_SLOGAN\n dbxrefs: set([\"MEAT:00324\", \"BACONBASE:03021\"])\n\n Example synonyms:\n \"peptidase inhibitor complex\" EXACT [GOC:bf, GOC:pr]\n \"regulation of postsynaptic cytosolic calcium levels\" EXACT syngo_official_label []\n \"tocopherol 13-hydroxylase activity\" EXACT systematic_synonym []\n \"\"\"\n mtch = self.attr2cmp['synonym'].match(line)\n text, scope, typename, dbxrefs, _ = mtch.groups()\n typename = typename.strip()\n dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set()\n return self.attr2cmp['synonym nt']._make([text, scope, typename, dbxrefs])\n\n def _get_xref(self, line):\n \"\"\"Given line, return optional attribute xref value in a dict of sets.\"\"\"\n # Ex: Wikipedia:Zygotene\n # Ex: Reactome:REACT_22295 \"Addition of a third mannose to ...\"\n mtch = self.attr2cmp['xref'].match(line)\n return mtch.group(1).replace(' ', '')\n\n @staticmethod\n def _init_compile_patterns(optional_attrs):\n \"\"\"Compile search patterns for optional attributes if needed.\"\"\"\n attr2cmp = {}\n if optional_attrs is None:\n return attr2cmp\n # \"peptidase inhibitor complex\" EXACT [GOC:bf, GOC:pr]\n # \"blood vessel formation from pre-existing blood vessels\" EXACT systematic_synonym []\n # \"mitochondrial inheritance\" EXACT []\n # \"tricarboxylate transport protein\" RELATED [] {comment=\"WIkipedia:Mitochondrial_carrier\"}\n if 'synonym' in optional_attrs:\n attr2cmp['synonym'] = re.compile(r'\"(\\S.*\\S)\" ([A-Z]+) (.*)\\[(.*)\\](.*)$')\n attr2cmp['synonym nt'] = cx.namedtuple(\"synonym\", \"text scope typename dbxrefs\")\n # Wikipedia:Zygotene\n # Reactome:REACT_27267 \"DHAP from Ery4P and PEP, Mycobacterium tuberculosis\"\n if 'xref' in optional_attrs:\n attr2cmp['xref'] = re.compile(r'^(\\S+:\\s*\\S+)\\b(.*)$')\n return attr2cmp\n\n @staticmethod\n def get_optional_attrs(optional_attrs):\n \"\"\"Prepare to store data from user-desired optional fields.\n\n Not loading these optional fields by default saves in space and speed.\n But allow the possibility for saving these fields, if the user desires,\n Including:\n comment 
consider def is_class_level is_metadata_tag is_transitive\n relationship replaced_by subset synonym transitive_over xref\n \"\"\"\n attrs_opt = set(['def', 'defn', 'synonym', 'relationship', 'xref', 'subset', 'comment'])\n # Required attributes are always loaded. All others are optionally loaded.\n # Allow user to specify either: 'def' or 'defn'\n # 'def' is an obo field name, but 'defn' is legal Python attribute name\n getnm = lambda aopt: aopt if aopt != \"defn\" else \"def\"\n # pylint: disable=redefined-variable-type\n opts = None\n if isinstance(optional_attrs, str) and optional_attrs in attrs_opt:\n opts = set([getnm(optional_attrs)])\n else:\n opts = set([getnm(f) for f in optional_attrs if f in attrs_opt])\n if opts:\n return opts\n\n\n# Copyright (C) 2015-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "12434930", "language": "Python", "matching_score": 1.425443172454834, "max_stars_count": 1, "path": "goatools/godag/obo_optional_attributes.py" }, { "content": "\"\"\"Manage evidence codes as reported by the Gene Ontology Consortium.\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2019, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport sys\nimport collections as cx\n\n\n# pylint: disable=line-too-long\nclass EvidenceCodes(object):\n \"\"\"From http://geneontology.org/page/guide-go-evidence-codes\"\"\"\n # gocwiki.geneontology.org/index.php/Evidence_Code_Ontology_%28ECO%29\n\n ntobj = cx.namedtuple(\"NtCode\", \"eco group name\")\n\n code2name = cx.OrderedDict([\n # Experimental Evidence codes:\n (\"EXP\", ntobj._make([\"ECO:0000269\", \"Experimental\", \"Inferred from Experiment\"])),\n (\"IDA\", ntobj._make([\"ECO:0000314\", \"Experimental\", \"Inferred from Direct Assay\"])),\n (\"IPI\", ntobj._make([\"ECO:0000353\", \"Experimental\", \"Inferred from Physical Interaction\"])),\n (\"IMP\", ntobj._make([\"ECO:0000315\", \"Experimental\", \"Inferred from Mutant Phenotype\"])),\n (\"IGI\", ntobj._make([\"ECO:0000316\", \"Experimental\", \"Inferred from Genetic Interaction\"])),\n (\"IEP\", ntobj._make([\"ECO:0000270\", \"Experimental\", \"Inferred from Expression Pattern\"])),\n\n # Similarity evidence codes\n (\"ISS\", ntobj._make([\"ECO:0000250\", \"Similarity\", \"Inferred from Sequence or structural Similarity\"])),\n (\"ISO\", ntobj._make([\"ECO:0000266\", \"Similarity\", \"Inferred from Sequence Orthology\"])),\n (\"ISA\", ntobj._make([\"ECO:0000247\", \"Similarity\", \"Inferred from Sequence Alignment\"])),\n (\"ISM\", ntobj._make([\"ECO:0000255\", \"Similarity\", \"Inferred from Sequence Model used in manual assertion\"])),\n (\"IGC\", ntobj._make([\"ECO:0000317\", \"Similarity\", \"Inferred from Genomic Context\"])),\n (\"IBA\", ntobj._make([\"ECO:0000318\", \"Similarity\", \"Inferred from Biological aspect of Ancestor\"])),\n (\"IBD\", ntobj._make([\"ECO:0000319\", \"Similarity\", \"Inferred from Biological aspect of Descendant\"])),\n (\"IKR\", ntobj._make([\"ECO:0000320\", \"Similarity\", \"Inferred from phylogenetic determination of loss of key residues (manual assertion)\"])),\n (\"IRD\", ntobj._make([\"ECO:0000321\", \"Similarity\", \"Inferred from Rapid Divergence from ancestral sequence (manual assertion)\"])),\n (\"IMR\", ntobj._make([\"ECO:0000320\", \"Similarity\", \"Phylogenetic determination of loss of key residues in manual assertion\"])),\n\n # Combinatorial evidence codes\n (\"RCA\", ntobj._make([\"ECO:0000245\", \"Combinatorial\", \"Inferred from Reviewed Computational Analysis\"])),\n\n # High Throughput Experimental evidence 
codes\n (\"HTP\", ntobj._make([\"ECO:0006056\", \"High_Throughput\", \"Inferred from High Throughput Experimental\"])),\n (\"HDA\", ntobj._make([\"ECO:0007005\", \"High_Throughput\", \"Inferred from High Throughput Direct Assay\"])),\n (\"HMP\", ntobj._make([\"ECO:0007001\", \"High_Throughput\", \"Inferred from High Throughput Mutant Phenotype\"])),\n (\"HGI\", ntobj._make([\"ECO:0007003\", \"High_Throughput\", \"Inferred from High Throughput Genetic Interaction\"])),\n (\"HEP\", ntobj._make([\"ECO:0007007\", \"High_Throughput\", \"Inferred from High Throughput Expression Pattern\"])),\n\n # Author Statement evidence codes\n (\"TAS\", ntobj._make([\"ECO:0000304\", \"Author\", \"Traceable Author Statement used in manual assertion\"])),\n (\"NAS\", ntobj._make([\"ECO:0000303\", \"Author\", \"Non-traceable Author Statement used in manual assertion\"])),\n\n # Curator Inference\n (\"IC\", ntobj._make([\"ECO:0000305\", \"Curatorial\", \"Inferred by Curator\"])),\n\n # No Biological Data\n (\"ND\", ntobj._make([\"ECO:0000307\", \"No biological data\", \"No biological Data available\"])),\n\n # Automatic Assertion\n (\"IEA\", ntobj._make([\"ECO:0000501\", \"Automatic\", \"Inferred from Electronic Annotation\"]))])\n\n def __init__(self):\n self.ev2idx = {ev:i for i, ev in enumerate(self.code2name.keys())}\n\n def sort_nts(self, nt_list, codekey):\n \"\"\"Sort list of namedtuples such so evidence codes in same order as code2name.\"\"\"\n # Problem is that some members in the nt_list do NOT have\n # codekey=EvidenceCode, then it returns None, which breaks py34 and 35\n # The fix here is that for these members, default to -1 (is this valid?)\n sortby = lambda nt: self.ev2idx.get(getattr(nt, codekey), -1)\n return sorted(nt_list, key=sortby)\n\n def get_grp_name(self, code):\n \"\"\"Return group and name for an evidence code.\"\"\"\n nt_code = self.code2name.get(code, None)\n if nt_code is not None:\n return nt_code.group, nt_code.name\n return \"\", \"\"\n\n def prt_ev_cnts(self, ctr, prt=sys.stdout):\n \"\"\"Prints evidence code counts stored in a collections Counter.\"\"\"\n for key, cnt in ctr.most_common():\n grp, name = self.get_grp_name(key.replace(\"NOT \", \"\"))\n prt.write(\"{CNT:7,} {EV:>7} {GROUP:<13} {NAME}\\n\".format(\n CNT=cnt, EV=key, GROUP=grp, NAME=name))\n\n def get_order(self, codes):\n \"\"\"Return evidence codes in order shown in cod2name.\"\"\"\n return sorted(codes, key=lambda e: [self.ev2idx.get(e)])\n\n# Copyright (C) 2016-2019, <NAME>, <NAME>. 
All rights reserved.\"\n", "id": "11749737", "language": "Python", "matching_score": 1.622402548789978, "max_stars_count": 0, "path": "goatools/evidence_codes.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test TermCounts object used in Resnik and Lin similarity calculations.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport timeit\nimport datetime\nfrom goatools.base import get_godag\nfrom goatools.associations import dnld_assc\nfrom goatools.semantic import TermCounts\nfrom goatools.semantic import get_info_content\nfrom goatools.test_data.gafs import ASSOCIATIONS\n\nTIC = timeit.default_timer()\n\ndef test_semantic_similarity(usr_assc=None):\n \"\"\"Computing basic semantic similarities between GO terms.\"\"\"\n not_these = {'goa_uniprot_all.gaf', 'goa_uniprot_all_noiea.gaf'}\n associations = sorted(ASSOCIATIONS.difference(not_these))\n go2obj = get_go2obj()\n # goids = go2obj.keys()\n # http://current.geneontology.org/annotations/\n if usr_assc is not None:\n associations = [usr_assc]\n cwd = os.getcwd()\n for assc_name in associations: # Limit test numbers for speed\n tic = timeit.default_timer()\n # Get all the annotations from arabidopsis.\n assc_gene2gos = dnld_assc(os.path.join(cwd, assc_name), go2obj, prt=sys.stdout)\n\n # Calculate the information content of the single term, GO:0048364\n # \"Information content (GO:0048364) = 7.75481392334\n\n # First get the counts of each GO term.\n termcounts = TermCounts(go2obj, assc_gene2gos)\n go_cnt = termcounts.gocnts.most_common()\n #print termcounts.gocnts.most_common()\n\n if go_cnt:\n print(\"{ASSC}\".format(ASSC=assc_name))\n print(sorted(termcounts.aspect_counts.most_common()))\n gocnt_max = go_cnt[0][1]\n prt_info(termcounts, go_cnt, None)\n prt_info(termcounts, go_cnt, gocnt_max/2.0)\n prt_info(termcounts, go_cnt, gocnt_max/10.0)\n print(\"{HMS} {hms} {ASSC}\\n\".format(ASSC=assc_name, HMS=_hms(TIC), hms=_hms(tic)))\n print('{HMS} {N} Associations'.format(HMS=_hms(TIC), N=len(associations)))\n\ndef _hms(tic):\n \"\"\"Get Timing.\"\"\"\n return '{HMS}'.format(HMS=str(datetime.timedelta(seconds=(timeit.default_timer()-tic))))\n\ndef prt_info(termcounts, go_cnt, max_val):\n \"\"\"Print the information content of a frequently used GO ID.\"\"\"\n go_id, cnt = get_goid(go_cnt, max_val)\n infocontent = get_info_content(go_id, termcounts)\n msg = 'Information content ({GO} {CNT:7,}) = {INFO:8.6f} {NAME}'\n print(msg.format(GO=go_id, CNT=cnt, INFO=infocontent, NAME=termcounts.go2obj[go_id].name))\n\ndef get_goid(go_cnt, max_val):\n \"\"\"Get frequently used GO ID.\"\"\"\n if max_val is not None:\n for goid, cnt in go_cnt:\n if cnt < max_val:\n return goid, cnt\n return go_cnt[-1][0], go_cnt[-1][1]\n return go_cnt[0][0], go_cnt[0][1]\n\ndef get_go2obj():\n \"\"\"Read GODag and return go2obj.\"\"\"\n godag = get_godag(os.path.join(os.getcwd(), \"go-basic.obo\"), loading_bar=None)\n return {go:o for go, o in godag.items() if not o.is_obsolete}\n\nif __name__ == '__main__':\n ASSC_NAME = None if len(sys.argv) == 1 else sys.argv[1]\n test_semantic_similarity(ASSC_NAME)\n", "id": "160277", "language": "Python", "matching_score": 1.4698309898376465, "max_stars_count": 0, "path": "tests/test_termcounts_asscs.py" }, { "content": "\"\"\"Download GOA files from the Gene Ontology Annotation (GOA) resource http://www.ebi.ac.uk/GOA.\"\"\"\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. 
All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport os\nimport sys\nfrom goatools.base import dnld_file\n\nclass DnldGoa(object):\n \"\"\"Download files from the Gene Ontology Annotation (GOA) resource http://www.ebi.ac.uk/GOA.\"\"\"\n\n # European Bioinformatics Institute (EMBL-EBI) ftp site\n ftp_pub = 'ftp://ftp.ebi.ac.uk/pub/'\n\n # Species available from ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/\n species = [\n 'arabidopsis',\n 'chicken',\n 'cow',\n 'dicty',\n 'dog',\n 'fly',\n 'human',\n 'mouse',\n #'pdb',\n 'pig',\n 'rat',\n 'uniprot',\n 'worm',\n 'yeast',\n 'zebrafish',\n ]\n\n species_items = ['complex', 'isoform', 'rna']\n exts = ['gaf', 'gpa', 'gpi']\n\n def __init__(self):\n self.ftp_src_goa = os.path.join(self.ftp_pub, 'databases/GO/goa/')\n\n def dnld_goa(self, species, ext='gaf', item=None, fileout=None):\n \"\"\"Download GOA source file name on EMBL-EBI ftp server.\"\"\"\n basename = self.get_basename(species, ext, item)\n src = os.path.join(self.ftp_src_goa, species.upper(), \"{F}.gz\".format(F=basename))\n dst = os.path.join(os.getcwd(), basename) if fileout is None else fileout\n dnld_file(src, dst, prt=sys.stdout, loading_bar=None)\n return dst\n\n def get_basename(self, species, ext='gaf', item=None):\n \"\"\"Get GOA basename for a specific species. Ex: goa_human.gaf\"\"\"\n assert ext in self.exts, \" \".join(self.exts)\n if species == 'uniprot':\n species = 'uniprot_all' if item != 'gcrp' else 'uniprot_gcrp'\n if item is None:\n return 'goa_{SPECIES}.{EXT}'.format(SPECIES=species, EXT=ext)\n assert item in self.species_items\n return 'goa_{SPECIES}_{ITEM}.{EXT}'.format(SPECIES=species, ITEM=item, EXT=ext)\n\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n", "id": "5031517", "language": "Python", "matching_score": 1.0546331405639648, "max_stars_count": 1, "path": "goatools/anno/dnld_ebi_goa.py" }, { "content": "\"\"\"Find human genes related to cell cycle.\"\"\"\n\nimport sys\nimport os\nimport re\nfrom collections import defaultdict\nfrom goatools.base import download_go_basic_obo\nfrom goatools.go_search import GoSearch\nfrom goatools.associations import get_assoc_ncbi_taxids\nfrom goatools.wr_tbl import prt_txt\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\ndef test_cell_cycle(taxid=9606, log=sys.stdout):\n \"\"\"Get all genes related to cell cycle. Write results to file.\"\"\"\n geneids = get_genes_cell_cycle(taxid, log)\n fout = \"cell_cycle_genes_{TAXID}.txt\".format(TAXID=taxid)\n prt_genes(fout, geneids, taxid, log)\n\ndef get_genes_cell_cycle(taxid=9606, log=sys.stdout):\n \"\"\"Test GOEA with local multipletest correction methods for cell cycle.\"\"\"\n # Download ontologies and annotations, if necessary\n fin_go_obo = os.path.join(os.getcwd(), \"go-basic.obo\")\n download_go_basic_obo(fin_go_obo, loading_bar=None)\n # Because get_assoc_ncbi_taxids returns id2gos, we will opt to\n # use the (optional) multi-level dictionary separate associations by taxid\n # taxid2asscs contains both GO2GeneIDs and GeneID2GOs.\n taxid2asscs = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))\n get_assoc_ncbi_taxids([taxid], taxid2asscs=taxid2asscs, loading_bar=None)\n\n # Initialize GO-search helper object with obo and annotations(go2items)\n srch = GoSearch(fin_go_obo, go2items=taxid2asscs[taxid]['GO2GeneIDs'])\n # Compile search pattern for 'cell cycle'\n cell_cycle = re.compile(r'cell cycle', flags=re.IGNORECASE)\n # Find ALL GOs that have 'cell cycle'. 
Store results in file.\n fout_allgos = \"cell_cycle_gos_{TAXID}.log\".format(TAXID=taxid)\n with open(fout_allgos, \"w\") as prt:\n # Search for 'cell cycle' in GO terms\n gos_cc_all = srch.get_matching_gos(cell_cycle, prt=prt)\n # Researcher carefully reviews GO results and finds GO:0005764(lysosome)\n # in the results when it should not be because the match was found:\n # cell cycle-independent\n # Researcher removes 'lysosome' from 'cell cycle' results\n # by removing any GOs matching 'cell cycle-independent'\n cell_cycle_ind = re.compile(r'cell cycle.independent', flags=re.IGNORECASE)\n gos_no_cc = srch.get_matching_gos(cell_cycle_ind, gos=gos_cc_all, prt=prt)\n gos = gos_cc_all.difference(gos_no_cc)\n # Add children GOs of cell cycle GOs\n gos_all = srch.add_children_gos(gos)\n if log is not None:\n log.write(' taxid {TAXID:>5}\\n'.format(TAXID=taxid))\n log.write(' FOUND {N:>5} GOs: {F}\\n'.format(\n N=len(gos_all), F=fout_allgos))\n # Get Entrez GeneIDs for cell cycle GOs\n geneids = srch.get_items(gos_all)\n return geneids\n\ndef prt_genes(fout_genes, geneids, taxid, log):\n \"\"\"Print 'cell cycle' geneids, with or without Symbol and description information.\"\"\"\n fin_symbols = \"genes_NCBI_{TAXID}_All.py\".format(TAXID=taxid)\n # If gene Symbol information is available, print geneid and Symbol\n if os.path.isfile(fin_symbols):\n import importlib\n module_name = \"\".join([\"goatools.test_data.\", fin_symbols[:-3]])\n module = importlib.import_module(module_name)\n geneid2nt = module.GENEID2NT\n fmtstr = \"{GeneID:>9} {Symbol:<16} {description}\\n\"\n nts = [geneid2nt[geneid] for geneid in sorted(geneids) if geneid in geneid2nt]\n with open(fout_genes, 'w') as prt:\n prt_txt(prt, nts, fmtstr)\n if log is not None:\n log.write(\" WROTE {N:>5} genes: {FOUT}\\n\".format(FOUT=fout_genes, N=len(nts)))\n # Just print geneids\n else:\n with open(fout_genes, 'w') as prt:\n for geneid in geneids:\n prt.write(\"{geneid}\\n\".format(geneid=geneid))\n if log is not None:\n log.write(\" WROTE {N:>5} genes: {FOUT}\\n\".format(\n FOUT=fout_genes, N=len(geneids)))\n\nif __name__ == '__main__':\n test_cell_cycle(9606)\n test_cell_cycle(10090)\n\n# Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "6113629", "language": "Python", "matching_score": 2.4200615882873535, "max_stars_count": 0, "path": "tests/test_genes_cell_cycle.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test propagate_counts up relationships as well as parent-child links.\"\"\"\n\nimport sys\nimport os\n# from itertools import combinations\n# import collections as cx\n\nfrom goatools.go_enrichment import GOEnrichmentStudy\nfrom goatools.base import get_godag\nfrom goatools.test_data.genes_NCBI_10090_ProteinCoding import GENEID2NT as GeneID2nt_mus\nfrom goatools.test_data.nature3102_goea import get_geneid2symbol\nfrom goatools.associations import get_assoc_ncbi_taxids\n\nREPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")\n\ndef test_pc_w_rels(prt=sys.stdout):\n \"\"\"Test P-value calculations.\"\"\"\n file_obo = os.path.join(REPO, \"go-basic.obo\")\n godag_r0 = get_godag(file_obo, prt, loading_bar=None)\n godag_r1 = get_godag(file_obo, prt, loading_bar=None, optional_attrs=['relationship'])\n results_r0 = _get_results(godag_r1, propagate_counts=True, relationships=False, prt=prt)\n results_r1 = _get_results(godag_r1, propagate_counts=True, relationships=True, prt=prt)\n _chk_results(results_r0, results_r1, prt)\n\ndef _chk_results(results_r0, results_r1, prt):\n \"\"\"Test 
propagate_counts up relationships as well as parent-child links.\"\"\"\n prt.write('TBD: Compare results')\n pass\n\ndef _get_results(godag, propagate_counts, relationships, prt=sys.stdout):\n \"\"\"Run a GOEA. Return results\"\"\"\n taxid = 10090 # Mouse study\n geneids_pop = set(GeneID2nt_mus.keys())\n assoc_geneid2gos = get_assoc_ncbi_taxids([taxid], loading_bar=None)\n geneids_study = get_geneid2symbol(\"nbt.3102-S4_GeneIDs.xlsx\")\n goeaobj = GOEnrichmentStudy(\n geneids_pop,\n assoc_geneid2gos,\n godag,\n propagate_counts=propagate_counts,\n relationships=relationships,\n alpha=0.05,\n methods=['fdr_bh'])\n return goeaobj.run_study(geneids_study, prt=prt)\n\n\nif __name__ == '__main__':\n test_pc_w_rels()\n", "id": "5890454", "language": "Python", "matching_score": 2.1306633949279785, "max_stars_count": 1, "path": "tests/test_propagate_counts_w_relationships.py" }, { "content": "\"\"\"Test zipping lists whose elements are namedtuples or class objects.\"\"\"\n\nimport os\nimport collections as cx\nfrom goatools.nt_utils import combine_nt_lists\nfrom goatools.obo_parser import GODag\nfrom goatools.go_enrichment import GOEnrichmentStudy\nfrom goatools.associations import read_associations\nfrom goatools.rpt.goea_nt_xfrm import get_goea_nts_prt\n\ndef test_combine_nt_lists():\n \"\"\"Test combining lists whose elements are namedtuples or class objects.\"\"\"\n ntobj = cx.namedtuple(\"Nt\", \"idx\")\n goea_results = get_goea_results()\n # Zip a list of namedtuples and another list of namedtuples\n goea_nts = get_goea_nts_prt(goea_results)\n lst2_nts = [ntobj._make([i]) for i in range(len(goea_nts))]\n # Combine lists into a single list whose elements are a namedtuple\n flds = lst2_nts[0]._fields + goea_nts[0]._fields\n lst_all = combine_nt_lists([lst2_nts, goea_nts], flds)\n assert lst_all[0]._fields == lst2_nts[0]._fields + goea_nts[0]._fields\n # Combine list contains a subset of namedtuple fields\n hdrs = ['idx', 'NS', 'level', 'depth', 'GO',\n 'study_count', 'study_n', 'pop_count', 'pop_n', 'p_fdr_bh', 'name']\n lst_sub = combine_nt_lists([lst2_nts, goea_nts], hdrs)\n assert list(lst_sub[0]._fields) == hdrs, \"{F} {H}\".format(F=lst_sub[0]._fields, H=hdrs)\n\n\ndef get_goea_results(method=\"fdr_bh\"):\n \"\"\"Get GOEA results.\"\"\"\n root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"data\")\n obo_fin = os.path.join(root_dir, \"goslim_generic.obo\")\n obo_dag = GODag(obo_fin)\n assoc = read_associations(os.path.join(root_dir, \"slim_association\"), no_top=True)\n popul_ids = [line.rstrip() for line in open(os.path.join(root_dir, \"small_population\"))]\n goeaobj = GOEnrichmentStudy(popul_ids, assoc, obo_dag, methods=[method])\n study_ids = [line.rstrip() for line in open(os.path.join(root_dir, \"small_study\"))]\n goea_results = goeaobj.run_study(study_ids, methods=[method])\n return goea_results\n\nif __name__ == '__main__':\n test_combine_nt_lists()\n", "id": "12459003", "language": "Python", "matching_score": 1.9157944917678833, "max_stars_count": 0, "path": "tests/test_combine_nt_lists.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test working with DAVID results in DAVID chart files.\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport os\nimport collections as cx\nimport timeit\nfrom goatools.parsers.david_chart import DavidChartReader\nfrom goatools.test_data.godag_timed import prt_hms\n\n\ndef test_david_chart():\n \"\"\"Read in a small obo, 
print list of GO terms and plot.\"\"\"\n repo = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")\n david_dir = \"{REPO}/data/gjoneska_pfenning\".format(REPO=repo)\n ntobj = cx.namedtuple(\"david6p8\", \"TOTAL FDR Bonferroni Benjamini PValue\")\n # pylint: disable=bad-whitespace\n fin2exp = {\n \"david_chart6p8_Consistent_Decrease.txt\": ntobj._make([ 1773, 259, 249, 432, 1316]),\n \"david_chart6p8_Transient_Decrease.txt\": ntobj._make([ 423, 0, 2, 2, 246]),\n \"david_chart6p8_Consistent_Increase.txt\": ntobj._make([ 2359, 353, 308, 781, 1868]),\n \"david_chart6p8_Transient_Increase.txt\": ntobj._make([ 2191, 658, 652, 1105, 1786]),\n \"david_chart6p8_Late_Decrease.txt\": ntobj._make([ 2752, 591, 568, 1153, 2187]),\n \"david_chart6p8_Late_Increase.txt\": ntobj._make([ 4597, 708, 616, 1715, 3603]),\n }\n tic = timeit.default_timer()\n fin2obj = {f:DavidChartReader(os.path.join(david_dir, f)) for f in fin2exp.keys()}\n prt_hms(tic, \"Created DavidChartReader objects\")\n for fin, obj in fin2obj.items():\n ntexp = fin2exp[fin]\n assert ntexp.TOTAL == len(obj.nts)\n obj.prt_num_sig()\n ctr = obj.get_num_sig()\n for fld, cnt_actual in ctr.most_common():\n assert cnt_actual == getattr(ntexp, fld), \"{FIN}: {FLD} Act({ACT}) Exp({EXP})\".format(\n FIN=fin, FLD=fld, ACT=cnt_actual, EXP=getattr(ntexp, fld))\n\n\nif __name__ == '__main__':\n test_david_chart()\n\n# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\n", "id": "7476139", "language": "Python", "matching_score": 1.4609910249710083, "max_stars_count": 1, "path": "tests/test_david_nts.py" }, { "content": "\"\"\"\nData conversion script for Illumina HiSeq short read runs\n\nSet up to be routinely executed by a cron job at qhatever frequency required\nby your organization's level of throughput\n\nThe script will check the output RTA directory of any unconverted Illumina \nHiSeq run. A file in the conversion output directory has the history of all \npreviously converted runs.\nIf the run is found to be new then two files that signify RTA transfer is\n complete are checked. Once all data is deemed transfered, a shell script is\ngenerated in the user's tmp directory. This shell script is submit to a cluster\nvia qsub (open Grid Scheduler) and monitored for successful return status.\n\nThe shell script adds to the path the required bcl2fastq script, change dir to\nthe raw data dir, and then executes the bcl2fastq program. 
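The job is submitted with 'qsub -sync y', which blocks until the grid job finishes and propagates its exit status, so a zero return from subprocess.call() is treated as a successful conversion.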
Parameters can be \nchanged by modifying the cmd in this script.\n\nUpon successful return status, the name of the directory is added to the history\nfile so that upon next invocation of this monitor script there is no redundant\ncomputing.\n\nA log file with any HiSeq runs sent to the grid during each time the script is\nexecuted also gets updated.\n\nCrontab:\n# run hiseq monitor every 30 minutes to convert bcl2fastq\n*/30 * * * * /usr/bin/python /path/to/hiseq_monitor.py\n\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nimport datetime\n\nHISEQ_DIR = \"/path/out/raw/HiSeq/\"\nHISEQ_DATA_DIR = \"/path/to/hiseq_fastq/\"\nanalyzed_file = HISEQ_DATA_DIR + \"hiseq_complete.txt\"\n\nanalyzed = []\n\nwith open(analyzed_file, 'r') as h:\n\tanalyzed = h.readlines()\n\tanalyzed = [x.rstrip() for x in analyzed]\n\nhiseq_files = os.listdir(HISEQ_DIR)\nfastq_files = [x for x in analyzed]\n\nfor hiseq_file in hiseq_files: \n\tif hiseq_file.startswith(\".\"):\n\t\thiseq_files.remove(hiseq_file)\n\nfor hiseq_file in fastq_files:\n\tif hiseq_file.startswith(\".\"):\n \tfastq_files.remove(hiseq_file)\n\nstatus = 1\nfor hiseq_file in hiseq_files:\n\tif hiseq_file not in fastq_files:\n\t\tcheck_1, check_2 = False, False\n\t\tfor f in os.listdir(HISEQ_DIR + hiseq_file):\n\t\t\tif f == 'Basecalling_Netcopy_complete.txt':\n\t\t\t\tcheck_1 = True\n\t\t\tif f == 'ImageAnalysis_Netcopy_complete.txt':\n\t\t\t\tcheck_2 = True\n\t\tif check_1 and check_2:\n\t\t\tprint(hiseq_file)\n\t\t\tconversion_file = hiseq_file\n\t\t\ttmpdir = \"/hptmp/acristo/\"\n\t\t\tcmd = \"/add_to_path .bcl2fastq2-2.17.1.14\\n\"\n\t\t\tcmd += \"cd {hs}\".format(hs = HISEQ_DIR) + hiseq_file + \"/\\n\"\n\t\t\tcmd += \"bcl2fastq --with-failed-reads -o {hs}{fastq_dir} --sample-sheet {raw}{in_dir}/Samplesheet.csv --input-dir {raw}{in_dir} -p 8 -d 8\".format(hs = HISEQ_DIR, raw = HISEQ_DATA_DIR, fastq_dir = hiseq_file, in_dir = hiseq_file)\n\t\t\twith open(tmpdir + \"{in_dir}.sh\".format(in_dir = hiseq_file), 'w') as op:\n\t\t\t\top.write(cmd)\n\t\t\tsubcmd = \"qsub -sync y -q short -o \" + tmpdir + \" -e \" + tmpdir + \" \" + tmpdir + hiseq_file +\".sh\"\n\t\t\tstatus = subprocess.call(subcmd.split())\n\t\t\tsubprocess.call(\"mkdir \" + HISEQ_DATA_DIR + \"{fastq_dir}/logs/\".format(fastq_dir = hiseq_file))\n\t\t\tprint(status)\n\t\tif status == 0:\n\t\t\twith open(analyzed_file, 'a') as af:\n\t\t\t\taf.write(conversion_file + \"\\n\")\n\telse:\n\t\tcontinue\n\nmonitor_log = HISEQ_DATA_DIR + \"hiseq_monitor.log\"\nwith open(monitor_log, 'a') as m: \n\ttime = str(datetime.datetime.now())\n\tif status == 0:\n\t\tm.write(\"Did conversion on {file} at \".format(file = conversion_file) + time + \"\\n\")\n\telse:\n\t\tm.write(\"Found no new HiSeq run at \" + time + \"\\n\")\n", "id": "5185680", "language": "Python", "matching_score": 0.9638235569000244, "max_stars_count": 0, "path": "hiseq_monitor.py" }, { "content": "\"\"\"\nThis module looks at genbank features for coding sequences 'CDS' and then\noutputs the gene name, orientation, and sequence in nucleotides per line. 
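As a possibly simpler alternative (untested sketch; assumes a reasonably recent Biopython), each
CDS feature also exposes its coordinates and strand directly, and feat.extract(dat.seq) returns the
strand-corrected nucleotide sequence:

    for feat in dat.features:
        if feat.type == 'CDS':
            print(feat.qualifiers['gene'][0], feat.location.strand, feat.extract(dat.seq))

The regex parsing below recovers the same start:end and (+/-) information from str(feat).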
\nInfile searching is done utilizing the Bio python imports SeqIO and Seq.\n\nFirst look up all features with Biopython, finding CDS, then get the location.\nThe orientation will determine the sequence as pulled from the below fasta and,\nif complementary, the sequence will be complemented.\n\npython fasta_cds_genbank.py GU19504.gb > genes_seq.txt\n\"\"\"\n\nimport sys\nimport re\nfrom Bio import SeqIO, Seq \n\ndat = SeqIO.read(open(sys.argv[1]), \"genbank\") \ngenes = {}\nfor feat in dat.features:\n if feat.type == 'CDS':\n for line in str(feat).split(\"\\n\"):\n if \"location\" in line:\n loc = re.findall(\"[0-9]*:[0-9]*\", line)\n loc = [x for x in loc if len(x) > 1][0]\n ori = re.findall(\"\\(.\\)\", line)[0].strip(\"(\").rstrip(\")\")\n pseq = feat.qualifiers['translation'][0]\n genes[feat.qualifiers['gene'][0]] = [pseq, loc, ori]\n\nfor gene in genes:\n seq = ''\n if genes[gene][2] == \"+\":\n start, end = genes[gene][1].split(\":\")\n seq = dat.seq[int(start):int(end)]\n else:\n start, end = genes[gene][1].split(\":\")\n seq = dat.seq[int(start):int(end)]\n seq = Seq.reverse_complement(seq)\n genes[gene].append(seq)\n print(gene, genes[gene][2], str(genes[gene][3]))\n", "id": "4100062", "language": "Python", "matching_score": 1.0266841650009155, "max_stars_count": 0, "path": "fasta_cds_gbk.py" }, { "content": "\"\"\"\nModule to check whether TypeIIs end-sites are compatible. Looks for\n3bp homology then 2 basepair edge-homology for each input sequence given.\nComparisons are made between each element in the list and to the reverse\ncomplement of each element. Repeat elements are also validated along with\nnoncanonical basepairs.\n\nExample:\n python2.7 type2_endcheck.py \"GAGG, GAGG, TACT, GACT, PAPP\"\n\"\"\"\nimport sys\nfrom string import maketrans\nfrom collections import Counter\n\n\ndef main(ends_list):\n ends_list = ends_list.split(\",\")\n ends_list = [x.strip(\" \") for x in ends_list]\n ncs = 'ATGC'\n silly_list = []\n for end in ends_list:\n for c in end:\n if c not in ncs:\n ter = ends_list.index(end)\n silly_list = ends_list.pop(ter)\n break\n notgood = False\n sim_list = set([])\n rc_list = [revcomplement(x) for x in ends_list]\n counts = Counter(ends_list)\n self_list = set([])\n # Check list for repeats\n for c, n in counts.items():\n if n >= 2:\n notgood = True\n self_list.add((c))\n for x in ends_list:\n # Validate no ends share homology to each other\n for g in ends_list:\n if g != x:\n score = align(x, g)\n if score >= 3:\n notgood = True\n sim_list.add((x, g))\n # Validate no reverse complements are equivalent to entry list\n if x in rc_list:\n notgood = True\n idx = rc_list.index(x)\n sim_list.add((x, rc_list[idx]))\n # Validate no ends share 3 max homology & 2bp edge homology of revers complement list\n for h in rc_list:\n revscore = align(x, h)\n if revscore >= 3:\n rrevset = [h, reverse_region(h)]\n for p in rrevset:\n rpositionscore = align(x[:2], p[:2])\n if rpositionscore == 2:\n notgood = True\n sim_list.add((x, p))\n if not notgood:\n print('Good to go!!!')\n if silly_list:\n print 'Bad entry: ', silly_list\n else:\n print('Not good!')\n if silly_list:\n print 'Bad entry: ', silly_list\n for x in sim_list:\n print 'Entry: ' + str(x[0]) + ' > (' + revcomplement(x[0]) + ') : ' + revcomplement(x[1]) + ' > (' + \\\n reverse_region(x[1]) + ')'\n for x in self_list:\n print 'Entry: ' + x + ' appeared more than once'\n\n\ndef revcomplement(seq):\n \"\"\"\n A quick reverse-complement routine that understands\n IUPAC ambiguity codes, and preserves case.\n 
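Example: revcomplement('AAGT') returns 'ACTT'.
Note that the translation table below maps 'Y' to 'T' rather than to its
IUPAC complement 'R' ('R' itself maps to 'Y' as expected).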
\"\"\"\n revcompTBL = maketrans('AGCTagctWSKMYRnN', 'TCGAtcgaWSMKTYnN')\n _t = list(seq.translate(revcompTBL))\n _t.reverse()\n rc = ''.join(_t)\n return rc\n\n\ndef reverse_region(region):\n return region[::-1]\n\n\ndef align(end, rcend):\n pairs = zip(end, rcend)\n match_score = 0\n for a, b in pairs:\n if a == b:\n match_score += 1\n return match_score\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n", "id": "4003715", "language": "Python", "matching_score": 0.4288734197616577, "max_stars_count": 0, "path": "type2_endcheck.py" }, { "content": "\"\"\"Reads an NCBI Gene tsv file.\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport re\nfrom collections import namedtuple\nfrom collections import OrderedDict\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\n#pylint: disable=line-too-long,too-many-instance-attributes,unnecessary-lambda\nclass NCBIgeneFileReader(object):\n \"\"\"Reads an NCBI Gene tsv file.\n\n Generate the NCBI gene file by following these steps:\n 1) Open a browser at: https://www.ncbi.nlm.nih.gov/gene\n 2) Type search text. Example:\n genetype protein coding[Properties] AND \"3702\"[Taxonomy ID] AND alive[property]\n 3) Press the \"Search\" button.\n 4) From the pull down menu: \"Send to\" -> File\n \"\"\"\n\n # ints=None, floats=None, hdr_ex=None, log=sys.stdout):\n #def __init__(self, sep, ints, floats, hdr_ex, log):\n def __init__(self, fin, sep=\"\\t\", **kwargs_dict):\n self.log = kwargs_dict.get('log', sys.stdout)\n self.int_hdrs = [\n 'tax_id', 'GeneID', 'CurrentID', # NCBI Gene\n 'start_position_on_the_genomic_accession', # NCBI Gene\n 'end_position_on_the_genomic_accession', # NCBI Gene\n 'exon_count', # NCBI Gene\n 'OMIM', # NCBI Gene\n 'Start', 'start', 'End', 'end', # Cluster\n 'Len', 'len', 'Length', 'length', # cluster\n 'Qty', 'qty', '# Genes'] # Cluster\n if 'ints' in kwargs_dict:\n ints = kwargs_dict['ints']\n if len(ints) != 0:\n self.int_hdrs.extend(ints)\n else:\n self.int_hdrs = []\n self.float_hdrs = ['Density', 'density', 'MinDensity'] # Cluster\n # These are formated for expected sorting: eg. 
Chr \"09\", \"10\"\n self.strpat_hdrs = {'Chr':'{:>2}', 'chromosome':'{:>2}'}\n if 'floats' in kwargs_dict:\n self.float_hdrs.extend(kwargs_dict['floats'])\n self.idxs_float = [] # run() inits proper values\n self.idxs_int = [] # run() inits proper values\n self.idxs_strpat = [] # run() inits proper values\n # Data Members used by all functions\n self.fin = fin\n self.hdr2idx = None\n self.len = 0\n self.sep = self._get_sep(fin, sep)\n self.hdr_ex = kwargs_dict.get('hdr_ex', None)\n # Data Members used by various functions\n self.ret_list = [] # tbl2list\n self.hdrs_usr = [] # tbl2sublist tbl2list\n self.usr_max_idx = None\n\n # list: Return the one item (a list of items) of interest to the user.\n # sublist: Return the items (a list of lists) of interest to the user.\n # lists: Return all items (a list of lists) read from the tsv/csv file.\n self.fncs = {\n 'list': lambda fld: self.ret_list.extend([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),\n 'sublist': lambda fld: self.ret_list.append([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),\n 'lists': lambda fld: self.ret_list.append(fld)\n }\n\n\n def get_h2i(self, hdrs_usr):\n \"\"\"Read csv/tsv file and return specified data in a list of lists.\"\"\"\n with open(self.fin) as fin_stream:\n for line in fin_stream:\n line = line.rstrip('\\r\\n') # chomp\n if not self.hdr2idx:\n if self.do_hdr(line, hdrs_usr):\n return self.hdr2idx\n return None\n\n def do_hdr(self, line, hdrs_usr):\n \"\"\"Initialize self.h2i.\"\"\"\n # If there is no header hint, consider the first line the header.\n if self.hdr_ex is None:\n self._init_hdr(line, hdrs_usr)\n return True\n # If there is a header hint, examine each beginning line until header hint is found.\n elif self.hdr_ex in line:\n self._init_hdr(line, hdrs_usr)\n return True\n return False\n\n def run(self, fnc_name, hdrs_usr):\n \"\"\"Read csv/tsv file and return specified data in a list of lists.\"\"\"\n fnc = self.fncs[fnc_name]\n with open(self.fin) as fin_stream:\n for lnum, line in enumerate(fin_stream):\n line = line.rstrip('\\r\\n') # chomp\n # Obtain Data if headers have been collected from the first line\n if self.hdr2idx:\n self._init_data_line(fnc, lnum, line)\n # Obtain the header\n else:\n self.do_hdr(line, hdrs_usr)\n if self.log is not None:\n self.log.write(\" {:9} data READ: {}\\n\".format(len(self.ret_list), self.fin))\n return self.ret_list, self.hdr2idx\n\n def get_nts(self):\n \"\"\"Read csv/tsv file and return specified data in a list of lists.\"\"\"\n data = []\n nt_obj = None\n with open(self.fin) as fin_stream:\n for lnum, line in enumerate(fin_stream, 1):\n try:\n line = line.rstrip('\\r\\n') # chomp\n # Obtain Data if headers have been collected from the first line\n if nt_obj is not None:\n flds = re.split(self.sep, line)\n self.convert_ints_floats(flds)\n flds[6] = [s.strip() for s in flds[6].split(',')]\n ntdata = nt_obj._make(flds)\n data.append(ntdata)\n # Obtain the header\n else:\n nt_obj = self._init_nt_hdr(line)\n except RuntimeError:\n # Print headers\n #if nt_obj is not None:\n # sys.stdout.write(\"{HDRS}\\n\".format(HDRS='\\n'.join(nt_obj._fields)))\n flds = re.split(self.sep, line)\n print(len(flds), \"FIELDS\")\n print(flds)\n #raise Exception(\"{FIN}({LNUM}): {LINE}\\n\".format(\n # FIN=self.fin, LNUM=lnum, LINE=line))\n # JUST SKIP LINES WITH INCOMPLETE DATA, BUT PRINT ERROR MESSAGE\n sys.stdout.write(\"**ERROR: {FIN}({LNUM}): {LINE}\\n\".format(\n FIN=self.fin, LNUM=lnum, LINE=line))\n if self.log is not None:\n self.log.write(\" {:9} lines READ: 
{}\\n\".format(len(data), self.fin))\n return data\n\n def hdr_xform(self, hdrs):\n \"\"\"Transform NCBI Gene header fields into valid namedtuple fields.\"\"\"\n xform = []\n hdrs = self.replace_nulls(hdrs)\n for hdr in hdrs:\n hdr = hdr.replace('.', '_')\n hdr = hdr.replace(' ', '_')\n hdr = hdr.replace('#', 'N')\n hdr = hdr.replace('-', '_')\n hdr = hdr.replace('\"', '')\n xform.append(hdr)\n return xform\n\n def _init_nt_hdr(self, line):\n \"\"\"Convert headers into valid namedtuple fields.\"\"\"\n line = line.replace('.', '_')\n line = line.replace(' ', '_')\n line = line.replace('#', 'N')\n line = line.replace('-', '_')\n line = line.replace('\"', '')\n #line = re.sub(r\"_$\", r\"\", line)\n hdrs = re.split(self.sep, line)\n if '' in hdrs:\n hdrs = NCBIgeneFileReader.replace_nulls(hdrs)\n # Init indexes which will be converted to int or float\n self.idxs_int = [idx for idx, hdr in enumerate(hdrs) if hdr in self.int_hdrs]\n self.idxs_float = [idx for idx, hdr in enumerate(hdrs) if hdr in self.float_hdrs]\n assert hdrs[6] == 'Aliases'\n return namedtuple('ntncbi', ' '.join(hdrs))\n\n @staticmethod\n def _get_sep(fin, sep):\n \"\"\"Uses extension(.tsv, .csv) to determine separator.\"\"\"\n if '.tsv' in fin:\n return r'\\t'\n elif '.csv' in fin:\n return r','\n else:\n return sep\n\n @staticmethod\n def replace_nulls(hdrs):\n \"\"\"Replace '' in hdrs.\"\"\"\n ret = []\n idx = 0\n for hdr in hdrs:\n if hdr == '':\n ret.append(\"no_hdr{}\".format(idx))\n else:\n ret.append(hdr)\n return ret\n\n def _init_data_line(self, fnc, lnum, line):\n \"\"\"Process Data line.\"\"\"\n fld = re.split(self.sep, line)\n # Lines may contain different numbers of items.\n # The line should have all columns requested by the user.\n if self.usr_max_idx < len(fld):\n self.convert_ints_floats(fld)\n fnc(fld)\n else:\n for fld in enumerate(zip(self.hdr2idx.keys(), fld)):\n print(fld)\n for hdr in self.hdrs_usr:\n print(hdr)\n print('# ITEMS ON A LINE:', len(fld))\n print('MAX USR IDX:', self.usr_max_idx)\n raise Exception(\"ERROR ON LINE {} IN {}\".format(lnum+1, self.fin))\n\n def convert_ints_floats(self, flds):\n \"\"\"Convert strings to ints and floats, if so specified.\"\"\"\n for idx in self.idxs_float:\n flds[idx] = float(flds[idx])\n for idx in self.idxs_int:\n dig = flds[idx]\n #print 'idx={} ({}) {}'.format(idx, flds[idx], flds) # DVK\n flds[idx] = int(flds[idx]) if dig.isdigit() else dig\n for idx in self.idxs_strpat:\n hdr = self.hdr2idx.items()[idx][0]\n pat = self.strpat_hdrs[hdr]\n flds[idx] = pat.format(flds[idx])\n\n def _init_hdr(self, line, hdrs_usr):\n \"\"\"Initialize self.hdr2idx, self.len, self.idxs_float, and self.idxs_int\"\"\"\n self.hdr2idx = OrderedDict([(v.strip(), i) for i, v in enumerate(re.split(self.sep, line))])\n self.len = len(self.hdr2idx)\n # If user is requesting specific data fields...\n if hdrs_usr is not None:\n # Loop through the user headers\n for usr_hdr in hdrs_usr:\n # If the user header is contained in the file....\n if usr_hdr in self.hdr2idx:\n # Add the user header and the field index to a list\n self.hdrs_usr.append([usr_hdr, self.hdr2idx[usr_hdr]])\n else:\n raise Exception(\"NO COLUMN({}) FOUND:\\n HDR={}\\n\".format(\n hdrs_usr, '\\n HDR='.join(self.hdr2idx.keys())))\n usr_hdrs = [E[0] for E in self.hdrs_usr] if self.hdrs_usr else self.hdr2idx\n self._init_idxs_float(usr_hdrs)\n self._init_idxs_int(usr_hdrs)\n self._init_idxs_strpat(usr_hdrs)\n self.usr_max_idx = max(E[1] for E in self.hdrs_usr) if self.hdrs_usr else len(self.hdr2idx)-1\n\n def 
_init_idxs_float(self, usr_hdrs):\n \"\"\"List of indexes whose values will be floats.\"\"\"\n self.idxs_float = [\n Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.float_hdrs]\n\n def _init_idxs_int(self, usr_hdrs):\n \"\"\"List of indexes whose values will be ints.\"\"\"\n self.idxs_int = [\n Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.int_hdrs]\n\n def _init_idxs_strpat(self, usr_hdrs):\n \"\"\"List of indexes whose values will be strings.\"\"\"\n strpat = self.strpat_hdrs.keys()\n self.idxs_strpat = [\n Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]\n\n\n # Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "7087610", "language": "Python", "matching_score": 3.838442802429199, "max_stars_count": 1, "path": "goatools/parsers/ncbi_gene_file_reader.py" }, { "content": "\"\"\"Read a NCBI Gene gene_result.txt file and write a Python module.\n\nUsage:\n ncbi_gene_results_to_python.py [options]\n\nOptions:\n -h --help show this help message and exit\n\n -i <gene_result.txt> Read NCBI Gene file [default: gene_result.txt]\n -o <gene_result.py> Write Python file [default: gene_result.py]\n\n\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport os\nimport sys\nimport re\nimport datetime\nfrom goatools.cli.docopt_parse import DocOptParse\nfrom goatools.parsers.ncbi_gene_file_reader import NCBIgeneFileReader\n\n\n# pylint: disable=too-few-public-methods\nclass NCBIgeneToPythonCli(object):\n \"\"\"Read a NCBI Gene gene_result.txt file and write a Python module.\"\"\"\n\n kws_dict = set(['i', 'o'])\n\n def __init__(self):\n self.objdoc = DocOptParse(__doc__, self.kws_dict, set())\n\n def cli(self, prt=sys.stdout):\n \"\"\"Command-line interface to print specified GO Terms from the DAG source .\"\"\"\n kws = self.objdoc.get_docargs(prt=None)\n if os.path.exists(kws['i']):\n obj = NCBIgeneFileReader(kws['i'])\n nts = obj.get_nts()\n if nts:\n geneid2nt = self._get_geneid2nt(nts)\n self._wrpy_ncbi_gene_nts(kws['o'], geneid2nt, prt)\n else:\n raise RuntimeError(\"\\n{DOC}\\n**ERROR: NO FILE FOUND: {NCBI}\".format(\n NCBI=kws['i'], DOC=__doc__))\n\n @staticmethod\n def _get_geneid2nt(nts):\n \"\"\"Get geneid2nt given a list of namedtuples.\"\"\"\n geneid2nt = {}\n for ntd in nts:\n geneid = ntd.GeneID\n if geneid not in geneid2nt:\n geneid2nt[geneid] = ntd\n else:\n print(\"DUPLICATE GeneID FOUND {N:9} {SYM}\".format(N=geneid, SYM=ntd.Symbol))\n return geneid2nt\n\n @staticmethod\n def _wrpy_ncbi_gene_nts(fout_py, geneid2nt, log):\n \"\"\"Write namedtuples to a dict in a Python module.\"\"\"\n num_genes = len(geneid2nt)\n with open(fout_py, 'w') as ofstrm:\n docstr = \"Data downloaded from NCBI Gene converted into Python namedtuples.\"\n ofstrm.write('\"\"\"{PYDOC}\"\"\"\\n\\n'.format(PYDOC=docstr))\n ofstrm.write(\"from collections import namedtuple\\n\\n\")\n ofstrm.write('WRITTEN = \"{DATE}\"'.format(\n DATE=re.sub('-', '_', str(datetime.date.today()))))\n ofstrm.write(' # {N} items\\n\\n'.format(N=num_genes))\n ntd = next(iter(geneid2nt.values())) # Access one dictionary value in Python 2\n ofstrm.write(\"#pylint: disable=line-too-long,too-many-lines,invalid-name\\n\")\n ofstrm.write(\"{NtName} = namedtuple('{NtName}', '{FLDS}')\\n\\n\".format(\n NtName=type(ntd).__name__, FLDS=' '.join(ntd._fields)))\n ofstrm.write(\"GENEID2NT = {{ # {N:,} items\\n\".format(N=num_genes))\n for 
geneid, ntd in sorted(geneid2nt.items(), key=lambda t: t[0]):\n ofstrm.write(\" {GeneID} : {NT},\\n\".format(GeneID=geneid, NT=ntd))\n ofstrm.write(\"}\\n\")\n log.write(\" {N:9} geneids WROTE: {PY}\\n\".format(N=num_genes, PY=fout_py))\n\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\n", "id": "3890034", "language": "Python", "matching_score": 4.478847503662109, "max_stars_count": 1, "path": "goatools/cli/ncbi_gene_results_to_python.py" }, { "content": "\"\"\"Command-line interface to print specified GO Terms from the DAG source\n\nUsage:\n prt_terms.py [GO ...] [GO_FILE]\n prt_terms.py [GO ...] [GO_FILE] [options]\n\nOptions:\n -h --help show this help message and exit\n\n -i <file.txt>, --ifile=<sections_in.txt> Read or Write file name [default: sections_in.txt]\n -n <GO_name>, --name=<GO_name> Name of a GO Term\n\n --obo=<file.obo> Ontologies in obo file [default: go-basic.obo].\n\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\nimport sys\nfrom goatools.cli.docopt_parse import DocOptParse\nfrom goatools.cli.gos_get import GetGOs\nfrom goatools.test_data.wr_subobo import WrSubObo\n\n\n# pylint: disable=too-few-public-methods\nclass PrtGOterms(object):\n \"\"\"Command-line interface to print specified GO Terms from the DAG source.\"\"\"\n\n kws_dict = set(['GO', 'GO_FILE', 'name', 'obo'])\n kws_set = set()\n\n def __init__(self):\n self.objdoc = DocOptParse(__doc__, self.kws_dict, self.kws_set)\n self.objsub = WrSubObo()\n\n def cli(self, prt=sys.stdout):\n \"\"\"Command-line interface to print specified GO Terms from the DAG source .\"\"\"\n kws = self.objdoc.get_docargs(prt=None)\n print(\"KWS\", kws)\n goids = GetGOs().get_goids(kws.get('GO'), kws.get('GO_FILE'), sys.stdout)\n if not goids and 'name' in kws:\n goids = self.objsub.get_goids(kws['obo'], kws['name'])\n self.objsub.prt_goterms(kws['obo'], goids, prt, b_prt=False)\n print(\"Printing {N:6} GO IDs: {GOs}\".format(N=len(goids), GOs=goids))\n\n\n# Copyright (C) 2016-2018, <NAME>, <NAME>. All rights reserved.\n", "id": "3995401", "language": "Python", "matching_score": 1.398959994316101, "max_stars_count": 1, "path": "goatools/cli/prt_terms.py" }, { "content": "\"\"\"Get GO IDs from command-line arguments or from an ASCII file.\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2016-2019, <NAME>, <NAME>. 
All rights reserved.\"\n__author__ = \"<NAME>\"\n\nimport os\nimport re\nfrom goatools.gosubdag.go_tasks import get_go2obj_unique\nfrom goatools.godag.consts import Consts\n\n\nclass GetGOs(object):\n \"\"\"Return a list of GO IDs for plotting.\"\"\"\n\n def __init__(self, go2obj=None, max_gos=None):\n self.go2obj = go2obj\n self.max_gos = max_gos\n self.godagconsts = Consts()\n\n def get_goids(self, go_args, fin_goids, prt):\n \"\"\"Return source GO IDs .\"\"\"\n goids = set()\n if fin_goids is not None:\n goids.update(self.rdtxt_gos(fin_goids, prt))\n if go_args:\n goids.update(self.get_goargs(go_args, prt))\n return goids\n\n def get_usrgos(self, fin_goids, prt):\n \"\"\"Return source GO IDs .\"\"\"\n ret = self.get_goids(None, fin_goids, prt)\n # If there have been no GO IDs explicitly specified by the user\n if not ret:\n # If the GO-DAG is sufficiently small, print all GO IDs\n if self.max_gos is not None and len(self.go2obj) < self.max_gos:\n main_gos = set(o.id for go, o in self.go2obj.items() if go != o.id)\n go_leafs = set(go for go, o in self.go2obj.items() if not o.children)\n ret = go_leafs.difference(main_gos)\n else:\n raise RuntimeError(\"GO IDs NEEDED\")\n go2obj = self.get_go2obj(ret)\n return get_go2obj_unique(go2obj)\n\n def get_go2obj(self, goids):\n \"\"\"Return GO Terms for each user-specified GO ID. Note missing GO IDs.\"\"\"\n # Keep the user's original set intact so missing GO IDs can be reported\n goids_found = goids.intersection(self.go2obj.keys())\n if len(goids_found) != len(goids):\n goids_missing = goids.difference(goids_found)\n print(\" {N} MISSING GO IDs: {GOs}\".format(N=len(goids_missing), GOs=goids_missing))\n return {go:self.go2obj[go] for go in goids_found}\n\n @staticmethod\n def rdtxt_gos(go_file, prt):\n \"\"\"Read GO IDs from a file.\"\"\"\n goids_all = set()\n if not os.path.exists(go_file):\n raise RuntimeError(\"CAN NOT READ GO FILE: {FILE}\\n\".format(FILE=go_file))\n re_go = re.compile(r'(GO:\\d{7})+?')\n re_com = re.compile(r'^\\s*#') # Lines starting with a '#' are comment lines and ignored\n with open(go_file) as ifstrm:\n for line in ifstrm:\n # Skip lines that are comments\n if re_com.search(line):\n continue\n # Search for GO IDs on the line\n goids_found = re_go.findall(line)\n if goids_found:\n goids_all.update(goids_found)\n if prt:\n prt.write(\" {N} GO IDs READ: {TXT}\\n\".format(N=len(goids_all), TXT=go_file))\n return goids_all\n\n def get_goargs(self, go_args, prt):\n \"\"\"Get GO IDs and colors for GO IDs from the GO ID runtime arguments.\"\"\"\n goids = set()\n go2color = {}\n # Match on \"GO ID\" or \"GO ID and color\"\n re_gocolor = re.compile(r'(GO:\\d{7})((?:#[0-9a-fA-F]{6})?)')\n for go_arg in go_args:\n mtch = re_gocolor.match(go_arg)\n if mtch:\n goid, color = mtch.groups()\n goids.add(goid)\n if color:\n go2color[goid] = color\n elif go_arg in self.godagconsts.NS2GO:\n goids.add(self.godagconsts.NS2GO[go_arg])\n elif prt:\n prt.write(\"WARNING: UNRECOGNIZED ARG({})\\n\".format(go_arg))\n return goids\n\n# Copyright (C) 2016-2019, <NAME>, <NAME>. 
All rights reserved.\n", "id": "12422925", "language": "Python", "matching_score": 1.1084474325180054, "max_stars_count": 1, "path": "goatools/cli/gos_get.py" }, { "content": "\"\"\"item-DAG tasks.\"\"\"\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\n# ------------------------------------------------------------------------------------\ndef get_id2parents(objs):\n \"\"\"Get all parent item IDs for each item in dict keys.\"\"\"\n id2parents = {}\n for obj in objs:\n _get_id2parents(id2parents, obj.item_id, obj)\n return id2parents\n\ndef get_id2children(objs):\n \"\"\"Get all parent item IDs for each item in dict keys.\"\"\"\n id2children = {}\n for obj in objs:\n _get_id2children(id2children, obj.item_id, obj)\n return id2children\n\ndef get_id2upper(objs):\n \"\"\"Get all parent item IDs for each item in dict keys.\"\"\"\n id2upper = {}\n for obj in objs:\n _get_id2upper(id2upper, obj.item_id, obj)\n return id2upper\n\ndef get_id2lower(objs):\n \"\"\"Get all parent item IDs for each item in dict keys.\"\"\"\n id2lower = {}\n for obj in objs:\n _get_id2lower(id2lower, obj.item_id, obj)\n return id2lower\n\ndef get_relationship_targets(item_ids, relationships, id2rec):\n \"\"\"Get item ID set of item IDs in a relationship target set.\"\"\"\n # Requirements to use this function:\n # 1) item Terms must have been loaded with 'relationships'\n # 2) item IDs in 'item_ids' arguement must be present in id2rec\n # 3) Arg, 'relationships' must be True or an iterable\n reltgt_objs_all = set()\n for goid in item_ids:\n obj = id2rec[goid]\n for reltype, reltgt_objs_cur in obj.relationship.items():\n if relationships is True or reltype in relationships:\n reltgt_objs_all.update(reltgt_objs_cur)\n return reltgt_objs_all\n\n# ------------------------------------------------------------------------------------\ndef _get_id2parents(id2parents, item_id, item_obj):\n \"\"\"Add the parent item IDs for one item object and their parents.\"\"\"\n if item_id in id2parents:\n return id2parents[item_id]\n parent_ids = set()\n for parent_obj in item_obj.parents:\n parent_id = parent_obj.item_id\n parent_ids.add(parent_id)\n parent_ids |= _get_id2parents(id2parents, parent_id, parent_obj)\n id2parents[item_id] = parent_ids\n return parent_ids\n\ndef _get_id2children(id2children, item_id, item_obj):\n \"\"\"Add the child item IDs for one item object and their children.\"\"\"\n if item_id in id2children:\n return id2children[item_id]\n child_ids = set()\n for child_obj in item_obj.children:\n child_id = child_obj.item_id\n child_ids.add(child_id)\n child_ids |= _get_id2children(id2children, child_id, child_obj)\n id2children[item_id] = child_ids\n return child_ids\n\ndef _get_id2upper(id2upper, item_id, item_obj):\n \"\"\"Add the parent item IDs for one item object and their upper.\"\"\"\n if item_id in id2upper:\n return id2upper[item_id]\n upper_ids = set()\n for upper_obj in item_obj.get_goterms_upper():\n upper_id = upper_obj.item_id\n upper_ids.add(upper_id)\n upper_ids |= _get_id2upper(id2upper, upper_id, upper_obj)\n id2upper[item_id] = upper_ids\n return upper_ids\n\ndef _get_id2lower(id2lower, item_id, item_obj):\n \"\"\"Add the lower item IDs for one item object and the objects below them.\"\"\"\n if item_id in id2lower:\n return id2lower[item_id]\n lower_ids = set()\n for lower_obj in item_obj.get_goterms_lower():\n lower_id = lower_obj.item_id\n lower_ids.add(lower_id)\n lower_ids |= _get_id2lower(id2lower, lower_id, lower_obj)\n 
id2lower[item_id] = lower_ids\n return lower_ids\n\n# ------------------------------------------------------------------------------------\nclass CurNHigher(object):\n \"\"\"Fill id2obj with item IDs in relationships.\"\"\"\n\n def __init__(self, relationships, id2obj_all):\n # True or A set of relationships we would like to keep\n self.relationships = relationships\n self.id2obj_all = id2obj_all\n\n def get_id2obj_cur_n_high(self, id2obj_user, id_sources):\n \"\"\"Get id2obj containing: id_srcs and parents.\"\"\"\n if not self.relationships:\n self._get_id2obj_high(id2obj_user, id_sources, self.fill_parentidid2obj_r0)\n else:\n self._get_id2obj_high(id2obj_user, id_sources, self.fill_parentidid2obj_r1)\n\n def _get_id2obj_high(self, id2obj_user, id_sources, fnc_fill):\n \"\"\"Get id2obj containing: id_srcs and parents.\"\"\"\n for idid_user in id_sources:\n idobj_user = self.id2obj_all[idid_user]\n fnc_fill(id2obj_user, idobj_user)\n id2obj_user[idobj_user.item_id] = idobj_user\n if idid_user != idobj_user.item_id:\n id2obj_user[idid_user] = idobj_user\n\n def fill_parentidid2obj_r0(self, id2obj, child_obj):\n \"\"\"Fill id2obj with all parent key item IDs and their objects.\"\"\"\n for parent_obj in child_obj.parents:\n if parent_obj.item_id not in id2obj:\n id2obj[parent_obj.item_id] = parent_obj\n self.fill_parentidid2obj_r0(id2obj, parent_obj)\n\n def fill_parentidid2obj_r1(self, id2obj_user, child_obj):\n \"\"\"Fill id2obj_user with all parent/relationship key item IDs and their objects.\"\"\"\n for higher_obj in self._getobjs_higher(child_obj):\n if higher_obj.item_id not in id2obj_user:\n id2obj_user[higher_obj.item_id] = higher_obj\n self.fill_parentidid2obj_r1(id2obj_user, higher_obj)\n\n def _getobjs_higher(self, idobj):\n \"\"\"Get all parents/relationships on this GOTerm.\"\"\"\n idobjs_higher = set(idobj.parents)\n for reltyp, relidobjs in idobj.relationship.items():\n if self.relationships is True or reltyp in self.relationships:\n idobjs_higher.update(relidobjs)\n return idobjs_higher\n\n\n# Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "3189573", "language": "Python", "matching_score": 2.0151355266571045, "max_stars_count": 1, "path": "goatools/godag/go_tasks.py" }, { "content": "# Copyright (C) 2010-2018 by <NAME> et al. All rights reserved.\n#\n# This code is part of the goatools distribution and goverend by its\n# license. Please see the LICENSE file included with goatools.\n\n\n\"\"\"Read and store Gene Ontology's obo file.\"\"\"\n# -*- coding: UTF-8 -*-\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom goatools.godag.obo_optional_attributes import OboOptionalAttrs\nfrom goatools.godag.typedef import TypeDef\nfrom goatools.godag.typedef import add_to_typedef\n\nGraphEngines = (\"pygraphviz\", \"pydot\")\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME> et al., All rights reserved.\"\n__author__ = \"various\"\n\n\n#pylint: disable=too-few-public-methods\nclass OBOReader(object):\n \"\"\"Read goatools.org's obo file. Load into this iterable class.\n\n Download obo from: http://geneontology.org/ontology/go-basic.obo\n\n >>> reader = OBOReader()\n >>> for rec in reader:\n print(rec)\n \"\"\"\n\n # Scalar attributes for Typedefs:\n # 'is_class_level', 'is_metadata_tag',\n # 'is_transitive', 'transitive_over'])\n\n def __init__(self, obo_file=\"go-basic.obo\", optional_attrs=None):\n \"\"\"Read obo file. 
Load dictionary.\"\"\"\n self.optobj = self._init_optional_attrs(optional_attrs) # OboOptionalAttrs or None\n self.format_version = None # e.g., \"1.2\" of \"format-version:\" line\n self.data_version = None # e.g., \"releases/2016-07-07\" from \"data-version:\" line\n self.typedefs = {}\n\n # True if obo file exists or if a link to an obo file exists.\n if os.path.isfile(obo_file):\n self.obo_file = obo_file\n # GOTerm attributes that are necessary for any operations:\n else:\n raise Exception(\"COULD NOT READ({OBO})\\n\"\n \"download obo file first\\n \"\n \"[http://geneontology.org/ontology/\"\n \"go-basic.obo]\".format(OBO=obo_file))\n\n def __iter__(self):\n \"\"\"Return one GO Term record at a time from an obo file.\"\"\"\n # Wait to open file until needed. Automatically close file when done.\n with open(self.obo_file) as fstream:\n rec_curr = None # Stores current GO Term\n typedef_curr = None # Stores current typedef\n for line in fstream:\n # obo lines start with any of: [Term], [Typedef], /^\\S+:/, or /^\\s*/\n if self.data_version is None:\n self._init_obo_version(line)\n if rec_curr is None and line[0:6].lower() == \"[term]\":\n rec_curr = GOTerm()\n if self.optobj:\n self.optobj.init_datamembers(rec_curr)\n elif typedef_curr is None and line[0:9].lower() == \"[typedef]\":\n typedef_curr = TypeDef()\n elif rec_curr is not None or typedef_curr is not None:\n line = line.rstrip() # chomp\n if line:\n self._add_to_obj(rec_curr, typedef_curr, line)\n else:\n if rec_curr is not None:\n yield rec_curr\n rec_curr = None\n elif typedef_curr is not None:\n # Save typedef.\n self.typedefs[typedef_curr.item_id] = typedef_curr\n typedef_curr = None\n # Return last record, if necessary\n if rec_curr is not None:\n yield rec_curr\n\n def _add_to_obj(self, rec_curr, typedef_curr, line):\n \"\"\"Add information on line to GOTerm or Typedef.\"\"\"\n if rec_curr is not None:\n self._add_to_ref(rec_curr, line)\n else:\n add_to_typedef(typedef_curr, line)\n\n def _init_obo_version(self, line):\n \"\"\"Save obo version and release.\"\"\"\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]\n\n def _add_to_ref(self, rec_curr, line):\n \"\"\"Add new fields to the current reference.\"\"\"\n # Examples of record lines containing ':' include:\n # id: GO:0000002\n # name: mitochondrial genome maintenance\n # namespace: biological_process\n # def: \"The maintenance of ...\n # is_a: GO:0007005 ! 
mitochondrion organization\n if line[:4] == \"id: \":\n assert not rec_curr.item_id\n item_id = line[4:]\n rec_curr.item_id = item_id\n rec_curr.id = item_id\n elif line[:8] == \"alt_id: \":\n rec_curr.alt_ids.add(line[8:])\n elif line[:6] == \"name: \":\n assert not rec_curr.name\n rec_curr.name = line[6:]\n elif line[:11] == \"namespace: \":\n assert not rec_curr.namespace\n rec_curr.namespace = line[11:]\n elif line[:6] == \"is_a: \":\n rec_curr._parents.add(line[6:].split()[0])\n elif line[:13] == \"is_obsolete: \" and line[13:] == \"true\":\n rec_curr.is_obsolete = True\n elif self.optobj and ':' in line:\n self.optobj.update_rec(rec_curr, line)\n\n @staticmethod\n def _init_optional_attrs(optional_attrs):\n \"\"\"Create OboOptionalAttrs or return None.\"\"\"\n if optional_attrs is None:\n return None\n opts = OboOptionalAttrs.get_optional_attrs(optional_attrs)\n if opts:\n return OboOptionalAttrs(opts)\n\n\nclass GOTerm(object):\n \"\"\"\n GO term, actually contain a lot more properties than interfaced here\n \"\"\"\n\n def __init__(self):\n self.id = \"\" # GO:NNNNNNN **DEPRECATED** RESERVED NAME IN PYTHON\n self.item_id = \"\" # GO:NNNNNNN (will replace deprecated \"id\")\n self.name = \"\" # description\n self.namespace = \"\" # BP, CC, MF\n self._parents = set() # is_a basestring of parents\n self.parents = set() # parent records\n self.children = set() # children records\n self.level = None # shortest distance from root node\n self.depth = None # longest distance from root node\n self.is_obsolete = False # is_obsolete\n self.alt_ids = set() # alternative identifiers\n\n def __str__(self):\n ret = ['{GO}\\t'.format(GO=self.item_id)]\n if self.level is not None:\n ret.append('level-{L:>02}\\t'.format(L=self.level))\n if self.depth is not None:\n ret.append('depth-{D:>02}\\t'.format(D=self.depth))\n ret.append('{NAME} [{NS}]'.format(NAME=self.name, NS=self.namespace))\n if self.is_obsolete:\n ret.append('obsolete')\n return ''.join(ret)\n\n def __repr__(self):\n \"\"\"Print GO ID and all attributes in GOTerm class.\"\"\"\n ret = [\"GOTerm('{ID}'):\".format(ID=self.item_id)]\n for key, val in self.__dict__.items():\n if isinstance(val, int) or isinstance(val, str):\n ret.append(\"{K}:{V}\".format(K=key, V=val))\n elif val is not None:\n ret.append(\"{K}: {V} items\".format(K=key, V=len(val)))\n if len(val) < 10:\n if not isinstance(val, dict):\n for elem in val:\n ret.append(\" {ELEM}\".format(ELEM=elem))\n else:\n for (typedef, terms) in val.items():\n ret.append(\" {TYPEDEF}: {NTERMS} items\"\n .format(TYPEDEF=typedef,\n NTERMS=len(terms)))\n for term in terms:\n ret.append(\" {TERM}\".format(TERM=term))\n else:\n ret.append(\"{K}: None\".format(K=key))\n return \"\\n \".join(ret)\n\n def has_parent(self, term):\n \"\"\"Return True if this GO object has a parent GO ID.\"\"\"\n for parent in self.parents:\n if parent.item_id == term or parent.has_parent(term):\n return True\n return False\n\n def has_child(self, term):\n \"\"\"Return True if this GO object has a child GO ID.\"\"\"\n for parent in self.children:\n if parent.item_id == term or parent.has_child(term):\n return True\n return False\n\n def get_all_parents(self):\n \"\"\"Return all parent GO IDs.\"\"\"\n all_parents = set()\n for parent in self.parents:\n all_parents.add(parent.item_id)\n all_parents |= parent.get_all_parents()\n return all_parents\n\n def get_all_upper(self):\n \"\"\"Return all parent GO IDs through both 'is_a' and all relationships.\"\"\"\n all_upper = set()\n for upper in self.get_goterms_upper():\n 
all_upper.add(upper.item_id)\n all_upper |= upper.get_all_upper()\n return all_upper\n\n def get_all_children(self):\n \"\"\"Return all children GO IDs.\"\"\"\n all_children = set()\n for parent in self.children:\n all_children.add(parent.item_id)\n all_children |= parent.get_all_children()\n return all_children\n\n def get_all_lower(self):\n \"\"\"Return all parent GO IDs through both reverse 'is_a' and all relationships.\"\"\"\n all_lower = set()\n for lower in self.get_goterms_lower():\n all_lower.add(lower.item_id)\n all_lower |= lower.get_all_lower()\n return all_lower\n\n def get_all_parent_edges(self):\n \"\"\"Return tuples for all parent GO IDs, containing current GO ID and parent GO ID.\"\"\"\n all_parent_edges = set()\n for parent in self.parents:\n all_parent_edges.add((self.item_id, parent.item_id))\n all_parent_edges |= parent.get_all_parent_edges()\n return all_parent_edges\n\n def get_all_child_edges(self):\n \"\"\"Return tuples for all child GO IDs, containing current GO ID and child GO ID.\"\"\"\n all_child_edges = set()\n for parent in self.children:\n all_child_edges.add((parent.item_id, self.item_id))\n all_child_edges |= parent.get_all_child_edges()\n return all_child_edges\n\n def get_goterms_upper(self):\n \"\"\"Returns a set containing parents and relationship GO Terms.\"\"\"\n # Requires GODag is created with 'relationship' in optional_attrs argument\n # pylint: disable=no-member\n return set.union(self.parents, *self.relationship.values())\n\n def get_goterms_lower(self):\n \"\"\"Returns a set containing children and reverse-relationship GO Terms.\"\"\"\n # Requires GODag is created with 'relationship' in optional_attrs argument\n # pylint: disable=no-member\n return set.union(self.children, *self.relationship_rev.values())\n\n\n#### def write_hier_rec(self, gos_printed, out=sys.stdout,\n#### len_dash=1, max_depth=None, num_child=None, short_prt=False,\n#### include_only=None, go_marks=None,\n#### depth=1, depth_dashes=\"-\"):\n#### \"\"\"Write hierarchy for a GO Term record.\"\"\"\n#### # Added by <NAME>\n#### goid = self.item_id\n#### # Shortens hierarchy report by only printing the hierarchy\n#### # for the sub-set of user-specified GO terms which are connected.\n#### if include_only is not None and goid not in include_only:\n#### return\n#### nrp = short_prt and goid in gos_printed\n#### if go_marks is not None:\n#### out.write('{} '.format('>' if goid in go_marks else ' '))\n#### if len_dash is not None:\n#### # Default character indicating hierarchy level is '-'.\n#### # '=' is used to indicate a hierarchical path printed in detail previously.\n#### letter = '-' if not nrp or not self.children else '='\n#### depth_dashes = ''.join([letter]*depth)\n#### out.write('{DASHES:{N}} '.format(DASHES=depth_dashes, N=len_dash))\n#### if num_child is not None:\n#### out.write('{N:>5} '.format(N=len(self.get_all_children())))\n#### out.write('{GO}\\tL-{L:>02}\\tD-{D:>02}\\t{desc}\\n'.format(\n#### GO=self.item_id, L=self.level, D=self.depth, desc=self.name))\n#### # Track GOs previously printed only if needed\n#### if short_prt:\n#### gos_printed.add(goid)\n#### # Do not print hierarchy below this turn if it has already been printed\n#### if nrp:\n#### return\n#### depth += 1\n#### if max_depth is not None and depth > max_depth:\n#### return\n#### for child in self.children:\n#### child.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child, short_prt,\n#### include_only, go_marks,\n#### depth, depth_dashes)\n\n\nclass GODag(dict):\n \"\"\"Holds the GO DAG as a 
dict.\"\"\"\n\n def __init__(self, obo_file=\"go-basic.obo\", optional_attrs=None, load_obsolete=False, prt=sys.stdout):\n super(GODag, self).__init__()\n self.version = self.load_obo_file(obo_file, optional_attrs, load_obsolete, prt)\n\n def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):\n \"\"\"Read obo file. Store results.\"\"\"\n reader = OBOReader(obo_file, optional_attrs)\n\n # Save alt_ids and their corresponding main GO ID. Add to GODag after populating GO Terms\n alt2rec = {}\n for rec in reader:\n # Save record if:\n # 1) Argument load_obsolete is True OR\n # 2) Argument load_obsolete is False and the GO term is \"live\" (not obsolete)\n if load_obsolete or not rec.is_obsolete:\n self[rec.item_id] = rec\n for alt in rec.alt_ids:\n alt2rec[alt] = rec\n\n # Save the typedefs and parsed optional_attrs\n # self.optobj = reader.optobj\n self.typedefs = reader.typedefs\n\n self._populate_terms(reader.optobj)\n self._set_level_depth(reader.optobj)\n\n # Add alt_ids to go2obj\n for goid_alt, rec in alt2rec.items():\n self[goid_alt] = rec\n desc = self._str_desc(reader)\n if prt is not None:\n prt.write(\"{DESC}\\n\".format(DESC=desc))\n return desc\n\n def _str_desc(self, reader):\n \"\"\"String containing information about the current GO DAG.\"\"\"\n data_version = reader.data_version\n if data_version is not None:\n data_version = data_version.replace(\"releases/\", \"\")\n desc = \"{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms\".format(\n OBO=reader.obo_file, FMT=reader.format_version,\n REL=data_version, N=len(self))\n if reader.optobj:\n desc = \"{D}; optional_attrs({A})\".format(D=desc, A=\" \".join(sorted(reader.optobj.optional_attrs)))\n return desc\n\n\n def _populate_terms(self, optobj):\n \"\"\"Convert GO IDs to GO Term record objects. Populate children.\"\"\"\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)\n\n def _populate_relationships(self, rec_curr):\n \"\"\"Convert GO IDs in relationships to GO Term record objects. 
Populate children.\"\"\"\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids])\n rec_curr.relationship[relationship_type] = parent_recs\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)\n\n def _set_level_depth(self, optobj):\n \"\"\"Set level, depth and add inverted relationships.\"\"\"\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.item_id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.item_id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.item_id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)\n\n def write_dag(self, out=sys.stdout):\n \"\"\"Write info for all GO Terms in obo file, sorted numerically.\"\"\"\n for rec in sorted(self.values()):\n print(rec, file=out)\n\n#### def write_hier_all(self, out=sys.stdout,\n#### len_dash=1, max_depth=None, num_child=None, short_prt=False):\n#### \"\"\"Write hierarchy for all GO Terms in obo file.\"\"\"\n#### # Print: [biological_process, molecular_function, and cellular_component]\n#### for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:\n#### self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)\n####\n#### def write_hier(self, go_id, out=sys.stdout,\n#### len_dash=1, max_depth=None, num_child=None, short_prt=False,\n#### include_only=None, go_marks=None):\n#### \"\"\"Write hierarchy for a GO Term.\"\"\"\n#### gos_printed = set()\n#### self[go_id].write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,\n#### short_prt, include_only, go_marks)\n\n @staticmethod\n def id2int(go_id):\n \"\"\"Given a GO ID, return the int value.\"\"\"\n return int(go_id.replace(\"GO:\", \"\", 1))\n\n def query_term(self, term, verbose=False):\n \"\"\"Given a GO ID, return GO object.\"\"\"\n if term not in self:\n sys.stderr.write(\"Term %s not found!\\n\" % term)\n return\n\n rec = self[term]\n if verbose:\n print(rec)\n sys.stderr.write(\"all parents: {}\\n\".format(\n repr(rec.get_all_parents())))\n sys.stderr.write(\"all children: {}\\n\".format(\n repr(rec.get_all_children())))\n return rec\n\n def paths_to_top(self, term):\n \"\"\" Returns all possible paths to the root node\n\n Each 
path includes the term given. The order of the path is\n top -> bottom, i.e. it starts with the root and ends with the\n given term (inclusively).\n\n Parameters:\n -----------\n - term:\n the ID of the GO term, where the paths begin (i.e. the\n accession 'GO:0003682')\n\n Returns:\n --------\n - a list of lists of GO Terms\n \"\"\"\n # error handling consistent with original authors\n if term not in self:\n sys.stderr.write(\"Term %s not found!\\n\" % term)\n return\n\n def _paths_to_top_recursive(rec):\n if rec.level == 0:\n return [[rec]]\n paths = []\n for parent in rec.parents:\n top_paths = _paths_to_top_recursive(parent)\n for top_path in top_paths:\n top_path.append(rec)\n paths.append(top_path)\n return paths\n\n go_term = self[term]\n return _paths_to_top_recursive(go_term)\n\n def label_wrap(self, label):\n \"\"\"Label text for plot.\"\"\"\n wrapped_label = r\"%s\\n%s\" % (label,\n self[label].name.replace(\",\", r\"\\n\"))\n return wrapped_label\n\n def make_graph_pydot(self, recs, nodecolor,\n edgecolor, dpi,\n draw_parents=True, draw_children=True):\n \"\"\"draw AMIGO style network, lineage containing one query record.\"\"\"\n import pydot\n grph = pydot.Dot(graph_type='digraph', dpi=\"{}\".format(dpi)) # Directed Graph\n edgeset = set()\n usr_ids = [rec.item_id for rec in recs]\n for rec in recs:\n if draw_parents:\n edgeset.update(rec.get_all_parent_edges())\n if draw_children:\n edgeset.update(rec.get_all_child_edges())\n\n rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])\n nodes = {str(ID):pydot.Node(\n self.label_wrap(ID).replace(\"GO:\", \"\"), # Node name\n shape=\"box\",\n style=\"rounded, filled\",\n # Highlight query terms in plum:\n fillcolor=\"beige\" if ID not in usr_ids else \"plum\",\n color=nodecolor)\n for ID in rec_id_set}\n\n # add nodes explicitly via add_node\n for rec_id, node in nodes.items():\n grph.add_node(node)\n\n for src, target in edgeset:\n # default layout in graphviz is top->bottom, so we invert\n # the direction and plot using dir=\"back\"\n grph.add_edge(pydot.Edge(nodes[target], nodes[src],\n shape=\"normal\",\n color=edgecolor,\n label=\"is_a\",\n dir=\"back\"))\n\n return grph\n\n def make_graph_pygraphviz(self, recs, nodecolor,\n edgecolor, dpi,\n draw_parents=True, draw_children=True):\n \"\"\"Draw AMIGO style network, lineage containing one query record.\"\"\"\n import pygraphviz as pgv\n\n grph = pgv.AGraph(name=\"GO tree\")\n\n edgeset = set()\n for rec in recs:\n if draw_parents:\n edgeset.update(rec.get_all_parent_edges())\n if draw_children:\n edgeset.update(rec.get_all_child_edges())\n\n edgeset = [(self.label_wrap(a), self.label_wrap(b))\n for (a, b) in edgeset]\n\n # add nodes explicitly via add_node\n # adding nodes implicitly via add_edge misses nodes\n # without at least one edge\n for rec in recs:\n grph.add_node(self.label_wrap(rec.item_id))\n\n for src, target in edgeset:\n # default layout in graphviz is top->bottom, so we invert\n # the direction and plot using dir=\"back\"\n grph.add_edge(target, src)\n\n grph.graph_attr.update(dpi=\"%d\" % dpi)\n grph.node_attr.update(shape=\"box\", style=\"rounded,filled\",\n fillcolor=\"beige\", color=nodecolor)\n grph.edge_attr.update(shape=\"normal\", color=edgecolor,\n dir=\"back\", label=\"is_a\")\n # highlight the query terms\n for rec in recs:\n try:\n node = grph.get_node(self.label_wrap(rec.item_id))\n node.attr.update(fillcolor=\"plum\")\n except:\n continue\n\n return grph\n\n def draw_lineage(self, recs, nodecolor=\"mediumseagreen\",\n 
edgecolor=\"lightslateblue\", dpi=96,\n lineage_img=\"GO_lineage.png\", engine=\"pygraphviz\",\n gml=False, draw_parents=True, draw_children=True):\n \"\"\"Draw GO DAG subplot.\"\"\"\n assert engine in GraphEngines\n grph = None\n if engine == \"pygraphviz\":\n grph = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,\n draw_parents=draw_parents,\n draw_children=draw_children)\n else:\n grph = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,\n draw_parents=draw_parents, draw_children=draw_children)\n\n if gml:\n import networkx as nx # use networkx to do the conversion\n gmlbase = lineage_img.rsplit(\".\", 1)[0]\n obj = nx.from_agraph(grph) if engine == \"pygraphviz\" else nx.from_pydot(grph)\n\n del obj.graph['node']\n del obj.graph['edge']\n gmlfile = gmlbase + \".gml\"\n nx.write_gml(self.label_wrap, gmlfile)\n sys.stderr.write(\"GML graph written to {0}\\n\".format(gmlfile))\n\n sys.stderr.write((\"lineage info for terms %s written to %s\\n\" %\n ([rec.item_id for rec in recs], lineage_img)))\n\n if engine == \"pygraphviz\":\n grph.draw(lineage_img, prog=\"dot\")\n else:\n grph.write_png(lineage_img)\n\n def update_association(self, association):\n \"\"\"Add the GO parents of a gene's associated GO IDs to the gene's association.\"\"\"\n bad_goids = set()\n # Loop through all sets of GO IDs for all genes\n for goids in association.values():\n parents = set()\n # Iterate thru each GO ID in the current gene's association\n for goid in goids:\n try:\n parents.update(self[goid].get_all_parents())\n except:\n bad_goids.add(goid.strip())\n # Add the GO parents of all GO IDs in the current gene's association\n goids.update(parents)\n if bad_goids:\n sys.stdout.write(\"{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\\n\".format(\n N=len(bad_goids), GOs=\" \".join(bad_goids)))\n\n# Copyright (C) 2010-2018, <NAME>., All rights reserved.\n", "id": "5856100", "language": "Python", "matching_score": 3.169614791870117, "max_stars_count": 0, "path": "goatools/obo_parser.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test that hierarchy below specified GO terms is printed.\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (c) 2017-2018, <NAME>. <NAME>. 
All rights reserved.\"\n\nfrom goatools.cli.wr_hierarchy import WrHierCli\n\n# --o Output file in ASCII text format\n# --no_indent Do not indent GO terms\n# --max_indent max depth for printing relative to GO Term\n# --num_child Print count of total number of children for each GO\n# --concise If a branch has already been printed, do not re-print.\n# Print '===' instead of dashes to note the point of compression\n\ndef test_cli():\n \"\"\"Add and remove markers for a file.\"\"\"\n # pylint: disable=bad-whitespace\n args_exp = [\n # args exp_set expected_dict\n # -------- ------- ---------------------\n ([], {'dag':'go-basic.obo', 'dash_len':6}),\n (['--dag=go-basic.obo'], {'dag':'go-basic.obo', 'dash_len':6}),\n (['-o rpt.txt'], {'dag':'go-basic.obo', 'dash_len':6, 'o':'rpt.txt'}),\n (['--max_indent=7'], {'dag':'go-basic.obo', 'dash_len':6, 'max_indent':7}),\n (['--concise'], {'dag':'go-basic.obo', 'dash_len':6, 'concise':True}),\n (['--no_indent'], {'dag':'go-basic.obo', 'dash_len':6, 'no_indent':True}),\n (['--concise', '--no_indent'], {'dag':'go-basic.obo', 'dash_len':6,\n 'concise':True, 'no_indent':True}),\n ]\n for args, exp_dict in args_exp:\n print(\"ARGS={ARGS}\".format(ARGS=args))\n print(\"EXP={EXP}\".format(EXP=exp_dict))\n obj = WrHierCli(args)\n print(\"DCT: {DCT}\".format(DCT=obj.kws))\n print(\"WWWWWWWWWWWWWWWWWWW WrHierCli\", obj.kws)\n assert obj.kws == exp_dict, \"DCT: ACT({}) != EXP({})\".format(obj.kws, exp_dict)\n print(\"\")\n\n\nif __name__ == '__main__':\n test_cli()\n\n# Copyright (c) 2017-2018, <NAME>, <NAME>. All rights reserved.\n", "id": "5858293", "language": "Python", "matching_score": 1.506530523300171, "max_stars_count": 1, "path": "tests/test_cli_write_hierarchy.py" }, { "content": "\"\"\"Common checks in test data.\"\"\"\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\"\n__author__ = \"<NAME>\"\n\n\ndef _chk_a2bset(exp_a2bset, act_a2bset):\n assert set(exp_a2bset) == set(act_a2bset)\n for goid, exp_goids in exp_a2bset.items():\n act_goids = act_a2bset[goid]\n assert act_goids == exp_goids\n\n\n# Copyright (C) 2010-2018, <NAME>, <NAME>, All rights reserved.\n", "id": "9296737", "language": "Python", "matching_score": 0.10851183533668518, "max_stars_count": 1, "path": "goatools/test_data/checks.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test gracefully exiting if no study genes are in assc or population.\"\"\"\n\nimport os\n# from goatools.rpt.goea_nt_xfrm import MgrNtGOEAs # get_goea_nts_all\nfrom goatools.test_data.genes_NCBI_10090_ProteinCoding import GENEID2NT as GeneID2nt_mus\nfrom goatools.test_data.nature3102_goea import get_geneid2symbol, get_goeaobj\n\n__copyright__ = \"Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\"\n\nREPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../\")\n\n\ndef test_example():\n \"\"\"Test GOEnrichmentStudy::print_results.\"\"\"\n # --------------------------------------------------------------------\n # --------------------------------------------------------------------\n # Gene Ontology Enrichment Analysis (GOEA)\n # --------------------------------------------------------------------\n # --------------------------------------------------------------------\n taxid = 10090 # Mouse study\n # Load ontologies, associations, and population ids\n geneids_pop = GeneID2nt_mus.keys()\n geneids2symbol_study = get_geneid2symbol(\"nbt.3102-S4_GeneIDs.xlsx\")\n goeaobj = get_goeaobj(\"fdr_bh\", geneids_pop, taxid)\n # No study genes at all\n geneids_study_none = 
set()\n goea_results_all = goeaobj.run_study(geneids_study_none)\n assert not goea_results_all, 'NO STUDY GENES TEST FAILED: {R}'.format(R=goea_results_all)\n # No study genes in population or association\n geneids_study_bad = set(['BADVAL'])\n goea_results_all = goeaobj.run_study(geneids_study_bad)\n # goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.05]\n assert not goea_results_all, 'NO VALID STUDY GENES TEST FAILED: {R}'.format(R=goea_results_all)\n # goea_results_all = goeaobj.run_study(geneids_study)\n goeaobj.print_results(goea_results_all, pval=None)\n goeaobj.print_date()\n\n\nif __name__ == '__main__':\n test_example()\n\n# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\n", "id": "12762762", "language": "Python", "matching_score": 7.1397857666015625, "max_stars_count": 0, "path": "tests/test_study_zero.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Test GOEnrichmentStudy::print_results.\"\"\"\n\nimport os\n# from goatools.rpt.goea_nt_xfrm import MgrNtGOEAs # get_goea_nts_all\nfrom goatools.test_data.genes_NCBI_10090_ProteinCoding import GENEID2NT as GeneID2nt_mus\nfrom goatools.test_data.nature3102_goea import get_geneid2symbol, get_goeaobj\n\n__copyright__ = \"Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\"\n\nREPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../\")\n\n\ndef test_example():\n \"\"\"Test GOEnrichmentStudy::print_results.\"\"\"\n # --------------------------------------------------------------------\n # --------------------------------------------------------------------\n # Gene Ontology Enrichment Analysis (GOEA)\n # --------------------------------------------------------------------\n # --------------------------------------------------------------------\n taxid = 10090 # Mouse study\n # Load ontologies, associations, and population ids\n geneids_pop = GeneID2nt_mus.keys()\n geneids2symbol_study = get_geneid2symbol(\"nbt.3102-S4_GeneIDs.xlsx\")\n geneids_study = geneids2symbol_study.keys()\n goeaobj = get_goeaobj(\"fdr_bh\", geneids_pop, taxid)\n # Run GOEA on study\n goea_results_all = goeaobj.run_study(geneids_study)\n goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.05]\n #goea_results_nt = MgrNtGOEAs(goea_results_sig).get_goea_nts_all()\n goeaobj.print_results(goea_results_sig)\n goeaobj.print_date()\n\n\nif __name__ == '__main__':\n test_example()\n\n# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.\n", "id": "3606632", "language": "Python", "matching_score": 1.7403138875961304, "max_stars_count": 0, "path": "tests/test_print_results.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\npython find_enrichment.py study.file population.file gene-association.file\n\nThis program returns P-values for functional enrichment in a cluster of study\ngenes using Fisher's exact test, and corrected for multiple testing (including\nBonferroni, Holm, Sidak, and false discovery rate).\n\nAbout significance cutoff:\n--alpha: test-wise alpha; for each GO term, what significance level to apply\n (most often you don't need to change this other than 0.05 or 0.01)\n--pval: experiment-wise alpha; for the entire experiment, what significance\n level to apply after Bonferroni correction\n\"\"\"\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME>. 
All rights reserved.\"\n__author__ = \"various\"\n\nimport sys\nimport os.path as op\nfrom goatools.cli.find_enrichment import GoeaCliArgs\nfrom goatools.cli.find_enrichment import GoeaCliFnc\n\nsys.path.insert(0, op.join(op.dirname(__file__), \"..\"))\n\n\ndef main():\n \"\"\"Run gene enrichment analysis.\"\"\"\n # Load study, population, associations, and GoDag. Run GOEA.\n obj = GoeaCliFnc(GoeaCliArgs().args)\n # Reduce results to significant results (pval<value)\n results_specified = obj.get_results()\n # Print results in a flat list\n obj.prt_results(results_specified)\n # if obj.sections and obj.args.outfile_detail:\n # #fout_detail = obj.args.outfile_detail if obj.args.outfile_detail else \"goea_details.txt\"\n # objaart = obj.get_objaart()\n # objaart.run(\"GOEA\", results, sys.stdout)\n #### prt_grouped(results, objgoea, args)\n\n\nif __name__ == \"__main__\":\n main()\n\n# Copyright (C) 2010-2018, <NAME> al. All rights reserved.\n", "id": "12078758", "language": "Python", "matching_score": 2.08036470413208, "max_stars_count": 1, "path": "scripts/find_enrichment.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nA list of commonly used multiple correction routines\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport sys\nimport random\nimport numpy as np\nimport collections as cx\n\n__copyright__ = \"Copyright (C) 2010-2018, <NAME> et al., All rights reserved.\"\n__author__ = \"various\"\n\nclass Methods(object):\n \"\"\"Class to manage multipletest methods from both local and remote sources.\"\"\"\n\n # https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/multitest.py\n all_methods = [\n (\"local\", (\"bonferroni\", \"sidak\", \"holm\", \"fdr\")),\n (\"statsmodels\", (\n 'bonferroni', # 0) Bonferroni one-step correction\n 'sidak', # 1) Sidak one-step correction\n 'holm-sidak', # 2) Holm-Sidak step-down method using Sidak adjustments\n 'holm', # 3) Holm step-down method using Bonferroni adjustments\n 'simes-hochberg', # 4) Simes-Hochberg step-up method (independent)\n 'hommel', # 5) Hommel closed method based on Simes tests (non-negative)\n 'fdr_bh', # 6) FDR Benjamini/Hochberg (non-negative)\n 'fdr_by', # 7) FDR Benjamini/Yekutieli (negative)\n 'fdr_tsbh', # 8) FDR 2-stage Benjamini-Hochberg (non-negative)\n 'fdr_tsbky', # 9) FDR 2-stage Benjamini-Krieger-Yekutieli (non-negative)\n 'fdr_gbs', # 10) FDR adaptive Gavrilov-Benjamini-Sarkar\n )),\n\n ]\n prefixes = {'statsmodels':'sm_'}\n NtMethodInfo = cx.namedtuple(\"NtMethodInfo\", \"source method fieldname\")\n\n def __init__(self, usr_methods=None):\n self._srcmethod2fieldname = self._init_srcmethod2fieldname()\n self.statsmodels_multicomp = None\n if usr_methods is None:\n usr_methods = ['bonferroni']\n self._init_methods(usr_methods)\n\n def _init_methods(self, usr_methods):\n \"\"\"From the methods list, set list of methods to be used during GOEA.\"\"\"\n self.methods = []\n for usr_method in usr_methods:\n self._add_method(usr_method)\n\n def _add_method(self, method, method_source=None):\n \"\"\"Determine method source if needed. 
Add method to list.\"\"\"\n try:\n if method_source is not None:\n self._add_method_src(method_source, method)\n else:\n self._add_method_nosrc(method)\n except Exception as inst:\n raise Exception(\"{ERRMSG}\".format(ERRMSG=inst))\n\n def _add_method_nosrc(self, usr_method):\n \"\"\"Add method source, method, and fieldname to list of methods.\"\"\"\n for method_source, available_methods in self.all_methods:\n if usr_method in available_methods:\n fieldname = self.get_fldnm_method(usr_method)\n nmtup = self.NtMethodInfo(method_source, usr_method, fieldname)\n self.methods.append(nmtup)\n return\n for src, prefix in self.prefixes.items():\n if usr_method.startswith(prefix):\n method_source = src\n method = usr_method[len(prefix):]\n nmtup = self.NtMethodInfo(method_source, method, usr_method)\n self.methods.append(nmtup)\n return\n raise self.rpt_invalid_method(usr_method)\n\n def getmsg_valid_methods(self):\n \"\"\"Return a string containing valid method names.\"\"\"\n msg = []\n msg.append(\" Available methods:\")\n for method_source, methods in self.all_methods:\n msg.append(\" {SRC}(\".format(SRC=method_source))\n for method in methods:\n attrname = self._srcmethod2fieldname[(method_source, method)]\n msg.append(\" {ATTR}\".format(ATTR=attrname))\n msg.append(\" )\")\n return \"\\n\".join(msg)\n\n def get_fieldname(self, method_source, method):\n \"\"\"Get the name of the method used to create namedtuple fieldnames which store floats.\"\"\"\n return self._srcmethod2fieldname[(method_source, method)]\n\n def _init_srcmethod2fieldname(self):\n \"\"\"Return an OrderedDict with key, (method_src, method), and value, attrname.\"\"\"\n srcmethod_fieldname = []\n ctr = self._get_method_cnts()\n for method_source, methods in self.all_methods:\n for method in methods:\n prefix = self.prefixes.get(method_source, \"\")\n prefix = prefix if ctr[method] != 1 else \"\"\n fieldname = \"{P}{M}\".format(P=prefix, M=method.replace('-', '_'))\n srcmethod_fieldname.append(((method_source, method), fieldname))\n return cx.OrderedDict(srcmethod_fieldname)\n\n def rpt_invalid_method(self, usr_method):\n \"\"\"Report which methods are available.\"\"\"\n msgerr = \"FATAL: UNRECOGNIZED METHOD({M})\".format(M=usr_method)\n msg = [msgerr, self.getmsg_valid_methods(), msgerr]\n raise Exception(\"\\n\".join(msg))\n\n def _get_method_cnts(self):\n \"\"\"Count the number of times a method is seen.\"\"\"\n ctr = cx.Counter()\n for source_methods in self.all_methods:\n for method in source_methods[1]:\n ctr[method] += 1\n return ctr\n\n def _add_method_src(self, method_source, usr_method, fieldname=None):\n \"\"\"Add method source and method to list of methods.\"\"\"\n fieldname = self._srcmethod2fieldname.get((method_source, usr_method), None)\n if fieldname is not None:\n nmtup = self.NtMethodInfo(method_source, usr_method, fieldname)\n self.methods.append(nmtup)\n else: raise Exception(\"ERROR: FIELD({FN}) METHOD_SOURCE({MS}) AND METHOD({M})\".format(\n FN=fieldname, MS=method_source, M=usr_method))\n\n @staticmethod\n def get_fldnm_method(method):\n \"\"\"Given method and source, return fieldname for method.\"\"\"\n fieldname = method.replace('-', '_')\n return fieldname\n\n def get_statsmodels_multipletests(self):\n \"\"\"Only load statsmodels package if it is used.\"\"\"\n if self.statsmodels_multicomp is not None:\n return self.statsmodels_multicomp\n from statsmodels.sandbox.stats.multicomp import multipletests\n self.statsmodels_multicomp = multipletests\n return self.statsmodels_multicomp\n\n def 
__iter__(self):\n return iter(self.methods)\n\n\nclass _AbstractCorrection(object):\n \"\"\"Base class for local multiple test correction calculations.\"\"\"\n\n def __init__(self, pvals, a=.05):\n self.pvals = self.corrected_pvals = np.array(pvals)\n self.n = len(self.pvals) # number of multiple tests\n self.a = a # type-1 error cutoff for each test\n\n self.set_correction()\n # Reset all pvals > 1 to 1\n self.corrected_pvals[self.corrected_pvals > 1] = 1\n\n def set_correction(self):\n # the purpose of multiple correction is to lower the alpha\n # instead of the canonical value (like .05)\n pass\n\n\nclass Bonferroni(_AbstractCorrection):\n \"\"\"\n >>> Bonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals\n array([ 0.05 , 0.05 , 0.15 , 0.25 , 0.025])\n \"\"\"\n def set_correction(self):\n \"\"\"Do Bonferroni multiple test correction on original p-values.\"\"\"\n self.corrected_pvals *= self.n\n\n\nclass Sidak(_AbstractCorrection):\n \"\"\"http://en.wikipedia.org/wiki/Bonferroni_correction\n >>> Sidak([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals\n array([ 0.04898974, 0.04898974, 0.14696923, 0.24494871, 0.02449487])\n \"\"\"\n\n def set_correction(self):\n \"\"\"Do Sidak multiple test correction on original p-values.\"\"\"\n if self.n != 0:\n correction = self.a * 1. / (1 - (1 - self.a) ** (1. / self.n))\n else:\n correction = 1\n self.corrected_pvals *= correction\n\n\nclass HolmBonferroni(_AbstractCorrection):\n\n \"\"\"http://en.wikipedia.org/wiki/Holm-Bonferroni_method\n given a list of pvals, perform the Holm-Bonferroni correction\n and return the indexes from original list that are significant.\n (cant use p-value as that may be repeated.)\n >>> HolmBonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals\n array([ 0.04 , 0.04 , 0.06 , 0.05 , 0.025])\n \"\"\"\n def set_correction(self):\n \"\"\"Do Holm-Bonferroni multiple test correction on original p-values.\"\"\"\n if len(self.pvals):\n idxs, correction = list(zip(*self._generate_significant()))\n idxs = list(idxs)\n self.corrected_pvals[idxs] *= correction\n\n def _generate_significant(self):\n\n pvals = self.pvals\n pvals_idxs = list(zip(pvals, list(range(len(pvals)))))\n pvals_idxs.sort()\n\n num_pvals = len(self.pvals)\n\n from itertools import groupby\n for pval, idxs in groupby(pvals_idxs, lambda x: x[0]):\n idxs = list(idxs)\n for p, i in idxs:\n if p * 1. 
/ num_pvals < self.a:\n yield (i, num_pvals)\n num_pvals -= len(idxs)\n\n\nclass FDR(object):\n \"\"\"\n Generate a p-value distribution based on re-sampling, as described in:\n http://www.biomedcentral.com/1471-2105/6/168\n \"\"\"\n def __init__(self, p_val_distribution, results, a=.05):\n self.corrected_pvals = fdr = []\n for rec in results:\n q = (sum(1 for x in p_val_distribution if x < rec.p_uncorrected)\n * 1.0 / len(p_val_distribution))\n fdr.append(q)\n\ndef mcorrection_factory(pvals, alpha, method):\n \"\"\"Return 'multiple correction' object of requested AbstractCorrection base class.\"\"\"\n correctioncls = globals().get(method, None)\n if correctioncls is not None:\n return correctioncls(pvals, alpha)\n\n\n\n\n\ndef calc_qval(study_n, pop_n,\n pop, assoc, term_pop, obo_dag, T=500):\n \"\"\"Generate p-value distribution for FDR based on resampling.\"\"\"\n from goatools.pvalcalc import FisherFactory\n from goatools.ratio import count_terms\n sys.stderr.write(\"Generate p-value distribution for FDR \"\n \"based on resampling (this might take a while)\\n\")\n distribution = []\n calc_pvalue = FisherFactory().pval_obj.calc_pvalue\n for i in range(T):\n new_study = random.sample(pop, study_n)\n new_term_study = count_terms(new_study, assoc, obo_dag)\n\n smallest_p = 1\n for term, study_count in list(new_term_study.items()):\n pop_count = term_pop[term]\n p_uncorrected = calc_pvalue(study_count,\n study_n,\n pop_count,\n pop_n)\n if p_uncorrected < smallest_p:\n smallest_p = p_uncorrected\n\n distribution.append(smallest_p)\n if i % 10 == 0:\n sys.stderr.write(\"Sample {0} / {1}: \"\n \"p-value {2}\\n\".format(i, T, smallest_p))\n return distribution\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n\n# Copyright (C) 2010-2018, <NAME>., All rights reserved.\n", "id": "5213446", "language": "Python", "matching_score": 2.028702974319458, "max_stars_count": 1, "path": "goatools/multiple_testing.py" } ]
1.846873
dhermes
[ { "content": "def hello():\n return 42\n", "id": "3023158", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "example/__init__.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport tcp_h2_describe._describe\n\n\nclass Test_describe:\n @staticmethod\n def test_invalid_preface():\n h2_frames = b\"\"\n connection_description = \"client->server\"\n\n with pytest.raises(RuntimeError) as exc_info:\n tcp_h2_describe._describe.describe(\n h2_frames, connection_description, True, None\n )\n\n expected_args = (tcp_h2_describe._describe.MISSING_PREFACE, h2_frames)\n assert exc_info.value.args == expected_args\n\n @staticmethod\n def test_with_preface():\n h2_frames = (\n b\"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\\x00\\x00$\\x04\\x00\\x00\\x00\\x00\"\n b\"\\x00\\x00\\x01\\x00\\x00\\x10\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x04\\x00\"\n b\"\\x00\\xff\\xff\\x00\\x05\\x00\\x00@\\x00\\x00\\x03\\x00\\x00\\x00d\\x00\\x06\"\n b\"\\x00\\x01\\x00\\x00\\x00\\x00\\x06\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x02\"\n b\"\\x00\\x00\\x00\\x00\"\n )\n connection_description = \"client->server\"\n\n message = tcp_h2_describe._describe.describe(\n h2_frames, connection_description, True, None\n )\n expected = \"\\n\".join(\n [\n tcp_h2_describe._describe.HEADER,\n connection_description,\n \"\",\n tcp_h2_describe._describe.PREFACE_PRETTY,\n tcp_h2_describe._describe.FOOTER,\n \"Frame Length = 36 (00 00 24)\",\n \"Frame Type = SETTINGS (04)\",\n \"Flags = UNSET (00)\",\n \"Stream Identifier = 0 (00 00 00 00)\",\n \"Settings =\",\n \" SETTINGS_HEADER_TABLE_SIZE:0x1 -> 4096 (00 01 | 00 00 10 00)\",\n \" SETTINGS_ENABLE_PUSH:0x2 -> 1 (00 02 | 00 00 00 01)\",\n \" SETTINGS_INITIAL_WINDOW_SIZE:0x4 -> 65535 (00 04 | 00 00 ff ff)\",\n \" SETTINGS_MAX_FRAME_SIZE:0x5 -> 16384 (00 05 | 00 00 40 00)\",\n \" SETTINGS_MAX_CONCURRENT_STREAMS:0x3 -> 100 (00 03 | 00 00 00 64)\",\n \" SETTINGS_MAX_HEADER_LIST_SIZE:0x6 -> 65536 (00 06 | 00 01 00 00)\",\n tcp_h2_describe._describe.FOOTER,\n \"Frame Length = 6 (00 00 06)\",\n \"Frame Type = SETTINGS (04)\",\n \"Flags = UNSET (00)\",\n \"Stream Identifier = 0 (00 00 00 00)\",\n \"Settings =\",\n \" SETTINGS_ENABLE_PUSH:0x2 -> 0 (00 02 | 00 00 00 00)\",\n tcp_h2_describe._describe.FOOTER,\n ]\n )\n assert message == expected\n\n @staticmethod\n def test_without_preface():\n h2_frames = (\n b\"\\x00\\x00$\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x10\\x00\\x00\"\n b\"\\x02\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\xff\\xff\\x00\\x05\\x00\\x00@\"\n b\"\\x00\\x00\\x03\\x00\\x00\\x00d\\x00\\x06\\x00\\x01\\x00\\x00\"\n )\n connection_description = \"server->client\"\n\n message = tcp_h2_describe._describe.describe(\n h2_frames, connection_description, False, None\n )\n expected = \"\\n\".join(\n [\n tcp_h2_describe._describe.HEADER,\n connection_description,\n \"\",\n \"Frame Length = 36 (00 00 24)\",\n \"Frame Type = SETTINGS (04)\",\n \"Flags = UNSET (00)\",\n \"Stream Identifier = 0 (00 00 00 00)\",\n \"Settings =\",\n 
\" SETTINGS_HEADER_TABLE_SIZE:0x1 -> 4096 (00 01 | 00 00 10 00)\",\n \" SETTINGS_ENABLE_PUSH:0x2 -> 0 (00 02 | 00 00 00 00)\",\n \" SETTINGS_INITIAL_WINDOW_SIZE:0x4 -> 65535 (00 04 | 00 00 ff ff)\",\n \" SETTINGS_MAX_FRAME_SIZE:0x5 -> 16384 (00 05 | 00 00 40 00)\",\n \" SETTINGS_MAX_CONCURRENT_STREAMS:0x3 -> 100 (00 03 | 00 00 00 64)\",\n \" SETTINGS_MAX_HEADER_LIST_SIZE:0x6 -> 65536 (00 06 | 00 01 00 00)\",\n tcp_h2_describe._describe.FOOTER,\n ]\n )\n assert message == expected\n", "id": "5867905", "language": "Python", "matching_score": 2.357881546020508, "max_stars_count": 0, "path": "tests/unit/test__describe.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport struct\nimport textwrap\n\nimport hpack\n\n\nPREFACE = b\"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\"\nPREFACE_PRETTY = r\"\"\"Client Connection Preface = b'PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n'\nHexdump (Client Connection Preface) =\n 50 52 49 20 2a 20 48 54 54 50 2f 32 2e 30 0d 0a\n 0d 0a 53 4d 0d 0a 0d 0a\"\"\"\nMISSING_PREFACE = (\n \"Expected TCP packet data to begin with client connection preface\"\n)\nHEADER = \"=\" * 60\nFOOTER = \"-\" * 40\nSTRUCT_H = struct.Struct(\">H\")\nSTRUCT_L = struct.Struct(\">L\")\nHPACK_DECODER = hpack.Decoder()\n# See: https://http2.github.io/http2-spec/#iana-frames\nFRAME_TYPES = {\n 0x0: \"DATA\",\n 0x1: \"HEADERS\",\n 0x2: \"PRIORITY\",\n 0x3: \"RST_STREAM\",\n 0x4: \"SETTINGS\",\n 0x5: \"PUSH_PROMISE\",\n 0x6: \"PING\",\n 0x7: \"GOAWAY\",\n 0x8: \"WINDOW_UPDATE\",\n 0x9: \"CONTINUATION\",\n}\n# The following bit flags are defined \"globally\" (i.e. across all types), but\n# some flags apply to only certain frame types (e.g. END_STREAM only applies\n# to DATA or HEADERS frames). 
This is why the mapping is keys first based on\n# the frame type.\nFLAG_ACK = 0x1\nFLAG_END_STREAM = 0x1\nFLAG_END_HEADERS = 0x4\nFLAG_PADDED = 0x8\nFLAG_PRIORITY = 0x20\nFLAGS_DEFINED = {\n # See: https://http2.github.io/http2-spec/#DATA\n \"DATA\": {FLAG_END_STREAM: \"END_STREAM\", FLAG_PADDED: \"PADDED\"},\n # See: https://http2.github.io/http2-spec/#HEADERS\n \"HEADERS\": {\n FLAG_END_STREAM: \"END_STREAM\",\n FLAG_END_HEADERS: \"END_HEADERS\",\n FLAG_PADDED: \"PADDED\",\n FLAG_PRIORITY: \"PRIORITY\",\n },\n # See: https://http2.github.io/http2-spec/#PRIORITY\n \"PRIORITY\": {},\n # See: https://http2.github.io/http2-spec/#RST_STREAM\n \"RST_STREAM\": {},\n # See: https://http2.github.io/http2-spec/#SETTINGS\n \"SETTINGS\": {FLAG_ACK: \"ACK\"},\n # See: https://http2.github.io/http2-spec/#PUSH_PROMISE\n \"PUSH_PROMISE\": {FLAG_END_HEADERS: \"END_HEADERS\", FLAG_PADDED: \"PADDED\"},\n # See: https://http2.github.io/http2-spec/#PING\n \"PING\": {FLAG_ACK: \"ACK\"},\n # See: https://http2.github.io/http2-spec/#GOAWAY\n \"GOAWAY\": {},\n # See: https://http2.github.io/http2-spec/#WINDOW_UPDATE\n \"WINDOW_UPDATE\": {},\n # See: https://http2.github.io/http2-spec/#CONTINUATION\n \"CONTINUATION\": {FLAG_END_HEADERS: \"END_HEADERS\"},\n}\n# NOTE: Using an ``object()`` sentinel for an identity check will not work\n# across threads. However, it's not expected that code using this module\n# will be forked.\nUNSET = object()\nFRAME_PAYLOAD_HANDLERS = {\n \"DATA\": UNSET,\n \"HEADERS\": UNSET,\n \"PRIORITY\": UNSET,\n \"RST_STREAM\": UNSET,\n \"SETTINGS\": UNSET,\n \"PUSH_PROMISE\": UNSET,\n \"PING\": UNSET,\n \"GOAWAY\": UNSET,\n \"WINDOW_UPDATE\": UNSET,\n \"CONTINUATION\": UNSET,\n}\nRESERVED_HIGHEST_BIT = 0x80000000\nSETTINGS = {\n # See: https://http2.github.io/http2-spec/#SettingValues\n 0x1: \"SETTINGS_HEADER_TABLE_SIZE\",\n 0x2: \"SETTINGS_ENABLE_PUSH\",\n 0x3: \"SETTINGS_MAX_CONCURRENT_STREAMS\",\n 0x4: \"SETTINGS_INITIAL_WINDOW_SIZE\",\n 0x5: \"SETTINGS_MAX_FRAME_SIZE\",\n 0x6: \"SETTINGS_MAX_HEADER_LIST_SIZE\",\n # See: https://tools.ietf.org/html/rfc8441\n 0x8: \"SETTINGS_ENABLE_CONNECT_PROTOCOL\",\n}\n\n\ndef simple_hexdump(bytes_, row_size=16):\n \"\"\"Convert a bytestring into hex characters.\n\n This is called \"simple\" because it doesn't print the index in the leftmost\n column or the printable characters in the rightmost column (as the CLI\n ``hexdump -C`` does).\n\n Args:\n bytes_ (bytes): The bytestring to convert.\n row_size (int): The number of bytes that should go in each row of\n output. 
If ``row_size`` is ``-1``, then all output will go in\n a single row.\n\n Returns:\n str: The hexdump of ``bytes_``.\n \"\"\"\n # NOTE: This utilizes the fact that iterating over a bytestring produces\n # the corresponding integers for each character.\n if row_size == -1:\n return \" \".join(f\"{c:02x}\" for c in bytes_)\n\n rows = []\n for i in range(0, len(bytes_), row_size):\n rows.append(\" \".join(f\"{c:02x}\" for c in bytes_[i : i + row_size]))\n return \"\\n\".join(rows)\n\n\ndef describe_flags(frame_type, flags):\n \"\"\"Convert a set of flags into a description.\n\n Args:\n frame_type (str): The type of the current frame.\n flags (int): The flags for the current frame.\n\n Returns:\n str: The \"pretty\" description of the flags.\n\n Raises:\n RuntimeError: If not all bit flags are accounted for.\n \"\"\"\n flag_map = FLAGS_DEFINED[frame_type]\n\n remaining = flags\n description_parts = []\n for flag_value in sorted(flag_map.keys()):\n if remaining & flag_value == flag_value:\n remaining -= flag_value\n description_parts.append(\n f\"{flag_map[flag_value]}:{hex(flag_value)}\"\n )\n\n if remaining != 0:\n raise RuntimeError(\"Some flags not accounted for\", frame_type, flags)\n\n if not description_parts:\n return \"UNSET\"\n\n return \" | \".join(description_parts)\n\n\ndef default_payload_handler(frame_payload, unused_flags):\n \"\"\"Default handler for an HTTP/2 frame payload.\n\n Acts as identity function.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n unused_flags (int): The flags for the frame payload.\n\n Returns:\n str: Either an empty string (if the frame payload is empty) or a\n pretty printed version of the payload along with a hexdump.\n \"\"\"\n if frame_payload == b\"\":\n return \"\"\n\n return \"\\n\".join(\n [\n \"Frame Payload =\",\n f\" {frame_payload}\",\n \"Hexdump (Frame Payload) =\",\n textwrap.indent(simple_hexdump(frame_payload), \" \"),\n ]\n )\n\n\ndef handle_headers_payload(frame_payload, flags):\n \"\"\"Handle a HEADERS HTTP/2 frame payload.\n\n .. HEADERS spec: https://http2.github.io/http2-spec/#HEADERS\n .. header compression and decompression: https://http2.github.io/http2-spec/#HeaderBlock\n\n See `HEADERS spec`_ and `header compression and decompression`_.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n flags (int): The flags for the frame payload.\n\n Returns:\n str: A list of the headers in the payload and the hexdump for\n ``frame_payload``.\n\n Raises:\n NotImplementedError: If ``flags`` has ``PADDED`` set.\n NotImplementedError: If ``flags`` has ``PRIORITY`` set.\n \"\"\"\n if flags & FLAG_PADDED == FLAG_PADDED:\n raise NotImplementedError(\n \"PADDED flag not currently supported for headers\"\n )\n if flags & FLAG_PRIORITY == FLAG_PRIORITY:\n raise NotImplementedError(\n \"PRIORITY flag not currently supported for headers\"\n )\n\n lines = [\"Headers =\"]\n headers = HPACK_DECODER.decode(frame_payload)\n lines.extend(f\" {key!r} -> {value!r}\" for key, value in headers)\n lines.append(\"Hexdump (Compressed Headers) =\")\n lines.append(textwrap.indent(simple_hexdump(frame_payload), \" \"))\n return \"\\n\".join(lines)\n\n\ndef handle_settings_payload(frame_payload, unused_flags):\n \"\"\"Handle a SETTINGS HTTP/2 frame payload.\n\n .. 
SETTINGS spec: https://http2.github.io/http2-spec/#SETTINGS\n\n See `SETTINGS spec`_.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n unused_flags (int): The flags for the frame payload.\n\n Returns:\n str: A list of all the settings in ``frame_payload``, as well as a\n hexdump for each 6-octet setting.\n\n Raises:\n ValueError: If the length of ``frame_payload`` is not a multiple of 6.\n \"\"\"\n num_settings, remainder = divmod(len(frame_payload), 6)\n if remainder != 0:\n raise ValueError(\n \"The length of the frame payload is not a multiple of 6.\",\n frame_payload,\n )\n\n if num_settings == 0:\n return \"\"\n\n lines = [\"Settings =\"]\n for setting in range(num_settings):\n start = 6 * setting\n\n setting_id, = STRUCT_H.unpack(frame_payload[start : start + 2])\n setting_id_str = SETTINGS.get(setting_id, \"UNKNOWN\")\n setting_id_hex = simple_hexdump(\n frame_payload[start : start + 2], row_size=-1\n )\n\n setting_value, = STRUCT_L.unpack(frame_payload[start + 2 : start + 6])\n setting_value_hex = simple_hexdump(\n frame_payload[start + 2 : start + 6], row_size=-1\n )\n\n lines.append(\n f\" {setting_id_str}:{hex(setting_id)} -> \"\n f\"{setting_value} ({setting_id_hex} | {setting_value_hex})\"\n )\n\n return \"\\n\".join(lines)\n\n\ndef handle_ping_payload(frame_payload, unused_flags):\n \"\"\"Handle a PING HTTP/2 frame payload.\n\n .. PING spec: https://http2.github.io/http2-spec/#PING\n\n See `PING spec`_.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n unused_flags (int): The flags for the frame payload.\n\n Returns:\n str: The opaque data in ``frame_payload`` as a hexdump.\n\n Raises:\n ValueError: If the length of ``frame_payload`` is not 8.\n \"\"\"\n if len(frame_payload) != 8:\n raise ValueError(\n \"The length of the frame payload is not 8.\", frame_payload\n )\n\n opaque_data = simple_hexdump(frame_payload, row_size=-1)\n return f\"Opaque Data = {opaque_data}\"\n\n\ndef handle_window_update_payload(frame_payload, unused_flags):\n \"\"\"Handle a WINDOW_UPDATE HTTP/2 frame payload.\n\n .. WINDOW_UPDATE spec: https://http2.github.io/http2-spec/#WINDOW_UPDATE\n\n See `WINDOW_UPDATE spec`_.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n unused_flags (int): The flags for the frame payload.\n\n Returns:\n str: Description of the reserved bit, window size increment and display\n of the hexdump for ``frame_payload``.\n\n Raises:\n ValueError: If the ``frame_payload`` does not have 4 bytes.\n \"\"\"\n if len(frame_payload) != 4:\n raise ValueError(\"\")\n\n window_size_increment, = STRUCT_L.unpack(frame_payload)\n reserved_bit = 0\n if window_size_increment & RESERVED_HIGHEST_BIT == RESERVED_HIGHEST_BIT:\n reserved_bit = 1\n window_size_increment -= RESERVED_HIGHEST_BIT\n\n return (\n f\"Reserved Bit = {reserved_bit}, \"\n f\"Window Size Increment = {window_size_increment} \"\n f\"({simple_hexdump(frame_payload, row_size=-1)})\"\n )\n\n\ndef next_h2_frame(h2_frames):\n \"\"\"Parse the next HTTP/2 frame from partially parsed TCP packet data.\n\n .. frame header spec: https://http2.github.io/http2-spec/#FrameHeader\n\n Args:\n h2_frames (bytes): The remaining unparsed HTTP/2 frames (as raw bytes)\n from TCP packet data.\n\n Returns:\n Tuple[List[str], bytes]: A pair of\n * The message parts for the parsed HTTP/2 frame.\n * The remaining bytes in ``h2_frames``; i.e. the frame that was just\n parsed will be removed.\n\n Raises:\n RuntimeError: If ``h2_frames`` contains fewer than 9 bytes. 
This is\n because all frames begin with a fixed 9-octet header followed by\n a variable-length payload. See `frame header spec`_.\n RuntimeError: If ``h2_frames`` contains fewer than ``9 + frame_length``\n bytes. The ``frame_length`` is determined by the first 3 bytes.\n \"\"\"\n if len(h2_frames) < 9:\n raise RuntimeError(\n \"Not large enough to contain an HTTP/2 frame\", h2_frames\n )\n\n # Frame length\n frame_length, = STRUCT_L.unpack(b\"\\x00\" + h2_frames[:3])\n frame_length_hex = simple_hexdump(h2_frames[:3], row_size=-1)\n parts = [f\"Frame Length = {frame_length} ({frame_length_hex})\"]\n # Frame Type\n frame_type = FRAME_TYPES[h2_frames[3]]\n frame_type_hex = simple_hexdump(h2_frames[3:4], row_size=-1)\n parts.append(f\"Frame Type = {frame_type} ({frame_type_hex})\")\n # Flags\n flags = h2_frames[4]\n flags_str = describe_flags(frame_type, flags)\n flags_hex = simple_hexdump(h2_frames[4:5], row_size=-1)\n parts.append(f\"Flags = {flags_str} ({flags_hex})\")\n # Stream Identifier\n stream_identifier, = STRUCT_L.unpack(h2_frames[5:9])\n stream_identifier_hex = simple_hexdump(h2_frames[5:9], row_size=-1)\n parts.append(\n f\"Stream Identifier = {stream_identifier} ({stream_identifier_hex})\"\n )\n # Frame Payload\n frame_payload = h2_frames[9 : 9 + frame_length]\n if len(frame_payload) != frame_length:\n raise RuntimeError(\n \" HTTP/2 frame not large enough to contain frame payload\",\n h2_frames,\n )\n frame_payload_part = handle_frame(frame_type, frame_payload, flags)\n if frame_payload_part != \"\":\n parts.append(frame_payload_part)\n\n return parts, h2_frames[9 + frame_length :]\n\n\ndef describe(h2_frames, connection_description, expect_preface, proxy_line):\n \"\"\"Describe an HTTP/2 frame.\n\n .. connection header spec: https://http2.github.io/http2-spec/#ConnectionHeader\n .. proxy protocol: https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html\n\n Args:\n h2_frames (bytes): The raw bytes of TCP packet data containing HTTP/2\n frames.\n connection_description (str): A description of the RECV->SEND\n relationship for a socket pair.\n expect_preface (bool): Indicates if the ``h2_frames`` should begin\n with the client connection preface. This should only be\n :data:`True` on the data from the **first** TCP packet for the\n client socket. See `connection header spec`_.\n proxy_line (Optional[bytes]): An optional `proxy protocol`_ line parsed\n from the first frame.\n\n Returns:\n str: The description of ``h2_frames``, expected to be printed by the\n caller.\n\n Raises:\n RuntimeError: If ``expect_preface`` is :data:`True` but ``h2_frames``\n does not begin with the client connection preface.\n \"\"\"\n parts = [HEADER, connection_description, \"\"]\n if proxy_line is not None:\n parts.extend(\n [\n \"Proxy Protocol Header =\",\n f\" {proxy_line}\",\n \"Hexdump (Proxy Protocol Header) =\",\n textwrap.indent(simple_hexdump(proxy_line), \" \"),\n FOOTER,\n ]\n )\n\n if expect_preface:\n if not h2_frames.startswith(PREFACE):\n raise RuntimeError(MISSING_PREFACE, h2_frames)\n\n parts.extend([PREFACE_PRETTY, FOOTER])\n h2_frames = h2_frames[len(PREFACE) :]\n\n while h2_frames:\n frame_parts, h2_frames = next_h2_frame(h2_frames)\n parts.extend(frame_parts)\n parts.append(FOOTER)\n\n return \"\\n\".join(parts)\n\n\ndef register_payload_handler(frame_type, handler):\n \"\"\"Register a handler for frame payloads.\n\n .. 
note::\n\n This function updates a mapping, but is not threadsafe.\n\n This function should be called well before :func:`serve_proxy`.\n\n Args:\n frame_type (str): A frame type, e.g. ``DATA``.\n handler (Callable[[bytes, int], str]): A handler for a frame payload.\n The arguments are ``frame_payload`` and ``flags`` and the return\n value is a string.\n\n Raises:\n ValueError: If ``frame_type`` is an invalid value.\n KeyError: If ``frame_type`` already has a registered handler.\n \"\"\"\n existing = FRAME_PAYLOAD_HANDLERS.get(frame_type)\n if existing is None:\n raise ValueError(f\"Invalid frame type {frame_type}\")\n\n if existing is not UNSET:\n raise KeyError(f\"Frame type {frame_type} already has a handler\")\n\n FRAME_PAYLOAD_HANDLERS[frame_type] = handler\n\n\ndef handle_frame(frame_type, frame_payload, flags):\n \"\"\"Register a handler for frame payloads.\n\n Args:\n frame_type (str): A frame type, e.g. ``DATA``.\n frame_payload (bytes): The frame payload to be parsed.\n flags (int): The flags for the frame payload.\n\n Returns:\n str: The full description of the frame payload.\n\n Raises:\n ValueError: If ``frame_type`` is an invalid value.\n \"\"\"\n handler = FRAME_PAYLOAD_HANDLERS.get(frame_type)\n if handler is None:\n raise ValueError(f\"Invalid frame type {frame_type}\")\n\n if handler is UNSET:\n handler = default_payload_handler\n\n return handler(frame_payload, flags)\n\n\ndef register_setting(setting_id, setting_name):\n \"\"\"Add a custom setting to the registry.\n\n This allows callers to add custom settings (e.g.\n ``GRPC_ALLOW_TRUE_BINARY_METADATA``) based on systems build on top of\n HTTP/2.\n\n Args:\n setting_id (int): The setting to be added.\n setting_name (str): The name of the setting being added.\n\n Raises:\n KeyError: If ``setting_id`` is already registered.\n \"\"\"\n if setting_id in SETTINGS:\n raise KeyError(f\"Setting {setting_id} is already set\")\n\n SETTINGS[setting_id] = setting_name\n\n\n# Register the frame payload handlers.\nregister_payload_handler(\"HEADERS\", handle_headers_payload)\nregister_payload_handler(\"WINDOW_UPDATE\", handle_window_update_payload)\nregister_payload_handler(\"SETTINGS\", handle_settings_payload)\nregister_payload_handler(\"PING\", handle_ping_payload)\n", "id": "772473", "language": "Python", "matching_score": 3.736128091812134, "max_stars_count": 0, "path": "src/tcp_h2_describe/_describe.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport os\nimport struct\nimport sys\nimport textwrap\n\nimport google.protobuf.message\nimport grpc_reflection.v1alpha.reflection_pb2\nimport tcp_h2_describe\nimport tcp_h2_describe._describe\n\nimport users_pb2\n\n\nsimple_hexdump = tcp_h2_describe._describe.simple_hexdump\nFLAG_PADDED = tcp_h2_describe._describe.FLAG_PADDED\nSTRUCT_L = struct.Struct(\">L\")\nREFLECTION_REQUEST = (\n grpc_reflection.v1alpha.reflection_pb2.ServerReflectionRequest\n)\nREFLECTION_RESPONSE = (\n 
grpc_reflection.v1alpha.reflection_pb2.ServerReflectionResponse\n)\nPB_TYPES = (\n users_pb2.User,\n users_pb2.AddUserResponse,\n REFLECTION_REQUEST,\n REFLECTION_RESPONSE,\n)\n\n\ndef _redirect_stderr(destination, file_descriptor):\n sys.stderr.close()\n os.dup2(destination.fileno(), file_descriptor)\n sys.stderr = os.fdopen(file_descriptor, \"w\")\n\n\n@contextlib.contextmanager\ndef stderr_to_devnull():\n # H/T: https://stackoverflow.com/a/17954769/1068170\n file_descriptor = sys.stderr.fileno()\n\n with os.fdopen(os.dup(file_descriptor), \"w\") as old_stderr:\n with open(os.devnull, \"w\") as file_obj:\n _redirect_stderr(file_obj, file_descriptor)\n\n try:\n yield # allow code to be run with the redirected stdout\n finally:\n _redirect_stderr(old_stderr, file_descriptor)\n\n\ndef _maybe_parse(pb_bytes, pb_class):\n \"\"\"Attempt to parse a protobuf to a given message class.\n\n Args:\n pb_bytes (str): A raw protobuf serialized as a bytestring.\n pb_class (type): A protobuf message type.\n\n Returns:\n object: An instance of ``pb_class`` if parsing succeeded, otherwise\n :data:`None`.\n \"\"\"\n pb = pb_class()\n try:\n # NOTE: If ``ParseFromString()`` fails, the underlying Python binary\n # extension may print a message to STDERR, so we temporarily\n # send STDERR to ``/dev/null`` during the function call.\n with stderr_to_devnull():\n pb.ParseFromString(pb_bytes)\n except google.protobuf.message.DecodeError:\n return None\n\n pb.DiscardUnknownFields()\n if pb.SerializeToString() == pb_bytes:\n return pb\n\n return None\n\n\ndef _parse_pb_prune(matches):\n \"\"\"Prune the list of matches in for a given serialized protobuf.\n\n Args:\n matches (list): A list of matched protobufs.\n\n Returns:\n list: A (potentially pruned) subset of ``matches``.\n \"\"\"\n if len(matches) != 2:\n return matches\n\n pb1, pb2 = matches\n # Break a tie between reflection request/response by just \"guessing\" it is\n # a response.\n if isinstance(pb1, REFLECTION_REQUEST) and isinstance(\n pb2, REFLECTION_RESPONSE\n ):\n return [pb2]\n if isinstance(pb1, REFLECTION_RESPONSE) and isinstance(\n pb2, REFLECTION_REQUEST\n ):\n return [pb1]\n\n return matches\n\n\ndef parse_pb(pb_bytes):\n \"\"\"Parse a serialized protobuf and display with message name.\n\n Args:\n pb_bytes (str): A raw protobuf serialized as a bytestring.\n\n Returns:\n Tuple[str, str]: Pair of the full name of the matched message type and\n a string representation of the protobuf (with field names, etc.).\n\n Raises:\n ValueError: If ``pb_bytes`` could not be matched to a message type.\n \"\"\"\n matches = []\n for pb_class in PB_TYPES:\n pb = _maybe_parse(pb_bytes, pb_class)\n if pb is not None:\n matches.append(pb)\n\n matches = _parse_pb_prune(matches)\n if len(matches) != 1:\n raise ValueError(\n \"Serialized protobuf could not be matched to a message type\",\n pb_bytes,\n matches,\n )\n\n pb = matches[0]\n return pb.DESCRIPTOR.full_name, str(pb).rstrip()\n\n\ndef handle_data_payload(frame_payload, flags):\n \"\"\"Handle a DATA HTTP/2 frame payload.\n\n This assumes **every** DATA frame is a serialized protobuf sent over\n gRPC with a length prefix.\n\n .. 
DATA spec: https://http2.github.io/http2-spec/#DATA\n\n See `DATA spec`_.\n\n Args:\n frame_payload (bytes): The frame payload to be parsed.\n flags (int): The flags for the frame payload.\n\n Returns:\n str: The deserialized protobuf from ``frame_payload``.\n\n Raises:\n NotImplementedError: If ``flags`` has ``PADDED`` set.\n NotImplementedError: If the first byte is ``\\x01``.\n ValueError: If the first byte is not ``\\x00`` or ``\\x01``.\n ValueError: If the length of ``frame_payload`` does not match the\n length prefix.\n \"\"\"\n if flags & FLAG_PADDED == FLAG_PADDED:\n raise NotImplementedError(\n \"PADDED flag not currently supported for data\"\n )\n\n if frame_payload == b\"\":\n return \"\"\n\n # See: https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md\n is_compressed = frame_payload[:1]\n if is_compressed != b\"\\x00\":\n if is_compressed == b\"\\x01\":\n raise NotImplementedError(\n \"Protobuf over gRPC only supported without compression\",\n frame_payload,\n )\n\n raise ValueError(\n \"Unexpected compressed flag for gRPC\", is_compressed, frame_payload\n )\n\n # NOTE: This will fail if ``frame_payload`` has fewer than 5 bytes.\n length, = STRUCT_L.unpack(frame_payload[1:5])\n if len(frame_payload) != 5 + length:\n raise ValueError(\n \"Frame payload has unexpected length\", frame_payload, 5 + length\n )\n length_bytes = simple_hexdump(frame_payload[1:5])\n\n pb_bytes = frame_payload[5:]\n parts = [\n \"gRPC Compressed Flag = 0 (00)\",\n f\"Protobuf Length = {length} ({length_bytes})\",\n ]\n if length == 0:\n return \"\\n\".join(parts)\n\n pb_name, pb_str = parse_pb(pb_bytes)\n parts.extend(\n [\n f\"Protobuf Message ({pb_name}) =\",\n textwrap.indent(pb_str, \" \"),\n \"Hexdump (Protobuf Message) =\",\n textwrap.indent(simple_hexdump(pb_bytes), \" \"),\n ]\n )\n return \"\\n\".join(parts)\n\n\ndef main():\n tcp_h2_describe.register_payload_handler(\"DATA\", handle_data_payload)\n # See: https://github.com/grpc/proposal/blob/master/G1-true-binary-metadata.md\n tcp_h2_describe.register_setting(0xFE03, \"GRPC_ALLOW_TRUE_BINARY_METADATA\")\n proxy_port = 24909\n server_port = int(os.environ.get(\"GRPC_PORT\", 50051))\n tcp_h2_describe.serve_proxy(proxy_port, server_port)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "9126510", "language": "Python", "matching_score": 3.1664209365844727, "max_stars_count": 0, "path": "_bin/grpc_proxy.py" }, { "content": "# -*- coding: utf-8 -*-\n# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: users.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='users.proto',\n package='users.v1',\n syntax='proto3',\n serialized_options=None,\n serialized_pb=_b('\\n\\x0busers.proto\\x12\\x08users.v1\\x1a\\x1bgoogle/protobuf/empty.proto\\\"9\\n\\x04User\\x12\\x12\\n\\nfirst_name\\x18\\x01 \\x01(\\t\\x12\\x11\\n\\tlast_name\\x18\\x02 \\x01(\\t\\x12\\n\\n\\x02id\\x18\\x03 \\x01(\\x04\\\"\\\"\\n\\x0f\\x41\\x64\\x64UserResponse\\x12\\x0f\\n\\x07user_id\\x18\\x01 \\x01(\\x04\\x32w\\n\\x05Users\\x12\\x36\\n\\x07\\x41\\x64\\x64User\\x12\\x0e.users.v1.User\\x1a\\x19.users.v1.AddUserResponse\\\"\\x00\\x12\\x36\\n\\x08GetUsers\\x12\\x16.google.protobuf.Empty\\x1a\\x0e.users.v1.User\\\"\\x00\\x30\\x01\\x62\\x06proto3')\n ,\n dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])\n\n\n\n\n_USER = _descriptor.Descriptor(\n name='User',\n full_name='users.v1.User',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='first_name', full_name='users.v1.User.first_name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='last_name', full_name='users.v1.User.last_name', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='id', full_name='users.v1.User.id', index=2,\n number=3, type=4, cpp_type=4, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=54,\n serialized_end=111,\n)\n\n\n_ADDUSERRESPONSE = _descriptor.Descriptor(\n name='AddUserResponse',\n full_name='users.v1.AddUserResponse',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='user_id', full_name='users.v1.AddUserResponse.user_id', index=0,\n number=1, type=4, cpp_type=4, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=113,\n serialized_end=147,\n)\n\nDESCRIPTOR.message_types_by_name['User'] = _USER\nDESCRIPTOR.message_types_by_name['AddUserResponse'] = 
_ADDUSERRESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nUser = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), {\n 'DESCRIPTOR' : _USER,\n '__module__' : 'users_pb2'\n # @@protoc_insertion_point(class_scope:users.v1.User)\n })\n_sym_db.RegisterMessage(User)\n\nAddUserResponse = _reflection.GeneratedProtocolMessageType('AddUserResponse', (_message.Message,), {\n 'DESCRIPTOR' : _ADDUSERRESPONSE,\n '__module__' : 'users_pb2'\n # @@protoc_insertion_point(class_scope:users.v1.AddUserResponse)\n })\n_sym_db.RegisterMessage(AddUserResponse)\n\n\n\n_USERS = _descriptor.ServiceDescriptor(\n name='Users',\n full_name='users.v1.Users',\n file=DESCRIPTOR,\n index=0,\n serialized_options=None,\n serialized_start=149,\n serialized_end=268,\n methods=[\n _descriptor.MethodDescriptor(\n name='AddUser',\n full_name='users.v1.Users.AddUser',\n index=0,\n containing_service=None,\n input_type=_USER,\n output_type=_ADDUSERRESPONSE,\n serialized_options=None,\n ),\n _descriptor.MethodDescriptor(\n name='GetUsers',\n full_name='users.v1.Users.GetUsers',\n index=1,\n containing_service=None,\n input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,\n output_type=_USER,\n serialized_options=None,\n ),\n])\n_sym_db.RegisterServiceDescriptor(_USERS)\n\nDESCRIPTOR.services_by_name['Users'] = _USERS\n\n# @@protoc_insertion_point(module_scope)\n", "id": "2463835", "language": "Python", "matching_score": 2.355186939239502, "max_stars_count": 0, "path": "_grpc/users_pb2.py" }, { "content": "# Copyright 2012 Google Inc. All Rights Reserved.\n\n\"\"\"Utility module for converting properties to ProtoRPC messages/fields.\n\nThe methods here are not specific to NDB or DB (the datastore APIs) and can\nbe used by utility methods in the datastore API specific code.\n\"\"\"\n\n__all__ = ['GeoPtMessage', 'MessageFieldsSchema', 'UserMessage',\n 'method', 'positional', 'query_method']\n\n\nimport datetime\nimport json\n\nfrom endpoints import protojson\nfrom protorpc import messages\nfrom protorpc import util as protorpc_util\n\nfrom google.appengine.api import users\n\n\nALLOWED_DECORATOR_NAME = frozenset(['method', 'query_method'])\nDATETIME_STRING_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'\nDATE_STRING_FORMAT = '%Y-%m-%d'\nTIME_STRING_FORMAT = '%H:%M:%S.%f'\n\npositional = protorpc_util.positional\n\n\ndef IsSubclass(candidate, parent_class):\n \"\"\"Calls issubclass without raising an exception.\n\n Args:\n candidate: A candidate to check if a subclass.\n parent_class: A class or tuple of classes representing a potential parent.\n\n Returns:\n A boolean indicating whether or not candidate is a subclass of parent_class.\n \"\"\"\n try:\n return issubclass(candidate, parent_class)\n except TypeError:\n return False\n\n\ndef IsSimpleField(property_type):\n \"\"\"Checks if a property type is a \"simple\" ProtoRPC field.\n\n We consider \"simple\" ProtoRPC fields to be ones which are not message/enum\n fields, since those depend on extra data when defined.\n\n Args:\n property_type: A ProtoRPC field.\n\n Returns:\n A boolean indicating whether or not the passed in property type is a\n simple field.\n \"\"\"\n if IsSubclass(property_type, messages.Field):\n return property_type not in (messages.EnumField, messages.MessageField)\n\n return False\n\n\ndef CheckValidPropertyType(property_type, raise_invalid=True):\n \"\"\"Checks if a property type is a valid class.\n\n Here \"valid\" means the property type is either a simple field, a ProtoRPC\n enum class which can be used to define an EnumField or a ProtoRPC message\n 
class that can be used to define a MessageField.\n\n Args:\n property_type: A ProtoRPC field, message class or enum class that\n describes the output of the alias property.\n raise_invalid: Boolean indicating whether or not an exception should be\n raised if the given property is not valid. Defaults to True.\n\n Returns:\n A boolean indicating whether or not the passed in property type is valid.\n NOTE: Only returns if raise_invalid is False.\n\n Raises:\n TypeError: If raise_invalid is True and the passed in property is not valid.\n \"\"\"\n is_valid = IsSimpleField(property_type)\n if not is_valid:\n is_valid = IsSubclass(property_type, (messages.Enum, messages.Message))\n\n if not is_valid and raise_invalid:\n error_msg = ('Property field must be either a subclass of a simple '\n 'ProtoRPC field, a ProtoRPC enum class or a ProtoRPC message '\n 'class. Received %r.' % (property_type,))\n raise TypeError(error_msg)\n\n return is_valid\n\n\ndef _DictToTuple(to_sort):\n \"\"\"Converts a dictionary into a tuple of keys sorted by values.\n\n Args:\n to_sort: A dictionary like object that has a callable items method.\n\n Returns:\n A tuple containing the dictionary keys, sorted by value.\n \"\"\"\n items = to_sort.items()\n items.sort(key=lambda pair: pair[1])\n return tuple(pair[0] for pair in items)\n\n\nclass MessageFieldsSchema(object):\n \"\"\"A custom dictionary which is hashable.\n\n Intended to be used so either dictionaries or lists can be used to define\n field index orderings of a ProtoRPC message classes. Since hashable, we can\n cache these ProtoRPC message class definitions using the fields schema\n as a key.\n\n These objects can be used as if they were dictionaries in many contexts and\n can be compared for equality by hash.\n \"\"\"\n\n def __init__(self, fields, name=None, collection_name=None, basename=''):\n \"\"\"Save list/tuple or convert dictionary a list based on value ordering.\n\n Attributes:\n name: A name for the fields schema.\n collection_name: A name for collections using the fields schema.\n _data: The underlying dictionary holding the data for the instance.\n\n Args:\n fields: A dictionary or ordered iterable which defines an index ordering\n for fields in a ProtoRPC message class\n name: A name for the fields schema, defaults to None. If None, uses the\n names in the fields in the order they appear. If the fields schema\n passed in is an instance of MessageFieldsSchema, this is ignored.\n collection_name: A name for collections containing the fields schema,\n defaults to None. If None, uses the name and appends the string\n 'Collection'.\n basename: A basename for the default fields schema name, defaults to the\n empty string. If the fields passed in is an instance of\n MessageFieldsSchema, this is ignored.\n\n Raises:\n TypeError: if the fields passed in are not a dictionary, tuple, list or\n existing MessageFieldsSchema instance.\n \"\"\"\n if isinstance(fields, MessageFieldsSchema):\n self._data = fields._data\n name = fields.name\n collection_name = fields.collection_name\n elif isinstance(fields, dict):\n self._data = _DictToTuple(fields)\n elif isinstance(fields, (list, tuple)):\n self._data = tuple(fields)\n else:\n error_msg = ('Can\\'t create MessageFieldsSchema from object of type %s. '\n 'Must be a dictionary or iterable.' 
% (fields.__class__,))\n raise TypeError(error_msg)\n\n self.name = name or self._DefaultName(basename=basename)\n self.collection_name = collection_name or (self.name + 'Collection')\n\n def _DefaultName(self, basename=''):\n \"\"\"The default name of the fields schema.\n\n Can potentially use a basename at the front, but otherwise uses the instance\n fields and joins all the values together using an underscore.\n\n Args:\n basename: An optional string, defaults to the empty string. If not empty,\n is used at the front of the default name.\n\n Returns:\n A string containing the default name of the fields schema.\n \"\"\"\n name_parts = []\n if basename:\n name_parts.append(basename)\n name_parts.extend(self._data)\n return '_'.join(name_parts)\n\n def __ne__(self, other):\n \"\"\"Not equals comparison that uses the definition of equality.\"\"\"\n return not self.__eq__(other)\n\n def __eq__(self, other):\n \"\"\"Comparison for equality that uses the hash of the object.\"\"\"\n if not isinstance(other, self.__class__):\n return False\n return self.__hash__() == other.__hash__()\n\n def __hash__(self):\n \"\"\"Unique and idempotent hash.\n\n Uses a the property list (_data) which is uniquely defined by its elements\n and their sort order, the name of the fields schema and the collection name\n of the fields schema.\n\n Returns:\n Integer hash value.\n \"\"\"\n return hash((self._data, self.name, self.collection_name))\n\n def __iter__(self):\n \"\"\"Iterator for loop expressions.\"\"\"\n return iter(self._data)\n\n\nclass GeoPtMessage(messages.Message):\n \"\"\"ProtoRPC container for GeoPt instances.\n\n Attributes:\n lat: Float; The latitude of the point.\n lon: Float; The longitude of the point.\n \"\"\"\n # TODO(dhermes): This behavior should be regulated more directly.\n # This is to make sure the schema name in the discovery\n # document is GeoPtMessage rather than\n # EndpointsProtoDatastoreGeoPtMessage.\n __module__ = ''\n\n lat = messages.FloatField(1, required=True)\n lon = messages.FloatField(2, required=True)\n\n\nclass UserMessage(messages.Message):\n \"\"\"ProtoRPC container for users.User objects.\n\n Attributes:\n email: String; The email of the user.\n auth_domain: String; The auth domain of the user.\n user_id: String; The user ID.\n federated_identity: String; The federated identity of the user.\n \"\"\"\n # TODO(dhermes): This behavior should be regulated more directly.\n # This is to make sure the schema name in the discovery\n # document is UserMessage rather than\n # EndpointsProtoDatastoreUserMessage.\n __module__ = ''\n\n email = messages.StringField(1, required=True)\n auth_domain = messages.StringField(2, required=True)\n user_id = messages.StringField(3)\n federated_identity = messages.StringField(4)\n\n\ndef UserMessageFromUser(user):\n \"\"\"Converts a native users.User object to a UserMessage.\n\n Args:\n user: An instance of users.User.\n\n Returns:\n A UserMessage with attributes set from the user.\n \"\"\"\n return UserMessage(email=user.email(),\n auth_domain=user.auth_domain(),\n user_id=user.user_id(),\n federated_identity=user.federated_identity())\n\n\ndef UserMessageToUser(message):\n \"\"\"Converts a UserMessage to a native users.User object.\n\n Args:\n message: The message to be converted.\n\n Returns:\n An instance of users.User with attributes set from the message.\n \"\"\"\n return users.User(email=message.email,\n _auth_domain=message.auth_domain,\n _user_id=message.user_id,\n federated_identity=message.federated_identity)\n\n\ndef 
DatetimeValueToString(value):\n \"\"\"Converts a datetime value to a string.\n\n Args:\n value: The value to be converted to a string.\n\n Returns:\n A string containing the serialized value of the datetime stamp.\n\n Raises:\n TypeError: if the value is not an instance of one of the three\n datetime types.\n \"\"\"\n if isinstance(value, datetime.time):\n return value.strftime(TIME_STRING_FORMAT)\n # Order is important, datetime.datetime is a subclass of datetime.date\n elif isinstance(value, datetime.datetime):\n return value.strftime(DATETIME_STRING_FORMAT)\n elif isinstance(value, datetime.date):\n return value.strftime(DATE_STRING_FORMAT)\n else:\n raise TypeError('Could not serialize timestamp: %s.' % (value,))\n\n\ndef DatetimeValueFromString(value):\n \"\"\"Converts a serialized datetime string to the native type.\n\n Args:\n value: The string value to be deserialized.\n\n Returns:\n A datetime.datetime/date/time object that was deserialized from the string.\n\n Raises:\n TypeError: if the value can not be deserialized to one of the three\n datetime types.\n \"\"\"\n try:\n return datetime.datetime.strptime(value, TIME_STRING_FORMAT).time()\n except ValueError:\n pass\n\n try:\n return datetime.datetime.strptime(value, DATE_STRING_FORMAT).date()\n except ValueError:\n pass\n\n try:\n return datetime.datetime.strptime(value, DATETIME_STRING_FORMAT)\n except ValueError:\n pass\n\n raise TypeError('Could not deserialize timestamp: %s.' % (value,))\n\n\ndef RaiseNotImplementedMethod(property_class, explanation=None):\n \"\"\"Wrapper method that returns a method which always fails.\n\n Args:\n property_class: A property class\n explanation: An optional argument explaining why the given property\n has not been implemented\n\n Returns:\n A method which will always raise NotImplementedError. If explanation is\n included, it will be raised as part of the exception, otherwise, a\n simple explanation will be provided that uses the name of the property\n class.\n \"\"\"\n if explanation is None:\n explanation = ('The property %s can\\'t be used to define an '\n 'EndpointsModel.' % (property_class.__name__,))\n\n def RaiseNotImplemented(unused_prop, unused_index):\n \"\"\"Dummy method that will always raise NotImplementedError.\n\n Raises:\n NotImplementedError: always\n \"\"\"\n raise NotImplementedError(explanation)\n return RaiseNotImplemented\n\n\ndef _GetEndpointsMethodDecorator(decorator_name, modelclass, **kwargs):\n \"\"\"Decorate a ProtoRPC method for use by the endpoints model passed in.\n\n Requires exactly two positional arguments and passes the rest of the keyword\n arguments to the classmethod method at the decorator name on the given class.\n\n Args:\n decorator_name: The name of the attribute on the model containing the\n function which will produce the decorator.\n modelclass: An Endpoints model class.\n\n Returns:\n A decorator that will use the endpoint metadata to decorate an endpoints\n method.\n \"\"\"\n if decorator_name not in ALLOWED_DECORATOR_NAME:\n raise TypeError('Decorator %s not allowed.' % (decorator_name,))\n\n # Import here to avoid circular imports\n from .ndb import model\n if IsSubclass(modelclass, model.EndpointsModel):\n return getattr(modelclass, decorator_name)(**kwargs)\n\n raise TypeError('Model class %s not a valid Endpoints model.' 
% (modelclass,))\n\n\n@positional(1)\ndef method(modelclass, **kwargs):\n \"\"\"Decorate a ProtoRPC method for use by the endpoints model passed in.\n\n Requires exactly one positional argument and passes the rest of the keyword\n arguments to the classmethod \"method\" on the given class.\n\n Args:\n modelclass: An Endpoints model class that can create a method.\n\n Returns:\n A decorator that will use the endpoint metadata to decorate an endpoints\n method.\n \"\"\"\n return _GetEndpointsMethodDecorator('method', modelclass, **kwargs)\n\n\n@positional(1)\ndef query_method(modelclass, **kwargs):\n \"\"\"Decorate a ProtoRPC method intended for queries\n\n For use by the endpoints model passed in. Requires exactly one positional\n argument and passes the rest of the keyword arguments to the classmethod\n \"query_method\" on the given class.\n\n Args:\n modelclass: An Endpoints model class that can create a query method.\n\n Returns:\n A decorator that will use the endpoint metadata to decorate an endpoints\n query method.\n \"\"\"\n return _GetEndpointsMethodDecorator('query_method', modelclass, **kwargs)\n\n\nclass _EPDProtoJson(protojson.EndpointsProtoJson):\n \"\"\"Slightly modifed version of EndpointsProtoJson.\n\n The key difference is that when parsing a message, the dictionary keys\n are stored on the newly created message.\n \"\"\"\n\n def _ProtoJson__decode_dictionary(self, message_type, msg_dictionary):\n \"\"\"Merge dictionary into message.\n\n This implementation is virtually identical to protorpc.protojson.ProtoJson\n except the keys of the parsed dictionary are stored in\n _Message__decoded_fields on the parsed message. Note that the field name\n must begin with _Message since the protorpc.message.Message metaclass\n rejects almost all other names.\n\n Args:\n message_type: Message type to merge dictionary into.\n msg_dictionary: Dictionary to extract information from. Dictionary\n is as parsed from JSON. 
Nested objects will also be dictionaries.\n\n Returns:\n Decoded instance of message_type.\n \"\"\"\n result = super(_EPDProtoJson, self)._ProtoJson__decode_dictionary(\n message_type, msg_dictionary)\n result._Message__decoded_fields = msg_dictionary.keys()\n return result\n", "id": "4636937", "language": "Python", "matching_score": 3.0438907146453857, "max_stars_count": 91, "path": "endpoints_proto_datastore/utils.py" }, { "content": "import ndb\n\nfrom endpoints.apiserving import _ApiServer\n\n\n__all__ = [ndb]\n\nfrom utils import *\n__all__ += utils.__all__\n\n# Monkey patch the ProtoJson instance to be used\n# for parsing requests.\n_ApiServer._ApiServer__PROTOJSON = utils._EPDProtoJson()\n", "id": "672514", "language": "Python", "matching_score": 0.27635854482650757, "max_stars_count": 91, "path": "endpoints_proto_datastore/__init__.py" }, { "content": "\"\"\"Forward one socket to another.\n\nThe server socket is a UDS (Unix Domain Socket) speaking UDP and the client\nsocket is a INET socket speaking UDP.\n\nH/T: https://stackoverflow.com/a/23731713/1068170\n\"\"\"\n\nimport os\nimport pathlib\nimport select\nimport signal\nimport socket\n\nimport uds_server\n\n\nDATADOG_HOSTNAME = os.environ.get(\"DATADOG_HOSTNAME\", \"localhost\")\nDATADOG_PORT = int(os.environ.get(\"DATADOG_PORT\", 8125))\nDATADOG_ADDRESS = os.environ.get(\"DATADOG_ADDRESS\")\n\n\ndef _try_remove_uds():\n # Try to remove the UDS.\n path = pathlib.Path(DATADOG_ADDRESS)\n try:\n path.unlink()\n except FileNotFoundError:\n pass\n\n\ndef _signal_handler(signal_number, unused_frame):\n signal_pretty = signal_number\n if signal_number == signal.SIGTERM:\n signal_pretty = \"SIGTERM\"\n elif signal_number == signal.SIGINT:\n signal_pretty = \"SIGINT\"\n\n print(f\"Handling shutdown signal ({signal_pretty}) for UDS->UDP forwarder\")\n _try_remove_uds()\n\n\ndef _recv_all(reader, writer):\n data = b\"\"\n chunk = reader.read(4096)\n while len(chunk) == 4096:\n data += chunk\n chunk = reader.read(4096)\n # Add the last chunk\n data += chunk\n\n writer.write(data)\n\n\ndef read_loop(server_socket, client_socket):\n server_reader = server_socket.makefile(mode=\"rb\", buffering=False)\n server_writer = server_socket.makefile(mode=\"wb\", buffering=False)\n client_reader = client_socket.makefile(mode=\"rb\", buffering=False)\n client_writer = client_socket.makefile(mode=\"wb\", buffering=False)\n\n rlist = (client_reader, server_reader)\n wlist = ()\n xlist = ()\n while True:\n ready_rlist, _, _ = select.select(rlist, wlist, xlist)\n for ready_socket in ready_rlist:\n if ready_socket == server_reader:\n _recv_all(server_reader, client_writer)\n elif ready_socket == client_reader:\n _recv_all(client_reader, server_writer)\n\n\ndef main():\n # Register signal handlers.\n signal.signal(signal.SIGTERM, _signal_handler)\n signal.signal(signal.SIGINT, _signal_handler)\n\n uds_server.clear_path(DATADOG_ADDRESS)\n server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n server_socket.bind(DATADOG_ADDRESS)\n\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n client_socket.connect((DATADOG_HOSTNAME, DATADOG_PORT))\n\n try:\n read_loop(server_socket, client_socket)\n except ConnectionRefusedError:\n print(\"Server is no longer running\")\n finally:\n _try_remove_uds()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7455041", "language": "Python", "matching_score": 2.700404167175293, "max_stars_count": 1, "path": "src/python/forward_to.py" }, { "content": "import os\nimport pathlib\nimport 
socketserver\n\nimport counter\nimport parse_datagram\nimport pretty_json\nimport udp_server\n\n\nCOUNTER = counter.Counter()\nUDS_PATH = os.environ.get(\"UDS_PATH\", \"/var/run/datadog/dsd.socket\")\n\n\nclass Handler(udp_server.Handler):\n\n PREFIX_SUFFIX = \"UDS \"\n\n\ndef clear_path(uds_path):\n path = pathlib.Path(uds_path)\n try:\n path.unlink()\n except FileNotFoundError:\n if path.exists():\n raise\n\n path.parent.mkdir(parents=True, exist_ok=True)\n\n\ndef main():\n clear_path(UDS_PATH)\n print(f\"Starting UDS metrics server at {UDS_PATH!r}\")\n with socketserver.UnixDatagramServer(UDS_PATH, Handler) as server:\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "687800", "language": "Python", "matching_score": 2.6668310165405273, "max_stars_count": 1, "path": "src/python/uds_server.py" }, { "content": "import os\nimport socketserver\n\nimport counter\nimport parse_datagram\nimport pretty_json\n\n\nCOUNTER = counter.Counter()\nHOST = \"0.0.0.0\"\nMETRICS_PORT = int(os.environ.get(\"METRICS_PORT\", 8125))\n\n\nclass Handler(socketserver.BaseRequestHandler):\n\n PREFIX_SUFFIX = METRICS_PORT\n\n def handle(self):\n count = COUNTER.increment()\n # There is no difference between request count / metric count as there\n # is for the trace server.\n prefix = f\"{count:03d}-{count:03d}-{self.PREFIX_SUFFIX} | \"\n\n data = self.request[0].strip()\n data_parsed = parse_datagram.parse_metric(data)\n try:\n colorful_json = pretty_json.printable_json(data_parsed, prefix)\n\n print(f\"{prefix}metric =\")\n print(colorful_json, end=\"\")\n except:\n print(f\"{prefix}metric raw data = {data_parsed!r}\")\n\n\ndef main():\n print(f\"Starting UDP metrics server on {HOST}:{METRICS_PORT}\")\n with socketserver.UDPServer((HOST, METRICS_PORT), Handler) as server:\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10712716", "language": "Python", "matching_score": 2.2312839031219482, "max_stars_count": 1, "path": "src/python/udp_server.py" }, { "content": "import json\n\nimport flask\nimport msgpack\n\nimport counter\nimport pretty_json\n\n\nREQUEST_COUNTER = counter.Counter()\nTRACE_COUNTER = counter.Counter()\nAPP = flask.Flask(__name__)\nMETHODS = (\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\")\nCONTENT_TYPE_MSGPACK = \"application/msgpack\"\nCONTENT_TYPE_JSON = \"application/json\"\n\n\ndef _flatten_traces(data_parsed):\n # Flatten from the \"outside\"\n if len(data_parsed) == 1:\n return data_parsed[0]\n\n # Flatten from the \"inside\"\n if all(len(value) == 1 for value in data_parsed):\n return [value[0] for value in data_parsed]\n\n # Do nothing\n return data_parsed\n\n\n@APP.route(\"/\", defaults={\"path\": \"\"}, methods=METHODS)\n@APP.route(\"/<path:path>\", methods=METHODS)\ndef catch_all(path):\n request_count = REQUEST_COUNTER.increment()\n\n content_type = flask.request.content_type\n data = flask.request.get_data()\n try:\n if content_type == CONTENT_TYPE_MSGPACK:\n data_parsed = msgpack.loads(data)\n elif content_type == CONTENT_TYPE_JSON:\n data_parsed = json.loads(data)\n else:\n raise ValueError(\"Unexpected content type\", content_type)\n\n traces = _flatten_traces(data_parsed)\n for trace in traces:\n trace_count = TRACE_COUNTER.increment()\n prefix = f\"{trace_count:03d}-{request_count:03d}-8126 | \"\n colorful_json = pretty_json.printable_json(trace, prefix)\n print(f\"{prefix}trace =\")\n print(colorful_json, end=\"\")\n except:\n trace_count = TRACE_COUNTER.increment()\n prefix = 
f\"{trace_count:03d}-{request_count:03d}-8126 | \"\n print(f\"{prefix}trace raw data = {data!r}\")\n\n return flask.jsonify({})\n\n\nif __name__ == \"__main__\":\n APP.run()\n", "id": "5488005", "language": "Python", "matching_score": 0.8415676951408386, "max_stars_count": 1, "path": "src/python/app.py" }, { "content": "import json\nimport textwrap\n\nimport pygments\nimport pygments.formatters\nimport pygments.lexers\n\n\ndef printable_json(value, prefix):\n for_terminal = pygments.highlight(\n json.dumps(value, indent=2),\n pygments.lexers.JsonLexer(),\n pygments.formatters.TerminalTrueColorFormatter(style=\"solarized-dark\"),\n )\n return textwrap.indent(for_terminal, prefix)\n", "id": "5186944", "language": "Python", "matching_score": 0.07988142222166061, "max_stars_count": 1, "path": "src/python/pretty_json.py" }, { "content": "import _ast\nimport ast\nimport os\nimport subprocess\n\n\nACCEPTED_NODE_TYPES = (_ast.Import, _ast.ImportFrom, _ast.FunctionDef)\nGENERATED_FILENAME = 'butterfly.py'\nDESIRED_FILENAME = 'butterfly_algorithm.py'\nUNUSED_MODULES = ('make_partition_plots', 'IPython', 'matplotlib',\n 'time', 'os', 'sympy')\n\n\ndef generate_file():\n # NOTE: We could probably do this with `import IPython` ... etc.\n # but the overhead is not likely worth it.\n subprocess.check_output(['ipython', 'nbconvert', '--to',\n 'python', 'butterfly.ipynb'])\n\n\ndef get_tree():\n with open(GENERATED_FILENAME, 'rU') as fh:\n generated_butterfly = fh.read()\n\n file_lines = generated_butterfly.split('\\n')\n tree = ast.parse(generated_butterfly)\n return tree, file_lines\n\n\ndef get_captured_lines(tree):\n captured_imports = []\n captured_functions = []\n for i, node in enumerate(tree.body):\n if isinstance(node, ACCEPTED_NODE_TYPES):\n # tree.body should only be top level.\n if node.col_offset != 0:\n raise ValueError('Node is not top-level', node)\n # NOTE: This may fail if `node` is the last entry.\n next_node = tree.body[i + 1]\n\n section = (node.lineno - 1, next_node.lineno - 1)\n if isinstance(node, _ast.FunctionDef):\n # The `get_time` function requires globals from the\n # notebook which we don't have and we don't need to use\n # the `custom_update` function.\n if node.name not in ('get_time', 'custom_update'):\n captured_functions.append(section)\n elif isinstance(node, _ast.ImportFrom):\n if node.module not in UNUSED_MODULES:\n captured_imports.append(section)\n else:\n if (len(node.names) == 1 and\n node.names[0].name not in UNUSED_MODULES):\n captured_imports.append(section)\n\n return captured_imports, captured_functions\n\n\ndef write_import(lines, fh):\n for line in lines:\n # Don't include top level comments.\n if line.startswith('#'):\n continue\n # Don't write blank lines.\n if not line.strip():\n continue\n\n # If the line is accepted write to the file.\n fh.write(line + '\\n')\n\n\ndef write_function(lines, fh):\n true_last_line = len(lines) - 1\n for last_line in xrange(len(lines) - 1, -1, -1):\n if lines[last_line].startswith('#'):\n continue\n elif not lines[last_line].strip():\n continue\n else:\n true_last_line = last_line\n break\n\n for line in lines[:true_last_line + 1]:\n fh.write(line + '\\n')\n\n\ndef rewrite_file():\n tree, file_lines = get_tree()\n captured_imports, captured_functions = get_captured_lines(tree)\n\n with open(DESIRED_FILENAME, 'w') as fh:\n for begin, end in captured_imports:\n write_import(file_lines[begin:end], fh)\n\n for begin, end in captured_functions:\n # Two newlines between every function\n fh.write('\\n\\n')\n 
write_function(file_lines[begin:end], fh)\n\n\ndef main():\n generate_file()\n print 'Writing functions to: %r' % (DESIRED_FILENAME,)\n rewrite_file()\n print 'Removing: %r' % (GENERATED_FILENAME,)\n os.remove(GENERATED_FILENAME)\n\n\nif __name__ == '__main__':\n main()\n", "id": "4294182", "language": "Python", "matching_score": 0.8511645197868347, "max_stars_count": 0, "path": "only_functions_from_ast.py" }, { "content": "# Temporary script to allow checking notebooks run without errors, or\n# to approximately lint check notebooks.\n#\n# Note: lint checking will not yet work on windows unless sed is\n# present or we replace sed with python equivalent.\n#\n# Run all notebooks & render to html:\n# python examples/test_notebooks.py\n#\n# Approximately lint check all notebooks:\n# python examples/test_notebooks.py lint\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport glob\nimport pprint\n\n############################################################\n# Set nbdir, run_skip, and run_allow_error for your project.\n# You may need to increase run_cell_timeout if you have\n# notebook cells that take a long time to execute.\n\nnbdir = \"examples\"\n\nrun_skip = []\n\nrun_allow_error = []\n\nrun_cell_timeout = 360\n\n############################################################\n\n\nnotebooks = sorted([x.replace(os.path.sep,\"/\") for x in glob.glob(nbdir+\"/*.ipynb\")])\n\nchecked = []\nerrored = []\nrun_skipped = []\n\nif len(sys.argv) == 1:\n do_what = \"run\"\nelif sys.argv[1] == \"lint\":\n do_what = \"lint\"\nelse:\n raise\n\nif do_what==\"run\":\n for nb in notebooks:\n cmd = \"jupyter nbconvert %s --execute --ExecutePreprocessor.kernel_name=python%s --ExecutePreprocessor.timeout=%s --to html\"%(nb,sys.version_info[0],run_cell_timeout)\n if nb in run_skip:\n run_skipped.append(nb)\n continue\n \n if nb in run_allow_error:\n cmd += \" --allow-errors\"\n print(cmd)\n r = os.system(cmd)\n checked.append(nb)\n if r!=0:\n errored.append(nb)\n\nelif sys.argv[1]=='lint':\n for nb in notebooks:\n cmd = \"\"\"sed -e 's/%/#%/' {f} > {f}~ && jupyter nbconvert {f}~ --to python --PythonExporter.file_extension=.py~ && flake8 --ignore=E,W {p}\"\"\".format(f=nb,p=nb[0:-5]+'py~')\n print(cmd)\n r = os.system(cmd)\n checked.append(nb)\n if r!=0:\n errored.append(nb)\nelse:\n raise\n\nprint(\"%s checked\"%len(checked))\nif len(checked)>0: pprint.pprint(checked)\nprint()\nprint(\"%s error(s)\"%len(errored))\nif len(errored)>0: pprint.pprint(errored)\nprint()\n\nif do_what == 'run':\n print(\"%s skipped\"%len(run_skipped))\n if len(run_skipped)>0: pprint.pprint(run_skipped)\n print()\n if len(run_allow_error) > 0:\n print(\"Note: the following notebooks were not checked for run errors:\")\n pprint.pprint(run_allow_error)\n\nsys.exit(len(errored))\n", "id": "8561997", "language": "Python", "matching_score": 1.057066559791565, "max_stars_count": 0, "path": "etc/test_notebooks.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup file for 
ci-diff-helper.\"\"\"\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nVERSION = '0.2.0'\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'README.rst.template')) as file_obj:\n TEMPLATE = file_obj.read()\n\nTAG_QUERY = 'tag={}'.format(VERSION)\nREADME = TEMPLATE.format(\n pypi='',\n pypi_img='',\n versions='',\n versions_img='',\n travis_info=TAG_QUERY,\n appveyor_info=TAG_QUERY,\n coveralls_branch=VERSION,\n rtd_version=VERSION,\n)\n\nREQUIREMENTS = (\n 'enum34',\n 'requests',\n 'six >= 1.9.0',\n)\nDESCRIPTION = 'Diff Helper for Continuous Integration (CI) Services'\n\n\nsetup(\n name='ci-diff-helper',\n version=VERSION,\n description=DESCRIPTION,\n author='<NAME>',\n author_email='<EMAIL>',\n long_description=README,\n scripts=(),\n url='https://github.com/dhermes/ci-diff-helper',\n packages=find_packages(),\n license='Apache 2.0',\n platforms='Posix; MacOS X; Windows',\n include_package_data=True,\n zip_safe=True,\n install_requires=REQUIREMENTS,\n classifiers=(\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet',\n ),\n)\n", "id": "10470638", "language": "Python", "matching_score": 3.9648239612579346, "max_stars_count": 5, "path": "setup.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Check that the current README.rst is built from the template.\"\"\"\n\n\nfrom __future__ import print_function\n\nimport os\n\n\n_SCRIPTS_DIR = os.path.dirname(__file__)\n_ROOT_DIR = os.path.abspath(os.path.join(_SCRIPTS_DIR, '..'))\nTEMPLATE_FILE = os.path.join(_ROOT_DIR, 'README.rst.template')\nACTUAL_FILE = os.path.join(_ROOT_DIR, 'README.rst')\n\nPYPI_IMG = \"\"\"\\\n.. |pypi| image:: https://img.shields.io/pypi/v/ci-diff-helper.svg\n :target: https://pypi.python.org/pypi/ci-diff-helper\n\"\"\"\nVERSIONS_IMG = \"\"\"\\\n.. 
|versions| image:: https://img.shields.io/pypi/pyversions/ci-diff-helper.svg\n :target: https://pypi.python.org/pypi/ci-diff-helper\n\"\"\"\n\n\ndef main():\n \"\"\"Populate the template and compare values.\n\n Raises:\n ValueError: If the current README doesn't agree with the expected\n value computed from the template.\n \"\"\"\n with open(TEMPLATE_FILE, 'r') as file_obj:\n template = file_obj.read()\n\n expected = template.format(\n pypi='|pypi| ',\n pypi_img=PYPI_IMG,\n versions='|versions| ',\n versions_img=VERSIONS_IMG,\n travis_info='branch=master',\n appveyor_info='branch=master',\n coveralls_branch='master',\n rtd_version='latest',\n )\n\n with open(ACTUAL_FILE, 'r') as file_obj:\n contents = file_obj.read()\n\n if contents != expected:\n raise ValueError('README.rst is not up to date with template',\n 'Expected', expected,\n 'Actual', contents)\n else:\n print('README contents are as expected.')\n\n\nif __name__ == '__main__':\n main()\n", "id": "7970017", "language": "Python", "matching_score": 0.7655032277107239, "max_stars_count": 5, "path": "scripts/check_readme.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared utilities and settings for plotting.\"\"\"\n\n\nimport fractions\nimport math\nimport os\n\nimport seaborn\n\n\n# As of ``0.9.0``, this palette has (BLUE, ORANGE, GREEN, RED, PURPLE, BROWN).\n_COLORS = seaborn.color_palette(palette=\"deep\", n_colors=6)\nBLUE = _COLORS[0]\nGREEN = _COLORS[2]\nRED = _COLORS[3]\nPURPLE = _COLORS[4]\ndel _COLORS\nTEXT_SIZE = 10 # NOTE: Thesis text uses 12 point.\nTICK_SIZE = 7\n\n\ndef set_styles():\n \"\"\"Set the styles used for plotting.\"\"\"\n seaborn.set(style=\"white\")\n\n\ndef get_path(*parts):\n \"\"\"Get a file path in the ``images/`` directory.\n\n This assumes the script is currently in the ``src/``\n directory.\n \"\"\"\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n root_dir = os.path.dirname(curr_dir)\n images_dir = os.path.join(root_dir, \"images\")\n return os.path.join(images_dir, *parts)\n\n\ndef binomial(n, k):\n numerator = math.factorial(n)\n denominator = math.factorial(k) * math.factorial(n - k)\n result = fractions.Fraction(numerator, denominator)\n if float(result) != result:\n raise ValueError(\"Cannot be represented exactly\")\n return float(result)\n\n\ndef next_float(value, greater=True):\n \"\"\"Gets the next (or previous) floating point value.\"\"\"\n frac, exponent = math.frexp(value)\n if greater:\n if frac == -0.5:\n ulp = 0.5 ** 54\n else:\n ulp = 0.5 ** 53\n else:\n if frac == 0.5:\n ulp = -0.5 ** 54\n else:\n ulp = -0.5 ** 53\n\n return (frac + ulp) * 2.0 ** exponent\n\n\ndef to_float(v):\n \"\"\"Converts an MPF (``mpmath`` float) to a ``float``.\"\"\"\n f = float(v)\n if f == v:\n return f\n if f < v:\n low = f\n high = next_float(f, greater=True)\n else:\n low = next_float(f, greater=False)\n high = f\n\n d_low = v - low\n d_high = high - v\n if d_low < d_high:\n return low\n else:\n return high\n", "id": "875926", "language": "Python", "matching_score": 
5.640937805175781, "max_stars_count": 2, "path": "src/plot_utils.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared utilities and settings for plotting.\"\"\"\n\n\nimport os\n\nimport seaborn\n\n\n# As of ``0.9.0``, this palette has (BLUE, ORANGE, GREEN, RED, PURPLE, BROWN).\n_COLORS = seaborn.color_palette(palette=\"deep\", n_colors=6)\nBLUE = _COLORS[0]\nGREEN = _COLORS[2]\nPURPLE = _COLORS[4]\ndel _COLORS\nTEXT_SIZE = 10\nTICK_SIZE = 7\n\n\ndef set_styles():\n \"\"\"Set the styles used for plotting.\"\"\"\n seaborn.set(style=\"white\")\n\n\ndef get_path(*parts):\n \"\"\"Get a file path in the ``images/`` directory.\n\n This assumes the script is currently in the ``scripts/``\n directory.\n \"\"\"\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n root_dir = os.path.dirname(curr_dir)\n images_dir = os.path.join(root_dir, \"images\")\n return os.path.join(images_dir, *parts)\n", "id": "11329234", "language": "Python", "matching_score": 0.8659815788269043, "max_stars_count": 2, "path": "scripts/plot_utils.py" }, { "content": "import os\nimport subprocess\nimport sys\n\nfrom example import fast\n\n\n_MAC_OS_X = \"darwin\"\n_FORTRAN_LIBRARY_PREFIX = \"libraries: =\"\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\n\nfoo = fast.foo\nmake_udf = fast.make_udf\nfoo_array = fast.foo_array\nudf_ptr = fast.udf_ptr\njust_print = fast.just_print\nview_knob = fast.view_knob\nturn_knob = fast.turn_knob\n\n\ndef get_include():\n return os.path.join(PACKAGE_ROOT, \"include\")\n\n\ndef get_lib():\n return os.path.join(PACKAGE_ROOT, \"lib\")\n\n\ndef _add_gfortran(libraries, library_dirs):\n \"\"\"Add ``gfortran`` library and library directories.\n\n This is a \"temporary\" hack that is problematic because\n\n * It **assumes** ``gfortran`` is the Fortran compiler, even\n though others would be perfectly fine to use\n * On OS X (at least when installing with Homebrew), ``gcc``\n cannot find ``libgfortran`` on the default path\n\n Unfortunately, this is needed for ``libexample`` because the\n ``just_print()`` subroutine uses some of the standard library,\n e.g. 
``_gfortran_st_write``.\n \"\"\"\n libraries.append(\"gfortran\")\n\n # NOTE: This is essentially the same as ``fortran_search_path``\n # in ``setup.py``.\n if sys.platform != _MAC_OS_X:\n return\n\n cmd = (\"gfortran\", \"-print-search-dirs\")\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\")\n\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(_FORTRAN_LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(_FORTRAN_LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n # NOTE: This means we will fail to update the paths.\n return\n\n library_line = library_lines[0]\n accepted = set()\n for part in library_line.split(\":\"):\n full_path = os.path.abspath(part)\n\n if not os.path.exists(full_path):\n continue\n\n if os.path.isdir(full_path):\n accepted.add(full_path)\n\n library_dirs.extend(accepted)\n\n\ndef get_extension_keywords(\n include_dirs=None, libraries=None, library_dirs=None\n):\n \"\"\"Get keyword arguments for a ``setuptools.Extension``.\n\n This way, an extension can be created that depends on the shared library\n which this library uses. This allows building an extension directly and\n calling the C interface or using ``cimport example.example_fortran`` from\n Cython.\n\n Each of the arguments are optional. If provided, they will be\n appended to.\n\n This way, if an extension depends on **other** libraries, the set up can\n be done before calling this function and the lists, e.g. ``libraries``\n can passed in here (and updated).\n\n Args:\n include_dirs (Optional[List[str]]): List of directories to search for\n C/C++ header files (in Unix form for portability).\n libraries (Optional[List[str]]): List of library names (not filenames\n or paths) to link against.\n library_dirs (Optional[List[str]]): List of directories to search for\n shared libraries at link time.\n\n Returns:\n Dict[str, List[str]]: Mapping of the keyword arguments. This will\n always contain ``include_dirs``, ``libraries`` and ``library_dirs``.\n \"\"\"\n if include_dirs is None:\n include_dirs = []\n if libraries is None:\n libraries = []\n if library_dirs is None:\n library_dirs = []\n\n example_include = get_include()\n example_lib = get_lib()\n\n include_dirs.append(example_include)\n libraries.append(\"example\")\n library_dirs.append(example_lib)\n _add_gfortran(libraries, library_dirs)\n\n return {\n \"include_dirs\": include_dirs,\n \"libraries\": libraries,\n \"library_dirs\": library_dirs,\n }\n", "id": "4641261", "language": "Python", "matching_score": 2.2700250148773193, "max_stars_count": 14, "path": "cython/package/example/__init__.py" }, { "content": "\"\"\"Create a universal library for ``libgfortran`` and its dependencies.\n\nIntended to be used on OS X only. 
It is needed because the Homebrew\ninstalled ``libgfortran`` is ``x86_64`` only, but it also distributes\nan ``i386`` version of the necessary dynamic libraries.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\n\nCURR_DIR = os.path.abspath(os.path.dirname(__file__))\nFRANKENSTEIN = os.path.join(CURR_DIR, \"frankenstein\")\nLIBGFORTRAN = \"libgfortran.dylib\"\nFORTRAN_LIBRARY_PREFIX = \"libraries: =\"\nLIBRARY_DIRS_ERR = \"Fortran search default library path not found.\"\n\n\ndef get_library_dirs():\n \"\"\"Get library directories in ``gfortran`` search path.\n\n Uses the information from ``gfortran -print-search-dirs``.\n\n If the command line output is not of the expected format, prints\n an error message and exits the program with a status code of 1.\n\n Returns:\n List[str]: Directories in the ``gfortran`` search path.\n \"\"\"\n cmd = (\"gfortran\", \"-print-search-dirs\")\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\")\n\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(FORTRAN_LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(FORTRAN_LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n print(LIBRARY_DIRS_ERR, file=sys.stderr)\n sys.exit(1)\n\n library_line = library_lines[0]\n directories = []\n for part in library_line.split(\":\"):\n full_path = os.path.abspath(part)\n\n if not os.path.exists(full_path):\n continue\n\n if os.path.isdir(full_path):\n directories.append(full_path)\n else:\n msg = \"Path {} is not a directory.\".format(full_path)\n print(msg, file=sys.stderr)\n\n if directories:\n print(\"``gfortran`` library directories:\")\n for directory in directories:\n print(\"\\t{}\".format(directory))\n else:\n print(\"No ``gfortran`` library directories found.\", file=sys.stderr)\n sys.exit(1)\n\n return directories\n\n\ndef find_libgfortran():\n \"\"\"Get the directory and name of ``libgfortran``.\n\n Assumes and checks that this ``libgfortran`` is **only** for\n ``x86_64``.\n\n Exits the program with a status code of 1 if:\n\n * there is more than one (or zero) directories that contain\n ``libgfortran.dylib``.\n * the ``libgfortran.dylib`` found is **not** ``x86_64``.\n\n Returns:\n Tuple[str, str]: The directory that contains ``libgfortran`` and the\n full name (with version) of ``libgfortran``.\n \"\"\"\n library_dirs = get_library_dirs()\n matches = []\n for library_dir in library_dirs:\n path = os.path.join(library_dir, LIBGFORTRAN)\n versioned_path = os.path.realpath(path)\n if os.path.exists(versioned_path):\n matches.append(versioned_path)\n\n if len(matches) != 1:\n msg = \"Expected exactly one match: {}\".format(\", \".join(matches))\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n dylib = matches[0]\n architectures = get_architectures(dylib)\n if architectures != [\"x86_64\"]:\n msg = \"Expected {} to be x86_64 only, not {}.\".format(\n dylib, \", \".join(architectures)\n )\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n x86_64_dir, libgfortran = os.path.split(dylib)\n print(\"Found x86_64 ``libgfortran``:\")\n print(\"\\t{}\".format(dylib))\n return x86_64_dir, libgfortran\n\n\ndef get_i386_dir(x86_64_dir, libgfortran):\n \"\"\"Gets directory containing dynamic libraries targeting ``i386``.\n\n Exits the program with a status code of 1 if:\n\n * The expected location of the ``i386`` version of ``libgfortran``\n does not exist\n * the ``libgfortran.dylib`` found is **not** ``i386``\n\n Args:\n x86_64_dir (str): Directory containing ``x86_64`` dynamic libraries.\n libgfortran (str): The name 
(not path) of the ``libgfortran`` dynamic\n library (should include version).\n\n Returns:\n str: The directory containing ``i386`` binaries.\n \"\"\"\n i386_dir = os.path.join(x86_64_dir, \"i386\")\n dylib = os.path.join(i386_dir, libgfortran)\n if not os.path.exists(dylib):\n template = \"Expected location of i386 libgfortran does not exist: {}\"\n print(template.format(dylib), file=sys.stderr)\n sys.exit(1)\n\n architectures = get_architectures(dylib)\n if architectures != [\"i386\"]:\n msg = \"Expected {} to be i386 only, not {}.\".format(\n dylib, \", \".join(architectures)\n )\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n print(\"Found directory with ``i386`` dynamic libraries:\")\n print(\"\\t{}\".format(i386_dir))\n\n return i386_dir\n\n\ndef get_otool_path(otool_line):\n \"\"\"Parse path from a line from ``otool -L`` output.\n\n This **assumes** the format, but does not check it.\n\n Args:\n otool_line (str): A dependency (or ``install_name``) from ``otool -L``\n output. Expected to be of the form '\\t{PATH} (compatibility ...)'.\n\n Returns:\n str: The ``PATH`` in the ``otool_line``.\n \"\"\"\n parts = otool_line.split()\n return parts[0]\n\n\ndef get_dependencies(dylib, check_exists=True):\n \"\"\"Get dependencies for a dynamic library.\n\n For example:\n\n .. code-block:: python\n\n >>> dylib = '/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.4.dylib'\n >>> get_dependencies(dylib)\n ['/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libquadmath.0.dylib',\n '/usr/lib/libSystem.B.dylib',\n '/usr/local/lib/gcc/7/libgcc_s.1.dylib']\n\n Args:\n dylib (str): The path to a dynamic library.\n check_exists (Optional[bool]): Indicates if the existence of a\n dependency should be checked.\n\n Returns:\n List[str]: The dependencies of ``dylib``.\n\n Raises:\n ValueError: If the first line of the output is not ``{dylib}:``.\n ValueError: If the ``install_name`` (i.e. the second line) does not\n have the same name (not necessarily same path) as ``dylib``.\n For example\n ``/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.4.dylib``\n has an install name of\n ``/usr/local/opt/gcc/lib/gcc/7/libgfortran.4.dylib``.\n ValueError: If one of the dependencies (from any line other than the\n first two) is not actual a file on the current machine. (Will\n only be raised if ``check_exists=True``.)\n \"\"\"\n cmd = (\"otool\", \"-L\", dylib)\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\")\n\n lines = cmd_output.strip().split(\"\\n\")\n if lines[0] != dylib + \":\":\n raise ValueError(\"Unexpected first line\", lines[0])\n\n install_name = get_otool_path(lines[1])\n if os.path.basename(install_name) != os.path.basename(dylib):\n raise ValueError(\"Unexpected install_name\", install_name, dylib)\n\n dependencies = []\n for line in lines[2:]:\n dependency = get_otool_path(line)\n if check_exists and not os.path.exists(dependency):\n raise ValueError(\"Dependency does not exist\", dependency)\n dependencies.append(dependency)\n\n return dependencies\n\n\ndef get_architectures(dylib):\n \"\"\"Determine the architectures that a dynamic library supports.\n\n Uses ``lipo -info`` to determine the architectures. Expects outputs to\n resemble one of the following:\n\n .. 
code-block:: console\n\n $ lipo -info /usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.dylib\n Non-fat file: .../libgfortran.dylib is architecture: x86_64\n $ lipo -info /usr/local/lib/gcc/7/libgcc_s.1.dylib\n Architectures in the fat file: .../libgcc_s.1.dylib are: x86_64 i386\n\n (Path information has been replaced by ``...`` for display purposes.)\n\n Putting this to use:\n\n .. code-block:: python\n\n >>> libgfortran = (\n ... '/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.4.dylib')\n >>> get_architectures(libgfortran)\n ['x86_64']\n >>> libgcc_s = '/usr/local/lib/gcc/7/libgcc_s.1.dylib'\n >>> get_architectures(libgcc_s)\n ['x86_64', 'i386']\n\n Args:\n dylib (str): The path to a dynamic library.\n\n Returns:\n List[str]: The architecture(s) supported by ``dylib``.\n\n Raises:\n ValueError: If the ``lipo -info {dylib}`` output does not conform\n to one of the two expected formats.\n \"\"\"\n cmd = (\"lipo\", \"-info\", dylib)\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\").strip()\n\n prefix = \"Architectures in the fat file: {} are: \".format(dylib)\n\n if cmd_output.startswith(prefix):\n architectures = cmd_output[len(prefix) :].split()\n return architectures\n else:\n prefix = \"Non-fat file: {} is architecture: \".format(dylib)\n if not cmd_output.startswith(prefix):\n raise ValueError(\"Unexpected output\", cmd_output)\n return [cmd_output[len(prefix) :]]\n\n\ndef is_universal(dylib):\n \"\"\"Checks if a dynamic library is a \"universal\" binary.\n\n Uses ``get_architectures`` and checks if both ``i386`` and ``x86_64``\n are supported architectures.\n\n Args:\n dylib (str): The path to a dynamic library.\n\n Returns:\n bool: Flag indicating if ``dylib`` is a universal binary.\n \"\"\"\n architectures = get_architectures(dylib)\n return \"i386\" in architectures and \"x86_64\" in architectures\n\n\ndef non_universal_libraries(dylib):\n \"\"\"Get all dependencies (recursively) that are not universal binaries.\n\n For example:\n\n .. code-block:: python\n\n >>> libgfortran = (\n ... '/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.4.dylib')\n >>> non_universal_libraries(libgfortran)\n {'/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libgfortran.4.dylib',\n '/usr/local/Cellar/gcc/7.2.0/lib/gcc/7/libquadmath.0.dylib'}\n\n Args:\n dylib (str): Path to a dynamic library.\n\n Returns:\n Set[str]: All non-universal libraries needed by ``dylib``\n (possibly including itself).\n \"\"\"\n result = set()\n if is_universal(dylib):\n return result\n else:\n result.add(dylib)\n\n for dependency in get_dependencies(dylib):\n if is_universal(dependency):\n continue\n\n result.add(dependency)\n result.update(non_universal_libraries(dependency))\n\n return result\n\n\ndef verify_libraries(libgfortran_path, libraries):\n \"\"\"Verifies the non-universal dependencies of ``libgfortran``.\n\n Checks these against our assumption that there are **only** two:\n ``libgfortran`` and ``libquadmath``.\n\n Exits the program with a status code of 1 if:\n\n * ``libraries`` does not have two members or ``libgfortran_path`` is\n not one of them.\n * the ``libquadmath`` found is **not** ``x86_64``.\n * the ``libquadmath`` found is not in the same directory as\n ``libgfortran``.\n\n Args:\n libgfortran_path (str): The full path to the ``libgfortran`` dynamic\n library.\n libraries (Set[str]): The non-universal dynamic libraries required\n by ``libgfortran``. 
Determined by :func:`non_universal_libraries`.\n\n Returns:\n str: The name (not path) of the ``libquadmath`` dynamic library.\n \"\"\"\n print(\"Non-universal libraries found:\")\n for library in libraries:\n print(\"\\t{}\".format(library))\n\n if len(libraries) != 2 or libgfortran_path not in libraries:\n msg = \"Expected ``libgfortran`` and ``libquadmath``: {}\".format(\n \", \".join(sorted(libraries))\n )\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n libraries.remove(libgfortran_path)\n libquadmath_path = libraries.pop()\n\n architectures = get_architectures(libquadmath_path)\n if architectures != [\"x86_64\"]:\n msg = \"Expected {} to be x86_64 only, not {}.\".format(\n libquadmath_path, \", \".join(architectures)\n )\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n library_dir, libquadmath = os.path.split(libquadmath_path)\n if library_dir != os.path.dirname(libgfortran_path):\n msg = \"Expected {} and {} in same directory.\".format(\n libgfortran_path, libquadmath\n )\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n return libquadmath\n\n\ndef make_root_dir():\n \"\"\"Creates \"frankenstein\" directory for a universal ``libgfortran``.\n\n If the directory already exists, then exits the program with a status\n code of 1.\n \"\"\"\n if os.path.exists(FRANKENSTEIN):\n msg = \"The directory {} already exists.\".format(FRANKENSTEIN)\n print(msg, file=sys.stderr)\n sys.exit(1)\n else:\n os.mkdir(FRANKENSTEIN)\n\n\ndef copyfile(source, destination):\n \"\"\"Copy a file and print a message.\n\n Args:\n source (str): The file being copied.\n destination (str): The place to copy the file.\n \"\"\"\n shutil.copyfile(source, destination)\n msg = \"Copied:\\n\\t {}\\n\\t->{}\".format(source, destination)\n print(msg)\n\n\ndef copy_arch(arch, library_dir, libgfortran, libquadmath):\n \"\"\"Copy libraries specific to a given architecture.\n\n Args:\n arch (str): The architecture being copied.\n library_dir (str): The directory containing the dynamic libraries.\n libgfortran (str): The name (not path) of the ``libgfortran``\n dynamic library.\n libquadmath (str): The name (not path) of the ``libquadmath``\n dynamic library.\n\n Returns:\n Tuple[str, str, str, str]: Four-tuple of\n\n * The path to the ``arch``-specific location of the newly\n created ``libgfortran``\n * The path to the location of the universal ``libgfortran``\n (not yet created, but reference here as the ``install_name``)\n * The path to the ``arch``-specific location of the newly\n created ``libquadmath``\n * The path to the location of the universal ``libquadmath``\n (not yet created, but reference here as the ``install_name``)\n \"\"\"\n sub_dir = os.path.join(FRANKENSTEIN, arch)\n os.mkdir(sub_dir)\n\n # Determine the old/new filenames.\n old_libgfortran = os.path.join(library_dir, libgfortran)\n arch_libgfortran = os.path.join(sub_dir, libgfortran)\n universal_libgfortran = os.path.join(FRANKENSTEIN, libgfortran)\n\n old_libquadmath = os.path.join(library_dir, libquadmath)\n arch_libquadmath = os.path.join(sub_dir, libquadmath)\n universal_libquadmath = os.path.join(FRANKENSTEIN, libquadmath)\n\n # Update ``libgfortran``\n copyfile(old_libgfortran, arch_libgfortran)\n os.chmod(arch_libgfortran, 0o644)\n subprocess.check_call(\n (\"install_name_tool\", \"-id\", universal_libgfortran, arch_libgfortran)\n )\n subprocess.check_call(\n (\n \"install_name_tool\",\n \"-change\",\n old_libquadmath,\n universal_libquadmath,\n arch_libgfortran,\n )\n )\n os.chmod(arch_libgfortran, 0o444)\n\n print(\"{}:\".format(arch_libgfortran))\n 
print(\"\\t``install_name``:\")\n print(\"\\t\\t{}\".format(universal_libgfortran))\n print(\"\\tDependencies:\")\n dependencies = get_dependencies(arch_libgfortran, check_exists=False)\n for dependency in dependencies:\n print(\"\\t\\t{}\".format(dependency))\n\n # Update ``libquadmath``\n copyfile(old_libquadmath, arch_libquadmath)\n os.chmod(arch_libquadmath, 0o644)\n subprocess.check_call(\n (\"install_name_tool\", \"-id\", universal_libquadmath, arch_libquadmath)\n )\n os.chmod(arch_libquadmath, 0o444)\n\n print(\"{}:\".format(arch_libquadmath))\n print(\"\\t``install_name``:\")\n print(\"\\t\\t{}\".format(universal_libquadmath))\n print(\"\\tDependencies:\")\n dependencies = get_dependencies(arch_libquadmath, check_exists=False)\n for dependency in dependencies:\n print(\"\\t\\t{}\".format(dependency))\n\n return (\n arch_libgfortran,\n universal_libgfortran,\n arch_libquadmath,\n universal_libquadmath,\n )\n\n\ndef combine_dylibs(i386_dylib, x86_64_dylib, universal_dylib):\n \"\"\"Combine two dynamic libraries into one universal dynamic library.\n\n Args:\n i386_dylib (str): The full path to the copy of the dynamic library\n that targets the ``i386`` architecture.\n x86_64_dylib (str): The full path to the copy of the dynamic library\n that targets the ``x86_64`` architecture.\n universal_dylib (str): The full path of the universal dynamic library\n that they should be combined into.\n \"\"\"\n subprocess.check_call(\n (\n \"lipo\",\n i386_dylib,\n x86_64_dylib,\n \"-create\",\n \"-output\",\n universal_dylib,\n )\n )\n print(\"Created universal dynamic library:\")\n print(\"\\t{}\".format(universal_dylib))\n curr_dir = os.getcwd()\n\n # Make a symlink **without** the library version.\n\n # NOTE: This assumes that os.path.dirname(universal_dylib) == FRANKENSTEIN.\n filename = os.path.basename(universal_dylib)\n name, _, extension = filename.split(\".\")\n unversioned = \"{}.{}\".format(name, extension)\n\n os.chdir(FRANKENSTEIN)\n os.symlink(filename, unversioned)\n os.chdir(curr_dir)\n\n print(\"Created symbolic link:\")\n print(\"\\t{}@ -> {}\".format(unversioned, filename))\n\n\ndef main():\n make_root_dir()\n\n x86_64_dir, libgfortran = find_libgfortran()\n i386_dir = get_i386_dir(x86_64_dir, libgfortran)\n\n full_path = os.path.join(x86_64_dir, libgfortran)\n libraries = non_universal_libraries(full_path)\n libquadmath = verify_libraries(full_path, libraries)\n\n i386_names = copy_arch(\"i386\", i386_dir, libgfortran, libquadmath)\n x86_64_names = copy_arch(\"x86_64\", x86_64_dir, libgfortran, libquadmath)\n\n (\n i386_libgfortran,\n universal_libgfortran,\n i386_libquadmath,\n universal_libquadmath,\n ) = i386_names\n x86_64_libgfortran, _, x86_64_libquadmath, _ = x86_64_names\n\n combine_dylibs(i386_libgfortran, x86_64_libgfortran, universal_libgfortran)\n combine_dylibs(i386_libquadmath, x86_64_libquadmath, universal_libquadmath)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7198840", "language": "Python", "matching_score": 3.429257392883301, "max_stars_count": 14, "path": "cython/make_universal_libgfortran.py" }, { "content": "from __future__ import print_function\n\nimport distutils.ccompiler\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport numpy.distutils.ccompiler\nimport numpy.distutils.core\nimport numpy.distutils.fcompiler\nimport numpy as np\nimport setuptools\nfrom setuptools.command import build_ext\n\n\nVERSION = \"0.0.1\"\nLOCAL_INCLUDE = os.path.join(\"example\", \"include\")\nLOCAL_LIB = os.path.join(\"example\", 
\"lib\")\n# NOTE: We prefer the relative path below over the absolute path\n# source_file = os.path.abspath(os.path.join(\n# os.path.dirname(__file__),\n# 'example.f90',\n# ))\n# because ``compile()`` will create the entire subdirectory\n# path matching it.\nSOURCE_FILE = os.path.join(\"example\", \"example.f90\")\nFORTRAN_LIBRARY_PREFIX = \"libraries: =\"\nERR_MSG = \"Fortran search default library path not found.\"\nBAD_PATH = \"Path {} is not a directory.\"\nMAC_OS_X = \"darwin\"\nMAC_OS_LINKER_ERR = \"Unexpected `linker_so` on OS X: {}.\"\nJOURNAL_TEMPLATE = \"journal-{}-{}.{}.txt\"\nJOURNAL_ENV = \"EXAMPLE_JOURNAL\"\n\n\ndef fortran_executable(f90_compiler):\n version_cmd = f90_compiler.version_cmd\n if len(version_cmd) != 2 or version_cmd[1] != \"-dumpversion\":\n raise ValueError(\"Unexpected Fortran version command\", version_cmd)\n\n return version_cmd[0]\n\n\ndef fortran_search_path(f90_compiler):\n cmd = (fortran_executable(f90_compiler), \"-print-search-dirs\")\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\")\n\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(FORTRAN_LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(FORTRAN_LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n print(ERR_MSG, file=sys.stderr)\n sys.exit(1)\n\n library_line = library_lines[0]\n accepted = set(f90_compiler.library_dirs)\n for part in library_line.split(\":\"):\n full_path = os.path.abspath(part)\n\n if not os.path.exists(full_path):\n continue\n\n if not os.path.isdir(full_path):\n msg = BAD_PATH.format(full_path)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n accepted.add(full_path)\n\n return sorted(accepted)\n\n\ndef patch_library_dirs(f90_compiler):\n \"\"\"Patch up ``f90_compiler.library_dirs``.\n\n This is needed on Mac OS X for a Homebrew installed ``gfortran``, which\n doesn't come with the correct library directories by default (this is\n likely unintentional).\n\n The ``library_dirs`` directory can be over-ridden by using the\n ``GFORTRAN_LIB`` environment variable. This might be desirable, since the\n Homebrew ``libgfortran`` is **also** not a universal binary. So this\n over-ride could be used to point to a custom made ``libgfortran.dylib``\n that is a combined version of the ``i386`` and ``x86_64`` versions of\n ``libgfortran`` provided by Homebrew.\n\n Args:\n f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler\n instance.\n \"\"\"\n from numpy.distutils.fcompiler import gnu\n\n # Only Mac OS X.\n if sys.platform != MAC_OS_X:\n return\n # Only ``gfortran``.\n if not isinstance(f90_compiler, gnu.Gnu95FCompiler):\n return\n\n gfortran_lib = os.environ.get(\"GFORTRAN_LIB\")\n library_dirs = f90_compiler.library_dirs\n\n # Update in place.\n if gfortran_lib is None:\n library_dirs[:] = fortran_search_path(f90_compiler)\n else:\n library_dirs[:] = [gfortran_lib]\n\n\ndef check_dual_architecture():\n \"\"\"Checks if the current Python binary is dual architecture.\n\n Only relevant on OS X. 
This uses ``lipo -info`` to check that the\n executable is a \"fat file\" with both ``i386`` and ``x86_64``\n architectures.\n\n We use ``lipo -info`` rather than ``file`` because ``lipo`` is\n purpose-built for checking the architecture(s) in a file.\n\n This property could also be checked by looking for the presence of\n multiple architectures in\n ``distutils.sysconfig.get_config_var('LDFLAGS')``.\n\n Returns:\n bool: Indicating if the Python binary is dual architecture\n (:data:`True`) or single architecture (:data:`False`).\n \"\"\"\n if sys.platform != MAC_OS_X:\n return False\n\n cmd = (\"lipo\", \"-info\", sys.executable)\n cmd_output = subprocess.check_output(cmd).decode(\"utf-8\").strip()\n\n prefix = \"Architectures in the fat file: {} are: \".format(sys.executable)\n\n if cmd_output.startswith(prefix):\n architectures = cmd_output[len(prefix) :].split()\n return \"i386\" in architectures and \"x86_64\" in architectures\n else:\n return False\n\n\ndef gfortran_supports_dual_architecture():\n \"\"\"Simple check if ``gfortran`` supports dual architecture.\n\n Only relevant on OS X. By default, the Homebrew ``gfortran`` **does not**\n support building dual architecture object files. This checks support\n for this feature by trying to build a very simple Fortran 90 program.\n \"\"\"\n if sys.platform != MAC_OS_X:\n return False\n\n temporary_directory = tempfile.mkdtemp(suffix=\"-fortran\")\n source_name = os.path.join(temporary_directory, \"bar.f90\")\n with open(source_name, \"w\") as file_obj:\n file_obj.writelines(\n [\n \"subroutine bar(x, y)\\n\",\n \" integer, intent(in) :: x\\n\",\n \" integer, intent(out) :: y\\n\",\n \"\\n\",\n \" y = x + 2\\n\",\n \"\\n\",\n \"end subroutine bar\\n\",\n \"\\n\",\n ]\n )\n\n object_name = os.path.join(temporary_directory, \"bar.o\")\n cmd = (\n \"gfortran\",\n \"-arch\",\n \"i386\",\n \"-arch\",\n \"x86_64\",\n \"-c\",\n source_name,\n \"-o\",\n object_name,\n )\n\n cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n result = b\"arch flags ignored\" not in cmd_output\n\n shutil.rmtree(temporary_directory)\n\n return result\n\n\nclass _DualArchitectureCompile(object):\n \"\"\"Callable wrapper that over-rides ``_compile``.\n\n Only relevant on OS X. Objects of this type are intended to be used\n to replace / augment to ``_compile`` method on a ``Gnu95FCompiler`` (i.e.\n a Fortran compiler). This is because the Homebrew ``gfortran`` can't\n build fat binaries:\n\n .. code-block:: console\n\n $ gfortran -arch i386 -arch x86_64 -c bar.f90 -o bar.o\n gfortran: warning: x86_64 conflicts with i386 (arch flags ignored)\n\n So instead, this will compile two separate object files and combine them:\n\n .. 
code-block:: console\n\n $ gfortran -arch i386 -c bar.f90 -o ${I386_DIR}/bar.o\n $ gfortran -arch x86_64 -c bar.f90 -o ${X86_64_DIR}/bar.o\n $ lipo ${I386_DIR}/bar.o ${X86_64_DIR}/bar.o -create -output bar.o\n\n Args:\n f90_compiler (numpy.distutils.fcompiler.gnu.Gnu95FCompiler): A Fortran\n compiler instance.\n \"\"\"\n\n def __init__(self, f90_compiler):\n self.f90_compiler = f90_compiler\n self.original_compile = f90_compiler._compile\n self.compiler_cmd = f90_compiler.compiler_f90\n self.arch_index = None # Set in ``_verify()``.\n self.arch_value = None # Set in ``_verify()``.\n self._verify()\n\n def _verify(self):\n \"\"\"Makes sure the constructor arguments are valid.\n\n In particular, makes sure that ``f90_compiler`` corresponds to\n ``gfortran`` and that ``compiler_cmd`` has exactly one instance\n of ``-arch``.\n\n If this succeeds, will set ``arch_index`` and ``arch_value`` on\n the instance.\n\n Raises:\n TypeError: If ``compiler_cmd`` is not a ``list``.\n TypeError: If ``f90_compiler`` is not a ``Gnu95FCompiler``.\n ValueError: If ``compiler_cmd`` doesn't have exactly one ``-arch``\n segment.\n ValueError: If ``-arch`` is the **last** segment in\n ``compiler_cmd``.\n ValueError: If the ``-arch`` value is not ``i386`` or ``x86_64``.\n \"\"\"\n from numpy.distutils.fcompiler import gnu\n\n if not isinstance(self.compiler_cmd, list):\n raise TypeError(\"Expected a list\", self.compiler_cmd)\n\n if not isinstance(self.f90_compiler, gnu.Gnu95FCompiler):\n raise TypeError(\"Expected a Gnu95FCompiler\", self.f90_compiler)\n\n if self.compiler_cmd.count(\"-arch\") != 1:\n raise ValueError(\n 'Did not find exactly one \"-arch\" in', self.compiler_cmd\n )\n\n arch_index = self.compiler_cmd.index(\"-arch\") + 1\n if arch_index == len(self.compiler_cmd):\n raise ValueError(\n \"There is no architecture specified in\", self.compiler_cmd\n )\n\n arch_value = self.compiler_cmd[arch_index]\n if arch_value not in (\"i386\", \"x86_64\"):\n raise ValueError(\n \"Unexpected architecture\", arch_value, \"in\", self.compiler_cmd\n )\n\n self.arch_index = arch_index\n self.arch_value = arch_value\n\n def _set_architecture(self, architecture):\n \"\"\"Set the architecture on the Fortran compiler.\n\n ``compiler_cmd`` is actually a list (mutable), so we can update it here\n and it will change the architecture that ``f90_compiler`` targets.\n\n Args:\n architecture (str): One of ``i386`` or ``x86_64``.\n \"\"\"\n self.compiler_cmd[self.arch_index] = architecture\n\n def _restore_architecture(self):\n \"\"\"Restore the architecture on the Fortran compiler.\n\n Resets the ``-arch`` value in ``compiler_cmd`` to its original value.\n \"\"\"\n self.compiler_cmd[self.arch_index] = self.arch_value\n\n def __call__(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n \"\"\"Call-able replacement for ``_compile``.\n\n This assumes (but does not verify) that ``original_compile`` has\n no return value.\n\n Args:\n obj (str): The location of the object file to be created.\n src (str): The location of the source file to be compiled.\n ext (str): The file extension (used to determine flags).\n cc_args (List[str]): Compile args, typically just ``['-c']``.\n extra_postargs (List[str]): Extra arguments at the end of the\n compile command.\n pp_opts (List[str]): Unused by the NumPy ``distutils`` Fortran\n compilers. 
List of pre-processor options.\n \"\"\"\n obj_name = os.path.basename(obj)\n\n # Create a directory and compile an object targeting i386.\n i386_dir = tempfile.mkdtemp(suffix=\"-i386\")\n i386_obj = os.path.join(i386_dir, obj_name)\n self._set_architecture(\"i386\")\n self.original_compile(\n i386_obj, src, ext, cc_args, extra_postargs, pp_opts\n )\n\n # Create a directory and compile an object targeting x86_64.\n x86_64_dir = tempfile.mkdtemp(suffix=\"-x86_64\")\n x86_64_obj = os.path.join(x86_64_dir, obj_name)\n self._set_architecture(\"x86_64\")\n self.original_compile(\n x86_64_obj, src, ext, cc_args, extra_postargs, pp_opts\n )\n\n # Restore the compiler back to how it was before we modified it.\n self._restore_architecture()\n\n # Use ``lipo`` to combine the object files into a universal.\n lipo_cmd = (\"lipo\", i386_obj, x86_64_obj, \"-create\", \"-output\", obj)\n self.f90_compiler.spawn(lipo_cmd)\n\n # Clean up the temporary directories.\n shutil.rmtree(i386_dir)\n shutil.rmtree(x86_64_dir)\n\n\ndef patch_gfortran(f90_compiler):\n \"\"\"Modify the Fortran compiler to create universal binary object files.\n\n Does so by patching ``f90_compiler._compile`` with a custom command.\n\n Patching is only done if:\n\n * The platform is OS X\n * The current compiler is ``gfortran``\n * The current Python is a universal binary (i.e. dual architecture)\n * The version of ``gfortran`` cannot create universal binaries\n\n Args:\n f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler\n instance.\n \"\"\"\n from numpy.distutils.fcompiler import gnu\n\n # Only on OS X.\n if sys.platform != MAC_OS_X:\n return\n\n # Only with ``gfortran``.\n if not isinstance(f90_compiler, gnu.Gnu95FCompiler):\n return\n\n # Only if Python is a universal binary.\n if not check_dual_architecture():\n return\n\n # Only if ``gfortran`` can't produce universal binaries.\n if gfortran_supports_dual_architecture():\n return\n\n f90_compiler._compile = _DualArchitectureCompile(f90_compiler)\n\n\ndef get_f90_compiler():\n c_compiler = distutils.ccompiler.new_compiler()\n c_compiler.verbose = 2\n\n f90_compiler = numpy.distutils.fcompiler.new_fcompiler(\n requiref90=True, c_compiler=c_compiler\n )\n dist = numpy.distutils.core.get_distribution(always=True)\n f90_compiler.customize(dist)\n f90_compiler.verbose = 2\n\n patch_library_dirs(f90_compiler)\n patch_gfortran(f90_compiler)\n\n return f90_compiler\n\n\ndef compile_fortran_obj_file(f90_compiler):\n obj_file, = f90_compiler.compile(\n [SOURCE_FILE],\n output_dir=None,\n macros=[],\n include_dirs=[],\n debug=None,\n extra_postargs=[],\n depends=[],\n )\n\n return obj_file\n\n\ndef make_fortran_lib(f90_compiler, obj_file):\n c_compiler = f90_compiler.c_compiler\n c_compiler.create_static_lib([obj_file], \"example\", output_dir=LOCAL_LIB)\n\n\ndef add_directory(dir_name, example_files, prefix):\n for subdirectory, _, filenames in os.walk(dir_name):\n for filename in filenames:\n path = os.path.join(subdirectory, filename)\n # NOTE: We assume but don't check that `_` is the empty\n # string (i.e. 
`filename` starts with the prefix.\n _, relative_name = path.split(prefix, 1)\n example_files.append(relative_name)\n\n\ndef get_package_data():\n return {\n \"example\": [\n \"*.pxd\",\n os.path.join(\"include\", \"*.h\"),\n os.path.join(\"lib\", \"*.a\"),\n os.path.join(\"lib\", \"*.lib\"),\n ]\n }\n\n\nclass BuildFortranThenExt(build_ext.build_ext):\n\n # Will be set at runtime, not import time.\n F90_COMPILER = None\n\n def __init__(self, *args, **kwargs):\n build_ext.build_ext.__init__(self, *args, **kwargs)\n self.root_dir = self.get_root_dir()\n # NOTE: ``get_journal_file()`` depends on ``root_dir`` being set.\n self.journal_file = self.get_journal_file()\n self.commands = []\n\n @classmethod\n def set_compiler(cls):\n if cls.F90_COMPILER is None:\n cls.F90_COMPILER = get_f90_compiler()\n\n @classmethod\n def get_library_dirs(cls):\n cls.set_compiler()\n\n # NOTE: This is a hack to show failure when `libgfortran`\n # is not included. (Only for the `Makefile`, not for\n # actual usage.)\n if \"IGNORE_LIBRARIES\" in os.environ:\n return [], []\n else:\n return cls.F90_COMPILER.libraries, cls.F90_COMPILER.library_dirs\n\n @staticmethod\n def get_root_dir():\n \"\"\"Get directory where ``setup.py`` was invoked.\n\n This is a **really** nasty hack that relies on this ``setup.py``\n file installing into a \"known\" virtual environment.\n\n Does this by inspecting the last entry in ``sys.argv`` and looking\n for the sub-path ``/cython/venv/include/`` as a sign for where\n the \"parent\" directory is.\n\n Returns:\n Optional[str]: The root directory, if it can be\n determined.\n \"\"\"\n if not sys.argv:\n return None\n\n final_arg = sys.argv[-1]\n # Nasty hack:\n sub_path = \"{0}cython{0}venv{0}include{0}\".format(os.path.sep)\n index = final_arg.find(sub_path)\n if index == -1:\n return None\n\n return final_arg[:index]\n\n def get_journal_file(self):\n if self.root_dir is None:\n return None\n\n journal = os.environ.get(JOURNAL_ENV)\n\n if journal is None:\n filename = JOURNAL_TEMPLATE.format(\n sys.platform, sys.version_info[0], sys.version_info[1]\n )\n journal = os.path.join(self.root_dir, \"cython\", filename)\n\n return journal\n\n def start_journaling(self):\n \"\"\"Capture calls to the system by compilers.\n\n See: https://github.com/numpy/numpy/blob/v1.13.1/\\\n numpy/distutils/ccompiler.py#L154\n\n Intercepts all calls to ``CCompiler.spawn`` and keeps the\n arguments around to be stored in the local ``commands``\n instance attribute.\n \"\"\"\n if self.journal_file is None:\n return\n\n def journaled_spawn(patched_self, cmd, display=None):\n self.commands.append(cmd)\n return numpy.distutils.ccompiler.CCompiler_spawn(\n patched_self, cmd, display=None\n )\n\n numpy.distutils.ccompiler.replace_method(\n distutils.ccompiler.CCompiler, \"spawn\", journaled_spawn\n )\n\n @staticmethod\n def _command_to_text(command):\n # NOTE: This assumes, but doesn't check that the command has 3\n # or more arguments.\n first_line = \"{} \\\\\"\n middle_line = \" {} \\\\\"\n last_line = \" {}\"\n\n parts = [first_line.format(command[0])]\n for argument in command[1:-1]:\n parts.append(middle_line.format(argument))\n parts.append(last_line.format(command[-1]))\n\n return \"\\n\".join(parts)\n\n def _commands_to_text(self):\n separator = \"-\" * 40\n\n parts = [separator]\n for command in self.commands:\n command_text = self._command_to_text(command)\n parts.extend([command_text, separator])\n\n parts.append(\"\") # Trailing newline in file.\n return \"\\n\".join(parts)\n\n def save_journal(self):\n 
\"\"\"Save journaled commands to file.\n\n If there is no active journal, does nothing.\n \"\"\"\n if self.journal_file is None:\n return\n\n as_text = self._commands_to_text()\n # Replace the \"sensitive\" parts of the file.\n as_text = as_text.replace(self.root_dir, \"${foreign-fortran}\")\n home_dir = os.path.expanduser(\"~\")\n as_text = as_text.replace(home_dir, \"${HOME}\")\n\n with open(self.journal_file, \"w\") as file_obj:\n file_obj.write(as_text)\n\n def run(self):\n self.set_compiler()\n self.start_journaling()\n\n obj_file = compile_fortran_obj_file(self.F90_COMPILER)\n make_fortran_lib(self.F90_COMPILER, obj_file)\n # Copy into the ``build_lib`` directory (which is what will end\n # up being installed).\n lib_dir = os.path.join(self.build_lib, LOCAL_LIB)\n self.copy_tree(LOCAL_LIB, lib_dir)\n\n result = build_ext.build_ext.run(self)\n self.save_journal()\n return result\n\n\ndef main():\n libraries, library_dirs = BuildFortranThenExt.get_library_dirs()\n npy_include_dir = np.get_include()\n cython_extension = setuptools.Extension(\n \"example.fast\",\n [os.path.join(\"example\", \"fast.c\")],\n include_dirs=[npy_include_dir, LOCAL_INCLUDE],\n libraries=libraries,\n library_dirs=library_dirs,\n extra_objects=[os.path.join(\"example\", \"example.o\")],\n )\n setuptools.setup(\n name=\"example\",\n version=VERSION,\n description=\"Cython Example calling Fortran\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n url=\"https://github.com/dhermes/foreign-fortran\",\n packages=[\"example\"],\n ext_modules=[cython_extension],\n package_data=get_package_data(),\n cmdclass={\"build_ext\": BuildFortranThenExt},\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "12720403", "language": "Python", "matching_score": 5.295933723449707, "max_stars_count": 14, "path": "cython/package/setup.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# NOTE: This file is copied (rather than symlinked) since a symlink **outside**\n# of the package tree won't get copied during a ``pip install``.\n\nimport os\nimport pathlib\nimport subprocess\nimport sys\n\nimport numpy as np\nimport setuptools\n\n\nFORTRAN_LIBRARY_PREFIX = \"libraries: =\"\nGFORTRAN_MISSING_LIBS = \"\"\"\\\n``gfortran`` default library path not found via:\n\n$ gfortran -print-search-dirs\n{}\"\"\"\nGFORTRAN_BAD_PATH = \"``gfortran`` library path {} is not a directory.\"\n\n\ndef gfortran_search_path():\n \"\"\"Get the library directory paths for ``gfortran``.\n\n Looks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``\n and then parses the paths. 
If this fails for any reason, this method will\n print an error and return ``library_dirs``.\n\n Returns:\n List[str]: The library directories for ``gfortran``.\n \"\"\"\n cmd = (\"gfortran\", \"-print-search-dirs\")\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n return_code = process.wait()\n # Bail out if the command failed.\n if return_code != 0:\n return []\n\n cmd_output = process.stdout.read().decode(\"utf-8\")\n # Find single line starting with ``libraries: ``.\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(FORTRAN_LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(FORTRAN_LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n msg = GFORTRAN_MISSING_LIBS.format(cmd_output)\n print(msg, file=sys.stderr)\n return []\n\n # Go through each library in the ``libraries: = ...`` line.\n library_line = library_lines[0]\n accepted = set()\n for part in library_line.split(os.pathsep):\n full_path = os.path.abspath(part.strip())\n if os.path.isdir(full_path):\n accepted.add(full_path)\n else:\n # Ignore anything that isn't a directory.\n msg = GFORTRAN_BAD_PATH.format(full_path)\n print(msg, file=sys.stderr)\n\n return sorted(accepted)\n\n\ndef get_extra_objects(here):\n return (\n os.path.join(here, \"object_files\", \"types.o\"),\n os.path.join(here, \"object_files\", \"forall_.o\"),\n os.path.join(here, \"object_files\", \"do_.o\"),\n os.path.join(here, \"object_files\", \"spread_.o\"),\n os.path.join(here, \"object_files\", \"serial_.o\"),\n os.path.join(here, \"object_files\", \"vs_algorithm.o\"),\n )\n\n\ndef extension_modules(here, name):\n extra_objects = get_extra_objects(here)\n missing = [path for path in extra_objects if not os.path.isfile(path)]\n if missing:\n parts = [\"Missing object file(s):\"]\n parts.extend(f\"- {path}\" for path in missing)\n parts.extend(\n [\n \"\",\n f\"here: {here}\",\n f\"__file__: {__file__}\",\n \"\",\n \"files in `here`:\",\n ]\n )\n files_here = pathlib.Path(here).glob(\"*\")\n parts.extend(f\"- {path}\" for path in files_here)\n\n msg = \"\\n\".join(parts)\n raise RuntimeError(msg)\n\n extension = setuptools.Extension(\n f\"{name}._binary\",\n [os.path.join(name, \"_binary.c\")],\n extra_objects=extra_objects,\n include_dirs=[np.get_include()],\n libraries=[\"gfortran\"],\n library_dirs=gfortran_search_path(),\n )\n return [extension]\n\n\ndef do_setup(here, name):\n ext_modules = extension_modules(here, name)\n setuptools.setup(\n name=name,\n packages=[name],\n install_requires=[\"numpy\"],\n ext_modules=ext_modules,\n )\n", "id": "111570", "language": "Python", "matching_score": 4.203651428222656, "max_stars_count": 0, "path": "src/python-bakeoff/setup_shared.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\nimport os\nimport subprocess\nimport sys\n\n\nLIBRARY_PREFIX = \"libraries: =\"\nERR_MSG = \"Fortran search default library path not found.\"\nBAD_PATH = \"Path 
{} is not a directory.\"\n\n\ndef main():\n cmd = (\"gfortran\", \"-print-search-dirs\")\n cmd_output = subprocess.check_output(cmd)\n cmd_output = cmd_output.decode(\"utf-8\")\n\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n print(ERR_MSG, file=sys.stderr)\n sys.exit(1)\n\n library_line = library_lines[0]\n accepted = collections.OrderedDict() # Like an ordered set.\n for part in library_line.split(\":\"):\n full_path = os.path.abspath(part)\n\n if not os.path.exists(full_path):\n continue\n\n if not os.path.isdir(full_path):\n msg = BAD_PATH.format(full_path)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n accepted[\"-L\" + full_path] = True\n\n print(\" \".join(accepted.keys()))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "6276965", "language": "Python", "matching_score": 0.9388213753700256, "max_stars_count": 14, "path": "python/gfortran_search_path.py" }, { "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/2.0/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport os\nimport subprocess\n\nimport sphinx_rtd_theme\n\n\n# -- Project information -----------------------------------------------------\n\n\ndef is_clean():\n cmd_output = subprocess.check_output((\"git\", \"status\", \"--short\"))\n modified_files = cmd_output.decode(\"ascii\").strip()\n return modified_files == \"\"\n\n\ndef get_version_full():\n cmd_output = subprocess.check_output((\"git\", \"log\", \"-1\", \"--pretty=%H\"))\n return cmd_output.decode(\"ascii\").strip()\n\n\ndef get_version(version_full, currently_clean):\n short_version = version_full[:8]\n if not currently_clean:\n return f\"{short_version}-dirty\"\n\n return short_version\n\n\ndef get_github_version(version_full, currently_clean):\n if not currently_clean:\n return \"main\"\n\n return version_full\n\n\ndef get_copyright(version):\n return f\"2020, <NAME> Revision {version}\"\n\n\nproject = \"myst-go\"\n_currently_clean = is_clean()\n_version_full = get_version_full()\nversion = get_version(_version_full, _currently_clean)\ncopyright = get_copyright(version)\nauthor = \"<NAME>\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n]\nmyst_config = {}\n# See:\n# https://www.ericholscher.com/blog/2016/mar/15/dont-use-markdown-for-technical-docs/\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_favicon = os.path.join(\"_static\", \"favicon.png\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_context = {\n \"display_github\": True,\n \"github_host\": \"github.com\",\n \"github_user\": \"dhermes\",\n \"github_repo\": \"myst-go\",\n \"github_version\": get_github_version(_version_full, _currently_clean),\n \"conf_py_path\": \"/docs/\",\n}\n", "id": "6782137", "language": "Python", "matching_score": 3.7227492332458496, "max_stars_count": 0, "path": "docs/conf.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom nbsite.shared_conf import *\n\nproject = u'hvPlot'\nauthors = u'PyViz developers'\ncopyright = u'2018 ' + authors\ndescription = 'A high-level plotting API for the PyData ecosystem built on HoloViews'\n\n# TODO: gah, version\nversion = '0.0.1'\nrelease = '0.0.1'\n\nhtml_static_path += ['_static']\nhtml_theme = 'sphinx_ioam_theme'\nhtml_theme_options = {\n 'logo': 'hvplot-logo.png',\n 'favicon': 'favicon.ico',\n 'css': 'main.css'\n}\n\n_NAV = (\n ('Getting Started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('About', 'about')\n)\n\ntemplates_path = ['_templates']\n\nhtml_context.update({\n 'PROJECT': project,\n 'DESCRIPTION': description,\n 'AUTHOR': authors,\n # will work without this - for canonical (so can ignore when building locally or test deploying)\n 'WEBSITE_SERVER': 'https://pyviz.github.io/holoplot',\n 'VERSION': version,\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\n ('Gitter', '//gitter.im/pyviz/pyviz'),\n ('Github', '//github.com/pyviz/holoplot'),\n )\n})\n", "id": "5756523", "language": "Python", "matching_score": 1.0585614442825317, "max_stars_count": 0, "path": "doc/conf.py" }, { "content": "from distutils.version import LooseVersion\n\nfrom . import patch, _hv\n\ntry:\n import intake.plotting # noqa\n patch('intake', extension='bokeh')\nexcept:\n import intake\n if LooseVersion(intake.__version__) <= '0.1.5':\n patch('intake', extension='bokeh')\n patch('intake', 'plot')\n else:\n if not _hv.extension._loaded:\n _hv.extension('bokeh', logo=False)\n", "id": "4989485", "language": "Python", "matching_score": 1.5915908813476562, "max_stars_count": 0, "path": "hvplot/intake.py" }, { "content": "from . import patch\n\npatch('pandas', extension='bokeh')\n", "id": "8117512", "language": "Python", "matching_score": 1.616901159286499, "max_stars_count": 0, "path": "hvplot/pandas.py" }, { "content": "from . 
import patch\n\npatch('dask', extension='bokeh')\n", "id": "12604433", "language": "Python", "matching_score": 1.616901159286499, "max_stars_count": 0, "path": "hvplot/dask.py" }, { "content": "from . import patch\n\npatch('streamz', extension='bokeh')\n", "id": "4145694", "language": "Python", "matching_score": 1.2717527151107788, "max_stars_count": 0, "path": "hvplot/streamz.py" }, { "content": "from . import patch\n\npatch('xarray', extension='bokeh')\n", "id": "1809337", "language": "Python", "matching_score": 0.5024964809417725, "max_stars_count": 0, "path": "hvplot/xarray.py" }, { "content": "import Cython.Build\nimport setuptools\n\nimport example\n\n\ndef main():\n extension_keywords = example.get_extension_keywords()\n ext_module = setuptools.Extension(\n \"wrapper\", [\"wrapper.pyx\"], **extension_keywords\n )\n setuptools.setup(\n name=\"cimport-ing example module interface\",\n ext_modules=Cython.Build.cythonize([ext_module]),\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "2285404", "language": "Python", "matching_score": 2.7451560497283936, "max_stars_count": 14, "path": "cython/use_cimport/setup.py" }, { "content": "# NOTE: This is copied in three different places (bad software practice).\nimport setuptools\nimport Cython.Build\n\n\ndef main():\n extension = Cython.Build.cythonize('repro.pyx')\n setuptools.setup(\n ext_modules=extension,\n )\n\n\nif __name__ == '__main__':\n main()\n", "id": "1107957", "language": "Python", "matching_score": 0.4505465626716614, "max_stars_count": 0, "path": "simple/setup.py" }, { "content": "# NOTE: Most of this is copied in 3 places (bad software practice).\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport repro\n\n\nSEPARATOR = '-' * 60\nTEMPLATE = \"\"\"\\\nmat =\n{}\n\n mat F-contiguous? {}\n\nnew_mat F-contiguous? {!r:5}\n all(new_mat == mat)? {!r:5}\n new_mat is mat ? 
{!r:5}\n\"\"\"\n\n\ndef main():\n mat1 = np.array([\n [2.0, 0.0],\n [1.0, 3.0],\n ], order='C')\n mat2 = np.array([\n [5.5, -1.0],\n [2.0, 0.0],\n ], order='F')\n mat3 = np.array([\n [7.0, 2.0],\n ], order='F')\n mat4 = np.array([\n [11.0],\n [12.0],\n [10.0],\n ], order='F')\n\n mats = (mat1, mat2, mat3, mat4)\n\n for mat in mats:\n print(SEPARATOR)\n new_mat = repro.advanced_copy_fortran(mat)\n msg = TEMPLATE.format(\n mat, mat.flags.f_contiguous,\n new_mat.flags.f_contiguous,\n np.all(mat == new_mat),\n mat is new_mat)\n print(msg, end='')\n\n\nif __name__ == '__main__':\n main()\n", "id": "10694781", "language": "Python", "matching_score": 0.7337709665298462, "max_stars_count": 0, "path": "advanced/test_it.py" }, { "content": "DEBUG = True\n\n\ndef waldo(J):\n if DEBUG:\n print '=' * 60\n print 'type(J): %r' % (type(J),)\n print 'J.dtype: %r' % (J.dtype,)\n print 'J.shape: %r' % (J.shape,)\n print 'J:'\n print J\n print 'J.flags:'\n print J.flags\n print '=' * 60\n J *= 2.1\n", "id": "11298899", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "bar.py" }, { "content": "# pylint: disable=missing-docstring\n\nimport os\n\nimport screen_tab_utils\n\n\ndef remove_emacs_desktop(session_info):\n if screen_tab_utils.emacs_desktop_saved(session_info):\n os.remove(screen_tab_utils.emacs_desktop_path(session_info))\n\n\ndef remove_current_session(screen_sessions, session_info):\n session_id = session_info[\"session_id\"]\n window = session_info[\"window\"]\n\n current_session = screen_sessions.setdefault(session_id, {})\n\n current_tab = current_session.get(window)\n\n keep_tab_intact = False\n if isinstance(current_tab, list):\n if len(current_tab) == 2 and current_tab[1] == \"SIGTERM\":\n keep_tab_intact = True\n # The tab was not EXITed by a user, restore the correct path\n # for future use.\n current_session[window] = current_tab[0]\n else:\n raise ValueError(\"Only valid list ends in SIGTERM.\")\n elif isinstance(current_tab, str):\n current_session.pop(window)\n elif current_tab is not None:\n raise TypeError(\"Unexpected value for current tab.\")\n\n if not current_session:\n screen_sessions.pop(session_id)\n\n return keep_tab_intact\n\n\ndef main():\n session_info = screen_tab_utils.get_session_info()\n if session_info is None:\n return\n\n screen_sessions = screen_tab_utils.load_sessions()\n keep_tab_intact = remove_current_session(screen_sessions, session_info)\n screen_tab_utils.write_sessions(screen_sessions)\n if not keep_tab_intact:\n remove_emacs_desktop(session_info)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3832935", "language": "Python", "matching_score": 2.7644927501678467, "max_stars_count": 1, "path": "remove_screen_tab.py" }, { "content": "# pylint: disable=missing-docstring\n\nimport screen_tab_utils\n\n\ndef main():\n session_info = screen_tab_utils.get_session_info()\n if session_info is None:\n return\n\n session_id = session_info[\"session_id\"]\n window = session_info[\"window\"]\n\n screen_sessions = screen_tab_utils.load_sessions()\n current_session = screen_sessions.setdefault(session_id, {})\n current_tab = current_session.get(window)\n\n if isinstance(current_tab, str):\n current_session[window] = [current_tab, \"SIGTERM\"]\n screen_tab_utils.write_sessions(screen_sessions)\n elif current_tab is not None:\n raise TypeError(\n \"Tab value not expected to differ from non-existent or a string.\"\n )\n\n\nif __name__ == \"__main__\":\n # See http://stackoverflow.com/a/9256709/1068170\n # for details on determining which signals are 
being caught.\n main()\n", "id": "7321406", "language": "Python", "matching_score": 2.602821111679077, "max_stars_count": 1, "path": "detect_term.py" }, { "content": "# pylint: disable=missing-docstring\n\nfrom __future__ import print_function\n\nimport subprocess\nimport sys\n\nimport screen_tab_utils\n\n\nEMACS_DESKTOP_ALERT = \"\"\"\\\nYo dawg\n\nOne last thing\n\n\nYou have an emacs desktop saved for this tab.\nYou should launch emacs and restore that shizz.\n\"\"\"\nTAB_MISSING_TEMPLATE = \"\"\"\\\nYo dawg\n\n\nTab %s is not stored for session %r.\n\n\"\"\"\n\n\ndef obey_symlink_pwd():\n \"\"\"A version of pwd which does not follow symlinks.\n\n This is because os.getcwd() follows symlinks.\n \"\"\"\n proc = subprocess.Popen([\"pwd\"], stdout=subprocess.PIPE)\n proc.wait()\n return proc.stdout.read().strip()\n\n\ndef print_emacs_stuff(session_info):\n if screen_tab_utils.emacs_desktop_saved(session_info):\n print(EMACS_DESKTOP_ALERT, file=sys.stderr)\n\n\ndef change_directory(screen_sessions, session_info, called_from_bashrc=False):\n session_id = session_info[\"session_id\"]\n window = session_info[\"window\"]\n\n current_session = screen_sessions.setdefault(session_id, {})\n\n if not called_from_bashrc:\n # We only need to overwrite the current_session[window] value\n # if not being called from bashrc.\n current_session[window] = obey_symlink_pwd()\n return\n\n previous_path = current_session.get(window)\n if previous_path is not None:\n # NOTE: The printed value will be sourced.\n print(previous_path)\n # NOTE: `print_emacs_stuff()` puts information in stderr so the user\n # can learn about the environment they are re-launching.\n print_emacs_stuff(session_info)\n # After we print the path, the ~/.bashrc file will cd into it to\n # re-initialize a stored environment.\n sys.exit(0)\n else:\n print(TAB_MISSING_TEMPLATE % (window, session_id), file=sys.stderr)\n windows_as_ints = map(int, current_session.keys())\n max_window = 0\n if windows_as_ints:\n max_window = max(windows_as_ints)\n print(\"Your max is\", max_window, file=sys.stderr)\n # Since `previous_path` is None, add the\n # new path (nothing to overwrite).\n current_session[window] = obey_symlink_pwd()\n # Return the path since it will be printed within the sourced bashrc.\n return current_session[window]\n\n\ndef main(argv):\n if len(argv) > 2:\n raise ValueError(\"Using this method wrong.\")\n\n session_info = screen_tab_utils.get_session_info()\n if session_info is None:\n return\n\n screen_sessions = screen_tab_utils.load_sessions()\n\n called_from_bashrc = len(argv) == 2 and argv[1] == \"--new\"\n path_to_print = change_directory(\n screen_sessions, session_info, called_from_bashrc=called_from_bashrc\n )\n\n screen_tab_utils.write_sessions(screen_sessions)\n\n if called_from_bashrc:\n print(path_to_print)\n print_emacs_stuff(session_info)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "id": "3620782", "language": "Python", "matching_score": 3.0868563652038574, "max_stars_count": 1, "path": "add_screen_tab.py" }, { "content": "# pylint: disable=missing-docstring\n\nimport json\nimport os\n\n\nSESSION_ID_KEY = \"STY\"\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nSCREEN_SESSIONS_FILE = os.path.join(SCRIPT_DIR, \"screen_sessions.json\")\n\n\ndef get_session_id():\n session_sty_value = os.getenv(SESSION_ID_KEY)\n if session_sty_value is None or \".\" not in session_sty_value:\n return\n return session_sty_value.split(\".\", 1)[1]\n\n\ndef emacs_desktop_path(session_info):\n session_id = 
session_info[\"session_id\"]\n window = session_info[\"window\"]\n\n session_name = \"emacs-desktop-%s-%s\" % (session_id, window)\n session_path = os.path.join(SCRIPT_DIR, \"emacs.d\", session_name)\n return session_path\n\n\ndef get_session_info():\n session_id = get_session_id()\n window = os.getenv(\"WINDOW\")\n try:\n # NOTE: Don't actually use integer, since integers can't be JSON keys.\n int(window)\n except (ValueError, TypeError):\n pass\n if session_id is None or window is None:\n return None\n\n return {\"session_id\": session_id, \"window\": window}\n\n\ndef emacs_desktop_saved(session_info):\n \"\"\"Checks if there is a saved desktop session for this tab.\n\n Caller makes sure session_id and window are not None.\n \"\"\"\n return os.path.isfile(emacs_desktop_path(session_info))\n\n\ndef load_sessions():\n if os.path.isfile(SCREEN_SESSIONS_FILE):\n # NOTE: Not using `with` since may be using an old version of Python.\n with open(SCREEN_SESSIONS_FILE, \"r\") as file_obj:\n screen_sessions = json.load(file_obj)\n else:\n screen_sessions = {}\n\n return screen_sessions\n\n\ndef write_sessions(screen_sessions):\n # NOTE: Not using `with` since may be using an old version of Python.\n with open(SCREEN_SESSIONS_FILE, \"w\") as file_obj:\n json.dump(screen_sessions, file_obj, indent=2)\n", "id": "1541014", "language": "Python", "matching_score": 1.3355499505996704, "max_stars_count": 1, "path": "screen_tab_utils.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a configuration file for running ``nox`` on this project.\n\nTo determine the supported actions run ``nox --list-sessions`` from the\nproject root.\n\"\"\"\n\nimport os\n\nimport nox\nimport py.path\n\n\nNOX_DIR = os.path.abspath(os.path.dirname(__file__))\nDEFAULT_INTERPRETER = \"3.6\"\n\n\ndef get_path(*names):\n return os.path.join(NOX_DIR, *names)\n\n\nclass Remove(object):\n def __init__(self, prefix, extensions):\n self.prefix = prefix\n self.extensions = extensions\n\n def __call__(self):\n for extension in self.extensions:\n path = \"{}.{}\".format(self.prefix, extension)\n os.remove(path)\n\n\ndef build_tex_file(session, base, new_id, extensions=(), with_bibtex=False):\n # NOTE: This assumes that ``session.chdir(get_path('doc'))``\n # has been called.\n modify_id = get_path(\"scripts\", \"modify_pdf_id.py\")\n\n if with_bibtex:\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n else:\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n\n path = get_path(\"doc\", base)\n remove = Remove(path, extensions)\n session.run(remove)\n session.run(\"python\", modify_id, \"--base\", path, \"--id\", new_id)\n\n\n@nox.session(py=False)\ndef build_tex(session):\n if py.path.local.sysfind(\"pdflatex\") is None:\n session.skip(\"`pdflatex` must be installed\")\n\n if py.path.local.sysfind(\"bibtex\") is 
None:\n session.skip(\"`bibtex` must be installed\")\n\n session.chdir(get_path(\"doc\"))\n\n build_tex_file(\n session,\n \"paper\",\n \"F092359D979FDC08931DA1922F3E123E\",\n extensions=(\"aux\", \"bbl\", \"blg\", \"log\", \"out\", \"spl\"),\n with_bibtex=True,\n )\n\n build_tex_file(\n session,\n \"cover_letter\",\n \"BACA8D659970198BDF7D11B67FEA6299\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"tikz_local_err\",\n \"EF7ADBEFE6118EFEE506836A7AFF7C9E\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"tikz_filtration\",\n \"52F169AADDA4C4C85C6D5038361816C9\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n\n@nox.session(py=False)\ndef flop_counts(session):\n env = {\"PYTHONPATH\": get_path(\"src\")}\n compute_counts = get_path(\"scripts\", \"compute_counts.py\")\n session.run(\"python\", compute_counts, env=env)\n\n\n@nox.session(py=False)\ndef verify_table(session):\n env = {\"PYTHONPATH\": get_path(\"src\")}\n script = get_path(\"scripts\", \"verify_table.py\")\n session.run(\"python\", script, env=env)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef make_images(session):\n # Install all dependencies.\n session.install(\"--requirement\", \"make-images-requirements.txt\")\n # Run the script(s).\n # Make sure\n # - Custom ``matplotlibrc`` is used\n # - Code in ``src/`` is importable\n # - PDFs have deterministic ``CreationDate``\n env = {\n \"MATPLOTLIBRC\": get_path(\"images\"),\n \"PYTHONPATH\": get_path(\"src\"),\n \"SOURCE_DATE_EPOCH\": \"0\",\n }\n names = (\n \"error_against_cond.py\",\n \"smooth_drawing.py\",\n \"horner_inferior.py\",\n \"compensated_insufficient.py\",\n )\n for name in names:\n script = get_path(\"scripts\", name)\n session.run(\"python\", script, env=env)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef update_requirements(session):\n if py.path.local.sysfind(\"git\") is None:\n session.skip(\"`git` must be installed\")\n\n # Install all dependencies.\n session.install(\"pip-tools\")\n\n # Update all of the requirements file(s).\n names = (\"make-images\",)\n for name in names:\n in_name = \"{}-requirements.in\".format(name)\n txt_name = \"{}-requirements.txt\".format(name)\n session.run(\n \"pip-compile\", \"--upgrade\", \"--output-file\", txt_name, in_name\n )\n session.run(\"git\", \"add\", txt_name)\n\n\n@nox.session(py=False)\ndef verify_cpp(session):\n if py.path.local.sysfind(\"clang-format\") is None:\n session.skip(\"`clang-format` must be installed\")\n if py.path.local.sysfind(\"g++\") is None:\n session.skip(\"`g++` must be installed\")\n\n session.run(\n \"clang-format\",\n \"-i\",\n \"-style=file\",\n os.path.join(\"src\", \"de_casteljau.cpp\"),\n os.path.join(\"src\", \"de_casteljau.hpp\"),\n os.path.join(\"src\", \"eft.cpp\"),\n os.path.join(\"src\", \"eft.hpp\"),\n os.path.join(\"scripts\", \"tests.cpp\"),\n )\n\n session.run(\n \"g++\",\n \"-std=c++11\",\n \"-O3\",\n \"-march=native\",\n \"-o\",\n \"main\",\n os.path.join(\"scripts\", \"tests.cpp\"),\n os.path.join(\"src\", \"de_casteljau.cpp\"),\n os.path.join(\"src\", \"eft.cpp\"),\n \"-I\",\n \"src\",\n )\n main_exe = os.path.join(\".\", \"main\")\n session.run(os.system, main_exe)\n session.run(os.remove, main_exe)\n\n\n@nox.session(py=False)\ndef verify_c(session):\n if py.path.local.sysfind(\"clang-format\") is None:\n session.skip(\"`clang-format` must be installed\")\n if py.path.local.sysfind(\"gcc\") is None:\n session.skip(\"`gcc` must be installed\")\n\n session.run(\n \"clang-format\",\n \"-i\",\n 
\"-style=file\",\n os.path.join(\"src\", \"de_casteljau.c\"),\n os.path.join(\"src\", \"de_casteljau.h\"),\n os.path.join(\"src\", \"eft.c\"),\n os.path.join(\"src\", \"eft.h\"),\n os.path.join(\"scripts\", \"tests.c\"),\n )\n\n session.run(\n \"gcc\",\n \"-std=c99\",\n \"-O3\",\n \"-march=native\",\n \"-o\",\n \"main\",\n os.path.join(\"scripts\", \"tests.c\"),\n os.path.join(\"src\", \"de_casteljau.c\"),\n os.path.join(\"src\", \"eft.c\"),\n \"-I\",\n \"src\",\n \"-lm\",\n )\n main_exe = os.path.join(\".\", \"main\")\n session.run(os.system, main_exe)\n session.run(os.remove, main_exe)\n", "id": "8589351", "language": "Python", "matching_score": 6.583189487457275, "max_stars_count": 2, "path": "noxfile.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a configuration file for running ``nox`` on this project.\n\nTo determine the supported actions run ``nox --list-sessions`` from the\nproject root.\n\"\"\"\n\nimport os\n\nimport nox\nimport py.path\n\n\nNOX_DIR = os.path.abspath(os.path.dirname(__file__))\nSINGLE_INTERP = \"python3.6\"\n\n\ndef get_path(*names):\n return os.path.join(NOX_DIR, *names)\n\n\nclass Remove(object):\n def __init__(self, prefix, extensions):\n self.prefix = prefix\n self.extensions = extensions\n\n def __call__(self):\n for extension in self.extensions:\n path = \"{}.{}\".format(self.prefix, extension)\n os.remove(path)\n\n\ndef build_tex_file(\n session, base, new_id, extensions=(), with_bibtex=False, use_xelatex=False\n):\n # NOTE: This assumes that ``session.chdir(get_path('doc'))``\n # has been called.\n modify_id = get_path(\"scripts\", \"modify_pdf_id.py\")\n\n if use_xelatex:\n session.run(\"xelatex\", base)\n session.run(\"xelatex\", base)\n elif with_bibtex:\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n else:\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n\n path = get_path(\"doc\", base)\n remove = Remove(path, extensions)\n session.run(remove)\n\n if not use_xelatex:\n session.run(\"python\", modify_id, \"--base\", path, \"--id\", new_id)\n\n\n@nox.session\ndef build_tex(session):\n session.interpreter = SINGLE_INTERP\n\n if py.path.local.sysfind(\"pdflatex\") is None:\n session.skip(\"`pdflatex` must be installed\")\n\n if py.path.local.sysfind(\"xelatex\") is None:\n session.skip(\"`xelatex` must be installed\")\n\n if py.path.local.sysfind(\"bibtex\") is None:\n session.skip(\"`bibtex` must be installed\")\n\n # No need to create a virtualenv.\n session.virtualenv = False\n\n session.chdir(get_path(\"doc\"))\n\n build_tex_file(\n session,\n \"thesis\",\n \"55008F4EDC13ADFCEFED89CA0A359ACD\",\n extensions=(\"aux\", \"bbl\", \"blg\", \"lof\", \"log\", \"lot\", \"out\", \"toc\"),\n with_bibtex=True,\n )\n\n build_tex_file(\n session,\n \"approval_page\",\n \"73E1E12D3FFF1C2BBD94B849733BF55A\",\n 
extensions=(\"aux\", \"log\", \"out\"),\n )\n\n extras = (\n \"abstract\",\n \"algorithms\",\n \"bezier-intersection\",\n \"compensated-newton\",\n \"conclusion\",\n \"introduction\",\n \"k-compensated\",\n \"metadata\",\n \"preliminaries\",\n \"proofs\",\n \"solution-transfer\",\n )\n for extra in extras:\n session.run(Remove(extra, (\"aux\",)))\n\n build_tex_file(\n session,\n \"tikz_local_err\",\n \"61C99C3315FAE74F6F2E4EEAB3E4D3AA\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"tikz_filtration\",\n \"5AD16E27EBA16C57CF11C93F5CE4D079\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"tikz_shape_fns1\",\n \"B5234F7B23999E2560401FE20167B7C8\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"tikz_shape_fns2\",\n \"8C88A92F10A23D28C060D88CF0CE94A0\",\n extensions=(\"aux\", \"log\", \"out\"),\n )\n\n build_tex_file(\n session,\n \"thesis_talk\",\n \"6A945FD7D33437399D0EB8EC77533E6C\",\n extensions=(\"aux\", \"log\", \"nav\", \"out\", \"snm\", \"toc\"),\n use_xelatex=True,\n )\n\n\n@nox.session\ndef make_images(session):\n session.interpreter = SINGLE_INTERP\n # Install all dependencies.\n session.install(\"--requirement\", \"make-images-requirements.txt\")\n # Run the script(s).\n # Make sure\n # - Custom ``matplotlibrc`` is used\n # - Code in ``src/`` is importable\n # - PDFs have deterministic ``CreationDate``\n env = {\n \"MATPLOTLIBRC\": get_path(\"images\"),\n \"PYTHONPATH\": get_path(\"src\"),\n \"SOURCE_DATE_EPOCH\": \"0\",\n }\n script_paths = (\n (\"bezier-intersection\", \"locate_in_triangle.py\"),\n (\"bezier-intersection\", \"subdivision.py\"),\n (\"compensated-newton\", \"almost_tangent.py\"),\n (\"compensated-newton\", \"jghplus13.py\"),\n (\"compensated-newton\", \"newton_de_casteljau.py\"),\n (\"compensated-newton\", \"root_plots.py\"),\n (\"compensated-newton\", \"tangent_intersection.py\"),\n (\"k-compensated\", \"compensated_insufficient.py\"),\n (\"k-compensated\", \"error_against_cond.py\"),\n (\"k-compensated\", \"horner_inferior.py\"),\n (\"k-compensated\", \"smooth_drawing.py\"),\n (\"preliminaries\", \"inverted_element.py\"),\n (\"slides\", \"curved_vs_straight.py\"),\n (\"slides\", \"distort.py\"),\n (\"slides\", \"element_distortion.py\"),\n (\"slides\", \"error_against_cond.py\"),\n (\"slides\", \"inverted_element.py\"),\n (\"slides\", \"newton_de_casteljau.py\"),\n (\"slides\", \"polygon_vs_curved.py\"),\n (\"slides\", \"tangent_intersection.py\"),\n (\"solution-transfer\", \"distort.py\"),\n (\"solution-transfer\", \"polygon_vs_curved.py\"),\n (\"solution-transfer\", \"simple_transport.py\"),\n )\n for segments in script_paths:\n script = get_path(\"scripts\", *segments)\n session.run(\"python\", script, env=env)\n\n\n@nox.session\ndef update_requirements(session):\n session.interpreter = SINGLE_INTERP\n\n if py.path.local.sysfind(\"git\") is None:\n session.skip(\"`git` must be installed\")\n\n # Install all dependencies.\n session.install(\"pip-tools\")\n\n # Update all of the requirements file(s).\n names = (\"make-images\",)\n for name in names:\n in_name = \"{}-requirements.in\".format(name)\n txt_name = \"{}-requirements.txt\".format(name)\n session.run(\n \"pip-compile\", \"--upgrade\", \"--output-file\", txt_name, in_name\n )\n session.run(\"git\", \"add\", txt_name)\n", "id": "2543446", "language": "Python", "matching_score": 5.202389717102051, "max_stars_count": 2, "path": "nox.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a configuration file for running ``nox`` on this project.\n\nTo determine the supported actions run ``nox --list-sessions`` from the\nproject root.\n\"\"\"\n\nimport os\n\nimport nox\nimport py.path\n\n\nNOX_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_path(*names):\n return os.path.join(NOX_DIR, *names)\n\n\nclass Remove(object):\n def __init__(self, prefix, extensions):\n self.prefix = prefix\n self.extensions = extensions\n\n def __call__(self):\n for extension in self.extensions:\n path = \"{}.{}\".format(self.prefix, extension)\n os.remove(path)\n\n\ndef build_tex_file(session, base, new_id, extensions=()):\n # NOTE: This assumes that ``session.chdir(get_path('doc'))``\n # has been called.\n modify_id = get_path(\"scripts\", \"modify_pdf_id.py\")\n\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"bibtex\", base)\n session.run(\"pdflatex\", base)\n session.run(\"pdflatex\", base)\n\n path = get_path(\"doc\", base)\n remove = Remove(path, extensions)\n session.run(remove)\n session.run(\"python\", modify_id, \"--base\", path, \"--id\", new_id)\n\n\n@nox.session(py=False)\ndef build_tex(session):\n if py.path.local.sysfind(\"pdflatex\") is None:\n session.skip(\"`pdflatex` must be installed\")\n\n if py.path.local.sysfind(\"bibtex\") is None:\n session.skip(\"`bibtex` must be installed\")\n\n session.chdir(get_path(\"doc\"))\n\n build_tex_file(\n session,\n \"paper\",\n \"608646FC6AEA50B9AB4A218D45189E6A\",\n extensions=(\"aux\", \"bbl\", \"blg\", \"log\", \"out\", \"spl\"),\n )\n", "id": "2396522", "language": "Python", "matching_score": 1.9100189208984375, "max_stars_count": 0, "path": "noxfile.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport errno\nimport os\nimport shutil\nimport subprocess\n\nimport nox\nimport psutil\nimport py.path\n\n\nnox.options.error_on_external_run = True\n\nDEFAULT_INTERPRETER = \"3.7\"\nPRINT_SEP = \"=\" * 60\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nINPUT_DIR = os.path.join(BASE_DIR, \"content\")\nOUTPUT_DIR = os.path.join(BASE_DIR, \"output\")\nCONF_FILE = os.path.join(BASE_DIR, \"pelicanconf.py\")\nALT_CONF_FILE = os.path.join(BASE_DIR, \"pelicanconf_with_pagination.py\")\nDEBUG = \"DEBUG\" in os.environ\nPORT = os.environ.get(\"PORT\")\n\n\ndef get_path(*names):\n return os.path.join(BASE_DIR, *names)\n\n\ndef _render(session, env=None):\n # I will typically run this via\n # 
PATH=\"${PATH}:${HOME}/.nodenv/versions/${VERSION}/bin\" nox -s render\n # because I don't have a ``node`` executable on my default ``${PATH}``.\n if py.path.local.sysfind(\"node\") is None:\n session.skip(\"`node` must be installed\")\n if py.path.local.sysfind(\"npm\") is None:\n session.skip(\"`npm` must be installed\")\n\n session.run(\"npm\", \"install\", external=True)\n script = get_path(\"render_jinja2_templates.py\")\n session.run(\"python\", script, env=env)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef render(session):\n \"\"\"Render blog posts from templates.\n\n If the post has already been rendered, this will check the file hash against\n a stored mapping of hashes and do nothing if confirmed.\n \"\"\"\n session.install(\"--requirement\", \"render-requirements.txt\")\n _render(session)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef rerender(session):\n \"\"\"Re-render blog posts from templates.\"\"\"\n session.install(\"--requirement\", \"render-requirements.txt\")\n _render(session, env={\"FORCE_RENDER\": \"true\"})\n\n\ndef _generate(\n session, pelican_opts, regenerate=False, conf_file=CONF_FILE, env=None\n):\n args = [os.path.join(session.bin, \"pelican\")]\n if regenerate:\n args.append(\"-r\")\n args.extend([INPUT_DIR, \"-o\", OUTPUT_DIR, \"-s\", conf_file])\n args.extend(pelican_opts)\n session.run(*args, env=env)\n\n\ndef get_pelican_opts():\n pelican_opts = []\n if DEBUG:\n pelican_opts.append(\"-D\")\n return pelican_opts\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef html(session):\n \"\"\"(Re)-generate the web site.\"\"\"\n pelican_opts = get_pelican_opts()\n session.install(\"--requirement\", \"html-requirements.txt\")\n\n # 1. Render\n print(\"Rendering templates...\")\n print(PRINT_SEP)\n _render(session)\n print(PRINT_SEP)\n # 2. Build HTML with paging.\n print(\"Making first pass with paging\")\n print(PRINT_SEP)\n env = {\"PYTHONPATH\": get_path()}\n _generate(session, pelican_opts, conf_file=ALT_CONF_FILE, env=env)\n print(PRINT_SEP)\n # 3. Keep around the paged index files and nothing else.\n print(\"Storing paging index*.html files for re-use\")\n print(\" and removing paged output.\")\n print(PRINT_SEP)\n index_files = glob.glob(os.path.join(OUTPUT_DIR, \"index*.html\"))\n for filename in index_files:\n session.run(shutil.move, filename, BASE_DIR)\n session.run(shutil.rmtree, OUTPUT_DIR, ignore_errors=True)\n print(PRINT_SEP)\n # 4. Build HTML without paging.\n print(\"Making second pass without paging\")\n print(PRINT_SEP)\n _generate(session, pelican_opts, env=env)\n print(PRINT_SEP)\n # 5. Add back paging information.\n print(\"Putting back paging index*.html files\")\n print(PRINT_SEP)\n session.run(os.remove, os.path.join(OUTPUT_DIR, \"index.html\"))\n index_files = glob.glob(os.path.join(BASE_DIR, \"index*.html\"))\n for filename in index_files:\n session.run(shutil.move, filename, OUTPUT_DIR)\n print(PRINT_SEP)\n # 6. Delete generated pages that are unused\n print(\"Removing unwanted pages\")\n print(PRINT_SEP)\n session.run(remove_file, os.path.join(OUTPUT_DIR, \"authors.html\"))\n session.run(\n shutil.rmtree, os.path.join(OUTPUT_DIR, \"author\"), ignore_errors=True\n )\n session.run(remove_file, os.path.join(OUTPUT_DIR, \"categories.html\"))\n session.run(\n shutil.rmtree, os.path.join(OUTPUT_DIR, \"category\"), ignore_errors=True\n )\n session.run(remove_file, os.path.join(OUTPUT_DIR, \"tags.html\"))\n print(PRINT_SEP)\n # 7. 
Rewrite URL paths for the pagination feature.\n print(\"Rewriting paths for paging index*.html files.\")\n print(PRINT_SEP)\n script = get_path(\"rewrite_custom_pagination.py\")\n session.run(\"python\", script)\n print(PRINT_SEP)\n\n\ndef remove_file(filename):\n try:\n os.remove(filename)\n except OSError as exc:\n # errno.ENOENT = no such file or directory\n if exc.errno != errno.ENOENT:\n raise\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef regenerate(session):\n \"\"\"Regenerate files upon modification.\n\n This runs a daemon that waits on file changes and updates generated\n content when files are updated.\n \"\"\"\n pelican_opts = get_pelican_opts()\n session.install(\"--requirement\", \"html-requirements.txt\")\n\n env = {\"PYTHONPATH\": get_path()}\n _generate(session, pelican_opts, regenerate=True, env=env)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef serve(session):\n \"\"\"\"Serve site at http://localhost:${PORT}'.\"\"\"\n script = get_path(\"pelican_server.py\")\n session.cd(OUTPUT_DIR)\n if PORT is None:\n session.run(\"python\", script)\n else:\n session.run(\"python\", script, PORT)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef serve_local(session):\n \"\"\"Serve at http://192.168.XX.YY:8001.\"\"\"\n script = get_path(\"get_local_ip.py\")\n local_ip = session.run(\"python\", script, silent=True)\n script = get_path(\"pelican_server.py\")\n\n session.cd(OUTPUT_DIR)\n # ``root`` doesn't know about our virtualenv.\n py_exe = os.path.join(session.bin, \"python\")\n session.run(py_exe, script, \"8001\", local_ip.strip())\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef dev_server(session):\n \"\"\"Start / restart ``develop_server.sh``.\n\n Uses ``${PORT}`` environment variable.\n \"\"\"\n script = get_path(\"develop_server.sh\")\n if PORT is None:\n session.run(script, \"restart\")\n else:\n session.run(script, \"restart\", PORT)\n\n\ndef get_pelican_pid():\n try:\n with open(get_path(\"pelican.pid\"), \"r\") as fh:\n return int(fh.read())\n except (OSError, ValueError):\n return None\n\n\ndef get_srv_pid():\n try:\n with open(get_path(\"srv.pid\"), \"r\") as fh:\n return int(fh.read())\n except (OSError, ValueError):\n return None\n\n\n@nox.session(py=False)\ndef stop_server(session):\n \"\"\"Stop local server.\"\"\"\n pelican_pid = session.run(get_pelican_pid)\n srv_pid = session.run(get_srv_pid)\n if pelican_pid is None:\n if srv_pid is None:\n session.error(\"`pelican.pid` and `srv.pid` files invalid\")\n else:\n session.error(\"`pelican.pid` file invalid\")\n if srv_pid is None:\n session.error(\"srv.pid` file invalid\")\n\n pelican_proc = psutil.Process(pelican_pid)\n srv_proc = psutil.Process(srv_pid)\n session.run(pelican_proc.kill)\n session.run(srv_proc.kill)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef update_requirements(session):\n if py.path.local.sysfind(\"git\") is None:\n session.skip(\"`git` must be installed\")\n\n # Install all dependencies.\n session.install(\"pip-tools\")\n\n # Update all of the requirements file(s).\n names = (\"render\", \"html\")\n for name in names:\n in_name = \"{}-requirements.in\".format(name)\n txt_name = \"{}-requirements.txt\".format(name)\n session.run(\"rm\", \"-f\", txt_name, external=True)\n session.run(\n \"pip-compile\",\n \"--generate-hashes\",\n \"--output-file\",\n txt_name,\n in_name,\n )\n session.run(\"git\", \"add\", txt_name, external=True)\n\n\n@nox.session(python=DEFAULT_INTERPRETER)\ndef blacken(session):\n session.install(\"black\")\n file_list_str = subprocess.check_output([\"git\", \"ls-files\", \"*.py\"])\n 
file_list = file_list_str.decode(\"ascii\").strip().split(\"\\n\")\n session.run(\"black\", \"--line-length=79\", *file_list)\n\n\n@nox.session(py=False)\ndef clean(session):\n \"\"\"Remove the generated files.\"\"\"\n dir_paths = (\n OUTPUT_DIR,\n get_path(\"__pycache__\"),\n get_path(\"node_modules\"),\n get_path(\"pelican-plugins\", \"__pycache__\"),\n )\n for dir_path in dir_paths:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n", "id": "12834101", "language": "Python", "matching_score": 2.5926809310913086, "max_stars_count": 1, "path": "noxfile.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\nimport pathlib\n\n\nnox.options.error_on_external_run = True\nDEFAULT_INTERPRETER = \"3.7\"\nHERE = pathlib.Path(__file__).resolve().parent\n\n\ndef get_path(*parts, relative=True):\n full_path = parts\n if not relative:\n full_path = HERE.parts + parts\n return str(pathlib.Path(*full_path))\n\n\n@nox.session(py=[DEFAULT_INTERPRETER])\ndef unit(session):\n \"\"\"Run unit tests.\"\"\"\n # Install all dependencies.\n session.install(\"--upgrade\", \"pytest\")\n # Install this package.\n session.install(\"--upgrade\", \".\")\n\n # Run pytest against the unit tests.\n run_args = [\"pytest\"] + session.posargs + [get_path(\"tests\", \"unit\")]\n session.run(*run_args)\n\n\n@nox.session(py=DEFAULT_INTERPRETER)\ndef generate_pb(session):\n \"\"\"(Re)-generate ``*_pb2.py`` files from ``protobuf`` definitions.\"\"\"\n # Install all dependencies.\n session.install(\"--upgrade\", \"grpcio-tools\")\n # Generate the ``*_pb2.py`` files.\n src_dest = get_path(\"_grpc\", relative=False)\n session.run(\n \"python\",\n \"-m\",\n \"grpc_tools.protoc\",\n \"-I\" + get_path(\"_grpc\"),\n \"--python_out\",\n src_dest,\n \"--grpc_python_out\",\n src_dest,\n get_path(\"_grpc\", \"users.proto\"),\n )\n", "id": "5118090", "language": "Python", "matching_score": 0.2331606149673462, "max_stars_count": 0, "path": "noxfile.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Perform Newton's iteration to find roots.\n\nThis uses the de Casteljau algorithm to evaluate both :math:`p(s)` and\n:math:`p'(s)`.\n\nThis script in particular uses :math:`p(s) = (1 - 5s)^n + 2^{30} (1 - 3s)^n`\nwhich has :math:`\\widetilde{p}(s) = (1 + 3s)^n + 2^{30} (1 + s)^n` when\n:math:`n` is odd.\n\"\"\"\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport mpmath\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\nU = fractions.Fraction(1, 2 ** 53)\nRHS = 
2.0 ** 30\nALPHA = 0.25\n\n\ndef get_coeffs(n):\n r\"\"\"Get the coefficients of a specific polynomial.\n\n When :math:`p(s) = (1 - 5s)^n + 2^{30} (1 - 3s)^n`, the coefficients\n are :math:`b_j = (-4)^j + 2^{30} (-2)^j` since\n :math:`1 - 5s = (1 - s) - 4s` and :math:`1 - 2s = (1 - s) - 2s`.\n\n It's worth noting that these terms can be represented exactly in\n floating point when :math:`|j - 30| <= 52`.\n \"\"\"\n coeffs = []\n for j in range(n + 1):\n coeff = (-4.0) ** j + RHS * (-2.0) ** j\n coeffs.append(coeff)\n\n return tuple(coeffs)\n\n\ndef find_best_solution(n, ctx):\n \"\"\"Finds :math:`(1 + 2^{n/d}) / (5 + 3 (2^{n/d}))` to highest precision.\"\"\"\n highest = 500\n w = ctx.root(RHS, n) - 1\n return (2 + w) / (8 + 3 * w)\n\n\ndef condition_number(n, root):\n r\"\"\"Compute the condition number of :math:`p(s)` at a root.\n\n When :math:`p(s) = (1 - 5s)^n + 2^{30} (1 - 3s)^n` is written in the\n Bernstein basis, we have :math:`\\widetilde{p}(s) = (1 + 3s)^n +\n 2^{30} (1 + s)^n`.\n \"\"\"\n if not 0 <= root <= 1:\n raise ValueError(\"Expected root in unit interval.\", root)\n p_tilde = (1 + 3 * root) ** n + RHS * (1 + root) ** n\n dp = -5 * n * (1 - 5 * root) ** (n - 1) - RHS * 3 * n * (1 - 3 * root) ** (\n n - 1\n )\n return plot_utils.to_float(abs(p_tilde / (root * dp)))\n\n\ndef root_info(n, ctx):\n coeffs = get_coeffs(n)\n root = find_best_solution(n, ctx)\n cond = condition_number(n, root)\n # We know the root is in [1/4, 1/3] so this starts outside that interval.\n s0 = 0.5\n s1_converged = de_casteljau.basic_newton(s0, coeffs)\n s2_converged = de_casteljau.accurate_newton(s0, coeffs)\n s3_converged = de_casteljau.full_newton(s0, coeffs)\n\n rel_error1 = plot_utils.to_float(abs((s1_converged - root) / root))\n rel_error2 = plot_utils.to_float(abs((s2_converged - root) / root))\n rel_error3 = plot_utils.to_float(abs((s3_converged - root) / root))\n if rel_error1 == 0:\n raise RuntimeError(\n \"Unexpected error for basic Newton.\", n, root, s1_converged\n )\n if rel_error2 == 0:\n raise RuntimeError(\n \"Unexpected error for accurate Newton.\", n, root, s2_converged\n )\n if rel_error3 == 0:\n raise RuntimeError(\n \"Unexpected error for full Newton.\", n, root, s3_converged\n )\n\n return cond, rel_error1, rel_error2, rel_error3\n\n\ndef get_bounds(values):\n # NOTE: This assumes ``values`` is positive.\n min_exp = np.log10(min(values))\n max_exp = np.log10(max(values))\n min_result = np.floor(min_exp)\n if min_exp - min_result < 0.5:\n min_result -= 1.0\n max_result = np.ceil(max_exp)\n if max_result - max_exp < 0.5:\n max_result += 1.0\n\n return 10.0 ** min_result, 10.0 ** max_result\n\n\ndef main():\n ctx = mpmath.MPContext()\n ctx.prec = 500\n\n cond_nums = []\n rel_errors_basic = []\n rel_errors_accurate = []\n rel_errors_full = []\n bounds_vals = []\n for n in range(1, 71 + 2, 2):\n cond, rel_error1, rel_error2, rel_error3 = root_info(n, ctx)\n cond_nums.append(cond)\n rel_errors_basic.append(rel_error1)\n rel_errors_accurate.append(rel_error2)\n rel_errors_full.append(rel_error3)\n gamma2n = (2 * n * U) / (1 - 2 * n * U)\n bounds_vals.append(float(gamma2n * cond))\n\n figure = plt.figure()\n ax = figure.gca()\n ax.loglog(\n cond_nums,\n rel_errors_basic,\n marker=\"d\",\n linestyle=\"none\",\n markersize=7,\n color=plot_utils.BLUE,\n zorder=2,\n label=r\"$\\mathtt{DNewtonBasic}$\",\n )\n ax.loglog(\n cond_nums,\n rel_errors_accurate,\n marker=\"o\",\n linestyle=\"none\",\n markersize=3,\n color=\"black\",\n zorder=2,\n label=r\"$\\mathtt{DNewtonAccurate}$\",\n )\n # 
H/T: (http://widu.tumblr.com/post/43624348228/\n # making-unfilled-hollow-markers-in-matplotlib)\n ax.loglog(\n cond_nums,\n rel_errors_full,\n marker=\"o\",\n linestyle=\"none\",\n markersize=6,\n markeredgewidth=1,\n markerfacecolor=\"none\",\n color=plot_utils.GREEN,\n zorder=2,\n label=r\"$\\mathtt{DNewtonFull}$\",\n )\n # Add the error lines.\n ax.loglog(\n cond_nums,\n bounds_vals,\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``x = 1/u^k`` vertical lines.\n min_x, max_x = get_bounds(cond_nums)\n min_y, max_y = get_bounds(\n rel_errors_basic + rel_errors_accurate + rel_errors_full\n )\n delta_y = max_y - min_y\n for exponent in (1, 2, 3):\n u_inv = 1.0 / float(U) ** exponent\n ax.loglog(\n [u_inv, u_inv],\n [min_y - 0.05 * delta_y, max_y + 0.05 * delta_y],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``y = 1`` and ``y = u`` horizontal lines.\n for exponent in (0, 1):\n u_pow = float(U) ** exponent\n ax.loglog(\n [min_x, max_x],\n [u_pow, u_pow],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Set the axis limits.\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(min_y, max_y)\n # Set \"nice\" ticks.\n ax.set_xticks([10.0 ** n for n in range(0, 50 + 10, 10)])\n ax.set_yticks([10.0 ** n for n in range(-16, 0 + 4, 4)])\n # Set special ``xticks`` for ``1/u^k``.\n u_xticks = [1.0 / float(U), 1.0 / float(U) ** 2, 1.0 / float(U) ** 3]\n u_xticklabels = [\n r\"$1/\\mathbf{u}$\",\n r\"$1/\\mathbf{u}^2$\",\n r\"$1/\\mathbf{u}^3$\",\n ]\n ax.set_xticks(u_xticks, minor=True)\n ax.set_xticklabels(u_xticklabels, minor=True)\n ax.tick_params(\n axis=\"x\",\n which=\"minor\",\n direction=\"out\",\n top=1,\n bottom=0,\n labelbottom=0,\n labeltop=1,\n )\n # Set special ``yticks`` for ``u`` and ``1``.\n ax.set_yticks([float(U), 1.0], minor=True)\n ax.set_yticklabels([r\"$\\mathbf{u}$\", \"$1$\"], minor=True)\n ax.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n # Label the axes.\n ax.set_xlabel(\"Condition Number\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"Relative Forward Error\", fontsize=plot_utils.TEXT_SIZE)\n # Add the legend.\n ax.legend(\n loc=\"lower right\",\n framealpha=1.0,\n frameon=True,\n fontsize=plot_utils.TEXT_SIZE,\n )\n\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n figure.set_size_inches(5.4, 4.0)\n figure.subplots_adjust(\n left=0.12, bottom=0.11, right=0.95, top=0.92, wspace=0.2, hspace=0.2\n )\n filename = \"newton_de_casteljau.pdf\"\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "10667223", "language": "Python", "matching_score": 8.951924324035645, "max_stars_count": 2, "path": "scripts/compensated-newton/newton_de_casteljau.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nr\"\"\"Perform Newton's iteration to find roots.\n\nThis uses Horner's algorithm to evaluate both :math:`p(s)` and\n:math:`p'(s)`.\n\nThis script in particular uses :math:`p(x) = (x - 1)^n - 2^{-31}` which\nhas :math:`\\widetilde{p}(s) = (x + 1)^n - (-1)^n 2^{-31}`.\n\"\"\"\n\nimport collections\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport mpmath\n\nimport horner\nimport plot_utils\n\n\nU = fractions.Fraction(1, 2 ** 53)\nALPHA = 0.25\n\n\ndef get_coeffs(n):\n r\"\"\"Get the coefficients of a specific polynomial.\n\n We have :math:`p(x) = (x - 1)^n - 2^{-31}` and the coefficients\n are :math:`a_0 = (-1)^{n} - 2^{-31}` and\n :math:`a_j = \\binom{n}{j} (-1)^{n - j}`.\n \"\"\"\n coeffs = []\n for j in range(n + 1):\n coeff = plot_utils.binomial(n, j) * (-1.0) ** j\n if j == n:\n coeff -= 0.5 ** 31\n coeffs.append(coeff)\n\n return tuple(coeffs)\n\n\ndef find_best_solution(n):\n \"\"\"Finds :math:`1 + 2^{-31/n}` to highest precision.\"\"\"\n highest = 500\n root = None\n counter = collections.Counter()\n for precision in range(100, highest + 20, 20):\n ctx = mpmath.MPContext()\n ctx.prec = precision\n\n value = 1 + ctx.root(0.5 ** 31, n)\n if precision == highest:\n root = value\n value = plot_utils.to_float(value)\n counter[value] += 1\n\n if len(counter) != 1:\n raise ValueError(\"Expected only one value.\")\n return root\n\n\ndef condition_number(n, root):\n r\"\"\"Compute the condition number of :math:`p(x)` at a root.\n\n When :math:`p(x) = (x - 1)^n - 2^{-31}` is written in the monomial basis,\n we have :math:`\\widetilde{p}(x) = (x + 1)^n - (-1)^n 2^{-31}`.\n \"\"\"\n if root <= 0:\n raise ValueError(\"Expected positive root.\", root)\n p_tilde = (root + 1) ** n - (-1) ** n * 0.5 ** 31\n dp = n * (root - 1) ** (n - 1)\n return plot_utils.to_float(abs(p_tilde / (root * dp)))\n\n\ndef root_info(n):\n coeffs = get_coeffs(n)\n root = find_best_solution(n)\n cond = condition_number(n, root)\n # We know the root is in [1, 2) so this starts outside that interval.\n x1_converged = horner.basic_newton(2.0, coeffs)\n x2_converged = horner.accurate_newton(2.0, coeffs)\n x3_converged = horner.full_newton(2.0, coeffs)\n\n rel_error1 = plot_utils.to_float(abs((x1_converged - root) / root))\n rel_error2 = plot_utils.to_float(abs((x2_converged - root) / root))\n rel_error3 = plot_utils.to_float(abs((x3_converged - root) / root))\n if rel_error1 == 0:\n raise RuntimeError(\"Unexpected error for basic Newton.\")\n if rel_error2 == 0:\n raise RuntimeError(\"Unexpected error for accurate Newton.\")\n if rel_error3 == 0:\n raise RuntimeError(\"Unexpected error for full Newton.\")\n\n return cond, rel_error1, rel_error2, rel_error3\n\n\ndef main(filename=None):\n cond_nums = []\n rel_errors_basic = []\n rel_errors_accurate = []\n rel_errors_full = []\n bounds_vals1 = []\n bounds_vals2 = []\n for n in range(2, 55 + 1):\n cond, rel_error1, rel_error2, rel_error3 = root_info(n)\n cond_nums.append(cond)\n rel_errors_basic.append(rel_error1)\n rel_errors_accurate.append(rel_error2)\n rel_errors_full.append(rel_error3)\n gamma2n = (2 * n * U) / (1 - 2 * n * U)\n bounds_vals1.append(float(10 * gamma2n * cond))\n bounds_vals2.append(float(6 * gamma2n ** 2 * cond))\n\n figure = plt.figure()\n ax = figure.gca()\n ax.loglog(\n cond_nums,\n rel_errors_basic,\n marker=\"d\",\n linestyle=\"none\",\n markersize=7,\n color=plot_utils.BLUE,\n zorder=2,\n label=r\"$\\mathtt{HNewtonBasic}$\",\n )\n ax.loglog(\n cond_nums,\n rel_errors_accurate,\n 
marker=\"o\",\n linestyle=\"none\",\n markersize=3,\n color=\"black\",\n zorder=2,\n label=r\"$\\mathtt{HNewtonAccurate}$\",\n )\n # H/T: (http://widu.tumblr.com/post/43624348228/\n # making-unfilled-hollow-markers-in-matplotlib)\n ax.loglog(\n cond_nums,\n rel_errors_full,\n marker=\"o\",\n linestyle=\"none\",\n markersize=6,\n markeredgewidth=1,\n markerfacecolor=\"none\",\n color=plot_utils.GREEN,\n zorder=2,\n label=r\"$\\mathtt{HNewtonFull}$\",\n )\n # Add the error lines.\n ax.loglog(\n cond_nums,\n bounds_vals1,\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n cond_nums,\n bounds_vals2,\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``x = 1/u^k`` vertical lines.\n min_x, max_x = 1e4, 1e33\n min_y, max_y = 1e-19, 1e2\n delta_y = max_y - min_y\n for exponent in (1, 2):\n u_inv = 1.0 / float(U) ** exponent\n ax.loglog(\n [u_inv, u_inv],\n [min_y - 0.05 * delta_y, max_y + 0.05 * delta_y],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``y = 1`` and ``y = u`` horizontal lines.\n for exponent in (0, 1):\n u_pow = float(U) ** exponent\n ax.loglog(\n [min_x, max_x],\n [u_pow, u_pow],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Set the axis limits.\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(min_y, max_y)\n # Set the major x- and y-ticks.\n ax.set_xticks([1e5, 1e10, 1e15, 1e20, 1e25, 1e30])\n ax.set_yticks(\n [1e-18, 1e-16, 1e-14, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2, 1]\n )\n # Set special ``xticks`` for ``1/u`` and ``1/u^2``.\n u_xticks = [1.0 / float(U), 1.0 / float(U) ** 2]\n u_xticklabels = [r\"$1/\\mathbf{u}$\", r\"$1/\\mathbf{u}^2$\"]\n ax.set_xticks(u_xticks, minor=True)\n ax.set_xticklabels(u_xticklabels, minor=True)\n ax.tick_params(\n axis=\"x\",\n which=\"minor\",\n direction=\"out\",\n top=1,\n bottom=0,\n labelbottom=0,\n labeltop=1,\n )\n # Set special ``yticks`` for ``u`` and ``1``.\n ax.set_yticks([float(U), 1.0], minor=True)\n ax.set_yticklabels([r\"$\\mathbf{u}$\", \"$1$\"], minor=True)\n ax.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n # Label the axes.\n ax.set_xlabel(\"Condition Number\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"Relative Forward Error\", fontsize=plot_utils.TEXT_SIZE)\n # Add the legend.\n ax.legend(\n loc=\"lower right\",\n framealpha=1.0,\n frameon=True,\n fontsize=plot_utils.TEXT_SIZE,\n )\n\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n figure.set_size_inches(5.4, 4.0)\n figure.subplots_adjust(\n left=0.12, bottom=0.11, right=0.95, top=0.92, wspace=0.2, hspace=0.2\n )\n filename = \"newton_jghplus13.pdf\"\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "9079602", "language": "Python", "matching_score": 5.2691426277160645, "max_stars_count": 2, "path": "scripts/compensated-newton/jghplus13.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Perform plots of relative error against the condition number.\n\nThis uses :math:`p(s) = (s - 1) \\left(s - \\frac{3}{4}\\right)^7` which\nhas :math:`\\widetilde{p}(s) = (s - 1) \\left(\\frac{s}{2} -\n\\frac{3}{4}\\right)^7`.\n\"\"\"\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\nF = fractions.Fraction\nU = F(1, 2 ** 53)\n# p(s) = (s - 1) (s - 3/4)^7\nBEZIER_COEFFS = (\n 2187.0 / 16384.0,\n -5103.0 / 131072.0,\n 729.0 / 65536.0,\n -405.0 / 131072.0,\n 27.0 / 32768.0,\n -27.0 / 131072.0,\n 3.0 / 65536.0,\n -1.0 / 131072.0,\n 0.0,\n)\nROOT = 0.75\nPOWER_VAL = 1.3\nALPHA = 0.25\n\n\ndef main():\n n = 8\n gamma2n = (2 * n * U) / (1 - 2 * n * U)\n bound_coeff1 = float(gamma2n)\n bound_coeff2 = 3 * n * (3 * n + 7) * U ** 2 / 2\n bound_coeff2 = float(bound_coeff2)\n bound_coeff3 = 3 * n * (3 * n ** 2 + 36 * n + 61) * U ** 3 / 2\n bound_coeff3 = float(bound_coeff3)\n bound_coeff4 = (\n 9 * n * (3 * n ** 3 + 102 * n ** 2 + 773 * n + 1122) * U ** 4 / 8\n )\n bound_coeff4 = float(bound_coeff4)\n\n cond_nums = []\n forward_errs1 = []\n forward_errs2 = []\n forward_errs3 = []\n forward_errs4 = []\n for j in range(-5, -90 - 1, -1):\n s = ROOT - POWER_VAL ** j\n exact_s = F(s)\n\n # Compute the condition number.\n exact_p = (exact_s - 1) * (4 * exact_s - 3) ** 7 / 16384\n # p_tilde(s) = SUM_j |b_j| B_{j, 8}(s) = (s - 1) (s/2 - 3/4)^7\n exact_p_tilde = (exact_s - 1) * (2 * exact_s - 3) ** 7 / 16384\n exact_cond = abs(exact_p_tilde / exact_p)\n cond_nums.append(float(exact_cond))\n\n # Compute the forward error for uncompensated de Casteljau.\n b, db, d2b, d3b = de_casteljau._compensated_k(s, BEZIER_COEFFS, 4)\n exact_b1 = F(b)\n exact_forward_err1 = abs((exact_b1 - exact_p) / exact_p)\n forward_errs1.append(float(exact_forward_err1))\n\n # Compute the forward error for compensated de Casteljau.\n b2 = b + db\n exact_b2 = F(b2)\n exact_forward_err2 = abs((exact_b2 - exact_p) / exact_p)\n forward_errs2.append(float(exact_forward_err2))\n\n # Compute the forward error for K-compensated de Casteljau (K=3).\n b3 = b2 + d2b\n exact_b3 = F(b3)\n exact_forward_err3 = abs((exact_b3 - exact_p) / exact_p)\n forward_errs3.append(float(exact_forward_err3))\n\n # Compute the forward error for K-compensated de Casteljau (K=3).\n b4 = b3 + d3b\n exact_b4 = F(b4)\n exact_forward_err4 = abs((exact_b4 - exact_p) / exact_p)\n forward_errs4.append(float(exact_forward_err4))\n\n # Set a tight ``x``-limit.\n min_exp = np.log(min(cond_nums))\n max_exp = np.log(max(cond_nums))\n delta_exp = max_exp - min_exp\n min_x = np.exp(min_exp - 0.01 * delta_exp)\n max_x = np.exp(max_exp + 0.01 * delta_exp)\n\n figure = plt.figure()\n ax = figure.gca()\n ax.loglog(\n cond_nums,\n forward_errs1,\n marker=\"v\",\n linestyle=\"none\",\n zorder=2,\n label=r\"$\\mathtt{DeCasteljau}$\",\n color=plot_utils.BLUE,\n )\n ax.loglog(\n cond_nums,\n forward_errs2,\n marker=\"d\",\n linestyle=\"none\",\n zorder=2,\n label=r\"$\\mathtt{CompDeCasteljau}$\",\n color=plot_utils.GREEN,\n )\n ax.loglog(\n cond_nums,\n forward_errs3,\n marker=\"P\",\n linestyle=\"none\",\n zorder=1.5, # Beneath ``K=2``.\n label=r\"$\\mathtt{CompDeCasteljau3}$\",\n color=plot_utils.RED,\n )\n ax.loglog(\n cond_nums,\n forward_errs4,\n marker=\"o\",\n linestyle=\"none\",\n zorder=1.25, # Beneath ``K=2, 3``.\n 
label=r\"$\\mathtt{CompDeCasteljau4}$\",\n color=plot_utils.PURPLE,\n )\n # Figure out the bounds before adding the bounding lines.\n min_y, max_y = ax.get_ylim()\n # Plot the lines of the a priori error bounds.\n ax.loglog(\n [min_x, max_x],\n [bound_coeff1 * min_x, bound_coeff1 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n [min_x, max_x],\n [bound_coeff2 * min_x, bound_coeff2 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n [min_x, max_x],\n [bound_coeff3 * min_x, bound_coeff3 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n [min_x, max_x],\n [bound_coeff4 * min_x, bound_coeff4 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``x = 1/u^k`` vertical lines.\n delta_y = max_y - min_y\n for exponent in (1, 2, 3, 4):\n u_inv = 1.0 / float(U) ** exponent\n ax.loglog(\n [u_inv, u_inv],\n [min_y - 0.05 * delta_y, max_y + 0.05 * delta_y],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``y = u`` horizontal lines.\n ax.loglog(\n [min_x, max_x],\n [float(U), float(U)],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n\n # Make sure the ``y``-limit stays set (the bounds lines exceed).\n ax.set_ylim(min_y, 1.0)\n ax.set_xlim(min_x, max_x)\n # Add the legend.\n ax.legend(\n loc=\"lower right\",\n framealpha=1.0,\n frameon=True,\n fontsize=plot_utils.TEXT_SIZE,\n )\n # Set \"nice\" ticks.\n ax.set_xticks([10.0 ** n for n in range(5, 65 + 10, 10)])\n ax.set_yticks([10.0 ** n for n in range(-18, 0 + 2, 2)])\n # Set special ``xticks`` for ``1/u^k``.\n u_xticks = []\n u_xticklabels = []\n for exponent in (1, 2, 3, 4):\n u_xticks.append(1.0 / float(U) ** exponent)\n if exponent == 1:\n u_xticklabels.append(r\"$1/\\mathbf{u}$\")\n else:\n u_xticklabels.append(r\"$1/\\mathbf{{u}}^{}$\".format(exponent))\n\n ax.set_xticks(u_xticks, minor=True)\n ax.set_xticklabels(u_xticklabels, minor=True)\n ax.tick_params(\n axis=\"x\",\n which=\"minor\",\n direction=\"out\",\n top=1,\n bottom=0,\n labelbottom=0,\n labeltop=1,\n )\n # Set special ``yticks`` for ``u``.\n ax.set_yticks([float(U)], minor=True)\n ax.set_yticklabels([r\"$\\mathbf{u}$\"], minor=True)\n ax.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n # Label the axes.\n ax.set_xlabel(\"Condition Number\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"Relative Forward Error\", fontsize=plot_utils.TEXT_SIZE)\n\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n figure.set_size_inches(5.2, 3.9)\n figure.subplots_adjust(\n left=0.12, bottom=0.11, right=0.95, top=0.92, wspace=0.2, hspace=0.2\n )\n filename = \"de_casteljau_rel_error.pdf\"\n path = plot_utils.get_path(\"k-compensated\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main_jlcs10():\n \"\"\"This recreates the plot from `JLCS10`_.\n\n .. _JLCS10: https://doi.org/10.1016/j.camwa.2010.05.021\n\n Note that it is essentially a copy of :func:`main` (and much of the\n code could be shared, but it isn't worth the effort).\n\n This seeks to recreate the plot from the original paper, but to show\n what happens as the exponent on ``POWER_VAL`` decreases from ``-44`` down\n to ``-64``. 
In particular, it shows that the compensated de Casteljau\n algorithm produces exactly zero.\n \"\"\"\n n = 8\n gamma2n = (2 * n * U) / (1 - 2 * n * U)\n bound_coeff1 = float(gamma2n)\n bound_coeff2 = 3 * n * (3 * n + 7) * U ** 2 / 2\n bound_coeff2 = float(bound_coeff2)\n\n cond_nums = []\n forward_errs1 = []\n forward_errs2 = []\n for j in range(-5, -64 - 1, -1):\n s = ROOT - POWER_VAL ** j\n exact_s = F(s)\n\n # Compute the condition number.\n exact_p = (exact_s - 1) * (4 * exact_s - 3) ** 7 / 16384\n # p_tilde(s) = SUM_j |b_j| B_{j, 8}(s) = (s - 1) (s/2 - 3/4)^7\n exact_p_tilde = (exact_s - 1) * (2 * exact_s - 3) ** 7 / 16384\n exact_cond = abs(exact_p_tilde / exact_p)\n cond_nums.append(float(exact_cond))\n\n # Compute the forward error for uncompensated de Casteljau.\n b, db = de_casteljau._compensated_k(s, BEZIER_COEFFS, 2)\n exact_b1 = F(b)\n exact_forward_err1 = abs((exact_b1 - exact_p) / exact_p)\n forward_errs1.append(float(exact_forward_err1))\n\n # Compute the forward error for compensated de Casteljau.\n b2 = b + db\n exact_b2 = F(b2)\n exact_forward_err2 = abs((exact_b2 - exact_p) / exact_p)\n forward_errs2.append(float(exact_forward_err2))\n\n # Set a tight ``x``-limit.\n min_exp = np.log(min(cond_nums))\n max_exp = np.log(max(cond_nums))\n delta_exp = max_exp - min_exp\n min_x = np.exp(min_exp - 0.01 * delta_exp)\n max_x = np.exp(max_exp + 0.01 * delta_exp)\n\n figure = plt.figure()\n ax = figure.gca()\n ax.loglog(\n cond_nums,\n forward_errs1,\n marker=\"v\",\n linestyle=\"none\",\n zorder=2,\n label=r\"$\\mathtt{DeCasteljau}$\",\n color=plot_utils.BLUE,\n )\n ax.loglog(\n cond_nums,\n forward_errs2,\n marker=\"d\",\n linestyle=\"none\",\n zorder=2,\n label=r\"$\\mathtt{CompDeCasteljau}$\",\n color=plot_utils.GREEN,\n )\n # Figure out the bounds before adding the bounding lines.\n min_y, max_y = ax.get_ylim()\n # Plot the lines of the a priori error bounds.\n ax.loglog(\n [min_x, max_x],\n [bound_coeff1 * min_x, bound_coeff1 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n [min_x, max_x],\n [bound_coeff2 * min_x, bound_coeff2 * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``x = 1/u^k`` vertical lines.\n delta_y = max_y - min_y\n for exponent in (1, 2):\n u_inv = 1.0 / float(U) ** exponent\n ax.loglog(\n [u_inv, u_inv],\n [min_y - 0.05 * delta_y, max_y + 0.05 * delta_y],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``y = u`` and ``y = 1`` horizontal lines.\n ax.loglog(\n [min_x, max_x],\n [float(U), float(U)],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n ax.loglog(\n [min_x, max_x],\n [1.0, 1.0],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n\n # Make sure the ``y``-limit stays set (the bounds lines exceed).\n ax.set_ylim(min_y, 10.0 ** 18)\n ax.set_xlim(min_x, max_x)\n # Add the legend.\n ax.legend(\n loc=\"lower right\",\n framealpha=1.0,\n frameon=True,\n fontsize=plot_utils.TEXT_SIZE,\n )\n # Set \"nice\" ticks.\n ax.set_xticks([10.0 ** n for n in range(5, 45 + 5, 5)])\n ax.set_yticks([10.0 ** n for n in range(-18, 14 + 4, 4)])\n # Set special ``xticks`` for ``1/u`` and ``1/u^2``.\n ax.set_xticks([1.0 / float(U), 1.0 / float(U) ** 2], minor=True)\n ax.set_xticklabels([r\"$1/\\mathbf{u}$\", r\"$1/\\mathbf{u}^2$\"], minor=True)\n ax.tick_params(\n axis=\"x\",\n which=\"minor\",\n direction=\"out\",\n top=1,\n bottom=0,\n labelbottom=0,\n labeltop=1,\n )\n # Set special ``yticks`` for ``u`` and ``1``.\n 
ax.set_yticks([float(U), 1.0], minor=True)\n ax.set_yticklabels([r\"$\\mathbf{u}$\", \"$1$\"], minor=True)\n ax.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n # Label the axes.\n ax.set_xlabel(\"Condition Number\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"Relative Forward Error\", fontsize=plot_utils.TEXT_SIZE)\n # Make sure the ticks are sized appropriately.\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n figure.set_size_inches(5.2, 3.9)\n figure.subplots_adjust(\n left=0.12, bottom=0.11, right=0.95, top=0.92, wspace=0.2, hspace=0.2\n )\n filename = \"jlcs10_plot.pdf\"\n path = plot_utils.get_path(\"k-compensated\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n main_jlcs10()\n", "id": "7751473", "language": "Python", "matching_score": 6.112049579620361, "max_stars_count": 2, "path": "scripts/k-compensated/error_against_cond.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport matplotlib.pyplot as plt\nimport mpmath\nimport numpy as np\n\nimport newton_bezier\nimport plot_utils\n\n\nU = 0.5 ** 53\nALPHA = 0.25\n\n\ndef kappa(r, ctx):\n sqrt_r = ctx.sqrt(r)\n mu1 = 9 + 4 * sqrt_r + 2 * r - r * sqrt_r - 0.5 * r * r\n mu2 = 2 / r + 2 + 2 * r\n\n v1_v1 = 5.0 / (64.0 * r)\n v1_v2 = -3.0 / (32.0 * r) - 5.0 / (32.0 * r * sqrt_r)\n v2_v2 = (5 + 6 * sqrt_r + 2 * r) / (16 * r * r)\n\n numerator = (\n mu1 * mu1 * v1_v1 + 2 * mu1 * mu2 * abs(v1_v2) + mu2 * mu2 * v2_v2\n )\n alpha = 0.5 * (1 + sqrt_r)\n beta = 0.25 * (2 + sqrt_r)\n denominator = alpha * alpha + beta * beta\n kappa_sq = numerator / denominator\n\n return ctx.sqrt(kappa_sq)\n\n\ndef main():\n ctx = mpmath.MPContext()\n ctx.prec = 500\n\n s0 = 1.0\n t0 = 1.0\n\n cond_nums = np.empty((24,))\n rel_errors1 = np.empty((24,))\n rel_errors2 = np.empty((24,))\n for n in range(3, 49 + 2, 2):\n r = 0.5 ** n\n r_inv = 1.0 / r\n cond_num = kappa(r, ctx)\n\n # Compute the coefficients.\n coeffs1 = np.asfortranarray(\n [[-2.0 - r, -2.0 - r, 6.0 - r], [2.0 + r_inv, r_inv, 2.0 + r_inv]]\n )\n coeffs2 = np.asfortranarray(\n [[-4.0, -4.0, 12.0], [5.0 + r_inv, -3.0 + r_inv, 5.0 + r_inv]]\n )\n # Use Newton's method to find the intersection.\n iterates1 = newton_bezier.newton(\n s0, coeffs1, t0, coeffs2, newton_bezier.standard_residual\n )\n iterates2 = newton_bezier.newton(\n s0, coeffs1, t0, coeffs2, newton_bezier.compensated_residual\n )\n # Just keep the final iterate and discard the rest.\n s1, t1 = iterates1[-1]\n s2, t2 = iterates2[-1]\n # Compute the relative error in the 2-norm.\n sqrt_r = ctx.sqrt(r)\n alpha = 0.5 * (1 + sqrt_r)\n beta = 0.25 * (2 + sqrt_r)\n size = ctx.norm([alpha, beta], p=2)\n rel_error1 = ctx.norm([alpha - s1, beta - t1], p=2) / size\n rel_error2 = ctx.norm([alpha - s2, beta - t2], p=2) / size\n # Convert the errors to floats and 
store.\n cond_nums[(n - 3) // 2] = plot_utils.to_float(cond_num)\n rel_errors1[(n - 3) // 2] = plot_utils.to_float(rel_error1)\n rel_errors2[(n - 3) // 2] = plot_utils.to_float(rel_error2)\n\n # Make sure all of the non-compensated errors are non-zero and\n # at least one of the compensated errors is zero.\n if rel_errors1.min() <= 0.0:\n raise ValueError(\"Unexpected minimum error (non-compensated).\")\n if rel_errors2.min() <= 0.0:\n raise ValueError(\"Unexpected minimum error (compensated).\")\n\n figure = plt.figure()\n ax = figure.gca()\n # Add all of the non-compensated errors.\n ax.loglog(\n cond_nums,\n rel_errors1,\n marker=\"v\",\n linestyle=\"none\",\n color=plot_utils.BLUE,\n label=\"Standard\",\n )\n # Add all of the compensated errors.\n ax.loglog(\n cond_nums,\n rel_errors2,\n marker=\"d\",\n linestyle=\"none\",\n color=plot_utils.GREEN,\n label=\"Compensated\",\n )\n\n # Plot the lines of the a priori error bounds.\n min_x = 1.5\n max_x = 5.0e+32\n for coeff in (U, U ** 2):\n start_x = min_x\n start_y = coeff * start_x\n ax.loglog(\n [start_x, max_x],\n [start_y, coeff * max_x],\n color=\"black\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``x = 1/U`` and ``x = 1/U^2`` vertical lines.\n min_y = 1e-18\n max_y = 5.0\n for x_val in (1.0 / U, 1.0 / U ** 2):\n ax.loglog(\n [x_val, x_val],\n [min_y, max_y],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n # Add the ``y = u`` and ``y = 1`` horizontal lines.\n for y_val in (U, 1.0):\n ax.loglog(\n [min_x, max_x],\n [y_val, y_val],\n color=\"black\",\n linestyle=\"dashed\",\n alpha=ALPHA,\n zorder=1,\n )\n\n # Set \"nice\" ticks.\n ax.set_xticks([10.0 ** n for n in range(4, 28 + 8, 8)])\n ax.set_yticks([10.0 ** n for n in range(-16, 0 + 4, 4)])\n # Set special ``xticks`` for ``1/u`` and ``1/u^2``.\n ax.set_xticks([1.0 / U, 1.0 / U ** 2], minor=True)\n ax.set_xticklabels([r\"$1/\\mathbf{u}$\", r\"$1/\\mathbf{u}^2$\"], minor=True)\n ax.tick_params(\n axis=\"x\",\n which=\"minor\",\n direction=\"out\",\n top=1,\n bottom=0,\n labelbottom=0,\n labeltop=1,\n )\n # Set special ``yticks`` for ``u`` and ``1``.\n ax.set_yticks([U, 1.0], minor=True)\n ax.set_yticklabels([r\"$\\mathbf{u}$\", \"$1$\"], minor=True)\n ax.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n # Label the axes.\n ax.set_xlabel(\"Condition Number\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"Relative Forward Error\", fontsize=plot_utils.TEXT_SIZE)\n # Make sure the ticks are sized appropriately.\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n # Set axis limits.\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(min_y, max_y)\n # Add the legend.\n ax.legend(\n loc=\"upper left\",\n framealpha=1.0,\n frameon=True,\n fontsize=plot_utils.TEXT_SIZE,\n )\n\n figure.set_size_inches(6.0, 4.5)\n figure.subplots_adjust(\n left=0.11, bottom=0.09, right=0.96, top=0.94, wspace=0.2, hspace=0.2\n )\n filename = \"almost_tangent.pdf\"\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "6474862", "language": "Python", "matching_score": 3.3141584396362305, "max_stars_count": 2, "path": "scripts/compensated-newton/almost_tangent.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Show an example where compensated de Casteljau is still not enough.\"\"\"\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\nF = fractions.Fraction\n# p(s) = (2s - 1)^3 (s - 1)\nBEZIER_COEFFS = (1.0, -0.75, 0.5, -0.25, 0.0)\nROOT = 0.5\nDELTA_S = 1.5e-11\nNUM_POINTS = 401\n\n\ndef _main():\n s_vals = np.linspace(ROOT - DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n de_casteljau2 = []\n exact = []\n\n for s in s_vals:\n de_casteljau2.append(de_casteljau.compensated(s, BEZIER_COEFFS))\n\n exact_s = F(s)\n exact_p = (2 * exact_s - 1) ** 3 * (exact_s - 1)\n exact.append(float(exact_p))\n\n figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)\n ax1.plot(s_vals, de_casteljau2)\n ax2.plot(s_vals, exact)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks(\n [\n ROOT - DELTA_S,\n ROOT - 0.5 * DELTA_S,\n ROOT,\n ROOT + 0.5 * DELTA_S,\n ROOT + DELTA_S,\n ]\n )\n\n ax1.set_title(r\"$\\mathtt{CompDeCasteljau}$\", fontsize=plot_utils.TEXT_SIZE)\n ax2.set_title(\"$p(s)$\", fontsize=plot_utils.TEXT_SIZE)\n\n ax1.tick_params(labelsize=plot_utils.TICK_SIZE, which=\"both\")\n ax2.tick_params(labelsize=plot_utils.TICK_SIZE, which=\"both\")\n\n filename = \"compensated_insufficient.pdf\"\n # NOTE: These are (intended to be) the same settings used in\n # ``horner_inferior.py``, so they should probably be\n # kept in sync.\n figure.set_size_inches(6.0, 2.9)\n figure.subplots_adjust(\n left=0.07, bottom=0.13, right=0.97, top=0.92, wspace=0.13, hspace=0.20\n )\n path = plot_utils.get_path(\"k-compensated\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n mapping = {\n \"xtick.labelsize\": plot_utils.TICK_SIZE,\n \"ytick.labelsize\": plot_utils.TICK_SIZE,\n }\n with plt.style.context(mapping):\n _main()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "5360707", "language": "Python", "matching_score": 7.081995964050293, "max_stars_count": 2, "path": "scripts/k-compensated/compensated_insufficient.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Show an example where compensated de Casteljau is still not enough.\"\"\"\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\nF = fractions.Fraction\n# p(s) = (2s - 1)^3 (s - 1)\nBEZIER_COEFFS = (1.0, -0.75, 0.5, -0.25, 0.0)\nROOT = 0.5\nDELTA_S = 1.5e-11\nNUM_POINTS = 401\n\n\ndef main(filename=None):\n s_vals = np.linspace(ROOT - 
DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n de_casteljau2 = []\n exact = []\n\n for s in s_vals:\n de_casteljau2.append(de_casteljau.compensated(s, BEZIER_COEFFS))\n\n exact_s = F(s)\n exact_p = (2 * exact_s - 1) ** 3 * (exact_s - 1)\n exact.append(float(exact_p))\n\n figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)\n ax1.plot(s_vals, de_casteljau2)\n ax2.plot(s_vals, exact)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks(\n [\n ROOT - DELTA_S,\n ROOT - 0.5 * DELTA_S,\n ROOT,\n ROOT + 0.5 * DELTA_S,\n ROOT + DELTA_S,\n ]\n )\n\n ax1.set_title(r\"$\\mathtt{CompDeCasteljau}$\")\n ax2.set_title(\"$p(s)$\")\n\n if filename is None:\n plt.show()\n else:\n # NOTE: These are (intended to be) the same settings used in\n # ``horner_inferior.py``, so they should probably be\n # kept in sync.\n figure.set_size_inches(9.87, 4.8)\n figure.subplots_adjust(\n left=0.06,\n bottom=0.12,\n right=0.97,\n top=0.92,\n wspace=0.13,\n hspace=0.20,\n )\n path = plot_utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main(filename=\"compensated_insufficient.pdf\")\n", "id": "5244507", "language": "Python", "matching_score": 5.69511079788208, "max_stars_count": 2, "path": "scripts/compensated_insufficient.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Compare how \"smooth\" plotted values look.\n\nThis will contrast three different implementations (``K = 1, 2, 3``)\nand show that more accuracy produces smoother plots.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport horner\nimport plot_utils\n\n# p(s) = (2s - 1)^3 = (-(1 - s) + s)^3\nPOLY_COEFFS = (8.0, -12.0, 6.0, -1.0)\nBEZIER_COEFFS = (-1.0, 1.0, -1.0, 1.0)\nROOT = 0.5\nDELTA_S = 5e-6\nNUM_POINTS = 401\n\n\ndef _main():\n s_vals = np.linspace(ROOT - DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n horner1 = []\n de_casteljau1 = []\n\n for s in s_vals:\n horner1.append(horner.basic(s, POLY_COEFFS))\n de_casteljau1.append(de_casteljau.basic(s, BEZIER_COEFFS))\n\n figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)\n ax1.plot(s_vals, horner1)\n ax2.plot(s_vals, de_casteljau1)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks(\n [\n ROOT - DELTA_S,\n ROOT - 0.5 * DELTA_S,\n ROOT,\n ROOT + 0.5 * DELTA_S,\n ROOT + DELTA_S,\n ]\n )\n\n ax1.set_title(r\"$\\mathtt{Horner}$\", fontsize=plot_utils.TEXT_SIZE)\n ax2.set_title(r\"$\\mathtt{DeCasteljau}$\", fontsize=plot_utils.TEXT_SIZE)\n\n ax1.tick_params(labelsize=plot_utils.TICK_SIZE, which=\"both\")\n ax2.tick_params(labelsize=plot_utils.TICK_SIZE, which=\"both\")\n\n filename = \"horner_inferior.pdf\"\n # NOTE: These are (intended to be) the same settings used in\n # ``compensated_insufficient.py``, so they should probably be\n # kept in sync.\n figure.set_size_inches(6.0, 2.9)\n figure.subplots_adjust(\n left=0.07, bottom=0.13, 
right=0.97, top=0.92, wspace=0.13, hspace=0.20\n )\n path = plot_utils.get_path(\"k-compensated\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n mapping = {\n \"xtick.labelsize\": plot_utils.TICK_SIZE,\n \"ytick.labelsize\": plot_utils.TICK_SIZE,\n }\n with plt.style.context(mapping):\n _main()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "6222654", "language": "Python", "matching_score": 7.255927085876465, "max_stars_count": 2, "path": "scripts/k-compensated/horner_inferior.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Compare how \"smooth\" plotted values look.\n\nThis will contrast three different implementations (``K = 1, 2, 3``)\nand show that more accuracy produces smoother plots.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport horner\nimport plot_utils\n\n# p(s) = (2s - 1)^3 = (-(1 - s) + s)^3\nPOLY_COEFFS = (8.0, -12.0, 6.0, -1.0)\nBEZIER_COEFFS = (-1.0, 1.0, -1.0, 1.0)\nROOT = 0.5\nDELTA_S = 5e-6\nNUM_POINTS = 401\n\n\ndef main(filename=None):\n s_vals = np.linspace(ROOT - DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n horner1 = []\n de_casteljau1 = []\n\n for s in s_vals:\n horner1.append(horner.basic(s, POLY_COEFFS))\n de_casteljau1.append(de_casteljau.basic(s, BEZIER_COEFFS))\n\n figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)\n ax1.plot(s_vals, horner1)\n ax2.plot(s_vals, de_casteljau1)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks(\n [\n ROOT - DELTA_S,\n ROOT - 0.5 * DELTA_S,\n ROOT,\n ROOT + 0.5 * DELTA_S,\n ROOT + DELTA_S,\n ]\n )\n\n ax1.set_title(r\"$\\mathtt{Horner}$\")\n ax2.set_title(r\"$\\mathtt{DeCasteljau}$\")\n\n if filename is None:\n plt.show()\n else:\n figure.set_size_inches(9.87, 4.8)\n figure.subplots_adjust(\n left=0.06,\n bottom=0.12,\n right=0.97,\n top=0.92,\n wspace=0.13,\n hspace=0.20,\n )\n path = plot_utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main(filename=\"horner_inferior.pdf\")\n", "id": "10213423", "language": "Python", "matching_score": 5.767876148223877, "max_stars_count": 2, "path": "scripts/horner_inferior.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Compare how \"smooth\" plotted values look.\n\nThis will contrast three different implementations (``K 
= 1, 2, 3``)\nand show that more accuracy produces smoother plots.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\n# p(s) = (s - 1) (s - 3/4)^7\nBEZIER_COEFFS = (\n 2187.0 / 16384.0,\n -5103.0 / 131072.0,\n 729.0 / 65536.0,\n -405.0 / 131072.0,\n 27.0 / 32768.0,\n -27.0 / 131072.0,\n 3.0 / 65536.0,\n -1.0 / 131072.0,\n 0.0,\n)\nROOT = 0.75\nDELTA_S = 1e-5\nNUM_POINTS = 401\n\n\ndef _main():\n s_vals = np.linspace(ROOT - DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n evaluated1 = []\n evaluated2 = []\n evaluated3 = []\n\n for s in s_vals:\n b, db, d2b = de_casteljau._compensated_k(s, BEZIER_COEFFS, 3)\n evaluated1.append(b)\n b2 = b + db\n evaluated2.append(b2)\n b3 = b2 + d2b\n evaluated3.append(b3)\n\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True)\n ax1.plot(s_vals, evaluated1)\n ax2.plot(s_vals, evaluated2)\n ax3.plot(s_vals, evaluated3)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks([ROOT - 0.8 * DELTA_S, ROOT, ROOT + 0.8 * DELTA_S])\n\n ax1.set_title(\n r\"$\\mathtt{DeCasteljau}$\", fontsize=plot_utils.TEXT_SIZE, pad=16.0\n )\n ax2.set_title(\n r\"$\\mathtt{CompDeCasteljau}$\", fontsize=plot_utils.TEXT_SIZE, pad=16.0\n )\n ax3.set_title(\n r\"$\\mathtt{CompDeCasteljau3}$\", fontsize=plot_utils.TEXT_SIZE, pad=16.0\n )\n\n filename = \"de_casteljau_smooth_drawing.pdf\"\n figure.set_size_inches(6.0, 3.0)\n figure.subplots_adjust(\n left=0.07, bottom=0.13, right=0.98, top=0.87, wspace=0.21, hspace=0.2\n )\n path = plot_utils.get_path(\"k-compensated\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n mapping = {\n \"xtick.labelsize\": plot_utils.TICK_SIZE,\n \"ytick.labelsize\": plot_utils.TICK_SIZE,\n }\n with plt.style.context(mapping):\n _main()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "1360126", "language": "Python", "matching_score": 7.501099586486816, "max_stars_count": 2, "path": "scripts/k-compensated/smooth_drawing.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Compare how \"smooth\" plotted values look.\n\nThis will contrast three different implementations (``K = 1, 2, 3``)\nand show that more accuracy produces smoother plots.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport plot_utils\n\n\n# p(s) = (s - 1) (s - 3/4)^7\nBEZIER_COEFFS = (\n 2187.0 / 16384.0,\n -5103.0 / 131072.0,\n 729.0 / 65536.0,\n -405.0 / 131072.0,\n 27.0 / 32768.0,\n -27.0 / 131072.0,\n 3.0 / 65536.0,\n -1.0 / 131072.0,\n 0.0,\n)\nROOT = 0.75\nDELTA_S = 1e-5\nNUM_POINTS = 401\n\n\ndef main(filename=None):\n s_vals = np.linspace(ROOT - DELTA_S, ROOT + DELTA_S, NUM_POINTS)\n\n evaluated1 = []\n evaluated2 = []\n evaluated3 = []\n\n for s in s_vals:\n b, db, d2b = de_casteljau._compensated_k(s, BEZIER_COEFFS, 3)\n evaluated1.append(b)\n b2 = b + db\n evaluated2.append(b2)\n b3 = b2 + d2b\n evaluated3.append(b3)\n\n figure, (ax1, ax2, ax3) = 
plt.subplots(1, 3, sharex=True)\n ax1.plot(s_vals, evaluated1)\n ax2.plot(s_vals, evaluated2)\n ax3.plot(s_vals, evaluated3)\n\n # Since ``sharex=True``, ticks only need to be set once.\n ax1.set_xticks(\n [\n ROOT - DELTA_S,\n ROOT - 0.5 * DELTA_S,\n ROOT,\n ROOT + 0.5 * DELTA_S,\n ROOT + DELTA_S,\n ]\n )\n\n ax1.set_title(r\"$\\mathtt{DeCasteljau}$\")\n ax2.set_title(r\"$\\mathtt{CompDeCasteljau}$\")\n ax3.set_title(r\"$\\mathtt{CompDeCasteljau3}$\")\n\n if filename is None:\n plt.show()\n else:\n figure.set_size_inches(13.65, 6.41)\n figure.subplots_adjust(\n left=0.04,\n bottom=0.10,\n right=0.97,\n top=0.95,\n wspace=0.15,\n hspace=0.19,\n )\n path = plot_utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main(filename=\"de_casteljau_smooth_drawing.pdf\")\n", "id": "7044821", "language": "Python", "matching_score": 0.7220585942268372, "max_stars_count": 2, "path": "scripts/smooth_drawing.py" }, { "content": "#!/usr/bin/env python\n\n# It is possible to show that the square root of two can be expressed as\n# an infinite continued fraction.\n\n# sqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ...))) = 1.414213...\n\n# By expanding this for the first four iterations, we get:\n# 1 + 1/2 = 3/2 = 1.5\n# 1 + 1/(2 + 1/2) = 7/5 = 1.4\n# 1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666...\n# 1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379...\n\n# The next three expansions are 99/70, 239/169, and 577/408, but the eighth\n# expansion, 1393/985, is the first example where the number of digits in\n# the numerator exceeds the number of digits in the denominator.\n\n# In the first one-thousand expansions, how many fractions contain a numerator\n# with more digits than denominator?\n\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\n# We can write the rth expansion as h_r/k_r where both h and k satisfy\n# f(n+2) = 2f(n+1) + f(n)\n# and h_0 = 1, h_1 = 3, k_0 = 1, k_1 = 2\ndef main(verbose=False):\n relation = [1, 2]\n\n h_values = [1, 3]\n k_values = [1, 2]\n\n count = 0\n for i in range(2, 1000 + 1):\n h_values = recurrence_next(relation, h_values)\n k_values = recurrence_next(relation, k_values)\n if len(str(h_values[1])) > len(str(k_values[1])):\n count += 1\n\n return count\n\nif __name__ == '__main__':\n print euler_timer(57)(main)(verbose=True)\n", "id": "4706651", "language": "Python", "matching_score": 3.199237585067749, "max_stars_count": 7, "path": "python/complete/no057.py" }, { "content": "#!/usr/bin/env python\n\n# The square root of 2 can be written as an infinite continued fraction.\n# sqrt(2) = [1;(2)]\n\n# It turns out that the sequence of partial values of continued fractions\n# for square roots provide the best rational approximations. Let us\n# consider the convergents for sqrt(2).\n\n# 1 + 1/2 = 3/2\n# 1 + 1/(2 + 1/2) = 7/5\n# 1 + 1/(2 + 1/(2 + 1/2)) = 17/12\n# 1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29\n\n# What is most surprising is that the important mathematical constant,\n# e = [2; 1,2,1, 1,4,1, 1,6,1 , ... 
, 1,2k,1, ...].\n\n# The first ten terms in the sequence of convergents for e are:\n\n# 2, 3, 8/3, 11/4, 19/7, 87/32, 106/39, 193/71, 1264/465, 1457/536, ...\n\n# The sum of digits in the numerator of the 10th convergent is 1+4+5+7=17.\n\n# Find the sum of digits in the numerator of the 100th convergent of the\n# continued fraction for e.\n\n# We can show (but won't) that the sequence of convergents\n# h_0/k_0, h_1/k_1, h_2/k_2, h_3/k_3, h_4/k_4, ...\n# satisfies h_n = a_n h_(n-1) + h_(n-2) (and similar for k) with\n# h_(-1) = 1, h_(-2) = 0, k_(-1) = 0, k_(-2) = 1\n\nimport operator\n\nfrom fractions import gcd\n\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef main(verbose=False):\n # we have h_(-1) = 1, k_(-1) = 0\n # h_0/k_0 = 2\n h_values = [1, 2]\n k_values = [0, 1]\n\n # The problem wants the 100th convergent which will\n # be h_99/k_99. To get to this, we need the first 99\n # values of a\n a = reduce(operator.add, [[1, 2 * k, 1] for k in range(1, 33 + 1)])\n for a_i in a:\n relation = [1, a_i]\n h_values = recurrence_next(relation, h_values)\n k_values = recurrence_next(relation, k_values)\n\n h_99 = h_values[1]\n k_99 = k_values[1]\n reduced = h_99 / (gcd(h_99, k_99))\n return sum(int(dig) for dig in str(reduced))\n\nif __name__ == '__main__':\n print euler_timer(65)(main)(verbose=True)\n", "id": "7008889", "language": "Python", "matching_score": 2.3257126808166504, "max_stars_count": 7, "path": "python/complete/no065.py" }, { "content": "#!/usr/bin/env python\n\n# It is well known that if the square root of a natural number is not an\n# integer, then it is irrational. The decimal expansion of such square\n# roots is infinite without any repeating pattern at all.\n\n# The square root of two is 1.41421356237309504880..., and the digital\n# sum of the first one hundred decimal digits is 475.\n\n# For the first one hundred natural numbers, find the total of the digital sums\n# of the first one hundred decimal digits for all the irrational square roots.\n\n###########################\n# To deal with precision, we use no064 and no065 to help out.\n# The expanded_digits function will expanded n/d to d digits no matter\n# the precision simply by multiplying by 10\n\n# We use the cycle (algorithm calculated in 64) to find the values of a_i\n# in the continued fraction expansion and we use the recurrence on\n# the numerator and denominator in the fractional estimate as in\n# 65. 
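As a small illustration of\n# the digit expansion (with made-up inputs), expanded_digits(14142, 10000, 5)\n# returns [1, 4, 1, 4, 2]: each call keeps the digits of the integer quotient\n# and recurses on ten times the remainder. 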
Once these estimates agree to 100 digits, we stop\n\nfrom python.decorators import euler_timer\nfrom python.functions import continued_fraction_cycle\nfrom python.functions import is_power\nfrom python.functions import recurrence_next\n\n\ndef expanded_digits(numerator, denominator, digits):\n # use integer division on num and denom to get a quotient\n quotient_digits = [int(dig) for dig in str(numerator / denominator)]\n remainder = numerator % denominator\n if len(quotient_digits) >= digits:\n return quotient_digits[:digits]\n return quotient_digits + expanded_digits(10 * remainder,\n denominator,\n digits - len(quotient_digits))\n\n\ndef stable_expansion(digits, n):\n values = continued_fraction_cycle(n)\n h_values = [1, values[0]] # a_0\n k_values = [0, 1]\n\n cycle_length = len(values) - 1 # we only cycle over a_1,...,a_{k-1}\n last = expanded_digits(h_values[1], k_values[1], digits)\n index = 1\n relation = [1, values[index]]\n h_values = recurrence_next(relation, h_values)\n k_values = recurrence_next(relation, k_values)\n current = expanded_digits(h_values[1], k_values[1], digits)\n while current != last:\n index += 1\n last = current\n\n relative_index = ((index - 1) % cycle_length) + 1\n # we want residues 1,..,k-1 instead of the traditional 0,...,k-2\n relation = [1, values[relative_index]]\n h_values = recurrence_next(relation, h_values)\n k_values = recurrence_next(relation, k_values)\n current = expanded_digits(h_values[1], k_values[1], digits)\n return current\n\n\ndef main(verbose=False):\n non_squares = [num for num in range(1, 100 + 1)\n if not is_power(num, 2)]\n running_sum = 0\n for n in non_squares:\n running_sum += sum(stable_expansion(100, n))\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(80)(main)(verbose=True)\n", "id": "895967", "language": "Python", "matching_score": 2.0541446208953857, "max_stars_count": 7, "path": "python/complete/no080.py" }, { "content": "#!/usr/bin/env python\n\n# All square roots are periodic when written as continued fractions\n# and can be written in the form:\n\n# sqrt(N) = a0 + 1/(a_1 + 1(a_2 + ...))\n\n# For example, let us consider 23:\n# sqrt(23) = 4 + sqrt(23) - 4\n# = 4 + 1/(1/(sqrt(23)-4)) = 4 + 1/(1 + (sqrt(23) - 3)/7)\n\n# If we continue we would get the following expansion:\n# sqrt(23) = 4 + 1/(1 + 1/(3 + 1/(1 + 1/(8 + ...))))\n\n# The process can be summarised as follows:\n# a_0 = 4 --> 1 + (sqrt(23) - 3)/7\n# a_1 = 1 --> 3 + (sqrt(23) - 3)/2\n# a_2 = 3 --> 1 + (sqrt(23) - 4)/7\n# a_3 = 1 --> 8 + (sqrt(23) - 4)/1\n# a_4 = 8 --> 1 + (sqrt(23) - 3)/7\n# a_5 = 1 --> 3 + (sqrt(23) - 3)/2\n# a_6 = 3 --> 1 + (sqrt(23) - 4)/7\n# a_7 = 1 --> 8 + (sqrt(23) - 4)/1\n\n# It can be seen that the sequence is repeating. 
For conciseness, we use\n# the notation 23 = [4;(1,3,1,8)], to indicate that the\n# block (1,3,1,8) repeats indefinitely.\n\n# The first ten continued fraction representations of\n# (irrational) square roots are:\n\n# 2=[1;(2)], period=1\n# 3 =[1;(1,2)], period=2\n# 5=[2;(4)], period=1\n# 6=[2;(2,4)], period=2\n# 7=[2;(1,1,1,4)], period=4\n# 8=[2;(1,4)], period=2\n# 10=[3;(6)], period=1\n# 11=[3;(3,6)], period=2\n# 12= [3;(2,6)], period=2\n# 13=[3;(1,1,1,1,6)], period=5\n\n# Exactly four continued fractions, for N <= 13, have an odd period.\n\n# How many continued fractions for N <= 10000 have an odd period?\n\n# NOTE\n# We can define the following sequence to help us\n\n# Let r_i = 1/(a_(i+1) + 1/(a(i+2) + ...\n# Then 1/r_i = a_(i+1) + r_(i+1); a_i = floor(1/r_i)\n\n# We see we can write r_i = (A_i*rt(n) + B_i)/C_i\n# then 1/r_i = C_i(A_i*rt(n) - B_i)/(n*A_i**2 - B_i**2)\n\n# represent each r_i as r_i = (A, B, C) -> 1/r_i = a + r_(i + 1)\n# -> a = floor(1/r_i) = floor(C/(A rt(n) + B))\n# -> r_(i + 1) = (C*A, C*B - a*(n*A**2 - B**2), n*A**2 - B**2)\n# -> r_(i + 1) = (A', B', C') #reduce\n# then r_(i+1) = (C_i*A_i*rt(n) - [C_i*B_i + a_(i+1)*(n*A_i**2 - B_i**2)])\n# divided by (n*A_i**2 - B_i**2)\n\nfrom python.decorators import euler_timer\nfrom python.functions import continued_fraction_cycle\nfrom python.functions import is_power\n\n\ndef main(verbose=False):\n non_squares = [num for num in range(1, 10000 + 1)\n if not is_power(num, 2)]\n cycle_lengths = [len(continued_fraction_cycle(num)) - 1\n for num in non_squares]\n return len([num for num in cycle_lengths if num % 2 == 1])\n\nif __name__ == '__main__':\n print euler_timer(64)(main)(verbose=True)\n", "id": "11004273", "language": "Python", "matching_score": 0.19472333788871765, "max_stars_count": 7, "path": "python/complete/no064.py" }, { "content": "\"\"\"\nThe *sudoku* module offers three objects building a sudoku solver:\n\n- the *Sudoku* class modelling the sudoku board and sudoku rules,\n\n- the *stack_assumptions* generic backtracking algorithm. The function\n takes a list of generator functions as argument,\n\n- the *make_generators* function returning a list of\n generator functions suited for manipulating a sudoku and compatible\n with the bactracking algorithm.\n\n\"\"\"\n\nimport array\nfrom contextlib import contextmanager\n\n\ndef newarray():\n # helper of initialisation of the data structures in Sudoku\n return array.array('i', [0] * 9)\n\n\n# Bitfield manipulation methods\ndef _one(val, index):\n return val | 1 << index - 1\n\n\ndef _zero(val, index):\n return val & ~(1 << index - 1)\n\n\ndef _get(val, index):\n return (val >> index - 1) & 1\n\n\nclass Sudoku(object):\n \"\"\"The *Sudoku* board class.\n\n Has the methods for reading the start state of a sudoku board, for\n representing a board. 
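A minimal usage sketch (mirroring what\n ``corner_sum`` in ``no096.py`` does; here ``problem`` is assumed to be an\n 81-character string of digits with 0 marking an empty slot)::\n\n sudoku = Sudoku(problem)\n for _ in stack_assumptions(make_generators(sudoku)):\n print(sudoku) # the board now holds the first solution found\n break\n\n 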
It also has the methods for setting and freeing a\n digit in a slot of the board, according to the rules of the sudoku game.\n \"\"\"\n\n def __init__(self, problem):\n # Private bitfield presence sets\n self._lines = newarray() # Lines, columns and\n self._columns = newarray() # square are bitfields of length 9.\n self._squares = newarray() # When bit 3 is set in lines[5], 3\n # is present in the fifth line.\n\n self.board = [newarray() for i in range(9)]\n # a 9x9 matrix of of ints between 1 and 9, an empty position\n # is represented by a false value.\n\n # Reading the problem\n k = 0\n for i in range(9):\n for j in range(9):\n if int(problem[k]) != 0:\n self.set(i, j, int(problem[k]))\n k += 1\n\n def set(self, i, j, val):\n \"\"\"Sets a new digit on the board in position i,j.\n\n This only updates the board *without* checking first if the rules of\n the sudoku game are respected\n \"\"\"\n\n self.board[i][j] = val\n\n # Not only update the board but also the lines, columns and\n # squares arrays\n self._lines[i] = _one(self._lines[i], val)\n self._columns[j] = _one(self._columns[j], val)\n self._squares[(j / 3) * 3 + i / 3] = _one(\n self._squares[(j / 3) * 3 + i / 3], val)\n\n def free(self, i, j):\n \"\"\"Frees the slot in position i,j\"\"\"\n\n # The value to be removed from the lines, columns and square\n # presence set is found in the *board* member attribute\n val, self.board[i][j] = self.board[i][j], 0\n\n # Also update the line, column and square presence sets.\n self._lines[i] = _zero(self._lines[i], val)\n self._columns[j] = _zero(self._columns[j], val)\n self._squares[(j / 3) * 3 + i / 3] = _zero(\n self._squares[(j / 3) * 3 + i / 3], val)\n\n @contextmanager\n def attempt(self, col, row, candidate):\n \"\"\"A context manager which sets the value of the board.\n\n Set value at position: *col*, *line* on entering the context and which\n frees the position on exiting the context.\n \"\"\"\n\n self.set(col, row, candidate)\n yield\n self.free(col, row)\n\n def candidates(self, col, row):\n \"\"\"Returns the list of possible values for the slot specified\n\n According to the current state of the sudoku board and according to\n the rules of the sudoku game.\n\n The sudoku rules states that the candidates are the numbers\n which are not present neither in the column *col*, neither in\n the line *row*, neither in the square identified by *col* and\n *row*.\n \"\"\"\n\n return filter(\n lambda val: all(not _get(bf, val) for bf in (\n self._lines[col],\n self._columns[row],\n self._squares[(row / 3) * 3 + col / 3])),\n range(1, 10))\n\n def __str__(self):\n\n # The matrix is transformed into a list of characters\n l = [str(self.board[i][j]) if self.board[i][j] else ' '\n for i in range(9) for j in range(9)]\n\n l = ['\\n ' + e if i % 9 == 0 else e for (i, e) in enumerate(l)] # 1.\n l = [' ' + e if i % 3 == 0 else e for (i, e) in enumerate(l)] # 2.\n l = ['\\n' + e if i % 27 == 0 else e for (i, e) in enumerate(l)] # 3.\n # 1. New lines every 9 elements\n # 2,3. Squares are represented by extra spaces and another\n # newline\n\n return ' '.join(l)\n\n\ndef make_generators(sudoku):\n \"\"\"Makes list of candidate generators for backtrack algorithm.\n\n Generators will be used in stack_assumptions. The sudoku argument must\n provide two functions: *candidates(i,j)*, and *attempt(col, row,\n candidate)* and a member attribute called *board*, which is a 9x9\n matrix.\n\n There are as many generator functions than there are slots on the\n sudoku board, they are stored in a list. 
Each generator function\n is specific to a slot: it actually *contains* the coordinates of\n the slot, like a closure.\n\n When called for the first time, the generator computes the list of\n candidate numbers for the slot, according to the current sudoku\n board. The list of candidates depends on the state of the board at\n the time the generator is called for the first time.\n \"\"\"\n\n generators = []\n for i in range(9):\n for j in range(9):\n def gen_func(col=i, row=j):\n if sudoku.board[col][row] != 0:\n yield\n else:\n for candidate in sudoku.candidates(col, row):\n with sudoku.attempt(col, row, candidate):\n yield\n generators.append(gen_func)\n return generators\n\n\ndef stack_assumptions(generators, i=0):\n \"\"\"Stack up several generator assumptions.\n\n Takes a list of generators. This list is assumed to manipulate\n a shared representation of the problem. When this algorithm\n yields, a solution has been found and can be printed.\n\n The algorithm works by calling the generator at the *nth* position\n of the list, and pulls the *next()* method on the iterator\n returned:\n\n #. either *next()* returns, in which case, the algorithm\n instantiates the generator from position **n+1** of the input\n list function and tries to pull its *next()* method,\n\n #. or the method raises a StopIteration, in which case, the\n algorithm trigger *next()* on the generator at position **n-1**,\n\n This algorithm yields whenever every generator of the list has\n yielded, at this point, every position of the board is filled with\n a digit according to the sudoku rules: a solution has been\n reached and the board can be printed.\n\n When a generator has yielded, this means that a suitable candidate\n could be found and was set in the board's slot and that an\n assumption can be tried on the next slot, with generator i+1.\n\n When a generator raises a StopIteration, then a dead-end was\n met. 
A wrong assumption must have been taken somewhere along the\n stack of the previous recursion: the algorithm backtracks at the\n previous recursion, another assumption can be attempted.\n \"\"\"\n\n if i >= len(generators):\n yield\n else:\n for _ in generators[i]():\n for _ in stack_assumptions(generators, i + 1):\n yield\n", "id": "8729545", "language": "Python", "matching_score": 2.5227372646331787, "max_stars_count": 7, "path": "python/sudoku.py" }, { "content": "#!/usr/bin/env python\n\n# By solving all fifty puzzles find the sum of the 3-digit numbers\n# found in the top left corner of each solution grid; for example,\n# 483 is the 3-digit number found in the top left corner of the\n# solution grid above.\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\nfrom python.sudoku import make_generators\nfrom python.sudoku import stack_assumptions\nfrom python.sudoku import Sudoku\n\n\ndef corner_sum(board):\n sudoku = Sudoku(board)\n\n for _ in stack_assumptions(make_generators(sudoku)):\n first, second, third = sudoku.board[0][:3]\n return 100 * first + 10 * second + third\n\n\ndef main(verbose=False):\n puzzles = get_data(96).split(\"\\n\")\n puzzles = [reduce(operator.add, puzzles[10 * index + 1:10 * index + 10])\n for index in range(50)]\n return sum(corner_sum(puzzle) for puzzle in puzzles)\n\nif __name__ == '__main__':\n print euler_timer(96)(main)(verbose=True)\n", "id": "11834337", "language": "Python", "matching_score": 1.0218837261199951, "max_stars_count": 7, "path": "python/complete/no096.py" }, { "content": "#!/usr/bin/env python\n\n# By counting carefully it can be seen that a rectangular grid measuring\n# 3 by 2 contains eighteen rectangles:\n# Although there exists no rectangular grid that contains exactly two\n# million rectangles, find the area of the grid with the nearest solution.\n\n# (3,2)\n# T1(1,1)-->(3+1-1,2+1-1)=(3,2)=6\n# T2(2,1)-->(3+1-2,2+1-1)=(2,2)=4\n# T3(3,1)-->(3+1-3,2+1-1)=(1,2)=2\n# B1(1,2)-->(3+1-1,2+1-2)=(3,1)=3\n# B2(2,2)-->(3+1-2,2+1-2)=(2,1)=2\n# B3(3,2)-->(3+1-3,2+1-2)=(1,1)=1\n# (1+2+3)(1+2)\n\n# (m,n)\n# ...\n# m(m+1)/2 * n(n+1)/2\n\n# Find P = m(m+1)n(n+1) nearest to 8000000\n# WLOG n <= m\n# WLOG n <= m, P <= m^2(m+1)^2\n\n# n >= 1, 2P + 1 >= 4m^2 + 4m + 1 >= 16*10**6 + 1\n# m >= 1999.5\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n max_m = int(((16 * 10 ** 6 + 1) ** (0.5) - 1) / 2.0) + 1\n closest = 0\n area = 0\n for m in range(1, max_m + 1):\n for n in range(1, m + 1):\n if (abs(m * n * (m + 1) * (n + 1) - 8 * 10 ** 6) <\n abs(closest - 8 * 10 ** 6)):\n closest = m * n * (m + 1) * (n + 1)\n area = m * n\n return area\n\nif __name__ == '__main__':\n print euler_timer(85)(main)(verbose=True)\n", "id": "7991141", "language": "Python", "matching_score": 1.360908031463623, "max_stars_count": 7, "path": "python/complete/no085.py" }, { "content": "#!/usr/bin/env python\n\n# Starting in the top left corner of a 2 x 2 grid, there are\n# 6 routes (without backtracking) to the bottom right corner.\n# How many routes are there through a 20 x 20 grid?\n\nfrom python.decorators import euler_timer\nfrom python.functions import choose\n\n\ndef main(verbose=False):\n # In an n x m grid there are (n + m) C m = (n + m) C n such paths.\n return choose(20 + 20, 20)\n\nif __name__ == '__main__':\n print euler_timer(15)(main)(verbose=True)\n", "id": "4966896", "language": "Python", "matching_score": 0.22830946743488312, "max_stars_count": 7, "path": "python/complete/no015.py" }, { 
"content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_subsets\n\n\ndef can_concat(left, right, candidates):\n possible = ['%s%s' % (dig_l, dig_r) for dig_l in left for dig_r in right]\n for dig_l in right:\n for dig_r in left:\n possible.append('%s%s' % (dig_l, dig_r))\n return (len(set(possible).intersection(candidates)) == 9)\n\n\ndef main(verbose=False):\n dice = all_subsets(range(10), 6)\n size = len(dice)\n for i in range(size):\n if 6 in dice[i] and 9 not in dice[i]:\n dice[i].append(9)\n if 9 in dice[i] and 6 not in dice[i]:\n dice[i].append(6)\n\n count = 0\n candidates = [str(n ** 2).zfill(2) for n in range(1, 10)]\n for left_ind in range(size - 1):\n for right_ind in range(left_ind, size):\n if can_concat(dice[left_ind], dice[right_ind], candidates):\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(90)(main)(verbose=True)\n", "id": "8685501", "language": "Python", "matching_score": 0.7018500566482544, "max_stars_count": 7, "path": "python/complete/no090.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import ascending\nfrom python.functions import total_perms\n\n\ndef dice_outcomes(num_dice, num_sides):\n result = {}\n for bottom in range(1, num_sides + 1):\n for dice_sum in range(1 * num_dice, num_sides * num_dice + 1):\n for outcome in ascending(num_dice, dice_sum, bottom, num_sides):\n curr_sum = sum(outcome)\n # if curr_sum is not in result, sets to total_perms(outcome)\n # (default 0 returned by get)\n result[curr_sum] = (result.get(curr_sum, 0) +\n total_perms(outcome))\n return result\n\n\ndef main(verbose=False):\n OUTCOMES_4 = dice_outcomes(9, 4)\n OUTCOMES_6 = dice_outcomes(6, 6)\n\n winning_outcomes = 0\n for pete_score in range(9, 36 + 1):\n for colin_score in range(6, pete_score):\n winning_outcomes += (OUTCOMES_4[pete_score] *\n OUTCOMES_6[colin_score])\n\n return round(winning_outcomes * 1.0 / ((4 ** 9) * (6 ** 6)), 7)\n\nif __name__ == '__main__':\n print euler_timer(205)(main)(verbose=True)\n", "id": "4467488", "language": "Python", "matching_score": 1.4423457384109497, "max_stars_count": 7, "path": "python/complete/no205.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import ascending\nfrom python.functions import total_perms\n\n\ndef generate_addons(num, smallest, biggest):\n if num == 1:\n return [[i] for i in range(smallest, biggest + 1)]\n\n result = []\n for i in range(smallest, biggest + 1):\n result.extend([[i] + addon for addon\n in generate_addons(num - 1, i, biggest)])\n return result\n\n\ndef main(verbose=False):\n MATCHES = []\n for bottom in range(1, 12 + 1):\n MATCHES.extend(ascending(10, 70, bottom, 12))\n\n add_ons = {}\n for biggest in range(1, 8):\n add_ons[biggest] = generate_addons(10, 1, biggest)\n\n count = 0\n for match in MATCHES:\n bottom = match[0]\n for addon in add_ons[bottom]:\n curr = addon + match\n count += total_perms(curr)\n return count\n\nif __name__ == '__main__':\n print euler_timer(240)(main)(verbose=True)\n", "id": "6594862", "language": "Python", "matching_score": 0.7710375785827637, "max_stars_count": 7, "path": "python/complete/no240.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n destinations = {1: 1, 89: 89}\n count = 1 # 89 has already been encountered\n for i in range(2, 10 ** 7):\n curr = i\n to_add = []\n while curr not in 
destinations:\n to_add.append(curr)\n curr = sum(int(digit) ** 2 for digit in str(curr))\n\n # now curr will be in destinations\n value = destinations[curr]\n for elt in to_add:\n if elt < 10 ** 7 and value == 89:\n count += 1\n destinations[elt] = value\n\n return count\n\nif __name__ == '__main__':\n print euler_timer(92)(main)(verbose=True)\n", "id": "8997737", "language": "Python", "matching_score": 0.7852163314819336, "max_stars_count": 7, "path": "python/too_slow/no092.py" }, { "content": "#!/usr/bin/env python\n\n# In England there are 8 coins in circulation:\n# 1p, 2p, 5p, 10p, 20p, 50p, 100p and 200p.\n# How many different ways can 200p be made using any number of coins?\n\n# This will be the coeficient of x^200 in\n# (1 + x + x^2 + ...)(1 + x^2 + x^4 + ...)(1 + x^5 + x^10 + ...)*...\n# ...*(1 + x^10 + x^20 + ...)(1 + x^20 + x^40 + ...)(1 + x^50 + x^100 + ...)\n\nfrom python.decorators import euler_timer\n\n\ndef polynomial_add(left, right):\n max_len = max(len(left), len(right))\n to_add_left = [0] * (max_len - len(left)) + left[:]\n to_add_right = [0] * (max_len - len(right)) + right[:]\n return [to_add_left[i] + to_add_right[i] for i in range(max_len)]\n\n\n# represent ax^n + bx^(n-1) + ... + c as [c,...b,a]\n# 1 + 2x + x^2 + 2x^3 = (1+2x)*(1+x^2) =\n# [1,2]*[1,0,1] = [1,0,1,0] + [2,0,2] = [1,2,1,2]\ndef polynomial_mult(f, g):\n result = []\n for ind in range(len(f)):\n to_add = [f[-ind - 1] * coeff for coeff in g] + [0] * ind\n result = polynomial_add(result, to_add)\n return result\n\n\ndef generating_poly(max_power, base):\n add_on = [0] * (base - 1) + [1]\n return [1] + add_on * (max_power / base)\n\n\ndef main(verbose=False):\n prod = generating_poly(200, 1)\n coins = [2, 5, 10, 20, 50, 100, 200]\n for coin in coins:\n prod = polynomial_mult(prod, generating_poly(200, coin))\n return prod[200]\n\nif __name__ == '__main__':\n print euler_timer(31)(main)(verbose=True)\n", "id": "6571780", "language": "Python", "matching_score": 0.7432240843772888, "max_stars_count": 7, "path": "python/complete/no031.py" }, { "content": "#!/usr/bin/env python\n\n# a0\n# a1\n# a2\n# a3 a4\n# a5\n# a6 a7 a8\n# a9\n\n# In clockwise order, chains are\n# (a0, a2, a4)\n# (a1, a4, a7)\n# (a8, a7, a6)\n# (a9, a6, a3)\n# (a5, a3, a2)\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_permutations\n\n\ndef magic_5_gon(perm):\n node_indices = [0, 1, 8, 9, 5]\n triples = {0: [0, 2, 4],\n 1: [1, 4, 7],\n 8: [8, 7, 6],\n 9: [9, 6, 3],\n 5: [5, 3, 2]}\n node_values = [perm[ind] for ind in node_indices]\n start = node_values.index(min(node_values))\n\n ordered_nodes = node_indices[start:] + node_indices[:start]\n result = []\n for node in ordered_nodes:\n curr_ind = triples[node]\n result.append([perm[ind] for ind in curr_ind])\n return result\n\n\ndef main(verbose=False):\n result = []\n perms = all_permutations(range(1, 11))\n for perm in perms:\n magic = magic_5_gon(perm)\n sums = [sum(triple) for triple in magic]\n if len(set(sums)) == 1:\n to_add = \"\".join(\"\".join(str(ind) for ind in triple)\n for triple in magic)\n result.append(to_add)\n return max(int(concat) for concat in result if len(concat) == 16)\n\nif __name__ == '__main__':\n print euler_timer(68)(main)(verbose=True)\n", "id": "5772403", "language": "Python", "matching_score": 1.0400948524475098, "max_stars_count": 7, "path": "python/too_slow/no068.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n MAX_n = 200\n\n optimal_chains = {1: [[1]]}\n 
for exponent in range(2, MAX_n + 1):\n addition_chains = []\n for needed_value in xrange(1, exponent / 2 + 1):\n for chain in optimal_chains[exponent - needed_value]:\n if needed_value in chain:\n addition_chains.append(chain[:] + [exponent])\n\n min_length = min(len(chain) for chain in addition_chains)\n optimal_chains[exponent] = [chain for chain in addition_chains\n if len(chain) == min_length]\n\n return sum(len(chain[0]) - 1 for chain in optimal_chains.values())\n\nif __name__ == '__main__':\n print euler_timer(122)(main)(verbose=True)\n", "id": "3205044", "language": "Python", "matching_score": 1.2232657670974731, "max_stars_count": 7, "path": "python/complete/no122.py" }, { "content": "#!/usr/bin/env python\n\n# Which starting number, under one million, produces the\n# longest chain? (Of the collatz chain)\n\nfrom python.decorators import euler_timer\n\n\ndef collatz_next(n):\n if n % 2 == 0:\n return n / 2\n else:\n return 3 * n + 1\n\n\ndef length(n, hash_):\n if n in hash_:\n return hash_[n]\n else:\n curr_length = 1 + length(collatz_next(n), hash_)\n hash_[n] = curr_length\n return curr_length\n\n\ndef max_collatz_length_up_to_n(n, hash_={1: 1}):\n max_length = -1\n max_length_at = -1\n for i in range(1, n + 1):\n if length(i, hash_) > max_length:\n max_length = length(i, hash_)\n max_length_at = i\n return [max_length_at, max_length]\n\n\ndef main(verbose=False):\n ans = max_collatz_length_up_to_n(999999)\n if verbose:\n return '%s.\\nThe Collatz chain at %s has length %s.' % (\n ans[0], ans[0], ans[1])\n else:\n return ans[0]\n\nif __name__ == '__main__':\n print euler_timer(14)(main)(verbose=True)\n", "id": "8495948", "language": "Python", "matching_score": 1.8373793363571167, "max_stars_count": 7, "path": "python/complete/no014.py" }, { "content": "#!/usr/bin/env python\n\n# Perhaps less well known is 169, in that it produces the longest chain of\n# numbers that link back to 169; it turns out that there are only three\n# such loops that exist:\n# 169 --> 363601 --> 1454 --> 169\n# 871 --> 45361 --> 871\n# 872 --> 45362 --> 872\n# It is not difficult to prove that EVERY starting number will eventually\n# get stuck in a loop. For example,\n# 69 --> 363600 --> 1454 --> 169 --> 363601 (--> 1454)\n# 78 --> 45360 --> 871 --> 45361 (--> 871)\n# 540 --> 145 (--> 145)\n\n# Starting with 69 produces a chain of five non-repeating terms, but the\n# longest non-repeating chain with a starting number below one million is\n# sixty terms. 
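(The map being iterated is the\n# digit-factorial sum, e.g. 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, which is\n# why 145 maps to itself in the chains above.) 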
How many chains, with a starting number below one million,\n# contain exactly sixty non-repeating terms?\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef digit_factorial_sum(n, hash_=None):\n if hash_ is None:\n hash_ = {}\n elif n in hash_:\n return hash_[n]\n\n result = sum(factorial(int(dig)) for dig in str(n))\n hash_[n] = result\n return result\n\n\ndef chain(n, next_hash=None, chain_hash=None):\n if chain_hash is None:\n chain_hash = {}\n\n path = [n]\n if n in chain_hash:\n return chain_hash[n]\n next_ = digit_factorial_sum(path[-1], next_hash)\n while next_ not in path:\n if next_ in chain_hash:\n chain_hash[n] = path + chain_hash[next_]\n return chain_hash[n]\n path += [next_]\n next_ = digit_factorial_sum(path[-1], next_hash)\n chain_hash[n] = path\n return path\n\n\ndef main(verbose=False):\n chains = {}\n next_hash = {}\n for n in range(1, 10 ** 6):\n chain(n, next_hash, chains)\n # This sets the value in chains\n return len([n for n in range(1, 10 ** 6) if len(chains[n]) == 60])\n\nif __name__ == '__main__':\n print euler_timer(74)(main)(verbose=True)\n", "id": "10322268", "language": "Python", "matching_score": 1.911494255065918, "max_stars_count": 7, "path": "python/complete/no074.py" }, { "content": "#!/usr/bin/env python\n\n# f(1) = 1, f(2) = 2\n# if n is odd, we must have first term equal\n# to 1, hence f(n) = f((n - 1)/2)\n# if n is even, we can either have 0 or 2 as\n# the first term, leaving us\n# f(n) = f(n/2) + f((n - 2)/2)\n\nfrom python.decorators import euler_timer\n\n\ndef f(n, hash_=None):\n if hash_ is None:\n hash_ = {}\n\n if n in hash_:\n return hash_[n]\n if n in [1, 2]:\n hash_[n] = n\n return n\n if n % 2 == 0:\n result = f(n / 2) + f(n / 2 - 1)\n else:\n result = f((n - 1) / 2)\n hash_[n] = result\n return result\n\n\ndef main(verbose=False):\n f_hash = {}\n return f(10 ** 25, f_hash)\n\nif __name__ == '__main__':\n print euler_timer(169)(main)(verbose=True)\n", "id": "4229697", "language": "Python", "matching_score": 1.3409427404403687, "max_stars_count": 7, "path": "python/complete/no169.py" }, { "content": "#!/usr/bin/env python\n\n# (a+1)**n + (a-1)**n == na + 1 + (-1)**n(1 - na) mod a**2\n# If n even, na + 1 + (1-na) = 2 mod a**2\n# If n odd, (na + 1) - (1-na) = 2an mod a**2\n# For a >= 3, set n = 1, then 2an > 2 and 2an < a**2 hence\n# max occurs when n is odd\n\n# Since 2an is a multiple of a, the best we can do is\n# -a or -2a mod a**2\n\n# a odd, set n = (a-1)/2 + ak (until n is odd)\n# then 2an == 2ka**2 + a(a-1) == a**2 - a\n\n# a even, set n = a/2 - 1 + ak (until n is odd)\n# then 2an == 2ka**2 + a**2 - 2a == a**2 - 2a\n\n# For even a r_m(a) = a**2 - 2*a\n# For odd a r_m(a) = a**2 - a\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n low = 3\n high = 1000\n if low % 2 == 1:\n odds = range(low, high + 1, 2)\n evens = range(low + 1, high + 1, 2)\n else:\n odds = range(low + 1, high + 1, 2)\n evens = range(low, high + 1, 2)\n\n odd_sum = sum(val ** 2 - val for val in odds)\n even_sum = sum(val ** 2 - 2 * val for val in evens)\n return odd_sum + even_sum\n\nif __name__ == '__main__':\n print euler_timer(120)(main)(verbose=True)\n", "id": "11051334", "language": "Python", "matching_score": 0.7018527388572693, "max_stars_count": 7, "path": "python/complete/no120.py" }, { "content": "#!/usr/bin/env python\n\n# Starting with the number 1 and moving to the right in a clockwise\n# direction a 5 by 5 spiral is formed as follows:\n\n# 21 22 23 24 25\n# 20 7 8 9 10\n# 19 6 1 2 11\n# 18 5 4 3 
12\n# 17 16 15 14 13\n\n# What is the sum of the numbers on the diagonals in a 1001 by 1001\n# spiral formed in the same way?\n\nfrom python.decorators import euler_timer\n\n\n# 1, 1, 2, 2, 3, 3, 4, 4, etc., every 2 is a corner\n\ndef spiral_sum(n):\n if n % 2 == 0:\n raise ValueError(\"Spiral only occurs on odds.\")\n\n curr_val = 1\n total = 0\n\n # as we move along the corners on the spiral, the number of\n # steps (i.e. number of corners) dictates the size of each\n # new step. In almost all cases, the step increases by one\n # but every four, when the next corner wraps a new layer,\n # it does not increase\n step_num = 0\n step_size = 0\n while curr_val <= n ** 2:\n if step_num % 2 == 0:\n step_size += 1\n curr_val += step_size\n if step_num % 4 == 0:\n total += curr_val - 1\n else:\n total += curr_val\n step_num += 1\n\n return total\n\n\ndef main(verbose=False):\n return spiral_sum(1001)\n\nif __name__ == '__main__':\n print euler_timer(28)(main)(verbose=True)\n", "id": "3769455", "language": "Python", "matching_score": 1.662224531173706, "max_stars_count": 7, "path": "python/complete/no028.py" }, { "content": "#!/usr/bin/env python\n\n# Starting with 1 and spiralling anticlockwise in the following way, a square\n# spiral with side length 7 is formed.\n#\n# 37 36 35 34 33 32 31\n# 38 17 16 15 14 13 30\n# 39 18 5 4 3 12 29\n# 40 19 6 1 2 11 28\n# 41 20 7 8 9 10 27\n# 42 21 22 23 24 25 26\n# 43 44 45 46 47 48 49\n#\n# It is interesting to note that the odd squares lie along the bottom right\n# diagonal, but what is more interesting is that 8 out of the 13 numbers lying\n# along both diagonals are prime; that is, a ratio of 8/13 or approx. 62%.\n\n# If one complete new layer is wrapped around the spiral above, a square\n# spiral with side length 9 will be formed. If this process is continued, what\n# is the side length of the square spiral for which the ratio of primes along\n# both diagonals first falls below 10%?\n\n# ALGORITHM:\n# The corners are as follows:\n# 1-(3-5-7-9)-(13-17-21-25)-(31-37-43-49)-...\n# Each set of four corners has an associated side\n# length and each new layer needs to be taken into\n# consideration\n\n# We can represent each layer as such:\n# (index, last, primes)\n# For example the first \"layer\" is just the number 1\n# index = 1 for first layer\n# last = 1 for the last corner in the layer\n# primes = 0 since we have encountered no primes on the \"corners\"\n\n# To get from the last in a layer to the first corner in the next layer\n# one needs to go right one square and then go the entire length of\n# the side, less one, since coming from the inside layer. Hence, the first\n# corner is the same distance from the last in the previous layer as the\n# other three corners are than their previous counterparts.\n# It takes 0 steps to walk along a side in Layer 1 (the single 1) .\n# As we add a layer, we add both top and bottom, both left and right, hence\n# this walk increases by 2. In general, layer (i+1) will take 2*i steps\n# With that in mind, (index, last) gives rise to the next four corners\n# C1 = last + #steps = last + 2*index (since in layer index + 1)\n# C2 = C1 + #steps, etc. 
gives\n# [last + 2*index*i for i in range(1,5)]\n\n# Since layer 1 is (1, 1, 0)\n# From here, the next layer is [1 + 2*1*i for i in range(1,5)] = [3, 5, 7, 9]\n# We can represent this layer similarly as (2, 9, 3) since the last corner\n# is 9 and 3,5 and 7 are all prime\n# The next layer is [9 + 2*2*i] = [13,17,21,25] which yields (3, 25, 5)\n\n# Since the side length includes both corners, we know the side length\n# to be 2*(index - 1) + 1 (THIS IS USED TO COMPUTE THE ANSWER)\n# Also, since we add 4 corners at each step and start with 1, we have\n# 4*index - 3 total corners at a given step\n# Also notice that last[index] = (2*index - 1)**2, hence can\n# never be prime (can prove easily by induction)\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_prime\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n # layer/primes\n # 2/3\n # 1581/835\n # 3536/1677\n # 5000/2249\n # 13121/5248, winning layer\n\n # ratio >= .1 iff 10*(primes/total) >= 1 iff 10*primes >= total\n # iff 10*primes >= 4*index - 3\n FAILURE_POINT = 10 ** 9\n PRIMES = sieve(int(sqrt(FAILURE_POINT)) + 1)\n\n layer = 2\n num_primes = 3\n while 10 * num_primes >= 4 * layer - 3:\n layer += 1\n candidates = [(2 * layer - 1) ** 2 - 2 * (layer - 1) * i\n for i in range(1, 4)]\n if candidates[-1] >= FAILURE_POINT:\n raise ValueError(\"Sieve was not big enough, restart function\")\n for candidate in candidates:\n if is_prime(candidate, primes=PRIMES, failure_point=FAILURE_POINT):\n num_primes += 1\n side_length = 2 * layer - 1 # 2*(layer - 1) + 1\n return side_length\n\nif __name__ == '__main__':\n print euler_timer(58)(main)(verbose=True)\n", "id": "5438152", "language": "Python", "matching_score": 1.697903037071228, "max_stars_count": 7, "path": "python/complete/no058.py" }, { "content": "#!/usr/bin/env python\n\n# The prime 41, can be written as the sum of six consecutive primes:\n# 41 = 2 + 3 + 5 + 7 + 11 + 13\n\n# This is the longest sum of consecutive primes that adds to a prime\n# below one-hundred.\n\n# The longest sum of consecutive primes below one-thousand that adds\n# to a prime, contains 21 terms, and is equal to 953.\n\n# Which prime, below one-million, can be written as the sum of the\n# most consecutive primes?\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef max_prime_length(digits, primes=[]):\n \"\"\"\n Returns the length of the longest string of primes\n (starting at 2,3,5,...) 
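# A small direct check of the 8/13 ratio quoted above for the side-7 spiral.
# Plain trial division keeps this snippet self-contained, independent of the
# project's is_prime/sieve helpers, whose exact interfaces are used elsewhere.
def _is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


def diagonal_ratio(side_length):
    layers = (side_length - 1) // 2
    corners = [1]
    for layer in range(1, layers + 1):
        last = (2 * layer + 1) ** 2
        corners.extend(last - 2 * layer * i for i in range(3, -1, -1))
    primes = sum(1 for c in corners if _is_prime(c))
    return primes, len(corners)


if __name__ == '__main__':
    print(diagonal_ratio(7))  # (8, 13), i.e. the 8/13 ratio stated above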
that will sum to less 10**digits\n \"\"\"\n cap = 10 ** digits\n if primes == []:\n primes = sieve(int(4 * sqrt(cap)))\n count = 0\n num_primes = 0\n for prime in primes:\n if count + prime < cap:\n count += prime\n num_primes += 1\n else:\n return num_primes\n raise ValueError(\"max_prime_length failed logic.\")\n\n\ndef all_prime_seqs(length, digits, primes=[]):\n \"\"\"\n Returns all sequences of primes of\n\n Assumes length <= max_prime_length(digits)\n \"\"\"\n cap = 10 ** digits\n if primes == []:\n primes = sieve(cap)\n\n result = []\n final_index = length - 1\n curr = primes[final_index - length + 1:final_index + 1]\n running_sum = sum(curr)\n while running_sum < cap:\n running_sum -= curr[0] # remove the smallest value from the sum\n result.append(curr)\n final_index += 1\n curr = primes[final_index - length + 1:final_index + 1]\n running_sum += curr[-1] # add the new largest\n return result\n\n\ndef prime_sequence_exists(length, digits, primes=[]):\n \"\"\"\n Returns True if a sequence of length consecutive primes which sums\n to less than 10**digits also sums to a prime number\n \"\"\"\n cap = 10 ** digits\n if primes == []:\n primes = sieve(cap)\n sums = [sum(seq) for seq in all_prime_seqs(length, digits, primes)]\n return (set(sums).intersection(primes) != set())\n\n\ndef longest_prime_sequence(digits, primes=[]):\n \"\"\"\n Returns the length of the most consecutive primes which sum\n to a prime and sum to less then 10**digits\n \"\"\"\n if primes == []:\n primes = sieve(10 ** digits)\n max_length = max_prime_length(digits, primes)\n for length in range(max_length, 0, -1):\n if prime_sequence_exists(length, digits, primes):\n return length\n raise ValueError(\"Algorithm failed\")\n\n\ndef longest_prime(digits):\n primes = sieve(10 ** digits)\n length = longest_prime_sequence(digits, primes)\n sums = [sum(seq) for seq in all_prime_seqs(length, digits, primes)]\n return max(set(sums).intersection(primes))\n\n\ndef main(verbose=False):\n return longest_prime(6)\n\nif __name__ == '__main__':\n print euler_timer(50)(main)(verbose=True)\n", "id": "4674855", "language": "Python", "matching_score": 1.7353252172470093, "max_stars_count": 7, "path": "python/complete/no050.py" }, { "content": "#!/usr/bin/env python\n\n# What is the smallest odd composite n that can't be written n = p + 2k^2\n# for some prime p and some integer k?\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_power\nfrom python.functions import sieve\n\n\ndef is_possible(n, primes):\n if n % 2 == 0 or n in primes:\n raise Error(\"Value poorly specified\")\n\n primes_less = [prime for prime in primes if prime < n and prime != 2]\n for prime in primes_less:\n if is_power((n - prime) / 2.0, 2):\n return True\n return False\n\n\ndef main(verbose=False):\n # sieve(6000) will do it (answer is 5777)\n curr = 9\n primes = sieve(5777)\n while is_possible(curr, primes):\n curr += 2\n while curr in primes:\n curr += 2\n return curr\n\nif __name__ == '__main__':\n print euler_timer(46)(main)(verbose=True)\n", "id": "3857935", "language": "Python", "matching_score": 1.8225219249725342, "max_stars_count": 7, "path": "python/complete/no046.py" }, { "content": "#!/usr/bin/env python\n\n# What is the 10001st prime number?\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n # By the prime number theorem, pi(x) =~ x/ln(x)\n # pi(x) >= 10001 when x >= 10001 ln(x)\n # To be safe, we'll double it and solve\n # x = 20002 ln(x)\n\n # We are left with 
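# The helpers above lean on python.functions.sieve throughout.  Its actual
# implementation is not shown in this excerpt, so the following is only an
# assumed, minimal Eratosthenes equivalent (all primes up to and including n)
# used to keep the illustrative snippets in these notes self-contained.
def simple_sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]


if __name__ == '__main__':
    print(simple_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]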
approximately 248490\n primes = sieve(248490)\n return primes[10001 - 1]\n\nif __name__ == '__main__':\n print euler_timer(7)(main)(verbose=True)\n", "id": "7199777", "language": "Python", "matching_score": 1.5214046239852905, "max_stars_count": 7, "path": "python/complete/no007.py" }, { "content": "#!/usr/bin/env python\n\n# The number 3797 has an interesting property. Being prime itself,\n# it is possible to continuously remove digits from left to right,\n# and remain prime at each stage:\n# 3797, 797, 97, and 7.\n\n# Similarly we can work from right to left: 3797, 379, 37, and 3.\n\n# Find the sum of the only eleven primes that are both truncatable from\n# left to right and right to left.\n\n# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.\n\nimport copy\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_prime\nfrom python.functions import sieve\n\n\ndef truncated_list(n, from_left):\n if from_left:\n digs = [dig for dig in str(n)]\n return [int(\"\".join(digs[i:])) for i in range(len(digs))]\n # If the bool from_left is false, we are right\n else:\n digs = [dig for dig in str(n)]\n return [int(\"\".join(digs[:i + 1])) for i in range(len(digs))]\n\n\ndef truncated_all(n):\n return list(set(truncated_list(n, True) + truncated_list(n, False)))\n\n\ndef is_truncatable_prime(n, primes):\n candidates = truncated_all(n)\n for candidate in candidates:\n if candidate in primes:\n continue\n elif is_prime(candidate):\n primes.add(candidate)\n else:\n return False\n return True\n\n\ndef find_first_n_truncatable(n, max_n):\n result = []\n primes = set(sieve(max_n)[4:]) # We don't include 2, 3, 5, or 7\n for prime in copy.copy(primes):\n if is_truncatable_prime(prime, primes):\n result.append(prime)\n if len(result) == n:\n return result\n\n if len(result) < n:\n raise Exception(\"Not enough found, raise max_n\")\n\n return result\n\n\ndef main(verbose=False):\n ans = find_first_n_truncatable(11, 10 ** 6)\n\n if verbose:\n return \"%s.\\nThe primes are: %s.\" % (\n sum(ans), \", \".join(str(prime) for prime in ans))\n else:\n return sum(ans)\n\nif __name__ == '__main__':\n print euler_timer(37)(main)(verbose=True)\n", "id": "4036417", "language": "Python", "matching_score": 1.9887990951538086, "max_stars_count": 7, "path": "python/complete/no037.py" }, { "content": "#!/usr/bin/env python\n\n# We shall say that an n-digit number is pandigital if it makes use of all\n# the digits 1 to n exactly once. 
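# A stand-alone illustration of the truncation test described above, using
# the worked example 3797.  Trial division is used in place of the project's
# is_prime helper so the snippet runs on its own.
def _is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


def truncations(n):
    s = str(n)
    left = [int(s[i:]) for i in range(len(s))]
    right = [int(s[:i + 1]) for i in range(len(s))]
    return sorted(set(left + right))


if __name__ == '__main__':
    print(truncations(3797))                             # [3, 7, 37, 97, 379, 797, 3797]
    print(all(_is_prime(t) for t in truncations(3797)))  # True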
For example, 2143 is a 4-digit pandigital\n# and is also prime.\n\n# What is the largest n-digit pandigital prime that exists?\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_permutations_digits\nfrom python.functions import is_prime\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n MAX_n = 987654321\n PRIMES = sieve(int(sqrt(MAX_n)))\n # A 9 digit pandigital will have digit sum 45, so can't be prime\n # must be divisible by 9\n for i in range(8, 1, -1):\n cand = [str(dig) for dig in range(1, i + 1)]\n cand = int(\"\".join(cand))\n candidates = sorted(all_permutations_digits(cand))[::-1]\n for candidate in candidates:\n if is_prime(candidate, primes=PRIMES, failure_point=MAX_n):\n return candidate\n raise ValueError(\"No prime was found, algorithm busted.\")\n\nif __name__ == '__main__':\n print euler_timer(41)(main)(verbose=True)\n", "id": "7957435", "language": "Python", "matching_score": 2.048149824142456, "max_stars_count": 7, "path": "python/complete/no041.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef most_common_digit(n):\n digs = set(str(n))\n counts = [(dig, str(n).count(dig)) for dig in digs]\n return sorted(counts, key=lambda pair: pair[1])[-1]\n\n\ndef find_and_replace(n, dig):\n digits = range(10)\n if str(n)[0] == dig:\n digits.remove(0) # no leading 0s\n\n string_val = str(n)\n result = [string_val.replace(dig, str(substitute))\n for substitute in digits]\n return [int(val) for val in result]\n\n\ndef main(verbose=False):\n PRIMES = sieve(10 ** 6)\n\n for prime in PRIMES:\n digit, count = most_common_digit(prime)\n if count > 2:\n candidates = find_and_replace(prime, digit)\n match_count = 0\n for candidate in candidates:\n if candidate in PRIMES:\n match_count += 1\n if match_count == 8:\n return prime\n\nif __name__ == '__main__':\n print euler_timer(51)(main)(verbose=True)\n", "id": "8523902", "language": "Python", "matching_score": 1.6840622425079346, "max_stars_count": 7, "path": "python/too_slow/no051.py" }, { "content": "#!/usr/bin/env python\n\n# The number, 1406357289, is a 0 to 9 pandigital number because it is made up\n# of each of the digits 0 to 9 in some order, but it also has a rather\n# interesting sub-string divisibility property.\n\n# Let d_1 be the 1st digit, d_2 be the 2nd digit, and so on. 
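# The digit-sum argument used above for the largest pandigital prime also
# rules out 8-digit pandigitals: digits 1..9 sum to 45 and digits 1..8 sum to
# 36, both divisible by 3, so any such pandigital is divisible by 3 and can
# never be prime.  A tiny check of that reasoning:
if __name__ == '__main__':
    print(sum(range(1, 10)) % 3)  # 0 -> every 1-9 pandigital is divisible by 3
    print(sum(range(1, 9)) % 3)   # 0 -> every 1-8 pandigital is divisible by 3
    print(sum(range(1, 8)) % 3)   # 1 -> 7-digit pandigitals can still be prime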
In this way, we\n# note the following:\n\n# d_2 d_3 d_4 = 406 is divisible by 2\n# d_3 d_4 d_5 = 063 is divisible by 3\n# d_4 d_5 d_6 = 635 is divisible by 5\n# d_5 d_6 d_7 = 357 is divisible by 7\n# d_6 d_7 d_8 = 572 is divisible by 11\n# d_7 d_8 d_9 = 728 is divisible by 13\n# d_8 d_9 d_10 = 289 is divisible by 17\n\n# Find the sum of all 0 to 9 pandigital numbers with this property.\n\n# Since 5 | d_4 d_5 d_6, we know d_6 in [0,5]\n# Since 11 | d_6 d_7 d_8 in [0 d_7 d_8, 5 d_7, d_8], we know d_6 = 5\n# This is because the only multiples of 11 with d_6 = 0 are\n# 11, 22, ..., 99, all of which have repeated digits\n# 11*50 = 550 is also eliminated, leaving us with\n# d_6 d_7 d_8 in [506, 517, 528, 539, 561, 572, 583, 594]\n\nimport operator\n\nfrom python.decorators import euler_timer\n\n\ndef extend_matches(value, choices, direction):\n \"\"\"\n Extends the value by anything in choices the\n 1) ends in the same 2 digits that value begins with\n 2) has remaining digit(s) unique from those in value\n \"\"\"\n value_digits = set(value)\n if direction == 'right':\n first_two_match = [choice for choice in choices\n if choice[:2] == value[-2:]]\n matches = [choice for choice in first_two_match\n if value_digits.intersection(choice[2:]) == set()]\n return [value + choice[2:] for choice in matches]\n elif direction == 'left':\n last_two_match = [choice for choice in choices\n if choice[-2:] == value[:2]]\n matches = [choice for choice in last_two_match\n if value_digits.intersection(choice[:-2]) == set()]\n return [choice[:-2] + value for choice in matches]\n else:\n raise ValueError(\"%s not a valid direction.\" % direction)\n\n\ndef extend_to_remaining_digit(value):\n last_digit = set('0123456789').difference(value)\n if len(last_digit) != 1:\n raise ValueError(\"Algorithm for 43 failed.\")\n last_digit = last_digit.pop()\n return int(last_digit + value)\n\n\ndef main(verbose=False):\n unique_digits = {}\n primes = [2, 3, 5, 7, 11, 13, 17]\n for prime in primes:\n mults = [str(number).zfill(3) for number in range(1, 1000)\n if number % prime == 0]\n unique_digits[prime] = [number for number in mults\n if len(set(number)) == 3]\n candidates = [number for number in unique_digits[11] if number[0] == '5']\n\n # We have only covered the 11 case, so we need to include those for the\n # 13 and 17 to the right and 7, 5, 3, 2 to the left (in order)\n for prime in [13, 17]:\n candidates = reduce(\n operator.add,\n [extend_matches(candidate, unique_digits[prime], 'right')\n for candidate in candidates])\n for prime in [7, 5, 3, 2]:\n candidates = reduce(\n operator.add,\n [extend_matches(candidate, unique_digits[prime], 'left')\n for candidate in candidates])\n # We now have all possibilities for d_2 ... 
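# A slower but independent cross-check of the substring-divisibility
# construction above: brute force over all permutations of 0-9 with itertools
# and test the seven conditions directly.  Illustrative only (10! candidates,
# a few seconds of work); it assumes nothing beyond the problem statement.
from itertools import permutations


def substring_divisible(digits):
    primes = [2, 3, 5, 7, 11, 13, 17]
    for i, p in enumerate(primes):
        if int(''.join(digits[i + 1:i + 4])) % p != 0:
            return False
    return True


def brute_force_total():
    return sum(int(''.join(perm))
               for perm in permutations('0123456789')
               if substring_divisible(perm))


if __name__ == '__main__':
    print(substring_divisible(tuple('1406357289')))  # True, per the example above
    # print(brute_force_total())  # should match the sum computed above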
d_10, from which we can\n # generate d_1\n candidates = [extend_to_remaining_digit(candidate)\n for candidate in candidates]\n return sum(candidates)\n\nif __name__ == '__main__':\n print euler_timer(43)(main)(verbose=True)\n", "id": "1334637", "language": "Python", "matching_score": 1.3683756589889526, "max_stars_count": 7, "path": "python/complete/no043.py" }, { "content": "#!/usr/bin/env python\n\n# 1 --> (2,3,4,5,6,7)\n\n# [1] 1\n# [2,...,7] 6\n# [8,...,19] 12\n# [20,...,37] 18\n# [38,...,61] 24\n\n# f(k) = 3k^2 - 3k + 1\n# f(k) = elements before layer k if k > 0\n# Layer 0\n# 1 -- (1,1) -- (2,1),(2,2),(2,3),(2,4),(2,5),(2,6)\n\n# Layer 1\n# 2 -- (2,1) -- (1,1), (2,2),(2,6), (3,1),(3,2),(3,12) C\n# 3 -- (2,2) -- (1,1), (2,1),(2,3), (3,2),(3,3),(3,4) C\n# 4 -- (2,3) -- (1,1), (2,2),(2,4), (3,4),(3,5),(3,6) C\n# 5 -- (2,4) -- (1,1), (2,3),(2,5), (3,6),(3,7),(3,8) C\n# 6 -- (2,5) -- (1,1), (2,4),(2,6), (3,8),(3,9),(3,10) C\n# 7 -- (2,6) -- (1,1), (2,5),(2,1), (3,10),(3,11),(3,12) C\n\n# Layer 2\n# 8 -- (3,1) -- (2,1), (3,2),(3,12),(4,1),(4,2),(4,18) C\n# 9 -- (3,2) -- (2,1),(2,2),(3,1),(3,3), (4,2),(4,3)\n# 10 -- (3,3) -- (2,2), (3,2),(3,4), (4,3),(4,4),(4,5) C\n# 11 -- (3,4) -- (2,2),(2,3),(3,3),(3,5), (4,5),(4,6)\n# 12 -- (3,5) -- (2,3), (3,4),(3,6), (4,6),(4,7),(4,8) C\n# 13 -- (3,6) -- (2,3),(2,4)\n# 14 -- (3,7) -- (2,4)\n# 15 -- (3,8) -- (2,4),(2,5)\n# 16 -- (3,9) -- (2,5)\n# 17 -- (3,10) -- (2,5),(2,6)\n# 18 -- (3,11) -- (2,6)\n# 19 -- (3,12) -- (2,6),(2,1)\n\n# 20 -- (4,1) -- (3,)(4,)(5,)\n# 21 -- (4,2) --(3,1)(3,2)\n# 22 -- (4,3) -- (3,2)(3,3)\n# 22 -- (4,4) --\n\n# (n, k) is corner if k % (n - 1) == 1\n# A corner is adjacent to 1 block of lower class, 2 of same, and 3 of higher\n# the 2 of same will always be (n, k - 1 *wrap*), (n, k + 1 *wrap*)\n# (n,1) will always be (n-1,1),(n,0),(n,2),(n+1,0),(n+1,1),(n+1,2)\n# Both the n-1 and n+1 grouping will start where the previous one left off\n\n# Only the corners and the final non-corner have a chance at 3 primes\n# This is because if we are not either, then they are next to 2 consec. #'s,\n# which give a diff. 
of 1, the other two pairs will give differences that\n# differ by one, so at most 1 of each can be prime\n\n##############################\n# Case1, k neq 1, corner\n##############################\n# The corner (n, k) is adjacent to\n# (n-1, (k-1)/(n-1)*(n-2) + 1), (n,k-1), (n,k+1)\n# -->don't matter if not end piece\n# (n+1, (k-1)/(n-1)*n), (n+1, (k-1)/(n-1)*n + 1), (n+1, (k-1)/(n-1)*n + 2),\n# 3*(n - 1)*(n - 2) + 1 + k vs.\n# 3*(n - 2)*(n - 3) + 1 + (k - 1)/(n - 1)*(n - 2) + 1,\n# 3*(n - 1)*(n - 2) + k,3*(n - 1)*(n - 2) + 2 + k,\n# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n, 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 1,\n# 3*n*(n - 1) + 1 + (k - 1)/(n - 1)*n + 2\n\n# Diffs\n# 6*(n - 2) + (k - 1)/(n - 1),\n# 1,1,\n# 6*(n - 1) + (k - 1)/(n - 1) - 1,\n# 6*(n - 1) + (k - 1)/(n - 1),\n# 6*(n - 1) + (k - 1)/(n - 1) + 1,\n# Only way it can be 3 is if\n# c1=6*(n - 2) + (k - 1)/(n - 1),\n# c2=6*(n - 1) + (k - 1)/(n - 1) - 1,\n# c3=6*(n - 1) + (k - 1)/(n - 1) + 1,\n# But if n > 2, c1 prime implies (k-1)/(n-1) == 1,5 mod 6\n# implies c2 == 0,4 mod 6, c3 == 0,2 mod 6, so it is never possible\n# for n > 2\n# For n = 1, 1 works\n# For n = 2, of 3,4,5,6,7 none work\n\n##############################\n# Case2, k = 1\n##############################\n# The corner (n, 1) is adjacent to\n# (n-1, 1), (n,6*(n-1)), (n,2)--> don't matter if not end piece,\n# (n+1, 6*n), (n+1, 1), (n+1, 2),\n# 3*(n - 1)*(n - 2) + 2 vs.\n# 3*(n - 2)*(n - 3) + 2,\n# 3*(n - 1)*(n - 2) + 1 + 6*(n - 1),3*(n - 1)*(n - 2) + 3,\n# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 2,\n# 3*n*(n - 1) + 3\n\n# Diffs\n# 6*(n - 2),\n# 6*(n - 1) - 1,1\n# 6*(2*n - 1) - 1, 6*(n - 1),\n# 6*(n - 1) + 1\n\n# c1=6*(n - 1) - 1\n# c2=6*(2*n - 1) - 1\n# c3=6*(n - 1) + 1\n\n# Start at n = 3 (cases 1 and 2 already done, special cases)\n\n##############################\n# Case3\n##############################\n# The one outlier is the final piece (n, 6*(n - 1))\n# When n > 2, this is not 1 mod n - 1, hence not a corner\n# This is adjacent to (n,1),(n,6*n-7),(n-1,1),(n-1,6*(n-2)),\n# (n+1,6*n),(n+1,6*n-1)\n\n# 3*(n - 1)*(n - 2) + 1 + 6*(n-1) vs.\n# 3*(n - 1)*(n - 2) + 1 + 1, 3*(n - 1)*(n - 2) + 6*(n - 1),\n# 3*(n - 2)*(n - 3) + 1 + 1, 3*(n - 2)*(n - 3) + 1 + 6*(n-2),\n# 3*n*(n - 1) + 1 + 6*n, 3*n*(n - 1) + 6*n\n\n# Diffs\n# 6*(n - 1) - 1, 1,\n# 6*(2*n - 3) - 1, 6*(n - 1),\n# 6*n, 6*n - 1\n\n# c1=6*(n - 1) - 1\n# c2=6*(2*n - 3) - 1\n# c3=6*n - 1\n\n# Start at n = 3 (cases 1 and 2 already done, special cases)\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\n# 3*(n - 1)*(n - 2) + 2:\n# c1=6*(n - 1) - 1 = 6*n - 7\n# c2=6*(2*n - 1) - 1=12*n - 7\n# c3=6*(n - 1) + 1=6*n - 5\n\n# 3*(n - 1)*(n - 2) + 1 + 6*(n-1):\n# c1=6*(n - 1) - 1=6*n - 7\n# c2=6*(2*n - 3) - 1=12*n - 19\n# c3=6*n - 1=6*n - 1\n\n# in the first two layers only 1 and 2 do as we wish\n# from there, first = 8, last = 19 and we can increment\n# first by 6*(layer - 1) and last by 6*layer\n\n# The first corner will be FC(layer) = 3*(layer - 1)*(layer - 2) + 2\n# it only has PD = 3 if\n# (6*layer - 7), (6*layer - 5) and (12*layer - 7) are prime\n\n# The last corner will be\n# LC(layer) = 3*(layer - 1)*(layer - 2) + 1 + 6*(layer - 1)\n# it only has PD = 3 if\n# (6*layer - 7), (6*layer - 1) and (12*layer - 19) are prime\n\n# Instead of carrying out costly multiplications, we can increment\n# these by 6 and 12 respectively, similarly\n# FC(L + 1) - FC(L) = 6*(L - 1)\n# LC(L + 1) - LC(L) = 6*L\n# So we can increment these as well\n\ndef main(verbose=False):\n TOTAL = 2000\n MAX_n = 10 ** 6\n PRIMES 
= sieve(MAX_n)\n # Constant, rather than linear lookup\n prime_bools = [False] * (MAX_n + 1)\n for prime in PRIMES:\n prime_bools[prime] = True\n\n count = 2\n current = 2\n\n layer = 3\n first_corner = 8 # Value of first corner in layer\n last_corner = 19 # Value of last corner in layer\n six_shared = 11 # prime candidate shared by both corners,\n # with a difference of 6\n six_first = 13 # prime candidate for first corner, diff 6\n six_last = 17 # prime candidate for last corner, diff 6\n twelve_first = 29 # prime candidate for first corner, diff 12\n twelve_last = 17 # prime candidate for last corner, diff 12\n while count < TOTAL:\n if twelve_first > MAX_n:\n raise Exception(\"Primes not large enough\")\n if prime_bools[six_shared]:\n if prime_bools[six_first] and prime_bools[twelve_first]:\n current = first_corner\n count += 1\n if count < TOTAL:\n if prime_bools[six_last] and prime_bools[twelve_last]:\n current = last_corner\n count += 1\n\n six_shared, six_last = six_last, six_last + 6\n six_first += 6\n twelve_last, twelve_first = twelve_first, twelve_first + 12\n\n first_corner += 6 * (layer - 1)\n last_corner += 6 * layer\n\n layer += 1\n\n return current\n\nif __name__ == '__main__':\n print euler_timer(128)(main)(verbose=True)\n", "id": "12382008", "language": "Python", "matching_score": 2.4450995922088623, "max_stars_count": 7, "path": "python/complete/no128.py" }, { "content": "#!/usr/bin/env python\n\n# We have (n**2)*(n + p) = m**3, we first show p does not divide n or m\n# If p | m, then n != 0 mod p, implies LHS != 0, RAA ==> p | n\n# But p | n means n = kp, (k**2)*(p**2)*p*(k + 1) = m**3\n# hence k**3 + k**2 = (m/p)**3, but\n# k**3 < k**3 + k**2 < k**3 + 3*(k**2) + 3*k + 1 = (k + 1)**3\n# so this is impossible\n\n# With this, let a prime q divide m**3 (hence m)\n# We know q | n**2 or q | (n + p) or both since Z_q is a domain\n# But q | n**2 ==> n == 0 mod q ==> n + p == p != 0 mod q;\n# since by the above q | m, means q != p\n# Thus all factors of q must divide n**2. Since q | m**3 and q\n# prime we must have q**(3*k) | m**3 for some value of k,\n# forcing q**(3*k) | n**2.\n# Similarly, q | n + p ==> n == -p != 0 mod q, and the same argument\n# implies q**(3*k) | n + p.\n# This n + p must be composed of cubic prime powers dividing m**3,\n# and similarly for n**2. Since (2, 3) = 1, this forces n to be a cube\n# and makes p = (n + p) - n a difference of cubes\n\n# Since p = r**3 - s**3 = (r - s)*(r**2 + r*s + s**2)\n# p | r - s, implies r - s = 1 or p, but r - s = p, clearly won't work\n# hence r = s + 1\n# Given a problem max M, we just go up to the biggest L such that\n# (L + 1)**3 - L**3 <= M, forcing 6*L + 3 <= sqrt(12*M - 3)\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n problem_max = 10 ** 6\n count = 0\n PRIMES = sieve(problem_max)\n max_L = int(round((sqrt(12 * problem_max - 3) - 3) / 6))\n for L in xrange(1, max_L + 1):\n difference = (L + 1) ** 3 - L ** 3\n if difference in PRIMES:\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(131)(main)(verbose=True)\n", "id": "12348747", "language": "Python", "matching_score": 1.3279412984848022, "max_stars_count": 7, "path": "python/complete/no131.py" }, { "content": "#!/usr/bin/env python\n\n# The cube, 41063625 (345**3), can be permuted to produce two other cubes:\n# 56623104 (384**3) and 66430125 (405**3). 
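# A short illustration of the argument above for problem 131: every valid p
# must be a difference of consecutive cubes, p = (L + 1)**3 - L**3
# = 3*L**2 + 3*L + 1, so the candidates are exactly those values of that form
# which are prime.  Listing the first few with plain trial division:
def _is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


if __name__ == '__main__':
    cube_gap_primes = [3 * L * L + 3 * L + 1 for L in range(1, 10)
                       if _is_prime(3 * L * L + 3 * L + 1)]
    print(cube_gap_primes)  # [7, 19, 37, 61, 127, 271]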
In fact, 41063625 is the smallest\n# cube which has exactly three permutations of its digits which are also cube.\n\n# Find the smallest cube for which exactly five permutations\n# of its digits are cube.\n\nfrom python.decorators import euler_timer\n\n\ndef all_cubes(digits):\n # 10**(d-1) <= X**3 < 10**d\n cube_10 = 10 ** (1 / 3.0)\n M = int(cube_10 ** digits)\n if digits % 3 == 0:\n M = M - 1\n m = cube_10 ** (digits - 1)\n if abs(int(m) - m) < 0.01:\n m = int(m)\n else:\n m = int(m) + 1\n return [x ** 3 for x in range(m, M + 1)]\n\n\ndef has_k_perms(digits, k, cubes):\n sorted_cubes = {}\n for cube in cubes:\n sorted_digs = ''.join(sorted(str(cube)))\n # sets value to [] if not set, returns value at key\n sorted_cubes.setdefault(sorted_digs, []).append(cube)\n\n possible_matches = [value for value in sorted_cubes.values()\n if len(value) == 5]\n\n possible_matches.sort(key=lambda list_: min(list_))\n if possible_matches:\n return min(possible_matches[0])\n else:\n return -1\n\n\ndef main(verbose=False):\n digits = len(str(41063625))\n cubes = all_cubes(digits)\n while has_k_perms(digits, 5, cubes) == -1:\n digits += 1\n cubes = all_cubes(digits)\n return has_k_perms(digits, 5, cubes)\n\nif __name__ == '__main__':\n print euler_timer(62)(main)(verbose=True)\n", "id": "11255074", "language": "Python", "matching_score": 1.5073820352554321, "max_stars_count": 7, "path": "python/complete/no062.py" }, { "content": "#!/usr/bin/env python\n\n# The number, 197, is called a circular prime because all\n# rotations of the digits: 197, 971, and 719, are themselves prime.\n\n# How many circular primes are there below one million?\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef contains_only_digits(n, digits):\n n_digits = [int(dig) for dig in str(n)]\n return set(n_digits) <= set(digits)\n\n\ndef all_circular_perms(list_):\n n = len(list_)\n result = []\n for lead in range(n):\n indices = [index % n for index in range(lead, lead + n)]\n result.append([list_[index] for index in indices])\n return result\n\n\ndef all_circular_perms_int(n):\n digs = [dig for dig in str(n)]\n return [int(''.join(perm)) for perm in all_circular_perms(digs)]\n\n\ndef all_circular_perm_in(prime, primes):\n perms = all_circular_perms_int(prime)\n return set(perms) <= set(primes)\n\n\ndef all_circular(n):\n # the number of digits limits the size of all permutations\n digs = len(str(n))\n possible_primes = ([2, 5] +\n [prime for prime in sieve(10 ** digs - 1)\n if contains_only_digits(prime, [1, 3, 7, 9])])\n return [prime for prime in possible_primes if prime <= n\n and all_circular_perm_in(prime, possible_primes)]\n\n\ndef main(verbose=False):\n return len(all_circular(10 ** 6 - 1))\n\nif __name__ == '__main__':\n print euler_timer(35)(main)(verbose=True)\n", "id": "8673269", "language": "Python", "matching_score": 1.9599757194519043, "max_stars_count": 7, "path": "python/complete/no035.py" }, { "content": "#!/usr/bin/env python\n\n# The lexicographic permutations of 0, 1 and 2 are:\n# 012 021 102 120 201 210\n# What is the millionth lexicographic permutation of the\n# digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef lex(list_, perm):\n \"\"\"\n Returns the perm-th permutation of the list in order\n\n The ordering can be thought of like so:\n Consider the indices representing each step, they will make\n up all permutations of 0, 1, ..., len(list_) - 1. 
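# The grouping trick above (keying each cube by its sorted digit string) can
# be checked against the quoted example: 345**3, 384**3 and 405**3 all share
# one digit multiset.  A compact, stand-alone illustration:
def digit_key(n):
    return ''.join(sorted(str(n)))


if __name__ == '__main__':
    example = [345 ** 3, 384 ** 3, 405 ** 3]
    print(example)                                       # [41063625, 56623104, 66430125]
    print(len(set(digit_key(c) for c in example)) == 1)  # True: a single shared key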
So we can\n order them based on the value of this number in base len(list_)\n \"\"\"\n if len(list_) < 2:\n return list_[:]\n\n index = perm / factorial(len(list_) - 1) # int. division intended\n remaining = perm % factorial(len(list_) - 1)\n return [list_[index]] + lex(list_[:index] + list_[index + 1:], remaining)\n\n\ndef main(verbose=False):\n list_ = range(10)\n perm = 10 ** 6 - 1 # Our indexing begins at 0\n return \"\".join(str(dig) for dig in lex(list_, perm))\n\nif __name__ == '__main__':\n print euler_timer(24)(main)(verbose=True)\n", "id": "6937073", "language": "Python", "matching_score": 1.9490249156951904, "max_stars_count": 7, "path": "python/complete/no024.py" }, { "content": "#!/usr/bin/env python\n\n# What is the sum of the digits of the number 2^(1000)?\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n return sum(int(dig) for dig in str(2 ** 1000))\n\nif __name__ == '__main__':\n print euler_timer(16)(main)(verbose=True)\n", "id": "12589368", "language": "Python", "matching_score": 0.6716967225074768, "max_stars_count": 7, "path": "python/complete/no016.py" }, { "content": "#!/usr/bin/env python\n\n# Find the value of n, 1 < n < 10**7, for which PHI(n) is a permutation of n\n# and the ratio n/PHI(n) produces a minimum.\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef same_digits(n, m):\n dig_n = [dig for dig in str(n)]\n dig_m = [dig for dig in str(m)]\n return (sorted(dig_n) == sorted(dig_m))\n\n\ndef matches(problem_max):\n PRIMES = sieve(problem_max)\n phi_list = range(problem_max + 1) # ignore zero index\n for prime in PRIMES:\n phi_list[prime::prime] = [(val / prime) * (prime - 1) for val in\n phi_list[prime::prime]]\n cands = [(val, phi_list[val]) for val in range(2, problem_max + 1)\n if same_digits(val, phi_list[val])]\n cands.sort(key=lambda cand: (cand[0] * 1.0) / cand[1])\n return cands[0][0]\n\n\ndef main(verbose=False):\n problem_max = 10 ** 7\n return matches(problem_max)\n\nif __name__ == '__main__':\n print euler_timer(70)(main)(verbose=True)\n", "id": "582693", "language": "Python", "matching_score": 3.0124077796936035, "max_stars_count": 7, "path": "python/too_slow/no070.py" }, { "content": "#!/usr/bin/env python\n\n# Find the value of n <= 1,000,000 for which n/PHI(n) is a maximum.\n# n/PHI(n) = prod_(p | n) p/(p-1)\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n problem_max = 10 ** 6\n PRIMES = sieve(problem_max)\n # ignore zero index\n ratios = [(1, index) for index in range(problem_max + 1)]\n for prime in PRIMES:\n ratios[prime::prime] = [((ratio * prime * 1.0) / (prime - 1), index)\n for ratio, index in ratios[prime::prime]]\n ratios.sort(key=lambda pair: pair[0], reverse=True)\n return ratios[0][1]\n\nif __name__ == '__main__':\n print euler_timer(69)(main)(verbose=True)\n", "id": "4177225", "language": "Python", "matching_score": 0.76516193151474, "max_stars_count": 7, "path": "python/complete/no069.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import prime_divides_repunit_power10\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n PRIMES = sieve(10 ** 6)\n prime_index = 3 # p0=2, p1=3, and p2=5 are false positives\n matches = []\n while len(matches) < 40:\n prime = PRIMES[prime_index]\n if prime_divides_repunit_power10(prime, 9):\n matches.append(prime)\n prime_index += 1\n\n return sum(matches)\n\nif __name__ == '__main__':\n print 
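# For problem 69 above there is also a closed-form way to see the answer:
# n/PHI(n) = prod p/(p - 1) over the distinct primes dividing n, so the ratio
# grows only by adding new (small) prime factors, and the maximiser below the
# limit is the largest primorial that fits.  A small sketch of that
# observation, meant as a cross-check of the sieve computation, not a proof:
def largest_primorial_below(limit):
    small_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]  # plenty for 10**6
    product = 1
    for p in small_primes:
        if product * p > limit:
            return product
        product *= p
    raise ValueError("prime list too short for this limit")


if __name__ == '__main__':
    print(largest_primorial_below(10 ** 6))  # 510510 = 2*3*5*7*11*13*17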
euler_timer(132)(main)(verbose=True)\n", "id": "6145873", "language": "Python", "matching_score": 2.4366438388824463, "max_stars_count": 7, "path": "python/complete/no132.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import prime_divides_repunit_power10\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n PRIMES = sieve(10 ** 5)\n running_sum = 0\n for prime in PRIMES:\n if not prime_divides_repunit_power10(prime):\n running_sum += prime\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(133)(main)(verbose=True)\n", "id": "8145993", "language": "Python", "matching_score": 1.1844431161880493, "max_stars_count": 7, "path": "python/complete/no133.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import inverse_mod_n\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n PRIMES = sieve(10 ** 6 + 3) # 10**6 + 3 is the final value of p_2\n\n running_sum = 0\n for index in range(2, len(PRIMES) - 1):\n p_1 = PRIMES[index]\n p_2 = PRIMES[index + 1]\n ten_inverse = inverse_mod_n(10, p_2)\n digits = len(str(p_1))\n k = (ten_inverse ** digits) * (p_2 - p_1) % p_2\n running_sum += int('%s%s' % (k, p_1))\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(134)(main)(verbose=True)\n", "id": "5041446", "language": "Python", "matching_score": 2.4259490966796875, "max_stars_count": 7, "path": "python/complete/no134.py" }, { "content": "#!/usr/bin/env python\n\n# Write n = a_k ... a_1 a_0, then\n# f(n) = (all but the last digit of n) + (the last digit of n)*m\n# is equivalent to\n# f(n) = (n - a_0)/10 + a_0*m = (1/10)*(n + (10*m - 1)*a_0)\n# Since \"p > 1 coprime to 10\", we know 10 is invertible in Z_p,\n# So in order for n == 0 mod p <==> f(n) == 0 mod p to hold,\n# we must have 10*m - 1 == 0 mod p (the operation works independent of a_0)\n# Thus m is 10**(-1) mod p\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\nfrom python.functions import inverse_mod_n\n\n\ndef main(verbose=False):\n PRIMES = sieve(10 ** 7)\n running_sum = 0\n for prime in PRIMES:\n if prime not in [2, 5]:\n running_sum += inverse_mod_n(10, prime)\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(274)(main)(verbose=True)\n", "id": "2402731", "language": "Python", "matching_score": 1.0751097202301025, "max_stars_count": 7, "path": "python/complete/no274.py" }, { "content": "#!/usr/bin/env python\n\n# Since M(2**n + 1) = 4**n + 3**n - 2**(n + 1) (empirically),\n# we find sum_{n=0}^{P} M(2**n + 1) is equal to\n# (4**(P + 1) - 1)/3 + (3**(P + 1) - 1)/2 + 2*(2**(P + 1) - 1)\n# = (4*(4**P) - 1)*(3**(-1)) + (3*(3**P) - 1)*(2**(-1)) + 4*(2**P) - 2\n# (This is because (r - 1)*(r**P + ... 
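# no134 and no274 above both rely on python.functions.inverse_mod_n, whose
# implementation is not shown in this excerpt.  Below is an assumed minimal
# equivalent via the extended Euclidean algorithm (valid when gcd(a, n) = 1),
# together with the kind of check used in problem 274: m = 10**(-1) mod p.
def inverse_mod(a, n):
    old_r, r = a % n, n
    old_s, s = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
    if old_r != 1:
        raise ValueError("a is not invertible modulo n")
    return old_s % n


if __name__ == '__main__':
    print(inverse_mod(10, 7))   # 5, since 10 * 5 = 50 == 1 (mod 7)
    print(inverse_mod(10, 13))  # 4, since 10 * 4 = 40 == 1 (mod 13)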
+ r + 1) = r**(P + 1) - 1\n\nfrom python.decorators import euler_timer\nfrom python.functions import inverse_mod_n\n\n\ndef moves(n):\n if n < 3:\n return n\n goal_state = [1] * n\n state = [0, 2] + [1] * (n - 2)\n num_moves = 1\n last_placed = 1\n\n while state != goal_state:\n beans = state[last_placed]\n state[last_placed] = 0\n for bean in range(1, beans + 1):\n next_index = (last_placed + bean) % n\n state[next_index] += 1\n last_placed = (last_placed + beans) % n\n num_moves += 1\n\n return num_moves\n\n\ndef check_formula(n):\n return (moves(2 ** n + 1) == 4 ** n - 3 ** n + 2 ** (n + 1))\n\n\n# Since (a**(n**k))**n = a**(n*(n**k)) = a**(n**(k + 1)),\n# We can easily compute X**(P + 1) = X*(X**P) for P = 10**18\ndef modular_exponentiate(val, exp_base, exp_power, modulus):\n result = val\n for i in xrange(exp_power):\n result = (result ** exp_base) % modulus\n return result\n\n\ndef main(verbose=False):\n for n in range(10):\n if not check_formula(n):\n raise Exception(\"Proposed formula for M(2**k + 1) incorrect.\")\n\n modulus = 7 ** 9\n p_2 = 4 * modular_exponentiate(2, 10, 18, modulus) - 2\n p_3 = 3 * modular_exponentiate(3, 10, 18, modulus) - 1\n p_4 = 4 * modular_exponentiate(4, 10, 18, modulus) - 1\n\n return (p_4 * inverse_mod_n(3, modulus) -\n p_3 * inverse_mod_n(2, modulus) + p_2) % (modulus)\n\nif __name__ == '__main__':\n print euler_timer(335)(main)(verbose=True)\n", "id": "5610408", "language": "Python", "matching_score": 1.6938751935958862, "max_stars_count": 7, "path": "python/complete/no335.py" }, { "content": "#!/usr/bin/env python\n\n# Define the operator {{ as such:\n# a {{ 1 = a,\n# a {{ (k + 1) = a**(a {{ k)\n\n# Find the last 8 digits of 1777 {{ 1855.\n\n# E.G. 2 {{ 4 = 2**(2 {{ 3) = 2**(2**(2 {{ 2))\n# = 2**(2**(2**(2))) = 2**16 = 65536\n\n####################\n# Let Y = ord_X(a) be the order of a mod X, then\n# a**n == a**(n mod Y) mod X\n# if n = a**b, then\n# n mod Y == a**b mod Y = a**(b mod ord_Y(a)) mod Y\n\n# So we have\n# a**(a**a) mod X = f(a, 3, X) = a**f(a, 2, ord_X(a))\n# f(a, k, X) = a**f(a, k - 1, ord_X(a))\n# With boundary conditions\n# f(a, 1, X) = a % X\n# f(a, k, 1) = 1\n# f(a, k, 2) = a % 2 (since a**P == a mod 2 for all P > 1)\n\nfrom python.decorators import euler_timer\nfrom python.functions import order_mod_n\n\n\ndef hyper_exponentiate(a, b, modulus):\n if modulus == 1:\n return 1\n elif modulus == 2:\n return a % 2\n if b == 1:\n return a % modulus\n a_order = order_mod_n(a, modulus)\n return (a ** hyper_exponentiate(a, b - 1, a_order)) % modulus\n\n\ndef main(verbose=False):\n return hyper_exponentiate(1777, 1855, 10 ** 8)\n\nif __name__ == '__main__':\n print euler_timer(188)(main)(verbose=True)\n", "id": "4496123", "language": "Python", "matching_score": 1.2344483137130737, "max_stars_count": 7, "path": "python/complete/no188.py" }, { "content": "#!/usr/bin/env python\n\n# R(k) = (10^k - 1)/9\n# So the smallest k such that R(k) == 0 mod n is related to the order\n# or the element 10 in the multiplicative group of units\n\n# if 3 does not divide n, then 3 (hence 9) is invertible, so\n# R(k) == 0 iff 10^k - 1 == 0 iff 10^k == 1 mod n, giving\n# us A(n) = order of 10 modulo n in those cases\n# if 3 does divide n, then 9 is not invertible, so\n# R(k) == 0 mod n iff 10^k - 1 == 0 mod (9n) giving\n# us A(n) = order of 10 modulo (9n) in those cases\n\nfrom python.decorators import euler_timer\nfrom python.functions import order_mod_n\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n prime_max = 10 ** 5\n PRIMES = 
sieve(prime_max)\n found = []\n n = 2\n while len(found) < 25:\n n += 1\n if n > prime_max:\n prime_max *= 10\n PRIMES = sieve(prime_max)\n\n if n % 2 == 0 or n % 5 == 0 or n in PRIMES:\n continue\n\n basis = n\n if n % 3 == 0:\n basis = 9 * n\n\n if (n - 1) % order_mod_n(10, basis) == 0:\n found.append(n)\n if verbose:\n return ('%s.\\nAs a check, the first five values are calculated to be '\n '%s, as stated.' % (sum(found),\n ', '.join(str(num) for num in found[:5])))\n else:\n return sum(found)\n\nif __name__ == '__main__':\n print euler_timer(130)(main)(verbose=True)\n", "id": "11702918", "language": "Python", "matching_score": 2.7455432415008545, "max_stars_count": 7, "path": "python/complete/no130.py" }, { "content": "#!/usr/bin/env python\n\n# R(k) = (10^k - 1)/9\n# So the smallest k such that R(k) == 0 mod n is the order\n# of the element 10 in the multiplicative group of units\n# when 9 is invertible. When 9 is not invertible (i.e.\n# when 3 | n), we have R(k) = f*n <==> 10**k - 1 == f*(9*n)\n# hence A(n) = order of 10 modulo (9*n)\n\n# To start at 10**6 we need to verify A(n) <= n.\n# Consider R(1) mod n, R(2) mod n, ..., R(n + 1) mod n.\n# By pigeonhole, there exist i < j <= n + 1 with\n# R(i) mod n = R(j) mod n (since only n residues mod n)\n# Thus 0 = R(j) - R(i) mod n = (10**i)*R(j - i) mod n\n# Since (10, n) = 1, 10 is invertible, hence\n# (10**i)*R(j - i) mod n == 0 <==> R(j - i) mod n\n# But j - i <= n + 1 - i <= n since i >= 1, so\n# We are guaranteed there exists some k <= n with\n# R(k) <= n\n\nfrom fractions import gcd\n\nfrom python.decorators import euler_timer\nfrom python.functions import order_mod_n\n\n\ndef main(verbose=False):\n max_a = 0\n n = 10 ** 6\n while max_a <= 10 ** 6:\n n += 1\n if gcd(10, n) == 1:\n basis = n\n if n % 3 == 0:\n basis = 9 * n\n curr_order = order_mod_n(10, basis)\n if curr_order > max_a:\n max_a = curr_order\n return n\n\nif __name__ == '__main__':\n print euler_timer(129)(main)(verbose=True)\n", "id": "12344564", "language": "Python", "matching_score": 1.1667494773864746, "max_stars_count": 7, "path": "python/complete/no129.py" }, { "content": "#!/usr/bin/env python\n\n# Triangular: T_n = n(n+1)/2\n# Pentagonal: P_n = n(3n-1)/2\n# Hexagonal: H_n = n(2n-1)\n\n# T_285 = P_165 = H_143\n# Find the next such Triangular number.\n\n# T and H: a^2 + a = 4c^2 - 2c\n# 4a^2 + 4a + 1 = 16c^2 - 8c + 1\n# (2a + 1)^2 = (4c - 1)^2\n# 2a + 1 = 4c - 1\n# a = 2c - 1\n\n# T and P: a^2 + a = 3b^2 - b\n# 4a^2 + 4a + 1 = 12b^2 - 4b + 1\n# 3(2a + 1)^2 = 36b^2 - 12b + 1 + 2 = (6b - 1)^2 + 2\n# Solve 3x^2 - y^2 = 2\n# Required: x = 2a + 1 = 4c - 1, y = 6b - 1 (modular residues)\n\n# Base solution (x,y) = (1,1), if we view the river of the form\n# f((x,y)) = 3x^2 - y^2 with u_0 = (1,0) and v_0 = (0,1) we have\n# f(u_0) = 3, f(v_0) = -1 we have the general transformation along the\n# river u_{n+1} = 2u_n + 3v_n, v_{n+1} = u_n + 2v_n\n# which gives u_{n+2} - 2u_{n+1} - 2(u_{n+1} - 2u_n)\n# = 3v_{n+1} - 2(3v_n) = 3u_n which implies\n# u_{n+2} - 4u_{n+1} + u_n = 0 (and similarly for v_n)\n# We know also that since u + v gives the solutions to 3x^2 - y^2 = 2\n# that the solutions we want also follow this recurrence.\n# Let s_0 = (1,1) and s_1 = (3,5)\n# We seek solutions s_n such that s_n(x) = 3 mod 4 and s_n(y) = 5 mod 6.\n\n# s_n(x) Modulo 4:\n# f{n+2} = 4f{n+1} - f{n} = -f{n} mod 4\n# 1, 3, 3, 1, 1, 3, 3, 1, 1, 3, 3, ...\n# indices == 1 and 2 mod 4 give s_n(x) == 3\n\n# s_n(y) Modulo 6:\n# f{n+2} = 4f{n+1} - f{n}\n# (1, 5,) (1, 5,) (1, 5,) (1, 5,) (1, 5,) ...\n# 
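# Both problems above work with A(n), the least k such that the repunit
# R(k) = (10**k - 1)/9 is divisible by n.  A(n) can also be computed directly
# by building the repunit remainders mod n one digit at a time; this tiny
# stand-alone version only illustrates (and spot-checks) the order-of-10
# reasoning used above.
def repunit_order(n):
    if n % 2 == 0 or n % 5 == 0:
        raise ValueError("A(n) is only defined for gcd(n, 10) = 1")
    remainder, k = 1 % n, 1
    while remainder != 0:
        remainder = (10 * remainder + 1) % n
        k += 1
    return k


if __name__ == '__main__':
    print(repunit_order(7))   # 6: R(6) = 111111 = 7 * 15873
    print(repunit_order(41))  # 5: R(5) = 11111 = 41 * 271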
indices == 1 mod 2\n\n# Combining we must have n == 1 mod 4\n\n# The solution we were given is a = 285, x = 2a + 1 = 571 which gives y = 989\n# and is at index 5\n\n# The next solution happens when n = 9\n# 2a + 1 = x_9\n# 8T_a + 1 = 4a^2 + 4a + 1 = x_9^2\n# T_a = (x_9^2 - 1)/8\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n a, b = 1, 3\n for i in range(8):\n a, b = b, 4 * b - a\n # Now b = x_9\n return (b ** 2 - 1) / 8\n\nif __name__ == '__main__':\n print euler_timer(45)(main)(verbose=True)\n", "id": "12170568", "language": "Python", "matching_score": 1.0650290250778198, "max_stars_count": 7, "path": "python/complete/no045.py" }, { "content": "#!/usr/bin/env python\n\n# Let p_n be the nth prime: 2, 3, 5, 7, 11, ..., and let r be the\n# remainder when (p_n - 1)**n + (p_n + 1)**n is divided by p_n**2.\n\n# For example, when n = 3, p_3 = 5, and 4**3 + 6**3 = 280 == 5 mod 25.\n\n# The least value of n for which the remainder first exceeds 10**9 is 7037.\n\n# Find the least value of n for which the remainder first exceeds 10**10.\n\n# ALGORITHM\n# As in 120,\n# (a+1)**n + (a-1)**n == na + 1 + (-1)**n(1 - na) mod a**2\n# If n even, (a+1)**n + (a-1)**n == 2 mod a**2\n# If n odd, (a+1)**n + (a-1)**n == 2an mod a**2\n\n# Clearly n even won't contribute to a positive result\n# 2*p_n*n < (p_n)**2 <==> 2*n < p_n which occurs for all n > 4\n# since p_5 = 11 > 2*5 and p must increase by at least 2\n# while n only increases by 1\n\n# In these cases, the remainder is simply 2*p_n*n\n# To get 2*n*p_n > 10**10, we need p_n**2 > n*p_n > 5*(10**9)\n# or p_n > 70710\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n # Since p_n > 70710, to be safe let's multiply by 10\n PRIMES = sieve(707100)\n # The odd primes are the even indices here\n prime_index = 1\n product = 2 * (1 * 2) # p_1 = 2\n while product < 10 ** 10:\n prime_index += 2\n prime = PRIMES[prime_index - 1]\n product = 2 * (prime_index * prime)\n return prime_index\n\nif __name__ == '__main__':\n print euler_timer(123)(main)(verbose=True)\n", "id": "1930917", "language": "Python", "matching_score": 1.9057202339172363, "max_stars_count": 7, "path": "python/complete/no123.py" }, { "content": "#!/usr/bin/env python\n\n# It is possible to write ten as the sum of primes in\n# exactly five different ways:\n\n# 7 + 3\n# 5 + 5\n# 5 + 3 + 2\n# 3 + 3 + 2 + 2\n# 2 + 2 + 2 + 2 + 2\n\n# What is the first value which can be written as the sum of\n# primes in over five thousand different ways?\n\n#################################\n# Let pp(k, n) represent the number of partitions of n\n# using only primes at least as large as p_k (the kth prime)\n\n# Since the primes are either all greater than p_k or one\n# of them is equal we have\n# pp(k, n) = pp(k+1, n) + pp(k, n - p_k)\n# p_0 = 2, we want to compute pp(0, n) for all n and continue\n# doing so until pp(0, n) > 5000\n\n# Boundary\n# p(k, n) = 0 if p_k > n\n# p(k, n) = 1 if p_k = n\n# pp(k, n) = pp(k+1, n) + pp(k, n - p_k)\n\n# -------------- EXAMPLE TABLE --------------\n# n\\k | 0 | 1 | 2 |\n# -----------------\n# 1 | 0 | 0 | 0 |\n# -----------------\n# 2 | 1 | 0 | 0 |\n# -----------------\n# 3 | 1 | 1 | 0 |\n# -----------------\n# 4 | 1 | 0 | 0 |\n# -----------------\n# 5 | 1 | 1 | 1 |\n# -----------------\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef prime_partitions(n, primes):\n p = {}\n for k in range(1, n + 1):\n p[(k, k)] = 1\n for i in range(k - 1, 0, -1):\n if i > k - 
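# An independent brute-force confirmation of the Pell-style derivation above
# for problem 45: every hexagonal number H_n = n*(2*n - 1) is already
# triangular (H_n = T_{2n-1}), so it is enough to scan hexagonal numbers past
# H_143 = 40755 for the next one that is also pentagonal.  Illustrative only;
# the recurrence above reaches the answer far faster.
def is_pentagonal(x):
    # P_m = m*(3*m - 1)/2  <=>  m = (1 + sqrt(1 + 24*x))/6 is a whole number
    disc = 1 + 24 * x
    root = int(round(disc ** 0.5))
    return root * root == disc and (1 + root) % 6 == 0


def next_tph_after(n_start):
    n = n_start + 1
    while True:
        h = n * (2 * n - 1)
        if is_pentagonal(h):
            return h
        n += 1


if __name__ == '__main__':
    print(is_pentagonal(40755))    # True, the known T_285 = P_165 = H_143
    print(next_tph_after(143))     # should equal (b**2 - 1) / 8 from the code above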
i:\n p[(i, k)] = p[(i + 1, k)]\n else:\n p[(i, k)] = p[(i + 1, k)] + p[(i, k - i)]\n return p[(1, n)]\n\n\ndef main(verbose=False):\n max_prime_val = 10 ** 2\n PRIMES = sieve(max_prime_val)\n\n pp = {(0, 1): 0,\n (1, 1): 0,\n (2, 1): 0,\n (0, 2): 1,\n (1, 2): 0,\n (2, 2): 0}\n curr_val = -1\n n = 2\n curr_prime_index = 1\n while curr_val < 5000:\n n += 1\n if n > PRIMES[curr_prime_index]:\n curr_prime_index += 1\n if curr_prime_index >= len(PRIMES):\n raise ValueError(\"Primes is too small: %s\" % curr_val)\n\n prime_index = curr_prime_index\n # First reduce the prime_index, \"k\" until the prime itself\n # does not exceed n\n while PRIMES[prime_index] > n:\n pp[(prime_index, n)] = 0\n prime_index -= 1\n # If n is a prime number, then after doing so, we know that\n # PRIMES[prime_index] == n, hence pp(p_i, n) = 1 and we\n # can reduce the index further\n if PRIMES[prime_index] == n:\n pp[(prime_index, n)] = 1\n prime_index -= 1\n\n for index in range(prime_index, -1, -1):\n prime_val = PRIMES[index]\n if prime_val > n - prime_val:\n pp[(index, n)] = pp[(index + 1, n)]\n else:\n pp[(index, n)] = (pp[(index + 1, n)] +\n pp[(index, n - prime_val)])\n\n curr_val = pp[(0, n)]\n return n\n\nif __name__ == '__main__':\n print euler_timer(77)(main)(verbose=True)\n", "id": "8144091", "language": "Python", "matching_score": 2.588428497314453, "max_stars_count": 7, "path": "python/complete/no077.py" }, { "content": "#!/usr/bin/env python\n\n# It is possible to write five as a sum in exactly six different ways:\n\n# 4 + 1\n# 3 + 2\n# 3 + 1 + 1\n# 2 + 2 + 1\n# 2 + 1 + 1 + 1\n# 1 + 1 + 1 + 1 + 1\n\n# How many different ways can one hundred be written as a sum of at least\n# two positive integers?\n\n#################################\n# p(k, n) - represents the number of partitions of n using only natural\n# numbers at least as large as k\n\n# p(n) = 1 + sum_{k = 1}^{floor(n/2)} p(k, n - k)\n\n# p(k, n) = 0 if k > n\n# p(k, n) = 1 if k = n\n# p(k, n) = p(k + 1, n) + p(k, n - k) otherwise\n\n# Our final is p(1, n)\n\nfrom python.decorators import euler_timer\n\n\ndef partitions(n):\n from math import sqrt\n p = {}\n for k in range(1, n + 1):\n p[(k, k)] = 1\n for i in range(k - 1, 0, -1):\n if i > k - i:\n p[(i, k)] = p[(i + 1, k)]\n else:\n p[(i, k)] = p[(i + 1, k)] + p[(i, k - i)]\n return p[(1, n)]\n\n\ndef main(verbose=False):\n return partitions(100) - 1\n\nif __name__ == '__main__':\n print euler_timer(76)(main)(verbose=True)\n", "id": "1366131", "language": "Python", "matching_score": 1.8145649433135986, "max_stars_count": 7, "path": "python/complete/no076.py" }, { "content": "#!/usr/bin/env python\n\n# neighbors\n# a/b < c/d\n# need bc - ad = 1\n\n# The converse is also true. If\n# bc - ad = 1\n# for positive integers a,b,c and d with a < b and c < d then a/b and c/d\n# will be neighbours in the Farey sequence of order max(b,d).\n\n# How many elements would be contained in the set of reduced proper\n# fractions for D <= 1,000,000?\n\n#########################################################\n# |F_n| = |F_(n-1)| + PHI(n)\n# |F_0| = 1\n# |F_n| = 1 + sum_{i in 1 to n} PHI(i)\n# sum_{i in 1 to n} PHI(i)\n# sum_{d|n} PHI(d) = n\n\n# Algorithm 1:\n# Kill off all factors of n\n# Find max, kill off all factors\n# Repeat\n\n# MU(n) = 0 if n not square-free\n# else 1 if n is has an even number of prime factors\n# else -1 if n is has an odd number of prime factors\n# 2*sum_{i in 1 to n} PHI(i) = 1 + sum_{i in 1 to n} MU(i) floor(n/i)**2\n\n# Algorithm 2:\n# 1. 
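# A quick sanity check of the partition recurrence used above, via the
# standard coin-style dynamic programme: p(5) = 7 partitions in total, i.e.
# the six ways quoted above plus the trivial partition 5 itself.
def partition_count(n):
    ways = [1] + [0] * n
    for part in range(1, n + 1):
        for total in range(part, n + 1):
            ways[total] += ways[total - part]
    return ways[n]


if __name__ == '__main__':
    print(partition_count(5))        # 7
    print(partition_count(100) - 1)  # ways to write 100 as a sum of at least
                                     # two positive integers, as computed above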
Generate all n <= 10**6 with 1 factors by listing possible subsets of\n# primes\n# 2. Use formula\n\nfrom math import floor\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import mu\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n D = 10 ** 6\n PRIMES = sieve(int(sqrt(D)) + 1)\n # We seek |F_D| = 1 + sum_{i in 1 to D} PHI(i)\n # 2*sum_{i in 1 to D} PHI(i) = 1 + sum_{i in 1 to D} MU(i) floor(D/i)**2\n # 2*|F_D| = 3 + sum_{i in 1 to D} MU(i) floor(D/i)**2\n mu_hash = {1: 1}\n running_sum = D ** 2 # i = 1\n for i in range(2, D + 1):\n running_sum += mu(i, mu_hash, PRIMES) * (int(floor(D * 1.0 / i)) ** 2)\n\n # They don't include 0/1 or 1/1 so we subtract 2\n return ((3 + running_sum) / 2 - 2)\n\nif __name__ == '__main__':\n print euler_timer(72)(main)(verbose=True)\n", "id": "3650737", "language": "Python", "matching_score": 3.759634256362915, "max_stars_count": 7, "path": "python/complete/no072.py" }, { "content": "#!/usr/bin/env python\n\n# neighbors\n# a/b < c/d\n# need bc - ad = 1\n\n# The converse is also true. If\n# bc - ad = 1\n# for positive integers a,b,c and d with a < b and c < d then a/b and c/d\n# will be neighbours in the Farey sequence of order max(b,d).\n\n# By listing the set of reduced proper fractions for D <= 1,000,000 in\n# ascending order of size, find the numerator of the fraction immediately\n# to the left of 3/7.\n\n#########################################################\n# c = 3, d = 7, 3b - 7a = 1\n# 0 + 2a == 1 mod 3, a == 2 mod 3\n# a = 3k + 2, b = 7k + 5\n# a < b <==> 3k + 2 < 7k + 5, -3 < 4k, -0.75 < k, k >= 0\n# a/b < 3/7 <==> 7a < 3b <==> 0 < 3b - 7a <==> ALWAYS\n# gcd(a,b) = (3k+2,7k+5) = (3k+2,k+1) = (k,k+1) = 1\n\n# b <= D\n# 7k + 5 <= D\n# k <= floor((D-5)/7)\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n D = 10 ** 6\n return 3 * int((D - 5) / 7.0) + 2\n\nif __name__ == '__main__':\n print euler_timer(71)(main)(verbose=True)\n", "id": "3835626", "language": "Python", "matching_score": 0.6316930055618286, "max_stars_count": 7, "path": "python/complete/no071.py" }, { "content": "#!/usr/bin/env python\n\n# M(N) = max_k (N/k)^k\n# This is a discrete function, but its derivative has value\n# f(k) = (N/k)^k, ln(f) = k ln(N) - k ln(k)\n# f'/f = (ln(f))' = ln(N) - (ln(k) + k(1/k)) = ln(N/k) - 1\n# f' > 0 iff ln(N/k) > 1 iff N/e > k\n# So f increases towards N/e and similar decreases away from N/e\n# Hence the max occurs either at the ceiling or floor of N/e\n\n# Let k_star(N) be the function such that\n# M(N) = P_max = f(k_star(N))\n\n# Since the numerator of P_max is k_star**k_star, M(N) is only\n# terminating if k_star is completely composed of 2's and 5's\n# With this in mind, we defind k_reduced(N) to be the quotient\n# that remains after all 2's and 5's are divided from k_star(N)\n# Finally, D(N) = -N iff M(N) is terminating iff k_reduced(N) = 1\n\nfrom fractions import gcd\nfrom math import ceil\nfrom math import e as EULER_e\nfrom math import floor\n\nfrom python.decorators import euler_timer\nfrom python.functions import robust_divide\n\n\ndef k_star(N):\n k_1 = int(floor(N / EULER_e))\n k_2 = int(ceil(N / EULER_e))\n if k_1 == k_2:\n return k_1\n elif k_2 - k_1 != 1:\n raise ValueError(\"Bad info with %s\" % N)\n return k_1 if (k_1 + 1) ** (k_1 + 1) > N * (k_1 ** k_1) else k_2\n\n\ndef k_reduced(N):\n k_st = k_star(N)\n k_induced = robust_divide(robust_divide(k_st, 5), 2)\n shared_factors = gcd(k_induced, N)\n return k_induced / shared_factors\n\n\ndef D(N):\n 
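# A small stand-alone cross-check of the Farey-sequence counting above: for
# d <= 8, summing Euler's totient over d = 2..8 and counting coprime pairs
# directly both give 21 reduced proper fractions.  Illustrative values only.
def _gcd(a, b):
    while b:
        a, b = b, a % b
    return a


def farey_count_brute(max_d):
    return sum(1 for d in range(2, max_d + 1)
               for n in range(1, d)
               if _gcd(n, d) == 1)


def farey_count_phi(max_d):
    phi = list(range(max_d + 1))
    for p in range(2, max_d + 1):
        if phi[p] == p:  # untouched so far, hence prime
            for multiple in range(p, max_d + 1, p):
                phi[multiple] -= phi[multiple] // p
    return sum(phi[2:])


if __name__ == '__main__':
    print(farey_count_brute(8), farey_count_phi(8))  # 21 21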
sign = 1 if k_reduced(N) > 1 else -1\n return sign * N\n\n\ndef main(verbose=False):\n MAX_N = 10 ** 4\n return sum(D(N) for N in range(5, MAX_N + 1))\n\nif __name__ == '__main__':\n print euler_timer(183)(main)(verbose=True)\n", "id": "8202538", "language": "Python", "matching_score": 1.8573535680770874, "max_stars_count": 7, "path": "python/complete/no183.py" }, { "content": "#!/usr/bin/env python\n\n# Consider the fraction, n/d, where n and d are positive integers.\n# If n < d and HCF(n,d)=1, it is called a reduced proper fraction.\n\n# If we list the set of reduced proper fractions for d <= 8 in\n# ascending order of size, we get:\n# 1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2,\n# 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8\n\n# It can be seen that there are 3 fractions between 1/3 and 1/2.\n\n# How many fractions lie between 1/3 and 1/2 in the sorted set of\n# reduced proper fractions for d <= 12,000?\n\n# ALGO\n# We simply seek 1/3 <= n/d <= 1/2 with (n, d) = 1\n# This is equivalent to d/3 <= n <= d/2 or\n# ceil(d/3) <= n <= floor(d/2)\n# Note we'll never have conflicts since for d > 3,\n# (n, d) = 1 implies n/d = 1/2 or n/d = 1/3 is\n# impossible\n\nfrom fractions import gcd\nfrom math import ceil\nfrom math import floor\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n MAX_d = 12000\n\n count = 0\n for d in range(4, MAX_d + 1):\n low = int(ceil(d / 3.0))\n high = int(floor(d / 2.0))\n for n in range(low, high + 1):\n if gcd(d, n) == 1:\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(73)(main)(verbose=True)\n", "id": "4315722", "language": "Python", "matching_score": 0.4015204608440399, "max_stars_count": 7, "path": "python/complete/no073.py" }, { "content": "# Define a juncture J to be the four cells straddling a river\n# We write J = (B, P, N, F) where B is the cell back,\n# P is the cell \"above\" the middle and \"N\" the cell below\n# (we often think of them as the positive/negative cells since\n# the serve that purpose along the river)\n# and F is the forward cell\n\n# A cell has ( (x, y), val ) where (x,y) is the point on the\n# lattice and val is the value it takes on the quadratic form\n\n# RIVER:\n# While traversing a river, if the value of F is positive,\n# we turn right, if it is negative we turn left\n# To turn left is: (B, P, N, F) -> (N, P, F, P + F)\n# To turn right is: (B, P, N, F) -> (P, F, N, N + F)\n# It can be thought of like this: the forward value must lie on\n# the river (if a river exists and we are already on it), hence\n# it must become the next positive cell in the juncture or the\n# next negative cell\n\n# POSITIVE ROOT:\n# Define a positive root to be a point on the river where two\n# positive valued cells meet, from which going forward all\n# values must increase (since on the river, the value behing\n# the two positives must be negative)\n\nimport operator\nfrom math import sqrt\n\nfrom python.functions import factors\nfrom python.functions import is_power\n\n\ndef plus(cell1, cell2, back_val):\n \"\"\"Moves forward between two cells.\n\n Requires a third cell, though truly only requires the value of that the\n form takes at the cell.\n \"\"\"\n (x1, y1), val1 = cell1\n (x2, y2), val2 = cell2\n # back_val, val1 + val2, val form an arithmetic progression\n val = 2 * (val1 + val2) - back_val\n x = x1 + x2\n y = y1 + y2\n return ((x, y), val)\n\n\ndef next_juncture_on_river(juncture):\n \"\"\"Moves along the river to the next juncture\n\n Turns \"left\" if the forward value if 
negative\n and \"right\" if it is positive\n \"\"\"\n B, P, N, F = juncture\n forward_val = F[1]\n if forward_val < 0:\n # turn left\n NEXT = plus(P, F, N[1])\n return (N, P, F, NEXT)\n elif forward_val > 0:\n # turn right\n NEXT = plus(N, F, P[1])\n return (P, F, N, NEXT)\n else:\n raise Exception(\"No infinite river here, found a lake.\")\n\n\ndef juncture_isom(juncture1, juncture2):\n \"\"\"Takes a juncture and checks if the cell values are all equal\"\"\"\n B1, P1, N1, F1 = juncture1\n B2, P2, N2, F2 = juncture2\n return ((B1[1] == B2[1]) and (P1[1] == P2[1]) and\n (N1[1] == N2[1]) and (F1[1] == F2[1]))\n\n\ndef seek_up_to_val(juncture, max_value):\n \"\"\"Returns all cells sprouting forth from a positive root.\n\n Stops at a cell value of max_value\n\n Takes advantage of fact that all values must increase away\n from the river (on positive side)\n \"\"\"\n B, P, N, F = juncture\n if F[1] > max_value:\n return []\n result = [F]\n\n turn_left = plus(P, F, N[1])\n J_left = (N, F, P, turn_left)\n result.extend(seek_up_to_val(J_left, max_value))\n\n turn_right = plus(N, F, P[1])\n J_right = (P, F, N, turn_right)\n result.extend(seek_up_to_val(J_right, max_value))\n return result\n\n\ndef all_positive_roots(form):\n \"\"\"Takes a quadratic form and gives all \"positive roots\" along river.\n\n Form is the coefficients on x and y\n e.g. if [a,b] = form, f(x,y) = ax**2 + by**2\n \"\"\"\n a, b = form\n B = ((1, -1), a + b)\n P = ((1, 0), a)\n N = ((0, 1), b)\n F = ((1, 1), a + b)\n J_init = (B, P, N, F)\n\n new_positives = []\n J_curr = next_juncture_on_river(J_init)\n # traverse the river until back to the beginning\n while not juncture_isom(J_init, J_curr):\n # we add a new positive if the forward\n # value is positive\n if J_curr[-1][1] > 0:\n new_positives.append(J_curr)\n J_curr = next_juncture_on_river(J_curr)\n\n # For each (B, P, N, F) in new_positives, we want to\n # transform to a root for positive values, which will\n # be (N, P, F, new_cell)\n result = []\n for new_positive in new_positives:\n B, P, N, F = new_positive\n new_cell = plus(P, F, N[1])\n result.append((N, P, F, new_cell))\n return result\n\n\ndef all_values_on_form(form, value):\n \"\"\"Returns all lattice points that produce a quadratic form.\n\n NOTE: The lattice returned will not necessarily be coprime.\n\n Given the recurrence for the form, these values\n can serve to determine *all* solutions for\n the given value due to the repeating nature\n of the infinite river\n \"\"\"\n factor_list = factors(value)\n valid_factors = [factor for factor in factor_list\n if is_power(value / factor, 2)]\n\n roots = all_positive_roots(form)\n found = set()\n for root in roots:\n candidates = seek_up_to_val(root, value)\n to_add = ([candidate for candidate in candidates\n if candidate[1] in valid_factors] +\n [candidate for candidate in root\n if candidate[1] in valid_factors])\n found.update(to_add)\n found = list(found)\n\n # We may get some duplicates from since when we include\n # values from the river, we don't check that they come from\n # a different iteration of the river\n x_mult, y_mult, _ = get_recurrence(form)\n checked = found[:]\n for candidate in found:\n coords, val = candidate\n next_x = sum(operator.mul(*pair) for pair in zip(coords, x_mult))\n next_y = sum(operator.mul(*pair) for pair in zip(coords, y_mult))\n if ((next_x, next_y), val) in found:\n checked.remove(((next_x, next_y), val))\n\n # Finally we must scale up factors to account for\n # the reduction by a square multiple\n result = []\n for cell in 
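# The arithmetic-progression step in plus() above is, in topograph terms, the
# parallelogram law for binary quadratic forms: f(u + v) + f(u - v)
# = 2*(f(u) + f(v)).  The back cell plays the role of u - v, so the forward
# value is 2*(f(u) + f(v)) - f(back).  A quick numerical check of that
# identity for f(x, y) = a*x**2 + b*y**2, the shape of form used here:
def _form(a, b, x, y):
    return a * x ** 2 + b * y ** 2


def parallelogram_holds(a, b, u, v):
    lhs = (_form(a, b, u[0] + v[0], u[1] + v[1]) +
           _form(a, b, u[0] - v[0], u[1] - v[1]))
    rhs = 2 * (_form(a, b, *u) + _form(a, b, *v))
    return lhs == rhs


if __name__ == '__main__':
    cases = [((1, -3), (1, 0), (0, 1)),
             ((5, -1), (2, 3), (1, 1)),
             ((1, -13), (4, 7), (3, -2))]
    print(all(parallelogram_holds(a, b, u, v) for (a, b), u, v in cases))  # True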
checked:\n (x, y), val = cell\n if val < value:\n ratio = int(sqrt(value / val))\n x *= ratio\n y *= ratio\n result.append((x, y))\n\n return result\n\n\ndef get_recurrence(form):\n \"\"\"Gets a recurrence along the river for the base, given a quadratic form.\n\n Input: quadratic form [a,b]\n\n Output: (x_mult, y_mult, relation) where y_mult is the coefficients\n of x and y (respectively) that lead to the recurrence on the\n lattice to the next x value (from a previous (x,y) tuple),\n y_mult is the analogue for the y value and relation is\n the recurrence relation on each coordinate (degree 2 recurrence)\n \"\"\"\n a, b = form\n B = ((1, -1), a + b)\n P = ((1, 0), a)\n N = ((0, 1), b)\n F = ((1, 1), a + b)\n J_init = (B, P, N, F)\n J_curr = next_juncture_on_river(J_init)\n # traverse the river until back to the beginning\n while not juncture_isom(J_init, J_curr):\n J_curr = next_juncture_on_river(J_curr)\n\n # Here (1,0) --> P = (a,b)\n # and (0,1) --> N = (c,d)\n # (x,y)-->x(a,b)+y(c,d)\n # x-->ax+cy\n # y-->bx+dy\n # x_{n+2}-ax_{n+1} - d(x_{n+1}-ax_n)\n # = cy_{n+1} - d(cy_n) = c(bx_n)\n # x_{n+2} = (cb - ad)x_n + (a+d)x_{n+1}\n # Similarly for y (can check if you want)\n a, b = J_curr[1][0]\n c, d = J_curr[2][0]\n return ((a, c), (b, d), (c * b - a * d, a + d))\n\n\ndef start_to_series(initial, multiplier, series='x'):\n \"\"\"Starts a series of values for points on a river.\n\n Input: initial is an initial lattice point; multiplier\n is either x_mult (or y_mult) which will transform a point\n to the next x (or y) value on the river; series is\n either x or y to determine which point is used\n\n Output: the first two values in the series which will determine\n all x (or y values) given the recurrence for the form\n \"\"\"\n next = sum(operator.mul(*pair) for pair in zip(initial, multiplier))\n if series == 'x':\n return [initial[0], next]\n elif series == 'y':\n return [initial[1], next]\n else:\n raise ValueError(\"Unrecoginized series type %s\" % series)\n", "id": "3101628", "language": "Python", "matching_score": 3.029526948928833, "max_stars_count": 7, "path": "python/conway_topograph.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.conway_topograph import next_juncture_on_river\nfrom python.conway_topograph import juncture_isom\n\n\ndef minimal_solution(D):\n B = ((1, -1), 1 - D)\n P = ((1, 0), 1)\n N = ((0, 1), -D)\n F = ((1, 1), 1 - D)\n J_init = (B, P, N, F)\n J_curr = next_juncture_on_river(J_init)\n while not juncture_isom(J_init, J_curr):\n J_curr = next_juncture_on_river(J_curr)\n # since J = (B, P, N, F) the positive will\n # be at J[1]. Since P = ((x, y), val) and\n # We seek only (x,y), we want J[1][0]\n return J_curr[1][0]\n\n\ndef main(verbose=False):\n max_n = 1000\n non_squares = [n for n in range(1, max_n + 1) if n != int(sqrt(n)) ** 2]\n D_x_pair_min_solns = [(D, ) + minimal_solution(D) for D in non_squares]\n D_x_pair_min_solns.sort(key=lambda pair: pair[1])\n D, x, y = D_x_pair_min_solns[-1]\n if verbose:\n return \"%s.\\n%s^2 - %s*%s^2 = 1\" % (D, x, D, y)\n else:\n return D\n\nif __name__ == '__main__':\n print euler_timer(66)(main)(verbose=True)\n", "id": "9959165", "language": "Python", "matching_score": 0.7000411152839661, "max_stars_count": 7, "path": "python/complete/no066.py" }, { "content": "#!/usr/bin/env python\n\n# It is easily proved that no equilateral triangle exists with integral\n# length sides and integral area. 
However, the almost equilateral\n# triangle 5-5-6 has an area of 12 square units.\n\n# We shall define an almost equilateral triangle to be a triangle for\n# which two sides are equal and the third differs by no more than one unit.\n\n# Find the sum of the perimeters of all almost equilateral triangles\n# with integral side lengths and area and whose perimeters do not exceed\n# one billion (1,000,000,000).\n\n# We either have n-n-(n+1) or n-n-(n-1) as our triangle\n# dropping a height h, we see n**2 = h**2 + ((n + sign)/2)**2\n# ==> 3n**2 - 2*sign*n - 1= 4h**2\n# ==> (3*n - sign)**2 - 4 = 9n**2 - 6*sign*n - 3 = 12h**2\n# ==> (3*n - sign)**2 - 3(2h)**2 = 4\n\nfrom python.conway_topograph import all_values_on_form\nfrom python.conway_topograph import get_recurrence\nfrom python.conway_topograph import start_to_series\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef solutions(limit):\n # We seek x_k^2 - 3y_k^2 = 4\n # Where 3*n - sign = x_k\n x_mult, y_mult, relation = get_recurrence([1, -3])\n starting_points = all_values_on_form([1, -3], 4)\n series = [start_to_series(initial, x_mult, 'x')\n for initial in starting_points]\n result = [pair[0] for pair in series\n if pair[0] % 3 != 0 and pair[0] > 0]\n while max(result) < 2 * limit:\n next = [pair[1] for pair in series\n if pair[1] % 3 != 0 and pair[1] > 0]\n result.extend(next)\n series = [recurrence_next(relation, values) for values in series]\n # We seek perimeters n + n + (n + sign) = 3n + sign\n # We currently have 3n - sign, so if we can determine the sign\n # If value == 1 mod 3, then 3n - sign == 1, hence sign = -1\n # and 3n + sign = 3n - 1 = value - 2\n # If value == -1 mod 3, then 3n - sign == -1, hence sign = 1\n # and 3n + sign = 3n + 1 = value + 2\n result = sorted(((value + 2) if value % 3 == 2 else (value - 2))\n for value in result)\n # The first two solutions are 1-1-0 and 1-1-2, which are both degenerate\n return [perimeter for perimeter in result\n if perimeter < limit and perimeter not in (2, 4)]\n\n\ndef main(verbose=False):\n # the first solutions up to a billion are returned in solutions(10**9)\n return sum(solutions(10 ** 9))\n\nif __name__ == '__main__':\n print euler_timer(94)(main)(verbose=True)\n", "id": "3408209", "language": "Python", "matching_score": 4.8545942306518555, "max_stars_count": 7, "path": "python/complete/no094.py" }, { "content": "#!/usr/bin/env python\n\n# Consider isosceles (b, L, L) with height h to b\n# b = 16, L = 17 gives h = 15\n# b = 272, L = 305 gives h = 273\n\n# Find SUM(L) for the twelve smallest isosceles triangles for which\n# h = b +/- 1 and b, L are positive integers.\n\n#############################\n# This is a question of quadratic forms\n# L**2 = h**2 + (b/2)**2\n# If b is odd, then we get a pythagorean triple\n# (2L, 2h, b) which reduces to primitive (X, Y, b')\n# Since it can only reduce by factors of b, we know\n# 1) b' is odd and\n# 2) no even factors were removed from 2L or 2h,\n# forcing X and Y to be even\n# This is a contradiction since no primitive pythagorean\n# triple has odd hypotenuse (can check)\n\n# So set\n# b = 2*B, h = 2*B + sign = 2*B + s\n# B**2 + (2*B + s)**2 = L**2\n# B**2 + (4*B**2 + 4*B*s + 1) = L**2\n# 25*B**2 + s*20*B + 4 + 1 = 5*L**2\n# 5*(L**2) - (5*B + 2*s)**2 = 1\n\nfrom python.conway_topograph import all_values_on_form\nfrom python.conway_topograph import get_recurrence\nfrom python.conway_topograph import start_to_series\nfrom python.decorators import euler_timer\nfrom python.functions import 
recurrence_next\n\n\ndef solutions(limit):\n # We seek 5x_k^2 - y_k^2 = 1\n # Where L = x_k\n x_mult, y_mult, relation = get_recurrence([5, -1])\n starting_points = all_values_on_form([5, -1], 1)\n series = [start_to_series(initial, x_mult, 'x')\n for initial in starting_points]\n result = [pair[0] for pair in series if pair[0] > 1]\n while len(result) < 2 * limit:\n next = [pair[1] for pair in series if pair[1] > 1]\n result.extend(next)\n series = [recurrence_next(relation, values) for values in series]\n return sorted(result)[:limit]\n\n\ndef main(verbose=False):\n # smallest 12 solutions returned in solutions(12)\n return sum(solutions(12))\n\nif __name__ == '__main__':\n print euler_timer(138)(main)(verbose=True)\n", "id": "1905873", "language": "Python", "matching_score": 2.1897122859954834, "max_stars_count": 7, "path": "python/complete/no138.py" }, { "content": "#!/usr/bin/env python\n\n# A_F(x) = xF_1 + x^2F_2 + ...\n# (x + x^2)A_F(x) = x^2F_1 + x^3F_2 + ... + x^3F_1 + x^4F_2 + ...\n# using F_k = F_{k-1} + F_{k-2}, F_1=1, F_2=1\n# (x + x^2)A_F(x) = x^2F_1 + x^3F_3 + x^4F_4 + ..\n# (x + x^2)A_F(x) = x^2F_1 - xF_1 - x^2F_2 + A_F(x)\n# A_F(x) = (-x)/(x^2 + x - 1)\n\n# A_F(x) = n ==> nx^2 + (n+1)x - n = 0\n# For x to be rational, we need to discriminant sqrt(b^2 - 4ac)\n# to be rational\n# D = (n + 1)^2 - 4n(-n) = 5n^2 + 2n + 1 = m^2\n# 25n^2 + 10n + 5 = 5m^2\n# (5n + 1)^2 + 4 = 5m^2\n# 5m^2 - (5n + 1)^2 = 4\n\n# a, b, c, d = 0, 2, 104, 714\n# n_prim4 = [0, 2, 104, 714]\n\nfrom python.conway_topograph import all_values_on_form\nfrom python.conway_topograph import get_recurrence\nfrom python.conway_topograph import start_to_series\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef golden_nuggets(limit):\n # We seek 5x_k^2 - y_k^2 = 4\n # Where 5n + 1 = y_k\n x_mult, y_mult, relation = get_recurrence([5, -1])\n starting_points = all_values_on_form([5, -1], 4)\n series = [start_to_series(initial, y_mult, 'y')\n for initial in starting_points]\n nuggets = [pair[0] for pair in series\n if pair[0] % 5 == 1 and pair[0] > 1]\n while len(nuggets) < 2 * limit:\n next = [pair[1] for pair in series\n if pair[1] % 5 == 1 and pair[1] > 1]\n nuggets.extend(next)\n series = [recurrence_next(relation, values) for values in series]\n return sorted([(value - 1) / 5 for value in nuggets])[:limit]\n\n\ndef main(verbose=False):\n nuggets = golden_nuggets(15)\n if verbose:\n return ('%s.\\nAs a check, the 10th golden nugget is calculated '\n 'to be %s, as stated.' % (nuggets[-1], nuggets[10 - 1]))\n else:\n return nuggets[-1]\n\nif __name__ == '__main__':\n print euler_timer(137)(main)(verbose=True)\n", "id": "927948", "language": "Python", "matching_score": 3.5982401371002197, "max_stars_count": 7, "path": "python/complete/no137.py" }, { "content": "#!/usr/bin/env python\n\n# A_G(x) = xG_1 + x^2G_2 + ...\n# (x + x^2)A_G(x) = x^2G_1 + x^3G_2 + ... 
+ x^3G_1 + x^4G_2 + ...\n# using G_k = G_{k-1} + G_{k-2}, G_1=1, G_2=4\n# (x + x^2)A_G(x) = x^2G_1 + x^3G_3 + x^4G_4 + ..\n# (x + x^2)A_G(x) = x^2G_1 - xG_1 - x^2G_2 + A_G(x)\n# A_G(x) = (-x - 3x^2)/(x^2 + x - 1)\n\n# A_G(x) = n ==> (n+3)x^2 + (n+1)x - n = 0\n# For x to be rational, we need to discriminant sqrt(b^2 - 4ac)\n# to be rational\n# D = (n + 1)^2 - 4(n + 3)(-n) = 5n^2 + 14n + 1 = m^2\n# 25n^2 + 70n + 49 = 5m^2 + 44\n# (5n + 7)^2 - 5m^2 = 44\n\nfrom python.conway_topograph import all_values_on_form\nfrom python.conway_topograph import get_recurrence\nfrom python.conway_topograph import start_to_series\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef golden_nuggets(limit):\n # We seek x_k^2 - 5y_k^2 = 44\n # Where 5n + 7 = x_k\n x_mult, y_mult, relation = get_recurrence([1, -5])\n starting_points = all_values_on_form([1, -5], 44)\n series = [start_to_series(initial, x_mult, 'x')\n for initial in starting_points]\n nuggets = [pair[0] for pair in series\n if pair[0] % 5 == 2 and pair[0] > 7]\n while len(nuggets) < 2 * limit:\n next = [pair[1] for pair in series\n if pair[1] % 5 == 2 and pair[1] > 7]\n nuggets.extend(next)\n series = [recurrence_next(relation, values) for values in series]\n return sorted([(value - 7) / 5 for value in nuggets])[:limit]\n\n\ndef main(verbose=False):\n nuggets = golden_nuggets(30)\n if verbose:\n return ('%s.\\nAs a check, the 20th golden nugget is calculated '\n 'to be %s, as stated.' % (sum(nuggets), nuggets[20 - 1]))\n else:\n return sum(nuggets)\n\nif __name__ == '__main__':\n print euler_timer(140)(main)(verbose=True)\n", "id": "8089424", "language": "Python", "matching_score": 2.641993761062622, "max_stars_count": 7, "path": "python/complete/no140.py" }, { "content": "#!/usr/bin/env python\n\n# If a box contains twenty-one coloured discs, composed of fifteen blue\n# discs and six red discs, and two discs were taken at random, it can be\n# seen that the probability of taking two blue discs,\n# P(BB) = (15/21)(14/20) = 1/2.\n\n# The next such arrangement, for which there is exactly 50% chance of taking\n# two blue discs at random, is a box containing eighty-five blue discs and\n# thirty-five red discs.\n\n# By finding the first arrangement to contain over 10**12 = 1,000,000,000,000\n# discs in total, determine the number of blue discs that the box\n# would contain.\n\n# (b(b-1))/(T(T-1)) = (1/2) <==> 2(4b**2 - 4b) = 4T**2 - 4T\n# <==> 2(2*b - 1)**2 - (2*T - 1)**2 = 1\n# One can verify 2*x**2 - y**2 = 1 implies that x and y must be odd\n# so all solutions of this are desired by us\n\nfrom math import sqrt\n\nfrom python.conway_topograph import all_values_on_form\nfrom python.conway_topograph import get_recurrence\nfrom python.conway_topograph import start_to_series\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef main(verbose=False):\n # y = 2T - 1, T > 10**12 implies the following:\n LOWER_LIMIT = 2 * (10 ** 12) - 1\n\n # We seek 2x^2 - y^2 = 1\n x_mult, y_mult, relation = get_recurrence([2, -1])\n starting_points = all_values_on_form([2, -1], 1)\n series = [start_to_series(initial, y_mult, 'y')\n for initial in starting_points]\n result = [pair[0] for pair in series]\n while max(result) <= LOWER_LIMIT:\n result.extend([pair[1] for pair in series])\n series = [recurrence_next(relation, values) for values in series]\n\n min_y = min(y for y in result if y > LOWER_LIMIT)\n min_x = sqrt((1 + min_y ** 2) / 2)\n return int((min_x + 1) / 2)\n\nif __name__ == 
'__main__':\n print euler_timer(100)(main)(verbose=True)\n", "id": "2199531", "language": "Python", "matching_score": 1.902222990989685, "max_stars_count": 7, "path": "python/complete/no100.py" }, { "content": "#!/usr/bin/env python\n\n# A bag contains one red disc and one blue disc. In a game of chance a player\n# takes a disc at random and its colour is noted. After each turn the disc is\n# returned to the bag, an extra red disc is added, and another disc is\n# taken at random.\n\n# The player... wins if they have taken more blue discs than red discs a\n# the end of the game.\n\n# ------------------------------------------------------------------------\n# P_n = prob(disc n is blue) = 1/(n + 1)\n\n# For n discs, let C_1-C_2-...-C_n be the colors drawn, let i_1,...,i_k be the\n# indices j such that disk i_j was drawn red. The probability of this event\n# is (i_1 * ... * i_k)/factorial(n + 1)\n\n# We can enumeratively define n_{j,k} to be the aggregate numerator\n# of all possible draws with j blues drawn out of k draws\n#\n# The initial conditions are n_{0,1} = 1, n_{1,1} = 1\n# The recurrence is defined by the fact that the n_{j + 1,k + 1} is\n# can only have the (k + 1)'st element be blue or red, hence\n# n_{j + 1,k + 1} = numer(blue)*n_{j,k} + numer(red)*n_{j + 1,k}\n# = n_{j,k} + (k + 1)*n_{j + 1,k}\n# except for the cases j = k, where n_{j,k} = numer(all blue) = 1\n# except for the cases j = 0, where n_{0,k} = k!\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef iterative_numerator(n):\n numerators = {}\n for k in range(1, n + 1):\n for j in range(k + 1):\n if j == 0:\n numerators[(j, k)] = factorial(k)\n elif j == k:\n numerators[(j, k)] = 1\n else:\n numerators[(j, k)] = (numerators[(j - 1, k - 1)] +\n k * numerators[(j, k - 1)])\n min_blue = (n / 2) + 1\n count = 0\n for blue in range(min_blue, n + 1):\n count += numerators[(blue, n)]\n return count\n\n\ndef max_payout(n):\n # Integer division precludes floor operation\n return factorial(n + 1) / iterative_numerator(n)\n\n\ndef main(verbose=False):\n return max_payout(15)\n\nif __name__ == '__main__':\n print euler_timer(121)(main)(verbose=True)\n", "id": "4723711", "language": "Python", "matching_score": 0.9905200600624084, "max_stars_count": 7, "path": "python/complete/no121.py" }, { "content": "import os\nimport random\nimport threading\nimport uuid\n\nimport flask\n\n\nPORT = int(os.environ.get(\"PORT\", 15071))\nDEFAULT_PLAYERS = \"Joe,Eve,Tim\"\nDEBUG = \"DEBUG\" in os.environ\nAPP = flask.Flask(__name__)\nMETHODS = (\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\")\nGAME = {\n \"players\": {},\n \"buried_cards\": [],\n \"all_moves\": [],\n \"active_value\": \"\",\n \"active_suit\": \"\",\n \"pending8\": None,\n \"consecutive_draw2\": 0,\n \"consecutive_skip4\": 0,\n \"winner\": None,\n}\nLOCK = threading.Lock()\nUNICODE_CARDS = {\n \"CLUBS\": \"\\u2663\",\n \"DIAMONDS\": \"\\u2666\",\n \"SPADES\": \"\\u2660\",\n \"HEARTS\": \"\\u2665\",\n}\nDECK = (\n (\"A\", \"CLUBS\"),\n (\"2\", \"CLUBS\"),\n (\"3\", \"CLUBS\"),\n (\"4\", \"CLUBS\"),\n (\"5\", \"CLUBS\"),\n (\"6\", \"CLUBS\"),\n (\"7\", \"CLUBS\"),\n (\"8\", \"CLUBS\"),\n (\"9\", \"CLUBS\"),\n (\"10\", \"CLUBS\"),\n (\"J\", \"CLUBS\"),\n (\"Q\", \"CLUBS\"),\n (\"K\", \"CLUBS\"),\n (\"A\", \"DIAMONDS\"),\n (\"2\", \"DIAMONDS\"),\n (\"3\", \"DIAMONDS\"),\n (\"4\", \"DIAMONDS\"),\n (\"5\", \"DIAMONDS\"),\n (\"6\", \"DIAMONDS\"),\n (\"7\", \"DIAMONDS\"),\n (\"8\", \"DIAMONDS\"),\n (\"9\", \"DIAMONDS\"),\n (\"10\", \"DIAMONDS\"),\n (\"J\", 
\"DIAMONDS\"),\n (\"Q\", \"DIAMONDS\"),\n (\"K\", \"DIAMONDS\"),\n (\"A\", \"SPADES\"),\n (\"2\", \"SPADES\"),\n (\"3\", \"SPADES\"),\n (\"4\", \"SPADES\"),\n (\"5\", \"SPADES\"),\n (\"6\", \"SPADES\"),\n (\"7\", \"SPADES\"),\n (\"8\", \"SPADES\"),\n (\"9\", \"SPADES\"),\n (\"10\", \"SPADES\"),\n (\"J\", \"SPADES\"),\n (\"Q\", \"SPADES\"),\n (\"K\", \"SPADES\"),\n (\"A\", \"HEARTS\"),\n (\"2\", \"HEARTS\"),\n (\"3\", \"HEARTS\"),\n (\"4\", \"HEARTS\"),\n (\"5\", \"HEARTS\"),\n (\"6\", \"HEARTS\"),\n (\"7\", \"HEARTS\"),\n (\"8\", \"HEARTS\"),\n (\"9\", \"HEARTS\"),\n (\"10\", \"HEARTS\"),\n (\"J\", \"HEARTS\"),\n (\"Q\", \"HEARTS\"),\n (\"K\", \"HEARTS\"),\n)\n\n\n@APP.route(\"/favicon.ico\", methods=(\"GET\",))\ndef favicon():\n return flask.send_from_directory(\n os.path.join(APP.root_path, \"static\"),\n \"favicon.ico\",\n mimetype=\"image/vnd.microsoft.icon\",\n )\n\n\n@APP.route(\"/admin\", methods=(\"GET\",))\ndef admin():\n players = []\n for player_uuid, player in GAME[\"players\"].items():\n url = f\"/player/{player_uuid}\"\n name = player[\"name\"]\n players.append((name, url))\n\n return flask.render_template(\"admin.html\", players=players)\n\n\n@APP.route(\"/active\", methods=(\"GET\",))\ndef active():\n with LOCK:\n return flask.jsonify({\"turn\": len(GAME[\"all_moves\"])})\n\n\ndef for_compare_cards(card):\n return DECK.index(card)\n\n\ndef can_play(card, player_uuid):\n # NOTE: This assumes the deck is locked.\n if player_uuid != GAME[\"active_player\"]:\n return False\n\n if GAME[\"winner\"] is not None:\n return False\n\n value, suit = card\n\n # If a 4, need to check first if there is an active \"skip streak\" open.\n if GAME[\"active_value\"] == \"4\" and GAME[\"consecutive_skip4\"] > 0:\n return value == \"4\"\n\n # If a 2, need to check first if there is an active \"draw 2 streak\" open.\n if GAME[\"active_value\"] == \"2\" and GAME[\"consecutive_draw2\"] > 0:\n return value == \"2\"\n\n if value == \"8\":\n return True\n\n # If an 8 is showing, only the suit can match.\n if GAME[\"active_value\"] == \"8\":\n return suit == GAME[\"active_suit\"]\n\n return value == GAME[\"active_value\"] or suit == GAME[\"active_suit\"]\n\n\ndef get_top_card_extra():\n top_card_value, _ = GAME[\"top_card\"]\n if top_card_value != \"8\":\n return \"\"\n\n active_suit = GAME[\"active_suit\"]\n if active_suit == \"\":\n return \"\"\n\n span_elt = suit_span(active_suit)\n return flask.Markup(f\" ({span_elt})\")\n\n\n@APP.route(\"/player/<player_uuid>\", methods=(\"GET\",))\ndef player(player_uuid):\n with LOCK:\n # NOTE: Just let a `KeyError` happen here (and below).\n player = GAME[\"players\"][player_uuid]\n name = player[\"name\"]\n winner = GAME[\"winner\"]\n game_over = winner is not None\n\n top_card_value, top_card_suit = GAME[\"top_card\"]\n top_card_display = f\"{top_card_value}{UNICODE_CARDS[top_card_suit]}\"\n\n active_player_uuid = GAME[\"active_player\"]\n\n moves = []\n choosing8 = (\n GAME[\"pending8\"] is not None and active_player_uuid == player_uuid\n )\n if choosing8:\n for new_suit in (\"CLUBS\", \"DIAMONDS\", \"SPADES\", \"HEARTS\"):\n extended = flask.Markup(\n f\"Change suit to {suit_span(new_suit)}\"\n )\n action = f\"CHANGE-{new_suit}\"\n moves.append((GAME[\"pending8\"], action, extended, True))\n\n for card in sorted(player[\"cards\"], key=for_compare_cards):\n value, suit = card\n as_display = f\"{value}{UNICODE_CARDS[suit]}\"\n can_play_here = can_play(card, player_uuid)\n moves.append((value, suit, as_display, can_play_here))\n\n if active_player_uuid == 
player_uuid:\n if GAME[\"consecutive_skip4\"] > 0:\n moves.append((\"0\", \"TAKESKIP\", \"Take Skip\", True),)\n elif GAME[\"consecutive_draw2\"] > 0:\n amount = 2 * GAME[\"consecutive_draw2\"]\n moves.append((str(amount), \"DRAW\", f\"Draw {amount}\", True),)\n elif not (choosing8 or game_over):\n moves.append((\"1\", \"DRAW\", \"Draw 1\", True),)\n\n active_player = GAME[\"players\"][active_player_uuid][\"name\"]\n active_player_count = len(GAME[\"players\"][active_player_uuid][\"cards\"])\n ordered_players = [\n (active_player, active_player_count, active_player_uuid == winner)\n ]\n\n current_uuid = active_player_uuid\n num_players = len(GAME[\"players\"])\n for _ in range(num_players - 1):\n current_uuid = GAME[\"players\"][current_uuid][\"next\"]\n current_name = GAME[\"players\"][current_uuid][\"name\"]\n current_count = len(GAME[\"players\"][current_uuid][\"cards\"])\n ordered_players.append(\n (current_name, current_count, current_uuid == winner)\n )\n\n return flask.render_template(\n \"player.html\",\n name=name,\n recent_moves=list(reversed(GAME[\"all_moves\"][-3:])),\n top_card_suit=top_card_suit,\n top_card=top_card_display,\n top_card_extra=get_top_card_extra(),\n ordered_players=ordered_players,\n moves=moves,\n player_uuid=player_uuid,\n current_turn=len(GAME[\"all_moves\"]),\n )\n\n\ndef suit_span(suit):\n return f'<span class=\"{suit}\">{UNICODE_CARDS[suit]}</span>'\n\n\ndef next_card():\n # NOTE: This assumes the deck is locked.\n if GAME[\"deck\"]:\n drawn_card = GAME[\"deck\"].pop()\n return drawn_card\n\n buried_cards = GAME[\"buried_cards\"]\n if not buried_cards:\n raise RuntimeError(\"There are no more cards left\")\n\n GAME[\"all_moves\"].append((\"Shuffled buried\", \"cards\", \"\"))\n GAME[\"buried_cards\"] = []\n random.shuffle(buried_cards)\n GAME[\"deck\"] = buried_cards\n return GAME[\"deck\"].pop()\n\n\n@APP.route(\"/play/<player_uuid>/<value>/<action>\", methods=(\"POST\",))\ndef play(player_uuid, value, action):\n for change_suit in (\"CLUBS\", \"DIAMONDS\", \"SPADES\", \"HEARTS\"):\n target_action = f\"CHANGE-{change_suit}\"\n if action == target_action:\n with LOCK:\n # TODO: Check if a winner (here and elsewhere).\n\n if player_uuid != GAME[\"active_player\"]:\n raise RuntimeError(\n \"Only active player can play an 8 change\",\n player_uuid,\n GAME[\"active_player\"],\n )\n\n old_suit = value\n if old_suit not in (\"CLUBS\", \"DIAMONDS\", \"SPADES\", \"HEARTS\"):\n raise RuntimeError(\"Old suit invalid\", old_suit)\n\n if GAME[\"pending8\"] != old_suit:\n raise RuntimeError(\"Invalid pending crazy 8\")\n\n GAME[\"pending8\"] = None\n GAME[\"active_suit\"] = change_suit\n GAME[\"active_player\"] = player_uuid\n span_elt = suit_span(change_suit)\n\n player = GAME[\"players\"][player_uuid]\n GAME[\"active_player\"] = player[\"next\"]\n name = player[\"name\"]\n as_display = f\"8{UNICODE_CARDS[old_suit]}\"\n\n with_markup = flask.Markup(\n f\"{name} changed to {span_elt} with\"\n )\n GAME[\"all_moves\"].append((with_markup, as_display, old_suit,))\n\n return flask.redirect(f\"/player/{player_uuid}\")\n\n if action == \"TAKESKIP\":\n with LOCK:\n if player_uuid != GAME[\"active_player\"]:\n raise RuntimeError(\n \"Only active player can take a skip\",\n player_uuid,\n GAME[\"active_player\"],\n )\n\n if value != \"0\":\n raise RuntimeError(\"Take skip value should be 0\", value)\n\n if GAME[\"consecutive_skip4\"] == 0:\n raise RuntimeError(\"Must have active skip streak\")\n\n player = GAME[\"players\"][player_uuid]\n name = player[\"name\"]\n 
GAME[\"all_moves\"].append((f\"{name} got\", \"skipped\", \"\"))\n GAME[\"consecutive_skip4\"] = 0\n GAME[\"active_player\"] = player[\"next\"]\n\n return flask.redirect(f\"/player/{player_uuid}\")\n\n if action == \"DRAW\":\n with LOCK:\n if player_uuid != GAME[\"active_player\"]:\n raise RuntimeError(\n \"Only active player can draw\",\n player_uuid,\n GAME[\"active_player\"],\n )\n\n player = GAME[\"players\"][player_uuid]\n name = player[\"name\"]\n if value == \"1\":\n if GAME[\"consecutive_draw2\"] != 0:\n raise RuntimeError(\n \"Invalid draw amount\", value, GAME[\"consecutive_draw2\"]\n )\n\n drawn_card = next_card()\n player[\"cards\"].append(drawn_card)\n GAME[\"all_moves\"].append((f\"{name} drew\", \"a card\", \"\"),)\n else:\n int_value = int(value)\n if int_value != 2 * GAME[\"consecutive_draw2\"]:\n raise RuntimeError(\n \"Invalid draw amount\", value, GAME[\"consecutive_draw2\"]\n )\n for _ in range(int_value):\n drawn_card = next_card()\n player[\"cards\"].append(drawn_card)\n GAME[\"all_moves\"].append(\n (f\"{name} drew\", f\"{value} cards\", \"\"),\n )\n GAME[\"consecutive_draw2\"] = 0\n\n GAME[\"active_player\"] = player[\"next\"]\n\n return flask.redirect(f\"/player/{player_uuid}\")\n\n suit = action\n card = value, suit\n if card not in DECK:\n raise RuntimeError(\"Invalid card\", card)\n\n with LOCK:\n # NOTE: Just let a `KeyError` happen here (and below).\n player = GAME[\"players\"][player_uuid]\n if card not in player[\"cards\"]:\n raise RuntimeError(\"Player does not hold card\", player, card)\n\n if not can_play(card, player_uuid):\n raise RuntimeError(\n \"Card cannot be played on top card for current player\",\n card,\n GAME[\"top_card\"],\n player_uuid,\n )\n\n top_card = GAME[\"top_card\"]\n GAME[\"buried_cards\"].append(top_card)\n GAME[\"top_card\"] = card\n GAME[\"active_value\"] = value\n if value == \"2\":\n GAME[\"consecutive_draw2\"] += 1\n if value == \"4\":\n GAME[\"consecutive_skip4\"] += 1\n GAME[\"active_suit\"] = suit\n player[\"cards\"].remove(card)\n name = player[\"name\"]\n if not player[\"cards\"]:\n GAME[\"winner\"] = player_uuid\n\n GAME[\"active_player\"] = player[\"next\"]\n if value == \"8\":\n GAME[\"pending8\"] = suit\n GAME[\"active_value\"] = \"\"\n GAME[\"active_suit\"] = \"\"\n GAME[\"active_player\"] = player_uuid\n\n as_display = f\"{value}{UNICODE_CARDS[suit]}\"\n GAME[\"all_moves\"].append((f\"{name} played\", as_display, suit),)\n\n return flask.redirect(f\"/player/{player_uuid}\")\n\n\n@APP.route(\"/\", defaults={\"path\": \"\"}, methods=METHODS)\n@APP.route(\"/<path:path>\", methods=METHODS)\ndef catch_all(path):\n return flask.jsonify({\"path\": path})\n\n\ndef start_game():\n players_str = os.environ.get(\"PLAYERS\", DEFAULT_PLAYERS)\n if players_str is None:\n raise OSError(\"PLAYERS must be supplied\")\n players = [player.strip() for player in players_str.upper().split(\",\")]\n num_players = len(players)\n if num_players < 2:\n raise OSError(\"Must have at least two players\")\n if len(set(players)) < num_players:\n raise OSError(\"Player names are not unique\", players_str)\n\n with LOCK:\n reverse_map = {}\n for player in players:\n player_uuid = str(uuid.uuid4())\n reverse_map[player] = player_uuid\n GAME[\"players\"][player_uuid] = {\"name\": player}\n\n for i, player in enumerate(players):\n player_uuid = reverse_map[player]\n next_index = (i + 1) % len(players)\n next_player_uuid = reverse_map[players[next_index]]\n GAME[\"players\"][player_uuid][\"next\"] = next_player_uuid\n\n new_deck = list(DECK)\n 
random.shuffle(new_deck)\n\n for _ in range(8):\n for player in players:\n player_uuid = reverse_map[player]\n cards = GAME[\"players\"][player_uuid].setdefault(\"cards\", [])\n dealt = new_deck.pop()\n cards.append(dealt)\n\n top_card = None\n # Bounded while loop\n for _ in range(1000):\n if top_card is not None:\n break\n\n top_card = new_deck.pop()\n value, _ = top_card\n if value in (\"2\", \"4\", \"8\"):\n new_deck.append(top_card)\n top_card = None\n random.shuffle(new_deck)\n\n GAME[\"top_card\"] = top_card\n top_card_value, top_card_suit = top_card\n GAME[\"active_value\"] = top_card_value\n GAME[\"active_suit\"] = top_card_suit\n GAME[\"deck\"] = new_deck\n GAME[\"active_player\"] = reverse_map[players[0]]\n\n\nif __name__ == \"__main__\":\n start_game()\n APP.run(host=\"0.0.0.0\", port=PORT, debug=DEBUG)\n", "id": "9064091", "language": "Python", "matching_score": 1.6758490800857544, "max_stars_count": 0, "path": "src/app.py" }, { "content": "from deck import random_deck\nimport game_play\nfrom player_types import RandomPlayer\n\n\ngame_play.DEBUG = True\n\n\ndef simulate(num_players=4):\n curr_deck = random_deck()\n players = [RandomPlayer() for _ in xrange(num_players)]\n game = game_play.Game(curr_deck, players)\n game.play()\n\n\nif __name__ == '__main__':\n simulate()\n", "id": "7925665", "language": "Python", "matching_score": 1.7093055248260498, "max_stars_count": 0, "path": "simulation.py" }, { "content": "import argparse\nimport random\nimport time\n\nimport deck\nimport game_play\nfrom player_types import RandomPlayer\n\n\nRESULTS_FILE_TMP = 'data/results-%d-%d%s.bindata'\nDEFAULT_SIMULATOR = 'DefaultSimulator'\nSEPARATOR = '|'\nSTATUS_UPDATE = 5 * 10**4\nSERIALIZED_OUTCOMES = {\n # Tuples of (is_dealer, won_bid, tricks)\n (True, 0, 0): chr(0),\n (True, 0, 1): chr(1),\n (True, 0, 2): chr(2),\n (True, 0, 3): chr(3),\n (True, 0, 4): chr(4),\n (True, 0, 5): chr(5),\n (True, 2, 0): chr(6),\n (True, 2, 1): chr(7),\n (True, 2, 2): chr(8),\n (True, 2, 3): chr(9),\n (True, 2, 4): chr(10),\n (True, 2, 5): chr(11),\n (True, 3, 0): chr(12),\n (True, 3, 1): chr(13),\n (True, 3, 2): chr(14),\n (True, 3, 3): chr(15),\n (True, 3, 4): chr(16),\n (True, 3, 5): chr(17),\n (True, 4, 0): chr(18),\n (True, 4, 1): chr(19),\n (True, 4, 2): chr(20),\n (True, 4, 3): chr(21),\n (True, 4, 4): chr(22),\n (True, 4, 5): chr(23),\n (True, 5, 0): chr(24),\n (True, 5, 1): chr(25),\n (True, 5, 2): chr(26),\n (True, 5, 3): chr(27),\n (True, 5, 4): chr(28),\n (True, 5, 5): chr(29),\n (False, 0, 0): chr(30),\n (False, 0, 1): chr(31),\n (False, 0, 2): chr(32),\n (False, 0, 3): chr(33),\n (False, 0, 4): chr(34),\n (False, 0, 5): chr(35),\n (False, 2, 0): chr(36),\n (False, 2, 1): chr(37),\n (False, 2, 2): chr(38),\n (False, 2, 3): chr(39),\n (False, 2, 4): chr(40),\n (False, 2, 5): chr(41),\n (False, 3, 0): chr(42),\n (False, 3, 1): chr(43),\n (False, 3, 2): chr(44),\n (False, 3, 3): chr(45),\n (False, 3, 4): chr(46),\n (False, 3, 5): chr(47),\n (False, 4, 0): chr(48),\n (False, 4, 1): chr(49),\n (False, 4, 2): chr(50),\n (False, 4, 3): chr(51),\n (False, 4, 4): chr(52),\n (False, 4, 5): chr(53),\n (False, 5, 0): chr(54),\n (False, 5, 1): chr(55),\n (False, 5, 2): chr(56),\n (False, 5, 3): chr(57),\n (False, 5, 4): chr(58),\n (False, 5, 5): chr(59),\n}\nDESERIALIZED_OUTCOMES = {val: key for key, val in SERIALIZED_OUTCOMES.items()}\n\n\nif SEPARATOR in deck.CARD_DESERIALIZE or SEPARATOR in DESERIALIZED_OUTCOMES:\n raise ValueError('Separator can not be used.')\n\n\nclass DefaultSimulator(object):\n\n def 
__init__(self, num_players):\n self.num_players = num_players\n\n @staticmethod\n def shuffle(deck):\n deck.shuffle()\n return deck\n\n def get_players(self):\n return [RandomPlayer() for _ in xrange(self.num_players)]\n\n\nclass AceQueenSimulator(object):\n\n def __init__(self, num_players):\n self.num_players = num_players\n\n @staticmethod\n def _swap_values(deck, index1, index2):\n deck.cards[index1], deck.cards[index2] = (deck.cards[index2],\n deck.cards[index1])\n\n def shuffle(self, deck):\n deck.shuffle()\n # Put Ace of Hearts in position 0 so that person 0 gets it.\n ace_hearts, = [i for i, card in enumerate(deck.cards)\n if card.suit == 'H' and card.value == 'A']\n if ace_hearts != 0:\n self._swap_values(deck, 0, ace_hearts)\n\n # Put Queen of Hearts in position `num_players` so that person 0\n # gets it as their second card.\n queen_hearts, = [i for i, card in enumerate(deck.cards)\n if card.suit == 'H' and card.value == 'Q']\n if queen_hearts != self.num_players:\n self._swap_values(deck, self.num_players, queen_hearts)\n\n # Make sure the last 3 cards are not hearts.\n protected_indices = [0, self.num_players]\n for multiplier in (2, 3, 4):\n index = multiplier * self.num_players\n # If a Heart, swap it out.\n if deck.cards[index].suit == 'H':\n non_heart_indices = [\n i for i, card in enumerate(deck.cards)\n if card.suit != 'H' and i not in protected_indices\n ]\n new_index = random.choice(non_heart_indices)\n self._swap_values(deck, index, new_index)\n # Make sure the value is not changed by future iterations.\n protected_indices.append(index)\n\n return deck\n\n def get_players(self):\n players = [RandomPlayer() for _ in xrange(self.num_players)]\n # Make sure first player always bids and picks hearts.\n players[0].random_bids = tuple(i for i in players[0].random_bids\n if i != -1)\n players[0]._choose_trump = lambda hand: 'H'\n # Make sure no other players ever bid.\n for player in players[1:]:\n player.random_bids = (-1,)\n return players\n\n\ndef simulate(num_players=4, simulator_class=DefaultSimulator):\n simulator = simulator_class(num_players)\n curr_deck = simulator.shuffle(deck.Deck())\n players = simulator.get_players()\n game = game_play.Game(curr_deck, players)\n game.play()\n\n hand_vals = [game.trump]\n for hand in game.hands:\n hand_vals.extend([card.serialize() for card in hand.played_cards])\n key = (hand.is_dealer, hand.won_bid, hand.tricks)\n hand_vals.append(SERIALIZED_OUTCOMES[key])\n\n return ''.join(hand_vals)\n\n\ndef long_simulate(n, simulator_class=DefaultSimulator):\n print 'Simulating {:,} games'.format(n)\n\n start = time.time()\n simulator_str = ''\n if simulator_class.__name__ != DEFAULT_SIMULATOR:\n simulator_str = '-%s' % (simulator_class.__name__,)\n\n results_file = RESULTS_FILE_TMP % (time.time(), n, simulator_str)\n print 'Saving in %s.' 
% (results_file,)\n with open(results_file, 'wb') as fh:\n # Write the first so that separator only appears before.\n # Assumes n > 0.\n fh.write(simulate(simulator_class=simulator_class))\n for i in xrange(2, n + 1):\n fh.write(SEPARATOR)\n fh.write(simulate(simulator_class=simulator_class))\n\n if i % STATUS_UPDATE == 0:\n message = '{:,} iterations: {} seconds'.format(\n i, time.time() - start)\n print message\n\n\nif __name__ == '__main__':\n simulator_classes = {\n DEFAULT_SIMULATOR: DefaultSimulator,\n 'ace_queen': AceQueenSimulator,\n }\n parser = argparse.ArgumentParser(description='Simulate Huk-A-Buk.')\n parser.add_argument('--num-games', dest='num_games', type=int,\n required=True, help='Number of games to simulate.')\n parser.add_argument('--simulator-class', dest='simulator_class',\n choices=tuple(simulator_classes.keys()),\n default=DEFAULT_SIMULATOR,\n help='Simulator to use for simulation.')\n args = parser.parse_args()\n\n simulator_class = simulator_classes[args.simulator_class]\n long_simulate(args.num_games, simulator_class=simulator_class)\n", "id": "3684741", "language": "Python", "matching_score": 3.9138991832733154, "max_stars_count": 0, "path": "long_simulation.py" }, { "content": "import argparse\nimport glob\nimport json\nimport os\n\nimport deck\nfrom long_simulation import DESERIALIZED_OUTCOMES\nfrom long_simulation import SEPARATOR\n\n\nclass DoNothingAnalyzer(object):\n\n def __call__(self, game_result):\n pass\n\n def post_result(self):\n pass\n\n\nclass AceQueenAnalyzer(object):\n\n def __init__(self):\n self.counts = {}\n\n def __call__(self, game_result):\n trump = game_result[0]\n # Unpack since we expect a single match.\n winning_bidder, = [hand for hand in game_result[1:]\n if hand[6] != 0]\n\n # Last 3 are: is_dealer, won_bid, tricks\n key = tuple(winning_bidder[5:])\n\n # First 5 are: cards\n trump_dealt = set([card.value for card in winning_bidder[:5]\n if card.suit == trump and card.from_original_hand])\n if trump_dealt == set(('A', 'Q')):\n self.counts[key] = self.counts.get(key, 0) + 1\n\n def post_result(self):\n results_by_bid = {\n 2: {},\n 3: {},\n 4: {},\n 5: {},\n }\n\n for key, val in self.counts.iteritems():\n _, won_bid, tricks = key\n curr_tricks = results_by_bid[won_bid]\n curr_tricks[tricks] = curr_tricks.get(tricks, 0) + val\n\n for bid_amt in sorted(results_by_bid.keys()):\n tricks_dict = results_by_bid[bid_amt]\n total_tricks = sum(tricks_dict.values())\n print 'Bid: %d, Total Tricks: %d' % (bid_amt, total_tricks)\n total_tricks = 1.0 * total_tricks\n for num_tricks in range(5 + 1):\n probability = tricks_dict.get(num_tricks, 0) / total_tricks\n print 'Pr(Win %d tricks) = %g' % (num_tricks, probability)\n\n print '=' * 60\n\n\ndef pick_filename():\n files = glob.glob('data/*.bindata')\n for i, filename in enumerate(files):\n print '%d: %s' % (i, filename)\n\n file_chosen = None\n while file_chosen is None:\n try:\n choice = raw_input('Which file? 
')\n file_chosen = files[int(choice)]\n except (TypeError, ValueError, KeyError):\n pass\n\n return file_chosen\n\n\ndef read_game(file_handle, total_bytes):\n result = []\n separator_found = False\n while not separator_found:\n if file_handle.tell() >= total_bytes:\n break\n\n char = file_handle.read(1)\n if char == SEPARATOR:\n separator_found = True\n else:\n result.append(char)\n return ''.join(result)\n\n\ndef parse_game_line(game_line):\n num_players, remainder = divmod(len(game_line), 6)\n if remainder != 1:\n raise ValueError('Expected 6 characters per hand.')\n\n trump = game_line[0]\n game_line = game_line[1:]\n game_result = [trump]\n\n for index in range(num_players):\n cards = [deck.Card.deserialize(game_line[6 * index + i])\n for i in xrange(5)]\n cards.extend(DESERIALIZED_OUTCOMES[game_line[6 * index + 5]])\n game_result.append(cards)\n\n return game_result\n\n\ndef read_simulation(results_file, analyze_func):\n # os.fstat(fh.fileno()).st_size\n fh = open(results_file, 'rb')\n total_bytes = os.fstat(fh.fileno()).st_size\n\n try:\n while fh.tell() < total_bytes:\n game_line = read_game(fh, total_bytes)\n game_result = parse_game_line(game_line)\n analyze_func(game_result)\n finally:\n fh.close()\n\n\nif __name__ == '__main__':\n analyze_funcs = {\n 'do_nothing_analyze': DoNothingAnalyzer,\n 'analyze_ace_queen': AceQueenAnalyzer,\n }\n parser = argparse.ArgumentParser(\n description='Analyze simulated Huk-A-Buk games.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--filename', dest='filename',\n help='Filename containing simulated data.')\n parser.add_argument('--analyze-func', dest='analyze_func',\n choices=tuple(analyze_funcs.keys()),\n default='do_nothing_analyze',\n help='Function used to analyze simulated data.')\n args = parser.parse_args()\n filename = args.filename or pick_filename()\n\n analyzer = analyze_funcs[args.analyze_func]()\n read_simulation(filename, analyzer)\n analyzer.post_result()\n", "id": "3715152", "language": "Python", "matching_score": 1.6724176406860352, "max_stars_count": 0, "path": "analyze.py" }, { "content": "#!/usr/bin/env python\n\n# High Card - 1\n# One Pair - 2\n# Two Pairs - 3\n# Three of a Kind - 4\n# Straight - 5\n# Flush - 6\n# Full House - 7\n# Four of a Kind - 8\n# Straight Flush - 9\n# Royal Flush - 10\n\n# 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King, Ace\n\n# The file, no054.txt, contains one-thousand random hands dealt to two players.\n# Each line of the file contains ten cards (separated by a single space): the\n# first five are Player 1's cards and the last five are Player 2's cards. 
You\n# can assume that all hands are valid (no invalid characters or repeated\n# cards), each player's hand is in no specific order, and in each hand there\n# is a clear winner.\n\n# Example Row in File\n# 7C 3C TH 5S 8H 8C 9C JD TC KD\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\nCARDS = dict([('2', 2),\n ('3', 3),\n ('4', 4),\n ('5', 5),\n ('6', 6),\n ('7', 7),\n ('8', 8),\n ('9', 9),\n ('T', 10),\n ('J', 11),\n ('Q', 12),\n ('K', 13),\n ('A', 14)])\n\n\ndef read_card(card_from_file):\n value, suit = card_from_file\n return (CARDS[value], suit)\n\n\ndef read_hand(hand_from_file):\n cards = [card.strip() for card in hand_from_file.split() if card]\n player1 = [read_card(card) for card in cards[:5]]\n player2 = [read_card(card) for card in cards[5:]]\n return (player1, player2)\n\n\ndef is_straight(hand):\n values = sorted(card[0] for card in hand)\n smallest = values[0]\n values = [value - smallest for value in values]\n return values == range(5)\n\n\ndef is_flush(hand):\n return (len(set(card[1] for card in hand)) == 1)\n\n\ndef is_royal(hand):\n return (min(card[0] for card in hand) >= 10)\n\n\ndef top_cards(hand):\n royal = is_royal(hand)\n straight = is_straight(hand)\n flush = is_flush(hand)\n if royal and flush:\n return (10, hand, [])\n if straight:\n if flush:\n return (9, hand, [])\n else:\n return (5, hand, [])\n else:\n if flush:\n return (6, hand, [])\n values = [card[0] for card in hand]\n num_cards = len(set(values))\n if num_cards == 2:\n val1, val2 = set(values)\n counts = sorted((values.count(val1), values.count(val2)))\n if counts == [2, 3]:\n return (7, hand, [])\n elif counts == [1, 4]:\n if values.count(val1) == 4:\n winner = val1\n else:\n winner = val2\n quad = [card for card in hand if card[0] == winner]\n kicker = [card for card in hand if card[0] != winner]\n return (8, quad, kicker)\n elif num_cards == 3:\n val1, val2, val3 = set(values)\n counts = sorted((values.count(val1),\n values.count(val2),\n values.count(val3)))\n if counts == [1, 2, 2]:\n if values.count(val1) == 1:\n loser = val1\n elif values.count(val2) == 1:\n loser = val2\n else:\n loser = val3\n pairs = [card for card in hand if card[0] != loser]\n kicker = [card for card in hand if card[0] == loser]\n return (3, pairs, kicker)\n elif counts == [1, 1, 3]:\n if values.count(val1) == 3:\n winner = val1\n elif values.count(val2) == 3:\n winner = val2\n else:\n winner = val3\n trips = [card for card in hand if card[0] == winner]\n kickers = [card for card in hand if card[0] != winner]\n kickers = sorted(kickers, key=lambda card: card[0])\n return (4, trips, kickers)\n elif num_cards == 4:\n for card in hand:\n if values.count(card[0]) == 2:\n winner = card[0]\n break\n pairs = [card for card in hand if card[0] == winner]\n kickers = [card for card in hand if card[0] != winner]\n kickers = sorted(kickers, key=lambda card: card[0])\n return (2, pairs, kickers)\n else:\n winner = max(values)\n high = [card for card in hand if card[0] == winner]\n kickers = [card for card in hand if card[0] != winner]\n kickers = sorted(kickers, key=lambda card: card[0])\n return (1, high, kickers)\n raise Exception(hand)\n\n\ndef compare_kickers(kicker1, kicker2):\n # assumes they are the same\n if len(kicker1) != len(kicker2):\n raise Exception(\"<NAME>\")\n\n num_cards = len(kicker1)\n for i in range(num_cards - 1, -1, -1):\n if kicker1[i] > kicker2[i]:\n return 1\n elif kicker1[i] < kicker2[i]:\n return 2\n raise Exception(\"Dan dumb 2\")\n return\n\n\ndef compare_hands(hand1, 
hand2):\n if hand1[0] < hand2[0]:\n return 2\n elif hand1[0] > hand2[0]:\n return 1\n\n hand_value = hand1[0]\n if hand_value in (1, 2, 4, 8):\n if hand1[1][0][0] > hand2[1][0][0]:\n return 1\n elif hand1[1][0][0] < hand2[1][0][0]:\n return 2\n else:\n return compare_kickers(hand1[2], hand2[2])\n elif hand_value in (3, 7):\n vals1 = sorted(list(set(card[0] for card in hand1[1])))\n vals2 = sorted(list(set(card[0] for card in hand2[1])))\n if vals1[1] > vals2[1]:\n return 1\n if vals1[1] < vals2[1]:\n return 2\n else:\n if vals1[0] > vals2[0]:\n return 1\n if vals1[0] < vals2[0]:\n return 2\n else:\n return compare_kickers(hand1[2], hand2[2])\n elif hand_value == 5:\n max1 = max(card[0] for card in hand1[1])\n max2 = max(card[0] for card in hand2[1])\n if max1 > max2:\n return 1\n elif max1 < max2:\n return 2\n else:\n raise Exception(\"Dan bad 3\")\n else:\n raise Exception(\"Dan bad 4\")\n raise Exception(\"Dan bad 5\")\n return\n\n\ndef main(verbose=False):\n data = get_data(54)\n hands = [read_hand(row) for row in data.split(\"\\n\") if row]\n hands = [(top_cards(entry[0]), top_cards(entry[1])) for entry in hands]\n\n count = 0\n for hand in hands:\n if compare_hands(*hand) == 1:\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(54)(main)(verbose=True)\n", "id": "10005585", "language": "Python", "matching_score": 1.4964977502822876, "max_stars_count": 7, "path": "python/complete/no054.py" }, { "content": "import random\n\n\nCARD_VALUES = {\n 2: 2,\n 3: 3,\n 4: 4,\n 5: 5,\n 6: 6,\n 7: 7,\n 8: 8,\n 9: 9,\n 10: 10,\n 'J': 11,\n 'Q': 12,\n 'K': 13,\n 'A': 14,\n}\nCARD_SUITS = {\n 'H': u'\\u2665',\n 'S': u'\\u2660',\n 'C': u'\\u2663',\n 'D': u'\\u2666',\n}\nCARD_SERIALIZE = {\n # Tuples of (from_original_hand, suit, value)\n (True, 'H', 2): chr(0),\n (True, 'H', 3): chr(1),\n (True, 'H', 4): chr(2),\n (True, 'H', 5): chr(3),\n (True, 'H', 6): chr(4),\n (True, 'H', 7): chr(5),\n (True, 'H', 8): chr(6),\n (True, 'H', 9): chr(7),\n (True, 'H', 10): chr(8),\n (True, 'H', 'J'): chr(9),\n (True, 'H', 'Q'): chr(10),\n (True, 'H', 'K'): chr(11),\n (True, 'H', 'A'): chr(12),\n (True, 'S', 2): chr(13),\n (True, 'S', 3): chr(14),\n (True, 'S', 4): chr(15),\n (True, 'S', 5): chr(16),\n (True, 'S', 6): chr(17),\n (True, 'S', 7): chr(18),\n (True, 'S', 8): chr(19),\n (True, 'S', 9): chr(20),\n (True, 'S', 10): chr(21),\n (True, 'S', 'J'): chr(22),\n (True, 'S', 'Q'): chr(23),\n (True, 'S', 'K'): chr(24),\n (True, 'S', 'A'): chr(25),\n (True, 'C', 2): chr(26),\n (True, 'C', 3): chr(27),\n (True, 'C', 4): chr(28),\n (True, 'C', 5): chr(29),\n (True, 'C', 6): chr(30),\n (True, 'C', 7): chr(31),\n (True, 'C', 8): chr(32),\n (True, 'C', 9): chr(33),\n (True, 'C', 10): chr(34),\n (True, 'C', 'J'): chr(35),\n (True, 'C', 'Q'): chr(36),\n (True, 'C', 'K'): chr(37),\n (True, 'C', 'A'): chr(38),\n (True, 'D', 2): chr(39),\n (True, 'D', 3): chr(40),\n (True, 'D', 4): chr(41),\n (True, 'D', 5): chr(42),\n (True, 'D', 6): chr(43),\n (True, 'D', 7): chr(44),\n (True, 'D', 8): chr(45),\n (True, 'D', 9): chr(46),\n (True, 'D', 10): chr(47),\n (True, 'D', 'J'): chr(48),\n (True, 'D', 'Q'): chr(49),\n (True, 'D', 'K'): chr(50),\n (True, 'D', 'A'): chr(51),\n (False, 'H', 2): chr(52),\n (False, 'H', 3): chr(53),\n (False, 'H', 4): chr(54),\n (False, 'H', 5): chr(55),\n (False, 'H', 6): chr(56),\n (False, 'H', 7): chr(57),\n (False, 'H', 8): chr(58),\n (False, 'H', 9): chr(59),\n (False, 'H', 10): chr(60),\n (False, 'H', 'J'): chr(61),\n (False, 'H', 'Q'): chr(62),\n (False, 'H', 'K'): chr(63),\n 
(False, 'H', 'A'): chr(64),\n (False, 'S', 2): chr(65),\n (False, 'S', 3): chr(66),\n (False, 'S', 4): chr(67),\n (False, 'S', 5): chr(68),\n (False, 'S', 6): chr(69),\n (False, 'S', 7): chr(70),\n (False, 'S', 8): chr(71),\n (False, 'S', 9): chr(72),\n (False, 'S', 10): chr(73),\n (False, 'S', 'J'): chr(74),\n (False, 'S', 'Q'): chr(75),\n (False, 'S', 'K'): chr(76),\n (False, 'S', 'A'): chr(77),\n (False, 'C', 2): chr(78),\n (False, 'C', 3): chr(79),\n (False, 'C', 4): chr(80),\n (False, 'C', 5): chr(81),\n (False, 'C', 6): chr(82),\n (False, 'C', 7): chr(83),\n (False, 'C', 8): chr(84),\n (False, 'C', 9): chr(85),\n (False, 'C', 10): chr(86),\n (False, 'C', 'J'): chr(87),\n (False, 'C', 'Q'): chr(88),\n (False, 'C', 'K'): chr(89),\n (False, 'C', 'A'): chr(90),\n (False, 'D', 2): chr(91),\n (False, 'D', 3): chr(92),\n (False, 'D', 4): chr(93),\n (False, 'D', 5): chr(94),\n (False, 'D', 6): chr(95),\n (False, 'D', 7): chr(96),\n (False, 'D', 8): chr(97),\n (False, 'D', 9): chr(98),\n (False, 'D', 10): chr(99),\n (False, 'D', 'J'): chr(100),\n (False, 'D', 'Q'): chr(101),\n (False, 'D', 'K'): chr(102),\n (False, 'D', 'A'): chr(103),\n}\nCARD_DESERIALIZE = {val: key for key, val in CARD_SERIALIZE.items()}\n\n\nclass Card(object):\n\n def __init__(self, suit, value, from_original_hand=True):\n self.suit = suit\n self.value = value\n self.from_original_hand = from_original_hand\n self._validate()\n\n def _validate(self):\n if self.value not in CARD_VALUES:\n raise ValueError('Bad card value', self.value)\n if self.suit not in CARD_SUITS:\n raise ValueError('Bad card suit', self.suit)\n\n @property\n def pretty(self):\n return u'%2s%s' % (self.value, CARD_SUITS[self.suit])\n\n def is_better(self, other_card, trump, lead_suit):\n if self.suit == other_card.suit:\n return CARD_VALUES[self.value] > CARD_VALUES[other_card.value]\n\n # If the suits are different, then at most 1 is trump and at\n # most 1 is the lead suit.\n if self.suit == trump:\n return True\n elif other_card.suit == trump:\n return False\n\n if self.suit == lead_suit:\n return True\n elif other_card.suit == lead_suit:\n return False\n\n # If neither card is one of the relevant suits, their comparison\n # is irrelevant, but `self` is certainly not `is_better`.\n return False\n\n def serialize(self):\n return CARD_SERIALIZE[(self.from_original_hand, self.suit, self.value)]\n\n @classmethod\n def deserialize(cls, char):\n from_original_hand, suit, value = CARD_DESERIALIZE[char]\n return cls(suit, value, from_original_hand=from_original_hand)\n\n\nclass Deck(object):\n\n def __init__(self):\n self.current_index = 0\n self.cards = []\n for value in CARD_VALUES.keys():\n for suit in CARD_SUITS.keys():\n new_card = Card(suit, value)\n self.cards.append(new_card)\n\n def shuffle(self):\n random.shuffle(self.cards)\n self.current_index = 0\n\n def draw_card(self):\n result = self.cards[self.current_index]\n self.current_index += 1\n return result\n\n\ndef random_deck():\n deck = Deck()\n deck.shuffle()\n return deck\n", "id": "3867173", "language": "Python", "matching_score": 1.6300259828567505, "max_stars_count": 0, "path": "deck.py" }, { "content": "import random\n\nfrom deck import CARD_SUITS\nfrom game_play import CARDS_PER_HAND\n\n\nASSUMPTIONS = {\n 'non_win_fold': 0.4, # 40% chance of folding given non-winning bid.\n 'bids': {\n 5: 1, # Pr(5) = 1/1503\n 4: 2, # Pr(4) = 2/1503\n 3: 100, # Pr(3) = 100/1503\n 2: 700, # Pr(2) = 700/1503\n -1: 700, # Pr(No Bid) = 700/1503\n },\n # This makes the probability of a 3+ bid in a 4 person 
game equal to\n # 1 - (1 - 103.0/1503)**4 == 0.24720587624919865\n}\n\n\nclass RandomPlayer(object):\n\n MINIMUM_BID = 2\n\n def __init__(self, random_bids=None):\n if random_bids is None:\n random_bids = (( 5,) * ASSUMPTIONS['bids'][5] +\n ( 4,) * ASSUMPTIONS['bids'][4] +\n ( 3,) * ASSUMPTIONS['bids'][3] +\n ( 2,) * ASSUMPTIONS['bids'][2] +\n (-1,) * ASSUMPTIONS['bids'][-1])\n\n self.random_bids = random_bids\n\n def draw_cards(self, hand, unused_winning_bid):\n trump = hand.game.trump\n trump_cards = [card for card in hand.unplayed_cards\n if card.suit == trump]\n non_trump_cards = [card for card in hand.unplayed_cards\n if card.suit != trump]\n cards_to_ditch = len(non_trump_cards)\n\n # `randint` is inclusive\n if hand.won_bid != 0:\n # Winner can't fold.\n num_to_draw = random.randint(0, cards_to_ditch)\n else:\n # Fold 40% of the time.\n if random.random() < ASSUMPTIONS['non_win_fold']:\n num_to_draw = CARDS_PER_HAND + 1\n else:\n num_to_draw = random.randint(0, cards_to_ditch)\n\n if num_to_draw > CARDS_PER_HAND:\n return None\n\n # Keep a random subset of cards.\n # random.sample \"Chooses k unique random elements\"\n hand.unplayed_cards = trump_cards + random.sample(\n non_trump_cards, cards_to_ditch - num_to_draw)\n\n for _ in xrange(num_to_draw):\n new_card = hand.deck.draw_card()\n new_card.from_original_hand = False\n hand.unplayed_cards.append(new_card)\n\n return num_to_draw\n\n @staticmethod\n def _choose_trump(hand):\n hand_suits = [card.suit for card in hand.unplayed_cards]\n return random.choice(hand_suits)\n\n def make_bid(self, hand, max_bid):\n trump = self._choose_trump(hand)\n if hand.is_dealer and max_bid < self.MINIMUM_BID:\n return self.MINIMUM_BID, trump\n\n bid_val = random.choice(self.random_bids)\n if bid_val > max_bid:\n return bid_val, trump\n else:\n return None, None\n\n def play_card(self, hand, trump, cards_out):\n # Winning bidder must lead first hand with trump.\n if hand.won_bid != 0 and len(hand.unplayed_cards) == CARDS_PER_HAND:\n matching_cards = [card for card in hand.unplayed_cards\n if card.suit == trump]\n if len(matching_cards) == 0:\n raise ValueError('Winning bid has no trump. 
What the hell!')\n card_to_play = random.choice(matching_cards)\n elif cards_out:\n matching_cards = [card for card in hand.unplayed_cards\n if card.suit == cards_out[0].suit]\n # Follow suit if you can.\n if matching_cards:\n card_to_play = random.choice(matching_cards)\n else:\n card_to_play = random.choice(hand.unplayed_cards)\n else:\n card_to_play = random.choice(hand.unplayed_cards)\n\n hand.unplayed_cards.remove(card_to_play)\n hand.played_cards.append(card_to_play)\n return card_to_play\n", "id": "4147894", "language": "Python", "matching_score": 3.272347927093506, "max_stars_count": 0, "path": "player_types.py" }, { "content": "from deck import CARD_SUITS\n\n\nCARDS_PER_HAND = 5\nSEPARATOR = '=' * 60\nDEBUG = False\n\n\ndef print_method(value):\n if DEBUG:\n print(value)\n\n\ndef reorder_for_hand(hands, hand, make_last=False):\n matching_indices = [i for i, curr_hand in enumerate(hands)\n if curr_hand is hand]\n if len(matching_indices) != 1:\n raise ValueError('Not exactly one matching hand.')\n\n if make_last:\n index = matching_indices[0] + 1\n else:\n index = matching_indices[0]\n\n return hands[index:] + hands[:index]\n\n\nclass Game(object):\n\n def __init__(self, deck, players):\n self.hands = []\n for i, player in enumerate(players):\n hand_name = chr(i + 65)\n self.hands.append(PlayerHand(self, deck, player,\n hand_name=hand_name))\n\n # Make the last hand the dealer.\n self.hands[-1].is_dealer = True\n\n for _ in xrange(CARDS_PER_HAND):\n for hand in self.hands:\n hand.take_from_dealer()\n\n def play(self):\n self.get_bids()\n self.draw_cards()\n self.play_tricks()\n\n def get_bids(self):\n self.winning_bid = 1 # Maximum non-bid.\n winning_index = None\n\n for index, hand in enumerate(self.hands):\n curr_bid, trump = hand.bid(self.winning_bid)\n if curr_bid is not None:\n print_method('Player %s bids %d.' % (hand, curr_bid))\n if not curr_bid > self.winning_bid:\n raise ValueError('Bids can only increase.')\n self.winning_bid = curr_bid\n self.trump = trump\n winning_index = index\n else:\n print_method('Player %s does not bid.' % (hand,))\n\n self.winning_bidder = self.hands[winning_index]\n self.winning_bidder.won_bid = self.winning_bid\n # Re-order so that the winning bidder is last.\n self.hands = reorder_for_hand(self.hands, self.winning_bidder,\n make_last=True)\n message = ('%s won bid with %d tricks. Trump is %s.' % (\n self.winning_bidder, self.winning_bid, CARD_SUITS[self.trump]))\n print_method(message)\n\n def draw_cards(self):\n print_method(SEPARATOR)\n hands_after_draw = []\n for hand in self.hands:\n cards_drawn = hand.draw(self.winning_bid)\n if cards_drawn is not None:\n hands_after_draw.append(hand)\n print_method('Hand %s takes %d cards.' % (hand.pretty,\n cards_drawn))\n elif hand is self.winning_bidder:\n raise ValueError('Winning bidder must not fold')\n else:\n print_method('Hand %s folds.' % (hand.pretty,))\n self.hands = hands_after_draw\n\n # Re-order so that the winning bidder plays first.\n self.hands = reorder_for_hand(self.hands, self.winning_bidder,\n make_last=False)\n\n def play_tricks(self):\n print_method(SEPARATOR)\n for i in xrange(5):\n print_method('Trick %d:' % (i + 1,))\n self.play_trick()\n print_method(SEPARATOR)\n\n for hand in self.hands:\n if hand.won_bid != 0:\n print_method('%s finished with %d tricks after bidding %d.' % (\n hand, hand.tricks, hand.won_bid))\n else:\n print_method('%s finished with %d tricks.' 
% (hand,\n hand.tricks))\n\n def play_trick(self):\n cards_out = []\n for hand in self.hands:\n card_played = hand.play(self.trump, cards_out)\n print_method('Hand %s played %s.' % (hand, card_played.pretty))\n\n best_card = cards_out[0]\n lead_suit = cards_out[0].suit\n winning_index = 0\n for i, card in enumerate(cards_out[1:]):\n if card.is_better(best_card, self.trump, lead_suit):\n winning_index = i + 1\n best_card = card\n\n winning_hand = self.hands[winning_index]\n winning_hand.tricks += 1\n # Re-order so that the winning bidder is last.\n self.hands = reorder_for_hand(self.hands, winning_hand,\n make_last=False)\n print_method('%s wins.' % (winning_hand,))\n\n\nclass PlayerHand(object):\n\n def __init__(self, game, deck, player, hand_name=None):\n self.game = game\n self.hand_name = hand_name\n self.deck = deck\n self.player = player\n self.is_dealer = False\n self.won_bid = 0\n # Set the cards.\n self.played_cards = []\n self.unplayed_cards = []\n self.tricks = 0\n\n def __str__(self):\n return 'PlayerHand(%r)' % (self.hand_name,)\n\n @property\n def pretty(self):\n played_pretty_str = ','.join(card.pretty for card in self.played_cards)\n unplayed_pretty_str = ','.join(card.pretty\n for card in self.unplayed_cards)\n if played_pretty_str:\n if unplayed_pretty_str:\n return 'PlayerHand(%r, played=%s, unplayed=%s)' % (\n self.hand_name, played_pretty_str, unplayed_pretty_str)\n else:\n return 'PlayerHand(%r, played=%s)' % (self.hand_name,\n played_pretty_str)\n else:\n return 'PlayerHand(%r, unplayed=%s)' % (self.hand_name,\n unplayed_pretty_str)\n\n\n def take_from_dealer(self):\n card = self.deck.draw_card()\n self.unplayed_cards.append(card)\n if len(self.unplayed_cards) > CARDS_PER_HAND:\n raise ValueError('Too many cards')\n\n def bid(self, max_bid):\n return self.player.make_bid(self, max_bid)\n\n def draw(self, winning_bid):\n \"\"\"Determines if hand will play and draws if in.\n\n - Returns None if the player is folding.\n - Returns the number of cards played if playing.\n - If playing, updates \"unplayed_cards\".\n \"\"\"\n return self.player.draw_cards(self, winning_bid)\n\n def play(self, trump, cards_out):\n card_played = self.player.play_card(self, trump, cards_out)\n cards_out.append(card_played)\n return card_played\n", "id": "9008694", "language": "Python", "matching_score": 1.4109482765197754, "max_stars_count": 0, "path": "game_play.py" }, { "content": "import json\nimport os\nimport time\n\n\nDATA = {'turns': {}}\n\n\nclass Settings(object):\n FILENAME = None\n CURRENT_TURN = 0\n NAME_CHOICES = None\n\n\ndef set_filename():\n filename = raw_input('Set the filename? ').strip()\n if not filename:\n filename = str(int(time.time()))\n Settings.FILENAME = filename + '.json'\n\n\ndef save_game():\n with open(Settings.FILENAME, 'w') as fh:\n json.dump(DATA, fh)\n\n\ndef enter_names():\n names = {}\n while True:\n name = raw_input('Enter name: ')\n if name.strip() == '':\n break\n names[name] = -5\n DATA['names'] = names\n Settings.NAME_CHOICES = '\\n'.join([\n '%d: %s' % (i, name)\n for i, name in enumerate(names.keys())\n ])\n save_game()\n\n\ndef game_over():\n game_over = raw_input('Is the game over? [y/n] ')\n return game_over.lower().strip() == 'y'\n\n\ndef get_bidder():\n actual_bidder = None\n while actual_bidder is None:\n print(Settings.NAME_CHOICES)\n bidder = raw_input('Who won the bid? 
')\n try:\n bidder = int(bidder)\n actual_bidder = Settings.NAME_CHOICES[bidder]\n except:\n if bidder in Settings.NAME_CHOICES:\n actual_bidder = bidder\n\n return actual_bidder\n\n\ndef get_bid():\n actual_bid = None\n while actual_bid is None:\n bid = raw_input('Bid amount? ')\n try:\n bid = int(bid)\n if bid in (2, 3, 4, 5):\n actual_bid = bid\n except:\n pass\n\n return actual_bid\n\n\ndef get_points():\n result = {}\n print '=' * 60\n print 'Scores for turn %d:' % (Settings.CURRENT_TURN,)\n for name in DATA['names'].keys():\n msg = 'Score for %r: ' % (name,)\n actual_score = None\n while actual_score is None:\n score = raw_input(msg)\n try:\n score = int(score)\n if score in (-5, 0, 1, 2, 3, 4, 5):\n actual_score = score\n except:\n pass\n\n result[name] = actual_score\n DATA['names'][name] += actual_score\n\n return result\n\n\n\ndef play_turn():\n turn = DATA['turns'].setdefault(Settings.CURRENT_TURN, {})\n turn['bidder'] = get_bidder()\n turn['bid'] = get_bid()\n turn['points'] = get_points()\n\n Settings.CURRENT_TURN += 1\n save_game()\n\n\ndef print_scores():\n print '=' * 60\n print 'Current scores:'\n print '-' * 60\n for name, score in DATA['names'].items():\n print '%r -> %d' % (name, score)\n print '=' * 60\n\n\ndef play_game():\n while not game_over():\n print_scores()\n play_turn()\n\n\ndef main():\n set_filename()\n enter_names()\n play_game()\n\n\nif __name__ == '__main__':\n main()\n", "id": "4820870", "language": "Python", "matching_score": 1.5194828510284424, "max_stars_count": 0, "path": "score_hukabuk.py" }, { "content": "#!/usr/bin/env python\n\n# Using no022.txt, a text file containing over five-thousand first names,\n# begin by sorting it into alphabetical order. Then working out the\n# alphabetical value for each name, multiply this value by its alphabetical\n# position in the list to obtain a name score.\n\n# For example, when the list is sorted into alphabetical order, COLIN,\n# which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.\n# So, COLIN would obtain a score of 938 X 53 = 49714.\n\n# What is the total of all the name scores in the file?\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef name_score(name):\n return sum((ord(letter.upper()) - ord('A') + 1) for letter in name)\n\n\ndef main(verbose=False):\n # The name file is a comma separated file with quotes\n names = sorted(get_data(22).strip('\"').split('\",\"'))\n return sum((i + 1) * name_score(name) for i, name in enumerate(names))\n\nif __name__ == '__main__':\n print euler_timer(22)(main)(verbose=True)\n", "id": "2730586", "language": "Python", "matching_score": 0.9103114604949951, "max_stars_count": 7, "path": "python/complete/no022.py" }, { "content": "#!/usr/bin/env python\n\n# By starting at the top of the triangle below and moving to adjacent\n# numbers on the row below, the maximum total from top to bottom is\n# 23. (3 -> 7 -> 4 -> 9)\n# 3\n# 7 4\n# 2 4 6\n# 8 5 9 3\n\n# That is, 3 + 7 + 4 + 9 = 23.\n\n# Find the maximum total from top to bottom in no067.txt, a 15K text file\n# containing a triangle with one-hundred rows.\n\n# NOTE: This is a much more difficult version of Problem 18. It is not possible\n# to try every route to solve this problem, as there are 2**99 altogether!\n# If you could check one trillion (10**12) routes every second it would take\n# over twenty billion years to check them all. There is an efficient\n# algorithm to solve it. 
;)\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\nfrom python.functions import max_sum\n\n\ndef main(verbose=False):\n triangle = get_data(67).strip()\n TRIANGLE_MAT = [[int(elt) for elt in line.split()]\n for line in triangle.split(\"\\n\") if line]\n return max_sum(TRIANGLE_MAT)\n\nif __name__ == '__main__':\n print euler_timer(67)(main)(verbose=True)\n", "id": "7993683", "language": "Python", "matching_score": 4.295082092285156, "max_stars_count": 7, "path": "python/complete/no067.py" }, { "content": "#!/usr/bin/env python\n\n# By starting at the top of the triangle below and moving to adjacent\n# numbers on the row below, the maximum total from top to bottom is\n# 23. (3 -> 7 -> 4 -> 9)\n# 3\n# 7 4\n# 2 4 6\n# 8 5 9 3\n\n# Find the maximum total from top to bottom of the triangle below:\n# 75\n# 95 64\n# 17 47 82\n# 18 35 87 10\n# 20 04 82 47 65\n# 19 01 23 75 03 34\n# 88 02 77 73 07 63 67\n# 99 65 04 28 06 16 70 92\n# 41 41 26 56 83 40 80 70 33\n# 41 48 72 33 47 32 37 16 94 29\n# 53 71 44 65 25 43 91 52 97 51 14\n# 70 11 33 28 77 73 17 78 39 68 17 57\n# 91 71 52 38 17 14 91 43 58 50 27 29 48\n# 63 66 04 68 89 53 67 30 73 16 69 87 40 31\n# 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\nfrom python.functions import max_sum\n\n\ndef main(verbose=False):\n triangle = get_data(18).strip()\n TRIANGLE_MAT = [[int(elt) for elt in line.split()]\n for line in triangle.split(\"\\n\") if line]\n\n return max_sum(TRIANGLE_MAT)\n\nif __name__ == '__main__':\n print euler_timer(18)(main)(verbose=True)\n", "id": "3914327", "language": "Python", "matching_score": 1.2133023738861084, "max_stars_count": 7, "path": "python/complete/no018.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all the multiples of 3 or 5 below 1000.\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n return sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)\n\nif __name__ == '__main__':\n print euler_timer(1)(main)(verbose=True)\n", "id": "9373724", "language": "Python", "matching_score": 1.7444669008255005, "max_stars_count": 7, "path": "python/complete/no001.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all the primes below two million.\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n return sum(sieve(2000000 - 1))\n\nif __name__ == '__main__':\n print euler_timer(10)(main)(verbose=True)\n", "id": "826266", "language": "Python", "matching_score": 0.6715093851089478, "max_stars_count": 7, "path": "python/complete/no010.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import fill_count\n\n\ndef main(verbose=False):\n return fill_count(3, 50)\n\nif __name__ == '__main__':\n print euler_timer(114)(main)(verbose=True)\n", "id": "1643275", "language": "Python", "matching_score": 2.2831993103027344, "max_stars_count": 7, "path": "python/complete/no114.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import fill_count\n\n\ndef main(verbose=False):\n count = 2\n n = 50\n while count <= 10 ** 6:\n n += 1\n count = fill_count(50, n)\n return n\n\nif __name__ == '__main__':\n print euler_timer(115)(main)(verbose=True)\n", "id": "10586799", "language": "Python", "matching_score": 0.16223745048046112, "max_stars_count": 7, "path": "python/complete/no115.py" }, { "content": 
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport doctest\n\n\nINPUT = \"245318-765747\"\n\n\ndef meets_criteria(value):\n \"\"\"Determine if a number meets the criteria\n\n >>> meets_criteria(111111)\n True\n >>> meets_criteria(223450)\n False\n >>> meets_criteria(123789)\n False\n \"\"\"\n digits = [int(d) for d in str(value)]\n if len(digits) != 6:\n return False\n\n # No repeats.\n if len(set(digits)) == len(digits):\n return False\n\n adjacent_same = False\n prev = -1\n for d in digits:\n if d == prev:\n adjacent_same = True\n if d < prev:\n return False\n # On to the next iteration.\n prev = d\n\n return adjacent_same\n\n\ndef meets_criteria_strict(value):\n \"\"\"Determine if a number meets the criteria\n\n >>> meets_criteria_strict(112233)\n True\n >>> meets_criteria_strict(123444)\n False\n >>> meets_criteria_strict(111111)\n False\n >>> meets_criteria_strict(111122)\n True\n >>> meets_criteria_strict(223450)\n False\n >>> meets_criteria_strict(123789)\n False\n \"\"\"\n digits = [int(d) for d in str(value)]\n if len(digits) != 6:\n return False\n\n # No repeats.\n if len(set(digits)) == len(digits):\n return False\n\n streak_counts = []\n prev = -1\n current_streak = 0\n for i, d in enumerate(digits):\n if d < prev:\n return False\n\n if d == prev:\n current_streak += 1\n # Account for final iteration.\n if i == len(digits) - 1:\n streak_counts.append(current_streak)\n else:\n if current_streak != 0:\n streak_counts.append(current_streak)\n current_streak = 1\n\n # On to the next iteration.\n prev = d\n\n return 2 in streak_counts\n\n\ndef main():\n start, end = map(int, INPUT.split(\"-\"))\n count = sum(1 for value in range(start, end + 1) if meets_criteria(value))\n print(f\"Meets criteria: {count}\")\n count = sum(\n 1 for value in range(start, end + 1) if meets_criteria_strict(value)\n )\n print(f\"Meets criteria strict: {count}\")\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n main()\n", "id": "11386532", "language": "Python", "matching_score": 0.6192572116851807, "max_stars_count": 0, "path": "day04/main.py" }, { "content": "import threading\n\n\nclass Counter:\n def __init__(self):\n self._count = 0\n self._lock = threading.Lock()\n\n def increment(self):\n with self._lock:\n self._count += 1\n return self._count\n", "id": "268129", "language": "Python", "matching_score": 0.7335101366043091, "max_stars_count": 1, "path": "src/python/counter.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for counting 
flops.\"\"\"\n\n\nimport fractions\n\n\n_DISPLAY_TEMPLATE = (\n \"{:4d} flops ({:4d} add, {:4d} sub, {:3d} multiply, {:3d} FMA)\"\n)\n\n\nclass Computation(object):\n \"\"\"Stateful manager of the number of flops.\"\"\"\n\n def __init__(self):\n self.add_count = 0\n self.sub_count = 0\n self.mul_count = 0\n self.fma_count = 0\n\n @property\n def count(self):\n return (\n self.add_count + self.sub_count + self.mul_count + self.fma_count\n )\n\n @property\n def display(self):\n return _DISPLAY_TEMPLATE.format(\n self.count,\n self.add_count,\n self.sub_count,\n self.mul_count,\n self.fma_count,\n )\n\n\nclass Float(object):\n \"\"\"A ``float``-like type that will increment a flop count.\n\n Args:\n value (float): The current value.\n computation (.Computation): The current computation\n in progress.\n \"\"\"\n\n def __init__(self, value, computation):\n self.value = value\n self.computation = computation\n\n def _get_value(self, other):\n if isinstance(other, Float):\n if other.computation is not self.computation:\n raise ValueError(\n \"Two `Float`s being combined should have the \"\n \"same parent computation.\"\n )\n return other.value\n elif isinstance(other, float):\n return other\n elif isinstance(other, int):\n other_float = float(other)\n if other == other_float:\n return other_float\n else:\n return None\n else:\n return None\n\n def __add__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.add_count += 1\n return Float(self.value + value, self.computation)\n\n def __radd__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.add_count += 1\n return Float(value + self.value, self.computation)\n\n def __sub__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.sub_count += 1\n return Float(self.value - value, self.computation)\n\n def __rsub__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.sub_count += 1\n return Float(value - self.value, self.computation)\n\n def __neg__(self):\n return Float(-self.value, self.computation)\n\n def __mul__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.mul_count += 1\n return Float(self.value * value, self.computation)\n\n def __rmul__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.mul_count += 1\n return Float(value * self.value, self.computation)\n\n def __truediv__(self, other):\n value = self._get_value(other)\n if value is None:\n return NotImplemented\n\n self.computation.mul_count += 1\n return Float(self.value / value, self.computation)\n\n def fma(self, val1, val2, val3):\n float1 = self._get_value(val1)\n float2 = self._get_value(val2)\n float3 = self._get_value(val3)\n if None in (float1, float2, float3):\n raise TypeError(\"Only `Float` or `float` allowed in fma\")\n\n self.computation.fma_count += 1\n frac1 = fractions.Fraction(float1)\n frac2 = fractions.Fraction(float2)\n frac3 = fractions.Fraction(float3)\n result = float(frac1 * frac2 + frac3)\n return Float(result, self.computation)\n", "id": "2833818", "language": "Python", "matching_score": 1.4583624601364136, "max_stars_count": 2, "path": "src/operation_count.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Run many experiments to confirm the flop counts for algorithms.\"\"\"\n\nfrom __future__ import print_function\n\nimport de_casteljau\nimport eft\nimport horner\nimport operation_count\nimport vs_method\n\n\nSEPARATOR = \"-\" * 80\n\n\ndef count_add_eft():\n parent = operation_count.Computation()\n val1 = operation_count.Float(1.5, parent)\n val2 = operation_count.Float(0.5 + 0.5 ** 52, parent)\n sum_, error = eft.add_eft(val1, val2)\n assert sum_.value == 2.0\n assert error.value == 0.5 ** 52\n assert parent.count == 6\n print(\" add_eft(): {}\".format(parent.display))\n\n\ndef count__split():\n parent = operation_count.Computation()\n val = operation_count.Float(1.0 + 0.5 ** 27, parent)\n high, low = eft._split(val)\n assert high.value == 1.0\n assert low.value == 0.5 ** 27\n assert parent.count == 4\n print(\" _split(): {}\".format(parent.display))\n\n\ndef count_multiply_eft():\n print(\"multiply_eft():\")\n for use_fma in (True, False):\n parent = operation_count.Computation()\n val1 = operation_count.Float(1.0 + 0.5 ** 40, parent)\n val2 = operation_count.Float(1.0 - 0.5 ** 40, parent)\n product, error = eft.multiply_eft(val1, val2, use_fma=use_fma)\n assert product.value == 1.0\n assert error.value == -0.5 ** 80\n if use_fma:\n description = \"with FMA: \"\n assert parent.count == 2\n else:\n description = \"w / out FMA: \"\n assert parent.count == 17\n print(\" {} {}\".format(description, parent.display))\n\n\ndef count__vec_sum():\n print(\"_vec_sum() (6(|p| - 1)):\")\n for size_p in range(1, 5 + 1):\n parent = operation_count.Computation()\n p = [operation_count.Float(1.0, parent)] * size_p\n eft._vec_sum(p)\n\n assert p[size_p - 1].value == float(size_p)\n assert parent.count == 6 * (size_p - 1)\n print(\" |p| = {}: {}\".format(size_p, parent.display))\n\n\ndef count_sum_k():\n print(\"sum_k() ((6K - 5)(|p| - 1)):\")\n for k in (2, 3, 4, 5):\n print(\" K = {}\".format(k))\n for size_p in range(1, 5 + 1):\n parent = operation_count.Computation()\n p = [operation_count.Float(1.0, parent)] * size_p\n total = eft.sum_k(p, k)\n\n assert total.value == float(size_p)\n assert parent.count == (6 * k - 5) * (size_p - 1)\n print(\" |p| = {}: {}\".format(size_p, parent.display))\n\n\ndef count_vs_method_basic():\n print(\"vs_method.basic() (5n + 1, w/o binomial):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = vs_method.basic(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == 5 * degree + 1\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_vs_method_compensated():\n print(\"vs_method.compensated() (26n + 7, w/o binomial):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = vs_method.compensated(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == 26 * degree + 7\n 
print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_horner_basic():\n print(\"horner.basic() (2n):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.basic(x, coeffs)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == 2 * degree\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef horner_expected_total(K, n):\n r\"\"\"Get the expected flop count for compensated Horner's method.\n\n When using FMA, the count is\n\n .. math::\n\n (5 \\cdot 2^K - 8)n + \\left((K + 8) 2^K - 12K - 6\\right).\n \"\"\"\n return (5 * 2 ** K - 8) * n + ((K + 8) * 2 ** K - 12 * K - 6)\n\n\ndef horner_expected_fma(K, n):\n r\"\"\"Get the FMA count for compensated Horner's method.\n\n When using FMA, the count is\n\n .. math::\n\n \\left(2^{K - 1} - 1\\right)n - 2^{K - 1}(K - 3) - 2\n\n FMA (fused-multiply-add) instructions.\n \"\"\"\n return (2 ** (K - 1) - 1) * n - 2 ** (K - 1) * (K - 3) - 2\n\n\ndef count_horner_compensated():\n print(\"horner.compensated() (11n + 1):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated(x, coeffs)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == 11 * degree + 1\n print(\" degree {}: {}\".format(degree, parent.display))\n\n # NOTE: This is **the same** as ``horner.compensated()`` but uses\n # a different algorithm.\n print(\"horner.compensated_k(..., 2) (12n + 10):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated_k(x, coeffs, 2)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == horner_expected_total(2, degree)\n assert parent.fma_count == horner_expected_fma(2, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_horner_compensated3():\n print(\"horner.compensated3() (32n + 46, n >= 2):\")\n for degree in range(2, 6 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated3(x, coeffs)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == horner_expected_total(3, degree)\n assert parent.fma_count == horner_expected_fma(3, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n print(\"horner.compensated_k(..., 3) (32n + 46, n >= 2):\")\n for degree in range(2, 6 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated_k(x, coeffs, 3)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == horner_expected_total(3, degree)\n assert parent.fma_count == horner_expected_fma(3, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_horner_compensated4():\n print(\"horner.compensated_k(..., 4) (72n + 138, n >= 3):\")\n for degree in range(3, 7 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated_k(x, coeffs, 4)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == 
horner_expected_total(4, degree)\n assert parent.fma_count == horner_expected_fma(4, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_horner_compensated5():\n print(\"horner.compensated_k(..., 5) (152n + 350, n >= 4):\")\n for degree in range(4, 8 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated_k(x, coeffs, 5)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == horner_expected_total(5, degree)\n assert parent.fma_count == horner_expected_fma(5, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_horner_compensated6():\n print(\"horner.compensated_k(..., 6) (312n + 818, n >= 5):\")\n for degree in range(5, 9 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(2.0, parent)\n coeffs = (operation_count.Float(1.0, parent),) * (degree + 1)\n p = horner.compensated_k(x, coeffs, 6)\n assert p.value == 2.0 ** (degree + 1) - 1\n assert parent.count == horner_expected_total(6, degree)\n assert parent.fma_count == horner_expected_fma(6, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_de_casteljau_basic():\n print(\"de_casteljau.basic() ((3n^2 + 3n + 2) / 2 = 3 T_n + 1):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = de_casteljau.basic(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == 3 * (degree * (degree + 1) // 2) + 1\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef de_casteljau_expected_total(K, n):\n \"\"\"Get the expected flop count for the compensated de Casteljau method.\n\n When using FMA, the count is\n\n .. math::\n\n (15K^2 - 34K + 26)T_n + 6K^2 - 11K + 11.\n \"\"\"\n Tn = (n * (n + 1)) // 2\n return (15 * K ** 2 - 34 * K + 26) * Tn + 6 * K ** 2 - 11 * K + 11\n\n\ndef de_casteljau_expected_fma(K, n):\n \"\"\"Get the FMA count for the compensated de Casteljau method.\n\n When using FMA, the count is\n\n .. 
math::\n\n (3K - 4)T_n\n\n FMA (fused-multiply-add) instructions.\n \"\"\"\n Tn = (n * (n + 1)) // 2\n return (3 * K - 4) * Tn\n\n\ndef count_de_casteljau_compensated():\n print(\"de_casteljau.compensated() (9n^2 + 9n + 7 = 18 T_n + 13):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = de_casteljau.compensated(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == de_casteljau_expected_total(2, degree)\n assert parent.fma_count == de_casteljau_expected_fma(2, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_de_casteljau_compensated3():\n print(\n \"de_casteljau.compensated3() ((59n^2 + 59n + 16) / 2 = 59 T_n + 32):\"\n )\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = de_casteljau.compensated3(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == de_casteljau_expected_total(3, degree)\n assert parent.fma_count == de_casteljau_expected_fma(3, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_de_casteljau_compensated4():\n print(\"de_casteljau.compensated4() (65n^2 + 65n + 9 = 130 T_n + 63):\")\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = de_casteljau.compensated4(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == de_casteljau_expected_total(4, degree)\n assert parent.fma_count == de_casteljau_expected_fma(4, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef count_de_casteljau_compensated5():\n msg = (\n \"de_casteljau.compensated5() ((231n^2 + 231n + 20) / 2 \"\n \"= 231 T_n + 106):\"\n )\n print(msg)\n for degree in range(1, 5 + 1):\n parent = operation_count.Computation()\n x = operation_count.Float(0.25, parent)\n coeffs = tuple(\n operation_count.Float((-1.0) ** k, parent)\n for k in range(degree + 1)\n )\n p = de_casteljau.compensated5(x, coeffs)\n assert p.value == 0.5 ** degree\n assert parent.count == de_casteljau_expected_total(5, degree)\n assert parent.fma_count == de_casteljau_expected_fma(5, degree)\n print(\" degree {}: {}\".format(degree, parent.display))\n\n\ndef main():\n count_add_eft()\n print(SEPARATOR)\n count__split()\n print(SEPARATOR)\n count_multiply_eft()\n print(SEPARATOR)\n count__vec_sum()\n print(SEPARATOR)\n count_sum_k()\n print(SEPARATOR)\n count_vs_method_basic()\n print(SEPARATOR)\n count_vs_method_compensated()\n print(SEPARATOR)\n count_horner_basic()\n print(SEPARATOR)\n count_horner_compensated()\n print(SEPARATOR)\n count_horner_compensated3()\n print(SEPARATOR)\n count_horner_compensated4()\n print(SEPARATOR)\n count_horner_compensated5()\n print(SEPARATOR)\n count_horner_compensated6()\n print(SEPARATOR)\n count_de_casteljau_basic()\n print(SEPARATOR)\n count_de_casteljau_compensated()\n print(SEPARATOR)\n count_de_casteljau_compensated3()\n print(SEPARATOR)\n count_de_casteljau_compensated4()\n print(SEPARATOR)\n count_de_casteljau_compensated5()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11947325", "language": "Python", "matching_score": 2.9442591667175293, "max_stars_count": 2, "path": 
"scripts/compute_counts.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Collection of error-free transforms.\"\"\"\n\n\nimport fractions\n\n\ndef add_eft(val1, val2):\n # See: https://doi.org/10.1137/030601818\n sum_ = val1 + val2\n delta1 = sum_ - val1\n error = (val1 - (sum_ - delta1)) + (val2 - delta1)\n return sum_, error\n\n\ndef _split(val):\n # Helper for ``multiply_eft``.\n scaled = val * 134217729.0 # 134217729 == 2^{27} + 1.\n high_bits = scaled - (scaled - val)\n low_bits = val - high_bits\n return high_bits, low_bits\n\n\ndef _fma(val1, val2, val3):\n if (\n isinstance(val1, float)\n and isinstance(val2, float)\n and isinstance(val3, float)\n ):\n frac1 = fractions.Fraction(val1)\n frac2 = fractions.Fraction(val2)\n frac3 = fractions.Fraction(val3)\n return float(frac1 * frac2 + frac3)\n else:\n return val1.fma(val1, val2, val3)\n\n\ndef multiply_eft(val1, val2, use_fma=True):\n # See: https://doi.org/10.1109/TC.2008.215\n product = val1 * val2\n if use_fma:\n error = _fma(val1, val2, -product)\n else:\n high1, low1 = _split(val1)\n high2, low2 = _split(val2)\n error = low1 * low2 - (\n ((product - high1 * high2) - low1 * high2) - high1 * low2\n )\n\n return product, error\n\n\ndef _vec_sum(p):\n # See: https://doi.org/10.1137/030601818\n # Helper for ``sum_k``.\n # NOTE: This modifies ``p`` in place.\n n = len(p)\n for i in range(1, n):\n p[i], p[i - 1] = add_eft(p[i], p[i - 1])\n\n\ndef sum_k(p, k):\n # See: https://doi.org/10.1137/030601818\n p = list(p) # Make a copy to be modified.\n\n for _ in range(k - 1):\n _vec_sum(p)\n\n result = p[0]\n for p_val in p[1:]:\n result += p_val\n\n return result\n", "id": "8031556", "language": "Python", "matching_score": 1.1912461519241333, "max_stars_count": 2, "path": "src/eft.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Verify table in manuscript.\n\nThis performs compensated de Casteljau (``K = 2``) in both IEEE-754\ndouble precision and with exact arithmetic and verifies that the terms\nin the table are correct.\n\"\"\"\n\nimport fractions\n\nimport eft\n\n\nF = fractions.Fraction\nK = 1001\nROOT = 0.5\nU = F(1, 2 ** 53)\nMACH = 0.5 ** 53\nS = ROOT + K * MACH\nR_HAT, RHO = eft.add_eft(1.0, -S)\n\n\ndef exact_de_castlejau(b4_vals):\n b = {}\n b[4] = tuple(map(F, b4_vals))\n s = F(S)\n\n for k in (3, 2, 1, 0):\n prev_b = b[k + 1]\n b[k] = tuple(\n (1 - s) * b1 + s * b2 for b1, b2 in zip(prev_b, prev_b[1:])\n )\n\n return b\n\n\ndef stage1(b4_vals, exact_b):\n b04, b14, b24, b34, 
b44 = b4_vals\n\n P1, pi1 = eft.multiply_eft(R_HAT, b04)\n P2, pi2 = eft.multiply_eft(S, b14)\n b03, sigma3 = eft.add_eft(P1, P2)\n e03 = pi1 + pi2 + sigma3 + RHO * b04\n\n P1, pi1 = eft.multiply_eft(R_HAT, b14)\n P2, pi2 = eft.multiply_eft(S, b24)\n b13, sigma3 = eft.add_eft(P1, P2)\n e13 = pi1 + pi2 + sigma3 + RHO * b14\n\n P1, pi1 = eft.multiply_eft(R_HAT, b24)\n P2, pi2 = eft.multiply_eft(S, b34)\n b23, sigma3 = eft.add_eft(P1, P2)\n e23 = pi1 + pi2 + sigma3 + RHO * b24\n\n P1, pi1 = eft.multiply_eft(R_HAT, b34)\n P2, pi2 = eft.multiply_eft(S, b44)\n b33, sigma3 = eft.add_eft(P1, P2)\n e33 = pi1 + pi2 + sigma3 + RHO * b34\n\n # Verify columns 1 (bhat) and 2 (dbhat).\n assert F(b03) == F(1, 8) - 7 * (K * U) / 4 - U / 4\n assert F(b13) == -F(1, 8) + 5 * (K * U) / 4 + U / 4\n assert F(b23) == F(1, 8) - 3 * (K * U) / 4\n assert F(b33) == -F(1, 8) + (K * U) / 4\n assert F(e03) == U / 4\n assert F(e13) == -U / 4\n assert F(e23) == 0\n assert F(e33) == 0\n # Verify column 2 (d2b).\n exact_b03, exact_b13, exact_b23, exact_b33 = exact_b[3]\n assert exact_b03 - F(b03) - F(e03) == 0\n assert exact_b13 - F(b13) - F(e13) == 0\n assert exact_b23 - F(b23) - F(e23) == 0\n assert exact_b33 - F(b33) - F(e33) == 0\n\n b_vals = (b03, b13, b23, b33)\n e_vals = (e03, e13, e23, e33)\n return b_vals, e_vals\n\n\ndef stage2(b3_vals, e3_vals, exact_b):\n b03, b13, b23, b33 = b3_vals\n e03, e13, e23, e33 = e3_vals\n\n P1, pi1 = eft.multiply_eft(R_HAT, b03)\n P2, pi2 = eft.multiply_eft(S, b13)\n b02, sigma3 = eft.add_eft(P1, P2)\n ell02 = pi1 + pi2 + sigma3 + RHO * b03\n e02 = ell02 + S * e13 + R_HAT * e03\n\n P1, pi1 = eft.multiply_eft(R_HAT, b13)\n P2, pi2 = eft.multiply_eft(S, b23)\n b12, sigma3 = eft.add_eft(P1, P2)\n ell12 = pi1 + pi2 + sigma3 + RHO * b13\n e12 = ell12 + S * e23 + R_HAT * e13\n\n P1, pi1 = eft.multiply_eft(R_HAT, b23)\n P2, pi2 = eft.multiply_eft(S, b33)\n b22, sigma3 = eft.add_eft(P1, P2)\n ell22 = pi1 + pi2 + sigma3 + RHO * b23\n e22 = ell22 + S * e33 + R_HAT * e23\n\n # Verify columns 1 (bhat) and 2 (dbhat).\n assert F(b02) == -(K * U) / 2\n assert F(b12) == (K * U) / 2 + U / 8\n assert F(b22) == -(K * U) / 2\n assert F(e02) == 3 * (K * U) ** 2\n assert F(e12) == -U / 8 - 2 * (K * U) ** 2\n assert F(e22) == (K * U) ** 2\n # Verify column 2 (d2b).\n exact_b02, exact_b12, exact_b22 = exact_b[2]\n assert exact_b02 - F(b02) - F(e02) == 0\n assert exact_b12 - F(b12) - F(e12) == 0\n assert exact_b22 - F(b22) - F(e22) == 0\n\n b_vals = (b02, b12, b22)\n e_vals = (e02, e12, e22)\n return b_vals, e_vals\n\n\ndef stage3(b2_vals, e2_vals, exact_b):\n b02, b12, b22 = b2_vals\n e02, e12, e22 = e2_vals\n\n P1, pi1 = eft.multiply_eft(R_HAT, b02)\n P2, pi2 = eft.multiply_eft(S, b12)\n b01, sigma3 = eft.add_eft(P1, P2)\n ell01 = pi1 + pi2 + sigma3 + RHO * b02\n e01 = ell01 + S * e12 + R_HAT * e02\n\n P1, pi1 = eft.multiply_eft(R_HAT, b12)\n P2, pi2 = eft.multiply_eft(S, b22)\n b11, sigma3 = eft.add_eft(P1, P2)\n ell11 = pi1 + pi2 + sigma3 + RHO * b12\n e11 = ell11 + S * e22 + R_HAT * e12\n\n # Verify columns 1 (bhat) and 2 (dbhat).\n assert F(b01) == U / 16 + (K * U) ** 2 + 239 * U ** 2\n assert F(b11) == U / 16 - (K * U) ** 2 - 239 * U ** 2\n assert F(e01) == -U / 16 + (K * U) ** 2 / 2 - 239 * U ** 2\n assert F(e11) == -U / 16 - (K * U) ** 2 / 2 + 239 * U ** 2\n # Verify column 2 (d2b).\n exact_b01, exact_b11 = exact_b[1]\n assert exact_b01 - F(b01) - F(e01) == -5 * (K * U) ** 3\n assert exact_b11 - F(b11) - F(e11) == 3 * (K * U) ** 3\n\n b_vals = (b01, b11)\n e_vals = (e01, e11)\n return b_vals, 
e_vals\n\n\ndef stage4(b1_vals, e1_vals, exact_b):\n b01, b11 = b1_vals\n e01, e11 = e1_vals\n\n P1, pi1 = eft.multiply_eft(R_HAT, b01)\n P2, pi2 = eft.multiply_eft(S, b11)\n b00, sigma3 = eft.add_eft(P1, P2)\n ell00 = pi1 + pi2 + sigma3 + RHO * b01\n e00 = ell00 + S * e11 + R_HAT * e01\n\n # Verify columns 1 (bhat) and 2 (dbhat).\n assert F(b00) == U / 16\n assert F(e00) == -U / 16\n # Verify column 2 (d2b).\n exact_b00, = exact_b[0]\n assert exact_b00 - F(b00) - F(e00) == -4 * (K * U) ** 3 + 8 * (K * U) ** 4\n\n\ndef main():\n # First, verify S, R_HAT and RHO.\n assert F(S) == F(1, 2) + K * U\n assert F(RHO) == 0\n assert F(R_HAT) == F(1, 2) - K * U\n\n # p(s) = (2s - 1)^3 (s - 1)\n b4_vals = (1.0, -0.75, 0.5, -0.25, 0.0)\n exact_b = exact_de_castlejau(b4_vals)\n\n b3_vals, e3_vals = stage1(b4_vals, exact_b)\n b2_vals, e2_vals = stage2(b3_vals, e3_vals, exact_b)\n b1_vals, e1_vals = stage3(b2_vals, e2_vals, exact_b)\n stage4(b1_vals, e1_vals, exact_b)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11075560", "language": "Python", "matching_score": 1.172666072845459, "max_stars_count": 2, "path": "scripts/verify_table.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport de_casteljau\nimport eft\n\n\ndef standard_residual(s, coeffs1, t, coeffs2):\n x1 = de_casteljau.basic(s, coeffs1[0, :])\n y1 = de_casteljau.basic(s, coeffs1[1, :])\n x2 = de_casteljau.basic(t, coeffs2[0, :])\n y2 = de_casteljau.basic(t, coeffs2[1, :])\n return np.array([[x1 - x2], [y1 - y2]])\n\n\ndef compensated_residual(s, coeffs1, t, coeffs2):\n x1, dx1 = de_casteljau._compensated_k(s, coeffs1[0, :], 2)\n y1, dy1 = de_casteljau._compensated_k(s, coeffs1[1, :], 2)\n x2, dx2 = de_casteljau._compensated_k(t, coeffs2[0, :], 2)\n y2, dy2 = de_casteljau._compensated_k(t, coeffs2[1, :], 2)\n\n dx, sigma = eft.add_eft(x1, -x2)\n tau = (dx1 - dx2) + sigma\n dx += tau\n dy, sigma = eft.add_eft(y1, -y2)\n tau = (dy1 - dy2) + sigma\n dy += tau\n\n return np.array([[dx], [dy]])\n\n\ndef newton(s0, coeffs1, t0, coeffs2, residual):\n max_iter = 50\n tol = 1e-15\n s = s0\n t = t0\n\n iterates = []\n for _ in range(max_iter):\n F = residual(s, coeffs1, t, coeffs2)\n # Compute the standard Jacobian.\n dx1 = de_casteljau.derivative(s, coeffs1[0, :])\n dy1 = de_casteljau.derivative(s, coeffs1[1, :])\n dx2 = de_casteljau.derivative(t, coeffs2[0, :])\n dy2 = de_casteljau.derivative(t, coeffs2[1, :])\n J = np.array([[dx1, -dx2], [dy1, -dy2]])\n # Solve for the updates.\n ds, dt = np.linalg.solve(J, F).flatten()\n # Apply the updates.\n s = s - ds\n t = t - dt\n iterates.append((s, t))\n # Return if the update is below the tolerance.\n if np.linalg.norm([ds, dt], ord=2) < tol: # 2-norm\n break\n\n return iterates\n", "id": "8518447", "language": "Python", "matching_score": 1.5354961156845093, "max_stars_count": 2, "path": "src/newton_bezier.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Performs Horner's method.\n\nHorner's method computes\n\n.. math::\n\n p(x) = a_n x^n + \\cdots a_1 x + a_0\n\nvia\n\n.. math::\n\n \\begin{align*}\n p_n &= a_n \\\\\n p_k &= p_{k + 1} x + a_k \\\\\n p(x) &= p_0\n \\end{align*}\n\nThis module provides both the standard version and a compensated version.\n\n.. note::\n\n This assumes throughout that ``coeffs`` is ordered from\n :math:`a_n` to :math:`a_0`.\n\"\"\"\n\nimport eft\n\n\ndef basic(x, coeffs):\n if not coeffs:\n return 0.0\n\n p = coeffs[0]\n for coeff in coeffs[1:]:\n p = p * x + coeff\n\n return p\n\n\ndef _compensated(x, coeffs):\n if not coeffs:\n return 0.0, [], []\n\n p = coeffs[0]\n e_pi = []\n e_sigma = []\n for coeff in coeffs[1:]:\n prod, e1 = eft.multiply_eft(p, x)\n p, e2 = eft.add_eft(prod, coeff)\n e_pi.append(e1)\n e_sigma.append(e2)\n\n return p, e_pi, e_sigma\n\n\ndef compensated(x, coeffs):\n p, e_pi, e_sigma = _compensated(x, coeffs)\n\n # Compute the error via standard Horner's.\n e = 0.0\n for e1, e2 in zip(e_pi, e_sigma):\n e = x * e + (e1 + e2)\n\n return p + e\n\n\ndef compensated3(x, coeffs):\n h1, p2, p3 = _compensated(x, coeffs)\n h2, p4, p5 = _compensated(x, p2)\n h3, p6, p7 = _compensated(x, p3)\n\n # Use standard Horner from here.\n h4 = basic(x, p4)\n h5 = basic(x, p5)\n h6 = basic(x, p6)\n h7 = basic(x, p7)\n\n # Now use 3-fold summation.\n p = [h1, h2, h3, h4, h5, h6, h7]\n return eft.sum_k(p, 3)\n\n\ndef compensated_k(x, coeffs, k):\n h = {}\n p = {1: coeffs}\n\n # First, \"filter\" off the errors from the interior\n # polynomials.\n for i in range(1, 2 ** (k - 1)):\n h[i], p[2 * i], p[2 * i + 1] = _compensated(x, p[i])\n\n # Then use standard Horner for the leaf polynomials.\n for i in range(2 ** (k - 1), 2 ** k):\n h[i] = basic(x, p[i])\n\n # Now use K-fold summation on everything in ``h`` (but keep the\n # order).\n to_sum = [h[i] for i in range(1, 2 ** k)]\n return eft.sum_k(to_sum, k)\n", "id": "4459175", "language": "Python", "matching_score": 1.6098724603652954, "max_stars_count": 2, "path": "src/horner.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Performs de Casteljau's method.\n\nde Casteljau's method evaluates a function in Bernstein-Bezier form\n\n.. math::\n\n p(s) = p_0 (1 - s)^n + \\cdots + p_j \\binom{n}{j} (1 - s)^{n - j} s^j +\n \\cdots + p_n s^n\n\nby progressively reducing the control points\n\n.. 
math::\n\n \\begin{align*}\n p_j^{(0)} &= p_j \\\\\n p_j^{(k + 1)} &= (1 - s) p_j^{(k)} + p_{j + 1}^{(k)} \\\\\n p(s) &= p_0^{(n)}.\n \\end{align*}\n\nThis module provides both the standard version and a compensated version.\n\n.. note::\n\n This assumes throughout that ``coeffs`` is ordered from\n :math:`p_n` to :math:`p_0`.\n\"\"\"\n\n\nimport eft\n\n\ndef basic(s, coeffs):\n \"\"\"Performs the \"standard\" de Casteljau algorithm.\"\"\"\n r = 1.0 - s\n\n degree = len(coeffs) - 1\n pk = list(coeffs)\n for k in range(degree):\n new_pk = []\n for j in range(degree - k):\n new_pk.append(r * pk[j] + s * pk[j + 1])\n # Update the \"current\" values.\n pk = new_pk\n\n return pk[0]\n\n\ndef local_error(errors, rho, delta_b):\n r\"\"\"Compute :math:`\\ell` from a list of errors.\n\n This assumes, but does not check, that there are at least two\n ``errors``.\n \"\"\"\n num_errs = len(errors)\n\n l_hat = errors[0] + errors[1]\n for j in range(2, num_errs):\n l_hat += errors[j]\n\n l_hat += rho * delta_b\n\n return l_hat\n\n\ndef local_error_eft(errors, rho, delta_b):\n r\"\"\"Perform an error-free transformation for computing :math:`\\ell`.\n\n This assumes, but does not check, that there are at least two\n ``errors``.\n \"\"\"\n num_errs = len(errors)\n new_errors = [None] * (num_errs + 1)\n\n l_hat, new_errors[0] = eft.add_eft(errors[0], errors[1])\n for j in range(2, num_errs):\n l_hat, new_errors[j - 1] = eft.add_eft(l_hat, errors[j])\n\n prod, new_errors[num_errs - 1] = eft.multiply_eft(rho, delta_b)\n l_hat, new_errors[num_errs] = eft.add_eft(l_hat, prod)\n\n return new_errors, l_hat\n\n\ndef _compensated_k(s, coeffs, K):\n r\"\"\"Performs a K-compensated de Casteljau.\n\n .. _JLCS10: https://doi.org/10.1016/j.camwa.2010.05.021\n\n Note that the order of operations exactly matches the `JLCS10`_ paper.\n For example, :math:`\\widehat{\\partial b}_j^{(k)}` is computed as\n\n .. math::\n\n \\widehat{ell}_{1, j}^{(k)} \\oplus \\left(s \\otimes\n \\widehat{\\partial b}_{j + 1}^{(k + 1)}\\right) \\oplus\n \\left(\\widehat{r} \\otimes \\widehat{\\partial b}_j^{(k + 1)}\\right)\n\n instead of \"typical\" order\n\n .. math::\n\n \\left(\\widehat{r} \\otimes \\widehat{\\partial b}_j^{(k + 1)}\\right)\n \\oplus \\left(s \\otimes \\widehat{\\partial b}_{j + 1}^{(k + 1)}\n \\right) \\oplus \\widehat{ell}_{1, j}^{(k)}.\n\n This is so that the term\n\n .. math::\n\n \\widehat{r} \\otimes \\widehat{\\partial b}_j^{(k + 1)}\n\n only has to be in one sum. 
We avoid an extra sum because\n :math:`\\widehat{r}` already has round-off error.\n \"\"\"\n r, rho = eft.add_eft(1.0, -s)\n\n degree = len(coeffs) - 1\n bk = {0: list(coeffs)}\n # NOTE: This will be shared, but is read only.\n all_zero = (0.0,) * (degree + 1)\n for F in range(1, K - 1 + 1):\n bk[F] = all_zero\n\n for k in range(degree):\n new_bk = {F: [] for F in range(K - 1 + 1)}\n\n for j in range(degree - k):\n # Update the \"level 0\" stuff.\n P1, pi1 = eft.multiply_eft(r, bk[0][j])\n P2, pi2 = eft.multiply_eft(s, bk[0][j + 1])\n S3, sigma3 = eft.add_eft(P1, P2)\n new_bk[0].append(S3)\n\n errors = [pi1, pi2, sigma3]\n delta_b = bk[0][j]\n\n for F in range(1, K - 2 + 1):\n new_errors, l_hat = local_error_eft(errors, rho, delta_b)\n P1, pi1 = eft.multiply_eft(s, bk[F][j + 1])\n S2, sigma2 = eft.add_eft(l_hat, P1)\n P3, pi3 = eft.multiply_eft(r, bk[F][j])\n S, sigma4 = eft.add_eft(S2, P3)\n new_bk[F].append(S)\n\n new_errors.extend([pi1, sigma2, pi3, sigma4])\n errors = new_errors\n delta_b = bk[F][j]\n\n # Update the \"level 2\" stuff.\n l_hat = local_error(errors, rho, delta_b)\n new_bk[K - 1].append(\n l_hat + s * bk[K - 1][j + 1] + r * bk[K - 1][j]\n )\n\n # Update the \"current\" values.\n bk = new_bk\n\n return tuple(bk[F][0] for F in range(K - 1 + 1))\n\n\ndef compensated(s, coeffs):\n b, db = _compensated_k(s, coeffs, 2)\n return eft.sum_k((b, db), 2)\n\n\ndef compensated3(s, coeffs):\n b, db, d2b = _compensated_k(s, coeffs, 3)\n return eft.sum_k((b, db, d2b), 3)\n\n\ndef compensated4(s, coeffs):\n b, db, d2b, d3b = _compensated_k(s, coeffs, 4)\n return eft.sum_k((b, db, d2b, d3b), 4)\n\n\ndef compensated5(s, coeffs):\n b, db, d2b, d3b, d4b = _compensated_k(s, coeffs, 5)\n return eft.sum_k((b, db, d2b, d3b, d4b), 5)\n", "id": "1697224", "language": "Python", "matching_score": 5.079838752746582, "max_stars_count": 2, "path": "src/de_casteljau.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Performs de Casteljau's method.\n\nde Casteljau's method evaluates a function in Bernstein-Bezier form\n\n.. math::\n\n p(s) = p_0 (1 - s)^n + \\cdots + p_j \\binom{n}{j} (1 - s)^{n - j} s^j +\n \\cdots + p_n s^n\n\nby progressively reducing the control points\n\n.. math::\n\n \\begin{align*}\n p_j^{(0)} &= p_j \\\\\n p_j^{(k + 1)} &= (1 - s) p_j^{(k)} + p_{j + 1}^{(k)} \\\\\n p(s) &= p_0^{(n)}.\n \\end{align*}\n\n.. 
note::\n\n This assumes throughout that ``coeffs`` is ordered from\n :math:`p_n` to :math:`p_0`.\n\"\"\"\n\n\ndef basic(s, coeffs):\n \"\"\"Performs the \"standard\" de Casteljau algorithm.\"\"\"\n r = 1 - s\n\n degree = len(coeffs) - 1\n pk = list(coeffs)\n for k in range(degree):\n new_pk = []\n for j in range(degree - k):\n new_pk.append(r * pk[j] + s * pk[j + 1])\n # Update the \"current\" values.\n pk = new_pk\n\n return pk[0]\n", "id": "643473", "language": "Python", "matching_score": 1.1709790229797363, "max_stars_count": 1, "path": "src/de_casteljau.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\n\n\ndef forms_right_triangle(p, q):\n # squared val of sides\n squared_values = sorted([p[0] ** 2 + p[1] ** 2,\n q[0] ** 2 + q[1] ** 2,\n (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2])\n # Get rid of identical points\n if 0 in squared_values:\n return False\n # Get rid of collinearity\n a, b, c = [sqrt(val) for val in squared_values]\n if a + b <= c:\n return False\n if b + c <= a:\n return False\n if c + a <= b:\n return False\n return (squared_values[0] + squared_values[1] == squared_values[2])\n\n\ndef main(verbose=False):\n n = 50\n result = set()\n points = [(x, y) for x in range(n + 1) for y in range(n + 1)]\n num_points = (n + 1) ** 2 # len(points), clearly\n # loop through all combinations of p and q\n for i in range(num_points - 1):\n for j in range(i + 1, num_points):\n p = points[i]\n q = points[j]\n if forms_right_triangle(p, q):\n result.add((p, q))\n return len(result)\n\nif __name__ == '__main__':\n print euler_timer(91)(main)(verbose=True)\n", "id": "272592", "language": "Python", "matching_score": 0.2019602358341217, "max_stars_count": 7, "path": "python/complete/no091.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\n\ndef from_serial_inner(bakeoff_module):\n def serial_inner(nodes, s_vals):\n dimension, _ = nodes.shape\n (num_vals,) = s_vals.shape\n evaluated = np.empty((dimension, num_vals), order=\"F\")\n for j in range(num_vals):\n evaluated[:, j] = bakeoff_module.serial_inner(nodes, s_vals[j])\n\n return evaluated\n\n return serial_inner\n\n\ndef do_verify(bakeoff_module):\n print(f\"Verifying: {bakeoff_module}\")\n functions = (\n bakeoff_module.forall1,\n bakeoff_module.forall2,\n bakeoff_module.forall3,\n bakeoff_module.do1,\n bakeoff_module.do2,\n bakeoff_module.do3,\n bakeoff_module.spread1,\n bakeoff_module.spread2,\n bakeoff_module.spread3,\n bakeoff_module.serial,\n from_serial_inner(bakeoff_module),\n bakeoff_module.vs_algorithm32,\n bakeoff_module.vs_algorithm53,\n bakeoff_module.vs_algorithm64,\n )\n nodes = np.asfortranarray([[1.0, 1.0, 2.0, 2.0], [0.0, 1.0, 0.0, 1.0]])\n s_vals = np.asfortranarray([0.0, 0.5, 1.0])\n expected = np.asfortranarray([[1.0, 1.5, 2.0], [0.0, 0.5, 1.0]])\n for fn in functions:\n evaluated = fn(nodes, s_vals)\n assert np.all(evaluated == expected)\n print(f\"Verified: {fn.__name__}\")\n", "id": "4812358", "language": "Python", 
"matching_score": 0.8413919806480408, "max_stars_count": 0, "path": "src/python-bakeoff/verify_shared.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\n\n\ndef inner_circle(e1, e2, e3):\n # Using (e1 + e2 + e3 + e4)^2 = 2(e1^2 + e2^2 + e3^2 + e4^2)\n # A = e1^2 + e2^2 + e3^2; B = e1 + e2 + e3\n # We have e4 = B +/- sqrt(2(B^2 - A))\n # inner circle will have smaller radii, hence\n # larger eccentricity, so we set e4 = B + sqrt(...)\n from math import sqrt\n\n A = e1 ** 2 + e2 ** 2 + e3 ** 2\n B = e1 + e2 + e3\n return B + sqrt(2 * (B ** 2 - A))\n\n\ndef next_3(node):\n points, eccentricity = node\n e1, e2, e3 = points\n return [[(eccentricity, e1, e2), inner_circle(eccentricity, e1, e2)],\n [(eccentricity, e2, e3), inner_circle(eccentricity, e2, e3)],\n [(eccentricity, e3, e1), inner_circle(eccentricity, e3, e1)]]\n\n\ndef main(verbose=False):\n iterations = 10\n C = {}\n C[-1] = {0: [(1, 1, 1), 3 - 2 * sqrt(3)]}\n C[0] = {0: [(1, 1, 3 - 2 * sqrt(3)), 1],\n 1: [(1, 1, 3 - 2 * sqrt(3)), 1],\n 2: [(1, 1, 3 - 2 * sqrt(3)), 1]}\n C[1] = {}\n first_level = inner_circle(1, 1, 3 - 2 * sqrt(3))\n for i in range(3):\n C[1][i] = [(3 - 2 * sqrt(3), 1, 1), first_level]\n C[1][3] = [(1, 1, 1), inner_circle(1, 1, 1)]\n for i in range(2, iterations + 1):\n C[i] = {}\n for node, value in C[i - 1].items():\n n1, n2, n3 = next_3(value)\n C[i][3 * node] = n1\n C[i][3 * node + 1] = n2\n C[i][3 * node + 2] = n3\n\n total_area = (1.0 / C[-1][0][1]) ** 2\n covered_area = 0\n for i in range(iterations + 1):\n for j in C[i]:\n covered_area += (1.0 / C[i][j][1]) ** 2\n\n return round(1 - covered_area / total_area, 8)\n\nif __name__ == '__main__':\n print euler_timer(199)(main)(verbose=True)\n", "id": "3289536", "language": "Python", "matching_score": 1.4129918813705444, "max_stars_count": 7, "path": "python/complete/no199.py" }, { "content": "#!/usr/bin/env python\n\n# There exists exactly one Pythagorean triplet for which\n# a + b + c = 1000. 
Find the product abc.\n\nimport operator\n\nfrom python.decorators import euler_timer\n\n\ndef first_triplet(total):\n for a in range(1, total - 1):\n for b in range(1, total - a):\n c = total - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return [a, b, c]\n\n return []\n\n\ndef main(verbose=False):\n return reduce(operator.mul, first_triplet(1000))\n\nif __name__ == '__main__':\n print euler_timer(9)(main)(verbose=True)\n", "id": "9652362", "language": "Python", "matching_score": 1.0431647300720215, "max_stars_count": 7, "path": "python/complete/no009.py" }, { "content": "#!/usr/bin/env python\n\n# Find the difference between the sum of the\n# squares of the first one hundred natural\n# numbers and the square of the sum.\n\nfrom python.decorators import euler_timer\nfrom python.functions import polygonal_number\n\n\ndef sum_first_n_sq(n):\n return n * (n + 1) * (2 * n + 1) / 6\n\n\ndef main(verbose=False):\n return abs(sum_first_n_sq(100) - polygonal_number(3, 100) ** 2)\n\nif __name__ == '__main__':\n print euler_timer(6)(main)(verbose=True)\n", "id": "8872329", "language": "Python", "matching_score": 0.1561160683631897, "max_stars_count": 7, "path": "python/complete/no006.py" }, { "content": "#!/usr/bin/env python\n\nfrom random import choice\n\nfrom python.decorators import euler_timer\n\nSQUARES = [\"GO\",\n \"A1\", \"CC1\", \"A2\", \"T1\", \"R1\", \"B1\", \"CH1\", \"B2\", \"B3\",\n \"JAIL\",\n \"C1\", \"U1\", \"C2\", \"C3\", \"R2\", \"D1\", \"CC2\", \"D2\", \"D3\",\n \"FP\",\n \"E1\", \"CH2\", \"E2\", \"E3\", \"R3\", \"F1\", \"F2\", \"U2\", \"F3\",\n \"G2J\",\n \"G1\", \"G2\", \"CC3\", \"G3\", \"R4\", \"CH3\", \"H1\", \"T2\", \"H2\"]\n\n\ndef roll_die(size):\n first_die = choice(range(1, size + 1))\n second_die = choice(range(1, size + 1))\n\n return (first_die + second_die, (first_die == second_die))\n\n\ndef back(square, step):\n index = SQUARES.index(square)\n new_index = (index - step) % len(SQUARES)\n return SQUARES[new_index]\n\n\ndef next_specific(square, next_type):\n if next_type not in [\"R\", \"U\"]:\n raise Exception(\"next_specific only intended for R and U\")\n\n # R1=5, R2=15, R3=25, R4=35\n index = SQUARES.index(square)\n if next_type == \"R\":\n if 0 <= index < 5 or 35 < index:\n return \"R1\"\n elif 5 < index < 15:\n return \"R2\"\n elif 15 < index < 25:\n return \"R3\"\n elif 25 < index < 35:\n return \"R4\"\n else:\n raise Exception(\"Case should not occur\")\n # U1=12, U2=28\n elif next_type == \"U\":\n if 0 <= index < 12 or index > 28:\n return \"U1\"\n elif 12 < index < 28:\n return \"U2\"\n else:\n return Exception(\"Case should not occur\")\n else:\n raise Exception(\"Case should not occur\")\n\n\ndef next_square(landing_square, chance_card, chest_card):\n if landing_square not in [\"CC1\", \"CC2\", \"CC3\", \"CH1\", \"CH2\", \"CH3\", \"G2J\"]:\n return (landing_square, chance_card, chest_card)\n\n if landing_square == \"G2J\":\n return (\"JAIL\", chance_card, chest_card)\n elif landing_square in [\"CC1\", \"CC2\", \"CC3\"]:\n # 1/16 Go, Jail\n # 14/16 Stay\n chest_card = (chest_card + 1) % 16\n if chest_card == 0:\n return (\"GO\", chance_card, chest_card)\n elif chest_card == 1:\n return (\"JAIL\", chance_card, chest_card)\n else:\n return (landing_square, chance_card, chest_card)\n elif landing_square in [\"CH1\", \"CH2\", \"CH3\"]:\n # 1/16 Go, Jail, C1, E3, H2, R1, next U, back 3\n # 1/8 Next R\n chance_card = (chance_card + 1) % 16\n if chance_card == 0:\n return (\"GO\", chance_card, chest_card)\n elif chance_card == 1:\n return (\"JAIL\", chance_card, 
chest_card)\n elif chance_card == 2:\n return (\"C1\", chance_card, chest_card)\n elif chance_card == 3:\n return (\"E3\", chance_card, chest_card)\n elif chance_card == 4:\n return (\"H2\", chance_card, chest_card)\n elif chance_card == 5:\n return (\"R1\", chance_card, chest_card)\n elif chance_card == 6:\n return (next_specific(landing_square, \"U\"),\n chance_card, chest_card)\n elif chance_card == 7:\n return next_square(back(landing_square, 3),\n chance_card, chest_card)\n elif chance_card in [8, 9]:\n return (next_specific(landing_square, \"R\"),\n chance_card, chest_card)\n else:\n return (landing_square, chance_card, chest_card)\n else:\n raise Exception(\"Case should not occur\")\n\n\ndef main(verbose=False):\n GAME_PLAY = 10 ** 6\n dice_size = 4\n visited = {\"GO\": 1}\n current = \"GO\"\n chance_card = 0\n chest_card = 0\n doubles = 0\n for place in xrange(GAME_PLAY):\n total, double = roll_die(dice_size)\n if double:\n doubles += 1\n else:\n doubles = 0\n\n if doubles == 3:\n doubles = 0\n current = \"JAIL\"\n else:\n index = SQUARES.index(current)\n landing_square = SQUARES[(index + total) % len(SQUARES)]\n (current, chance_card,\n chest_card) = next_square(landing_square, chance_card, chest_card)\n\n # if current is not in visited, sets to 1\n # (default 0 returned by get)\n visited[current] = visited.get(current, 0) + 1\n\n top_visited = sorted(visited.items(),\n key=lambda pair: pair[1],\n reverse=True)\n top_visited = [SQUARES.index(square[0]) for square in top_visited[:3]]\n\n return ''.join(str(index).zfill(2) for index in top_visited)\n\nif __name__ == '__main__':\n print euler_timer(84)(main)(verbose=True)\n", "id": "2630214", "language": "Python", "matching_score": 1.2343688011169434, "max_stars_count": 7, "path": "python/complete/no084.py" }, { "content": "#!/usr/bin/env python\n\n# Find the value of d < 1000 for which ^(1)/_(d) contains the\n# longest recurring cycle in its decimal fraction part.\n\nfrom python.decorators import euler_timer\nfrom python.functions import order_mod_n\nfrom python.functions import robust_divide\n\n\ndef main(verbose=False):\n max_index = -1\n max_block_size = -1\n for i in range(1, 1000):\n stripped_val = robust_divide(robust_divide(i, 2), 5)\n if stripped_val == 1:\n block_size = 0\n else:\n block_size = order_mod_n(10, stripped_val)\n if block_size > max_block_size:\n max_block_size = block_size\n max_index = i\n\n return max_index\n\nif __name__ == '__main__':\n print euler_timer(26)(main)(verbose=True)\n", "id": "5823378", "language": "Python", "matching_score": 0.6588681936264038, "max_stars_count": 7, "path": "python/complete/no026.py" }, { "content": "#!/usr/bin/env python\n\n# What is the first term in the Fibonacci sequence to contain 1000 digits?\n\nfrom python.decorators import euler_timer\nfrom python.functions import fibonacci_generator\n\n\ndef main(verbose=False):\n fib = fibonacci_generator()\n fib_index = 0\n for value in fib:\n # number of digits\n if len(str(value)) < 1000:\n fib_index += 1\n continue\n else:\n return fib_index\n\nif __name__ == '__main__':\n print euler_timer(25)(main)(verbose=True)\n", "id": "3610875", "language": "Python", "matching_score": 0.7970673441886902, "max_stars_count": 7, "path": "python/complete/no025.py" }, { "content": "#!/usr/bin/env python\n\n# 2**N binary digits can be placed in a circle so that all the N-digit\n# clockwise subsequences are distinct.\n\n# For N=3, two such circular arrangements are possible, ignoring\n# rotations:\n# (0,0,0,1,0,1,1,1) and (0,0,0,1,1,1,0,1)\n\n# 
For the first arrangement, the 3-digit subsequences, in clockwise\n# order, are: 000, 001, 010, 101, 011, 111, 110 and 100.\n\n# Each circular arrangement can be encoded as a number by concatenating\n# the binary digits starting with the subsequence of all zeros as the\n# most significant bits and proceeding clockwise. The two arrangements\n# for N=3 are thus represented as 23 and 29:\n\n# 00010111_2 = 23\n# 00011101_2 = 29\n\n# Calling S(N) the sum of the unique numeric representations, we can see\n# that S(3) = 23 + 29 = 52.\n\n# Find S(5).\n\n###########################\n\n# Since 2**N and it is a circular arrangement, all 2**N, N digit binaries\n# must be present. Hence we start with [0]*N. Since it can only occur once\n# we can actually pad it with ones on either side for [1] + [0]**N + [1]\n\nfrom python.decorators import euler_timer\n\n\ndef binary_array_to_integer(array):\n result = 0\n for val in array:\n result = 2 * result + val\n return result\n\n\ndef has_unique_subsequences(array, length):\n values = []\n for i in range(len(array) - length + 1):\n to_add = binary_array_to_integer(array[i:i + length])\n if to_add in values:\n return False\n values.append(to_add)\n return True\n\n\ndef add_value(array, length):\n # may return []\n result = [array[:] + [new_val] for new_val in [0, 1]\n if has_unique_subsequences(array[:] + [new_val], length)]\n return result\n\n\ndef all_valid_sequences(length):\n sequences = [[1] + [0] * length + [1]]\n while len(sequences[0]) < 2 ** length:\n next_sequences = []\n for sequence in sequences:\n next_sequences.extend(add_value(sequence, length))\n sequences = next_sequences[:]\n # After this step, we want to make it cyclic, so we overload and check\n sequences = [sequence + sequence[:length - 1] for sequence in sequences]\n sequences = [sequence[:2 ** length] for sequence in sequences\n if has_unique_subsequences(sequence, length)]\n # finally, we want to start at 0, but these all start at 1\n return [sequence[1:] + sequence[:1] for sequence in sequences]\n\n\ndef main(verbose=False):\n sequences = all_valid_sequences(5)\n return sum(binary_array_to_integer(sequence) for sequence in sequences)\n\nif __name__ == '__main__':\n print euler_timer(265)(main)(verbose=True)\n", "id": "8989200", "language": "Python", "matching_score": 2.2511003017425537, "max_stars_count": 7, "path": "python/complete/no265.py" }, { "content": "#!/usr/bin/env python\n\n# Find the unique positive integer whose square has the form\n# 1_2_3_4_5_6_7_8_9_0,\n# where each \"_\" is a single digit.\n\n# Since n**2 % 10 == 0, we know n = 10**k, hence\n# 1_2_3_4_5_6_7_8_9 = k**2\n# 10203040506070809 <= k**2 <= 19293949596979899\n# 101010101 <= k <= 138902662\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n for n in xrange(101010101, 138902662 + 1):\n if n % 250 in [43, 53, 83, 167, 197, 207]:\n val = n ** 2\n if str(val)[::2] == '123456789':\n return 10 * n\n\nif __name__ == '__main__':\n print euler_timer(206)(main)(verbose=True)\n", "id": "2560231", "language": "Python", "matching_score": 0.7328636646270752, "max_stars_count": 7, "path": "python/complete/no206.py" }, { "content": "#!/usr/bin/env python\n\n# Using computers, the incredible formula n^2 + 79n + 1601 was discovered,\n# which produces 80 primes for the consecutive values n = 0 to 79. 
The\n# product of the coefficients, 79 and 1601, is 126479.\n\n# Considering quadratics of the form:\n# n^2 + an + b, where |a| < 1000 and |b| < 1000\n\n# Find the product of the coefficients, a and b, for the quadratic expression\n# that produces the maximum number of primes for consecutive values of n,\n# starting with n = 0.\n\n# f(n + k) = (n + k)^2 + a(n + k) + b = f(n) + k^2 + ak + 2kn\n\n# Need b prime since f(0) = b, largest value is 997\n# We cheat and know n <= 79 since |b| < 1000\n# Hence |f(n)| <= |n|^2 + |a||n| + |b| <= 6241 + 79(1000) + 997 = 86238\n\n# Therefore, the biggest we ever have to worry about is 86238\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef polynomial_consecutive_primes(a, b, primes):\n # f(n + 1) = f(n) + 1 + a + 2n\n current = b\n index = 0\n while current in primes:\n current += 1 + a + 2 * index\n index += 1\n return index\n\n\ndef main(verbose=False):\n PRIMES = sieve(86238)\n b_choices = [prime for prime in PRIMES if prime < 1000]\n\n candidates = [(a, b, polynomial_consecutive_primes(a, b, PRIMES))\n for a in range(-999, 999 + 1)\n for b in b_choices]\n quantities = [entry[2] for entry in candidates]\n winner = candidates[quantities.index(max(quantities))]\n prod = winner[0] * winner[1]\n a = winner[0]\n b = winner[1]\n max_vals = winner[2]\n\n if verbose:\n return ('%s.\\nSetting a = %s and b = %s produces '\n '%s consecutive primes.' % (prod, a, b, max_vals))\n else:\n return prod\n\nif __name__ == '__main__':\n print euler_timer(27)(main)(verbose=True)\n", "id": "6428563", "language": "Python", "matching_score": 1.424242615699768, "max_stars_count": 7, "path": "python/complete/no027.py" }, { "content": "#!/usr/bin/env python\n\n# 1/x + 1/y = 1/n\n# n*x + n*y = x*y\n# (x - n)*(y - n) = n**2\n\n# For each factor f <= n dividing\n# n**2 we'll get a unique solution.\n# Since n**2 is a square, there will\n# be an odd number of factors, hence\n# if there are F(n) factors we will\n# have (F(n) - 1)/2 pairs where f1 != f2,\n# f1*f2 = n**2 and then f1 = f2 = n\n# So we have (F(n) + 1)/2 solutions\n\nimport operator\n\nfrom itertools import product as i_product\n\nfrom python.decorators import euler_timer\nfrom python.functions import power_up_to_digits\nfrom python.functions import prime_factors\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n prime_factors_hash = {}\n\n MINIMUM_SOLUTIONS = 1000\n # P^k < 10**5\n powers = [power_up_to_digits(prime, 5)\n for prime in [3, 5, 7]]\n products = [reduce(operator.mul, triple) for\n triple in list(i_product(*powers))]\n products = [product for product in sorted(products)\n if product > 2 * MINIMUM_SOLUTIONS][:20]\n\n PRIMES = sieve(100)\n\n max_prod = 10 ** 10\n res = []\n for product in products:\n factors = prime_factors(product, unique=False,\n hash_=prime_factors_hash)\n factors = [(factor - 1) / 2 for factor in factors][::-1]\n curr_prod = 1\n for i, exp in enumerate(factors):\n curr_prod = curr_prod * (PRIMES[i] ** exp)\n\n if curr_prod < max_prod:\n max_prod = curr_prod\n\n return max_prod\n\nif __name__ == '__main__':\n print euler_timer(108)(main)(verbose=True)\n", "id": "11157556", "language": "Python", "matching_score": 1.7837941646575928, "max_stars_count": 7, "path": "python/complete/no108.py" }, { "content": "import operator\nimport sys\n\nfrom fractions import gcd\nfrom math import factorial\nfrom math import log\nfrom math import sqrt\n\nfrom path import DATA_PATH\n\n\n# HELPER FUNCTIONS\n\ndef lcm(n, m):\n return n * m / (gcd(n, 
m))\n\n\ndef choose(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))\n\n\n# 8, 11, 13, 18, 22, 42, 54, 59, 67\ndef get_data(problem_number):\n \"\"\"Maps problem number to raw text from expected data file.\n\n Data is expected to be in euler_project/problem_data and\n to be named no---.txt where --- is the zero padded\n problem number\n \"\"\"\n filename = 'no%s.txt' % str(problem_number).zfill(3)\n absolute_path = '%s/%s' % (DATA_PATH, filename)\n with open(absolute_path) as fh:\n # fails if file doesn't exist\n result = fh.read()\n return result\n\n\n# 26\ndef robust_divide(n, quotient, include_count=False):\n if quotient in (-1, 1):\n raise ValueError(\"Please don't use %s as a quotient.\" % quotient)\n\n result = n\n count = 0\n while result % quotient == 0:\n count += 1\n result = result / quotient\n if include_count:\n return result, count\n else:\n return result\n\n\n# 2, 57, 65\ndef recurrence_next(relation, values):\n \"\"\"Gets next term in a recurrence based on relation.\n\n Assumes recurrence of length k satisfies\n f(n+k) = relation[0]*f(n) + relation[1]*f(n+1) + ...\n\n Values are also expected to be ordered [f(n),f(n+1),...]\n \"\"\"\n if len(relation) != len(values):\n raise ValueError(\"Poorly specified recurrence\")\n recurrence_order = len(relation)\n next_val = sum(relation[i] * values[i] for i in range(recurrence_order))\n return values[1:] + [next_val] # copies values (doesn't change inputs)\n\n\n# 4, 36, 55\ndef is_palindrome(n):\n return (str(n) == str(n)[::-1])\n\n\n# 46\ndef is_power(n, exponent):\n return n == (int(n ** (1.0 / exponent))) ** exponent\n\n\n# PROBLEM SPECIFIC METHODS\n\n# 18, 67\ndef max_sum(triangle_matrix):\n \"\"\"Finds maximum sum of path from top of triangle down to bottom.\n\n Input: Matrix of triangle e.g.\n 1\n 3 5\n 7 8 4\n becomes [[1], [3, 5], [7, 8, 4]]\n Uses memoization from the bottom layer up to work quickly\n Output: maximum value\n \"\"\"\n max_depth = len(triangle_matrix) - 1\n\n # Set bottom row for memoization\n depth = max_depth\n result = {}\n for i, entry in enumerate(triangle_matrix[depth]):\n result[(i, max_depth - depth)] = entry\n depth -= 1\n\n # Set each row moving up the triangle based on\n # the results one low below\n while depth >= 0:\n for i, entry in enumerate(triangle_matrix[depth]):\n val_left = result[(i, max_depth - depth - 1)]\n val_right = result[(i + 1, max_depth - depth - 1)]\n result[(i, max_depth - depth)] = entry + max(val_left, val_right)\n depth -= 1\n\n return result[(0, max_depth - depth - 1)]\n\n\n# 114, 115\ndef fill_count(m, n):\n count = 1\n MAX_k = (n + 1) / (m + 1)\n for k in range(1, MAX_k + 1):\n for sum_ai in range(m * k, n + 1 - k + 1):\n perm_count = 0\n for bottom in range(m, sum_ai / k + 1):\n for gp_ai in ascending(k, sum_ai, bottom, n + 1):\n perm_count += total_perms(gp_ai)\n add_value = perm_count * choose(n + 1 - sum_ai, k)\n count += add_value\n return count\n\n\n# 132, 133\ndef prime_divides_repunit_power10(prime, cap=-1):\n # Determines if a prime divides any repunit R(10**n)\n # if cap > 0, then we set a max on the value of n\n if prime in [2, 3, 5]:\n return False\n _, count_2 = robust_divide(prime - 1, 2, include_count=True)\n _, count_5 = robust_divide(prime - 1, 5, include_count=True)\n if cap > 0:\n count_2 = min(cap, count_2)\n count_5 = min(cap, count_5)\n if prime == (2 ** count_2) * (5 ** count_5) + 1:\n return True\n possible_exp = sorted((2 ** exp2) * (5 ** exp5)\n for exp2 in range(0, count_2 + 1)\n for exp5 in range(0, count_5 + 1))\n for exp in 
possible_exp:\n if (10 ** exp - 1) % prime == 0:\n return True\n return False\n\n\n# FIBONACCI METHODS\n\n# 25\ndef fibonacci_generator():\n \"\"\"a generator for Fibonacci numbers\"\"\"\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b\n\n\n# PRIMES METHODS\n\ndef first_prime_divisor(n, prime_list=None):\n if n == 1:\n return [1, 1]\n elif n % 2 == 0:\n return [2, n / 2]\n\n if prime_list is not None:\n for p in prime_list:\n if n % p == 0:\n return [p, n / p]\n # To complete this loop, either the prime list was\n # insufficient or p is prime\n if is_prime(n, primes=prime_list):\n return [n, 1]\n else:\n raise ValueError(\"Prime list poorly specified\")\n else:\n divisor = 3\n while n % divisor != 0:\n divisor += 2\n return [divisor, n / divisor]\n raise ValueError(\"Bad input %s.\" % n)\n\n\n# 3, 12, 47\ndef prime_factors(n, unique=False, hash_=None):\n if n == 1:\n if isinstance(hash_, dict):\n hash_[1] = []\n return []\n if isinstance(hash_, dict) and n in hash_:\n return hash_[n]\n\n prime, quotient = first_prime_divisor(n)\n\n remaining, count = robust_divide(n, prime, include_count=True)\n if unique:\n result = [prime] + prime_factors(remaining,\n unique=unique,\n hash_=hash_)\n else:\n result = [prime] * count + prime_factors(remaining,\n unique=unique,\n hash_=hash_)\n\n if isinstance(hash_, dict):\n hash_[n] = result\n\n return result\n\n\n# 135\ndef factors(n, factor_hash=None, primes=None):\n if factor_hash is None:\n factor_hash = {}\n\n if n in factor_hash:\n return factor_hash[n]\n elif n == 1:\n factor_hash[1] = [1]\n return [1]\n\n if primes is not None and n in primes:\n factor_hash[n] = [1, n]\n return [1, n]\n\n prime, quotient = first_prime_divisor(n, prime_list=primes)\n\n to_add = factors(quotient, factor_hash, primes)[:] # Need a deep-ish copy\n to_add.extend([prime * factor for factor in to_add])\n\n factor_hash[n] = sorted(list(set(to_add)))\n return factor_hash[n]\n\n\n# 21, 23, 39\ndef all_factors(n, hash_={1: [1], 2: [1, 2], 3: [1, 3]}):\n \"\"\"Takes n and optional hash of factors\n\n Uses the hash to update a full list of factors for\n all integers 1 to n. 
Only updates if not in hash_.\n \"\"\"\n factor_hash = hash_.copy()\n if n in factor_hash:\n return factor_hash\n\n all_primes = sieve(n)\n\n for i in range(4, n + 1):\n if i not in factor_hash:\n reduced = first_prime_divisor(i, all_primes)\n # This will update factor hash\n factors(i, factor_hash=factor_hash, primes=all_primes)\n\n return factor_hash\n\n\n# 37, 41, 58\ndef is_prime(n, primes=None, failure_point=None):\n if n < 10:\n return n in [2, 3, 5, 7]\n\n # We safely assume n >= 10\n if n % 2 == 0 or n % 3 == 0 or n % 5 == 0 or n % 7 == 0:\n return False\n\n if failure_point is not None:\n if n >= failure_point:\n raise ValueError(\"%s is too large for is_prime.\" % n)\n\n if primes is not None:\n if n in primes:\n return True\n to_check = [prime for prime in primes if prime ** 2 <= n]\n for prime in to_check:\n if n % prime == 0:\n return False\n return True\n\n divisor_bound = int(sqrt(n))\n # From here, we know only +/- 1 mod 6 works, so\n # we start with 11 and 13 (primes > 10)\n divisor_minus, divisor_plus = 11, 13\n while divisor_minus <= divisor_bound:\n if n % divisor_minus == 0 or n % divisor_plus == 0:\n return False\n divisor_minus += 6\n divisor_plus += 6\n return True\n\n\n# 7, 10, 21, 27, 35, 37, 46, 49, 50, 51, 58, 60\ndef sieve(n):\n \"\"\"Sieve of Eratosthenes\n\n Returns all primes <= n\n \"\"\"\n to_check = [True] * (n + 1)\n final_check = int(sqrt(n)) # effectively the floor of sqrt(n)\n\n for i in xrange(2, final_check + 1):\n if to_check[i]:\n for j in xrange(i ** 2, n + 1, i):\n to_check[j] = False\n\n return [i for i in range(2, n + 1) if to_check[i]]\n\n\n# NUMBER THEORY AND ALGEBRA METHODS\n\n# 26\ndef order_mod_n(value, n, hash_=None, prime_list=None):\n if hash_ is None:\n hash_ = {}\n if n in hash_:\n return hash_[n]\n\n if gcd(value, n) != 1 or n == 1:\n raise ValueError(\"%s is not a unit modulo %s.\" % (value, n))\n\n prime, _ = first_prime_divisor(n, prime_list)\n quotient = robust_divide(n, prime)\n if quotient == 1:\n # at this point, n is not in the hash_ but must be a\n # prime power\n base_residue = value % n\n\n residue = base_residue\n exponent = 1\n while residue != 1:\n residue = (residue * base_residue) % n\n exponent += 1\n hash_[n] = exponent\n return exponent\n\n # Here, quotient > 1\n prime_power = n / quotient\n prime_order = order_mod_n(value, prime_power,\n hash_=hash_,\n prime_list=prime_list)\n quotient_order = order_mod_n(value, quotient,\n hash_=hash_,\n prime_list=prime_list)\n hash_[n] = lcm(prime_order, quotient_order)\n return hash_[n]\n\n\ndef polynomial_roots(coefficients):\n # Assumes coefficients = [a_0, a_1,..., a_n]\n # for f(x) = a_n x^n + ... 
+ a_1 x + a_0\n if len(coefficients) != 3:\n raise ValueError(\"Only supporting quadratics at this time\")\n c, b, a = coefficients\n discriminant_rt = sqrt(b ** 2 - 4 * a * c)\n return [(-b + discriminant_rt) / (2.0 * a),\n (-b - discriminant_rt) / (2.0 * a)]\n\n\n# 6, 42, 44, 61\ndef polygonal_number(s, n):\n return n * ((s - 2) * n - (s - 4)) / 2\n\n\n# 42, 44, 61\ndef reverse_polygonal_number(sides, number, hash_=None):\n \"\"\"Computes n given the nth polygonal number (and the polygon size).\n\n The n-th polygonal number for s sides is:\n n*((s - 2)*n - (s - 4))/2\n \"\"\"\n if hash_ is not None and number in hash_:\n return hash_[number]\n root_plus, _ = polynomial_roots([-2 * number, 4 - sides, sides - 2])\n if root_plus != int(root_plus):\n result = -1\n else:\n result = int(root_plus)\n\n if hash_ is not None:\n hash_[number] = result\n return result\n\n\n# 72\ndef mu(n, hash_, primes):\n if n in hash_:\n return hash_[n]\n\n prime, _ = first_prime_divisor(n, prime_list=primes)\n if n % prime ** 2 == 0:\n hash_[n] = 0\n else:\n # if n/prime has a square, we will want mu(n) = 0\n # if mu(n/prime) = 1, we add 1 prime so we negate it\n # similarly if mu(n/prime) = -1\n hash_[n] = -mu(n / prime, hash_, primes)\n return hash_[n]\n\n\ndef extended_euclid(a, b):\n M = max(a, b)\n m = min(a, b)\n\n last = (M, [1, 0])\n curr = (m, [0, 1])\n while curr[0] > 1:\n next = last[0] % curr[0]\n factor = (last[0] - next) / curr[0]\n last, curr = curr, (next, [last[1][0] - factor * curr[1][0],\n last[1][1] - factor * curr[1][1]])\n result = curr[1]\n if a * result[0] + b * result[1] == 1:\n return result\n else:\n return result[::-1]\n\n\ndef inverse_mod_n(val, n):\n if gcd(val, n) > 1:\n raise Exception(\"Not invertible\")\n\n result, _ = extended_euclid(val, n)\n return result % n\n\n\n# Let r_i = 1/(a_(i+1) + 1/(a(i+2) + ...\n# Then 1/r_i = a_(i+1) + r_(i+1); a_i = floor(1/r_i)\n\n# We see we can write r_i = (A_i*rt(n) + B_i)/C_i\n# then 1/r_i = C_i(A_i*rt(n) - B_i)/(n*A_i**2 - B_i**2)\n\n# represent each r_i as r_i = (A, B, C) -> 1/r_i = a + r_(i + 1)\n# -> a = floor(1/r_i) = floor( C/(A rt(n) + B) )\n# -> r_(i + 1) = (C*A, C*B - a*(n*A**2 - B**2), n*A**2 - B**2)\n# -> r_(i + 1) = (A', B', C') #reduce\n# then r_(i+1) = ((C_i*A_i*rt(n) - [C_i*B_i + a_(i+1)*(n*A_i**2 - B_i**2)]) /\n# (n*A_i**2 - B_i**2))\ndef next_continued_fraction_triple(current, n):\n A, B, C = current\n a = int(C * (1.0) / (A * sqrt(n) + B))\n r = (C * A, -C * B - a * (n * A ** 2 - B ** 2), n * A ** 2 - B ** 2)\n d = gcd(gcd(r[0], r[1]), r[2])\n return (r[0] / d, r[1] / d, r[2] / d)\n\n\ndef continued_fraction_cycle(n):\n result = [int(sqrt(n))]\n init = curr_r = (1, -int(sqrt(n)), 1)\n\n result.append(int(curr_r[2] * (1.0) / (curr_r[0] * sqrt(n) + curr_r[1])))\n curr_r = next_continued_fraction_triple(curr_r, n)\n while curr_r != init:\n result.append(int(curr_r[2] * (1.0) /\n (curr_r[0] * sqrt(n) + curr_r[1])))\n curr_r = next_continued_fraction_triple(curr_r, n)\n return result\n\n\ndef power_up_to_digits(n, digits):\n return [n ** exp for exp in range(int(digits * log(10) / log(n)) + 1)]\n\n\n# LIST MANAGEMENT METHODS\n\n# 4, 23, 29, 56\ndef apply_to_list(func, list_, non_match=False):\n result = []\n for elt1 in list_:\n for elt2 in list_:\n if non_match:\n if elt1 != elt2:\n result.append(func(elt1, elt2))\n else:\n result.append(func(elt1, elt2))\n return result\n\n\n# 35, 41, 68, 121\ndef all_permutations(list_):\n result = [[]]\n for i in range(len(list_)):\n extended = []\n for perm in result:\n for position in 
range(i + 1):\n extended.append(perm[:position] + [list_[i]] + perm[position:])\n result = extended\n return result\n\n\n# 35, 41\ndef all_permutations_digits(n):\n digs = [dig for dig in str(n)]\n result = all_permutations(digs)\n return [int(\"\".join(perm)) for perm in result]\n\n\n# 49, 51, 60\ndef all_subsets(list_, size, unique=True):\n if len(list_) < size:\n if unique:\n raise ValueError(\"List too small.\")\n\n # Base case\n if size == 1:\n if unique:\n return [[element] for element in set(list_)]\n else:\n return [[element] for element in list_]\n\n if not unique:\n return reduce(operator.add, [[[element] + subset for subset in\n all_subsets(list_, size - 1, False)]\n for element in list_])\n\n # We can assume size > 1\n result = []\n for i in range(len(list_) - size + 1):\n curr = list_[i + 1:]\n result.extend([[list_[i]] + sub_list\n for sub_list in all_subsets(curr, size - 1)])\n return result\n\n\n# GRAPH THEORY METHODS\n\ndef astar(graph, start, target, heuristic, adjacent):\n closed_nodes = {start: (None, graph[start])}\n # node, parent, distance, don't store heuristic dist.\n\n open_nodes = {}\n for node in adjacent(start):\n if node in graph:\n open_nodes[node] = (start, graph[start] + graph[node])\n\n while target not in closed_nodes:\n min_val = None\n min_f = -1\n for node in open_nodes:\n val = open_nodes[node][1] + heuristic(node)\n if min_val is None:\n min_val = val\n min_f = node\n else:\n if val < min_val:\n min_val = val\n min_f = node\n\n closed_nodes[min_f] = open_nodes.pop(min_f)\n\n min_val = min_val - heuristic(min_f)\n for node in adjacent(min_f):\n if node not in graph or node in closed_nodes:\n continue\n if node in open_nodes:\n comp_val = open_nodes[node][1]\n new_val = min_val + graph[node]\n if new_val < comp_val:\n open_nodes[node] = (min_f, new_val)\n else:\n open_nodes[node] = (min_f, min_val + graph[node])\n\n return closed_nodes[target][1]\n\n\ndef prims_algo(adjacency_list):\n keys = adjacency_list.keys()\n vertices = [keys[0]]\n keys = set(keys)\n edges = []\n min_sum = 0\n while set(vertices) != keys:\n # Find next edge\n candidates = {}\n for vertex in vertices:\n for node, val in adjacency_list[vertex]:\n if node not in vertices:\n candidates[(vertex, node)] = val\n\n new_edge, val = sorted(candidates.items(), key=lambda pair: pair[1])[0]\n min_sum += val\n edges.append(new_edge)\n vertices.append(new_edge[1])\n\n return edges, min_sum\n\n\ndef total_perms(o_list):\n counts = []\n curr_entry = o_list[0]\n curr_count = 1\n for entry in o_list[1:]:\n if entry == curr_entry:\n curr_count += 1\n else:\n counts.append(curr_count)\n curr_entry = entry\n curr_count = 1\n counts.append(curr_count)\n\n denominator = reduce(operator.mul,\n [factorial(count) for count in counts])\n return factorial(sum(counts)) / denominator\n\n\ndef ascending(num, num_sum, min_num, prob_max):\n if num_sum < min_num:\n return []\n if num == 1:\n if num_sum == min_num:\n return [[num_sum]]\n else:\n return []\n\n next_sum = num_sum - min_num\n biggest = next_sum / (num - 1) # integer division intended\n biggest = min(biggest, prob_max)\n result = []\n for next_min in range(min_num, biggest + 1):\n result.extend([[min_num] + cand for cand in\n ascending(num - 1, next_sum, next_min, prob_max)])\n return result\n", "id": "2342347", "language": "Python", "matching_score": 3.669828414916992, "max_stars_count": 7, "path": "python/functions.py" }, { "content": "#!/usr/bin/env python\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import 
polygonal_number\nfrom python.functions import reverse_polygonal_number\n\n\ndef all_polygonal(s, digits):\n result = []\n for i in range(10 ** digits):\n curr = polygonal_number(s, i)\n if curr >= 10 ** (digits - 1):\n if curr < 10 ** digits:\n result.append(curr)\n else:\n break\n\n return result\n\n\ndef possible_digits_key():\n left_digits = set()\n right_digits = set()\n\n four_digits = {}\n for sides in range(3, 9):\n four_digits[sides] = all_polygonal(sides, 4)\n left_digits.update([str(num)[:2] for num in four_digits[sides]])\n right_digits.update([str(num)[-2:] for num in four_digits[sides]])\n\n two_digit_candidates = left_digits.intersection(right_digits)\n\n result = {}\n for sides in range(3, 9):\n possible = [elt for elt in four_digits[sides]\n if str(elt)[:2] in two_digit_candidates\n and str(elt)[-2:] in two_digit_candidates]\n\n for number in possible:\n left = str(number)[:2]\n right = str(number)[-2:]\n # sets value to [] if not set, returns value at key\n result.setdefault(left, []).append((right, sides))\n\n return result\n\n\ndef find_paths(start_val, start_sides, length, possible):\n \"\"\"\n Starting from (XX, sides) we use the possible paths\n to move from one to the next, without repeating\n\n E.G. if start is ('95', 7) then we look in possible['95']\n and find [('91', 3), ('60', 5), ('17', 7)] so we will extend\n at that step to\n [[('95', 7), ('91', 3)], [('95', 7), ('60', 5)]]\n \"\"\"\n paths = [[(start_val, start_sides)]]\n\n for i in range(length - 1):\n new_paths = []\n for path in paths:\n digits = [elt[0] for elt in path]\n sides = [elt[1] for elt in path]\n\n last_val = path[-1][0]\n # Extends each path by 1 to all the other\n # possible matches which don't conflict\n # with something already in path\n to_add = [path + [elt] for elt in possible[last_val]\n if elt[0] not in digits\n and elt[1] not in sides]\n new_paths.extend(to_add)\n\n paths = new_paths[:]\n\n return paths\n\n\ndef successful_path():\n \"\"\"\n Returns the first set of 6 (XX, sides) pairs that form\n a path according to find_paths\n \"\"\"\n possible = possible_digits_key()\n start_points = set(reduce(operator.add, possible.values()))\n\n for start_point in start_points:\n value, sides = start_point\n for path in find_paths(value, sides, 6, possible):\n # Once we establish the path, we need to\n # see if the cycle can be completed\n # So we check if the first element in the path\n # is a possible value for the last value in\n # the path\n last_val = path[-1][0]\n candidate = possible[last_val]\n if path[0] in candidate:\n return path\n raise ValueError(\"Algorithm on 61 failed.\")\n\n\ndef main(verbose=False):\n COLORS = ['\\033[98m',\n '\\033[96m',\n '\\033[95m',\n '\\033[94m',\n '\\033[92m',\n '\\033[91m',\n '\\033[98m'] # Loops back to beginning\n ENDC = '\\033[0m'\n\n path = successful_path()\n result = [[path[i][0] + path[i + 1][0], path[i + 1][1]]\n for i in range(5)]\n result.append([path[-1][0] + path[0][0], path[0][1]])\n result = [elt + [reverse_polygonal_number(elt[1], int(elt[0]))]\n for elt in result]\n display = \"\"\n for i, entry in enumerate(result):\n value, sides, number = entry\n left = str(value)[:2]\n right = str(value)[-2:]\n colored_val = \"\".join([COLORS[i], left, ENDC,\n COLORS[i + 1], right, ENDC])\n display += \"\\nP_(%s,%s) = %s\" % (sides, number, colored_val)\n if verbose:\n return \"%s.%s\" % (sum(int(match[0]) for match in result), display)\n else:\n return sum(int(match[0]) for match in result)\n\nif __name__ == '__main__':\n print 
euler_timer(61)(main)(verbose=True)\n", "id": "899067", "language": "Python", "matching_score": 1.667951226234436, "max_stars_count": 7, "path": "python/complete/no061.py" }, { "content": "#!/usr/bin/env python\n\n# Pentagonal is n(3n-1)/2\n# Find (j,k) for which P_j + P_k and abs(P_j - P_k) = D is pentagonal and\n# D is minimized. What is D?\n\n# We will infinitely generate (k,j) = (1,2), (1,3), (2,3), (1,4), (2,4), ...\n# We will infinitely generate (k,j) = (1,2), (2,3), (1,3), (3,4), (2,4),\n# (1,4), (4,5), ...\n# Will check if P_j - P_k is pentagonal.\n# If it is, will then check if P_k + P_j is\n\n# NOTE: P_j - P_k is increasing in j and decreasing in k, so if we fix j, it is\n# minimized when k is largest, i.e. P_j - P_(j - 1)\n# = (3j**2 - j)/2 - (3(j - 1)**2 - (j - 1))/2 = 3j - 2\n\n# For k < j, P_j - P_k >= 3j - 2\n# So if we have established a difference D, if 3j - 2 >= D, we need not\n# consider it, hence we need k < j < (D + 2)/3\n\nfrom python.decorators import euler_timer\nfrom python.functions import polygonal_number\nfrom python.functions import reverse_polygonal_number\n\n\ndef increment_pair(pair):\n \"\"\"\n Increments pair by traversing through all\n k for a given j until 1 is reached, then starting\n back up with the next highest value for j\n\n E.G. [1,2]-->[2,3]-->[1,3]-->[3,4]-->[2,4]-->...\n \"\"\"\n k, j = pair\n k -= 1\n if k > 0:\n return [k, j]\n else:\n return [j, j + 1]\n\n\ndef main(verbose=False):\n # Not only finds the minimum, but also checks to make sure\n # it is the smallest. Since P_j - P_k >= P_j - P_(j-1) = 3j - 2\n # If 3j - 2 > D, then P_j - P_k > D, and we not longer need to\n # check the minimum\n pair = [1, 2]\n D = -1\n while D == -1 or 3 * pair[1] - 2 <= D:\n vals = [polygonal_number(5, val) for val in pair]\n difference = abs(vals[0] - vals[1])\n if D != -1 and difference > D:\n # since increment decreases the first argument, if\n # we go past the difference, we can stop checking\n # [k, j] for a fixed j, and we just bypass\n # by incrementing j\n last = pair[1]\n pair = [last, last + 1]\n else:\n if reverse_polygonal_number(5, difference) != -1:\n if reverse_polygonal_number(5, sum(vals)) != -1:\n if D == -1 or difference < D:\n D = difference\n pair = increment_pair(pair)\n return D\n\nif __name__ == '__main__':\n print euler_timer(44)(main)(verbose=True)\n", "id": "2932551", "language": "Python", "matching_score": 1.1474767923355103, "max_stars_count": 7, "path": "python/too_slow/no044.py" }, { "content": "#!/usr/bin/env python\n\n# We have 6 different initial choices\n# 3 choices for the face the bug travels on\n# and from there 2 choices for which side of\n# the face the bug will leave for the last corner\n# All 6 permutations of (a, b, c) will make up\n# these 6 possibilities, let the bug\n\n# Travel first on the face s_1 x s_2 and then on the\n# face s_2 by s_3, and meet the s_2 side a distance\n# x from the end furthest from the starting point\n# then the squared distance in the first face will be\n# d**2 = s_1**2 + (s_2 - x)**2 and along the\n# second face will be x**2 + s_3**2\n\n# We have f(x, y) = sqrt(s_1**2 + y**2) + sqrt(s_3**2 + x**2)\n# where x + y = s_2. 
Clearly swapping the roles of\n# s_1 and s_3 does nothing so we really only have 3\n# possibilites\n\n# Taking derivatives, the minimum occurs at\n# x = s_2*s_3/(s_1 + s_3)\n# yielding a minimum of sqrt(s_2**2 + (s_1 + s_3)**2)\n\n# Thus the minimum is one of\n# a**2 + (b + c)**2 = (a**2 + b**2 + c**2) + 2*b*c\n# b**2 + (c + a)**2 = (a**2 + b**2 + c**2) + 2*c*a\n# c**2 + (a + b)**2 = (a**2 + b**2 + c**2) + 2*a*b\n# If a <= b <= c, a*b <= c*a <= b*c,\n# hence the minimum occurs at c**2 + (a + b)**2\n\n# Let SP(M) = #{paths of max size MxMxM with integer shortes path}\n# Clearly SP(M) = SP(M - 1) + DELTA where\n# DELTA = #{paths of size axbxM with integer shortest path | a<=b<=M}\n\n# Initially SP(1) = 0 since the only cube is 1x1x1 and\n# the min distance is sqrt(5)\n\n# The difference SP(M) - SP(M - 1) can be computed with the assumption\n# that c = M (the largest side) hence we need to find all\n# pairs a <= b <= M with (a + b)**2 + M**2 an integer\n\n# clearly 2 <= a + b <= 2*M. Given k = a + b in this range, we have\n# k - M <= k - b = a = (a + a)/2 <= (a + b)/2 = k/2\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_power\n\n\ndef unique_pairs(k, M):\n lower = max(k - M, 1)\n upper = k / 2 # integer division intended\n return upper - lower + 1\n\n\ndef main(verbose=False):\n TARGET = 10 ** 6\n M = 1\n solutions = 0\n while solutions < TARGET:\n M += 1\n # need a + b with (a + b)**2 + M**2\n for inferior_sum in xrange(2, 2 * M + 1):\n if is_power((inferior_sum) ** 2 + M ** 2, 2):\n solutions += unique_pairs(inferior_sum, M)\n return M\n\nif __name__ == '__main__':\n print euler_timer(86)(main)(verbose=True)\n", "id": "6870248", "language": "Python", "matching_score": 1.1971259117126465, "max_stars_count": 7, "path": "python/complete/no086.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport doctest\nimport pathlib\n\nimport numpy as np\n\n\nHERE = pathlib.Path(__file__).resolve().parent\n\n\ndef parse_move(move):\n \"\"\"Convert a directional text move into a vector and distance.\n\n >>> parse_move(\"R8\")\n (array([1, 0, 1]), 8)\n >>> parse_move(\"L5\")\n (array([-1, 0, 1]), 5)\n >>> parse_move(\"D3\")\n (array([ 0, -1, 1]), 3)\n >>> parse_move(\"U4\")\n (array([0, 1, 1]), 4)\n \"\"\"\n direction = move[:1] # Avoid IndexError using [0]\n distance = int(move[1:])\n if str(distance) != move[1:]:\n raise ValueError(\"Unexpected move\", move)\n if distance < 1:\n raise ValueError(\"Unexpected move\", move)\n\n if direction == \"U\":\n return np.array([0, 1, 1]), distance\n if direction == \"D\":\n return np.array([0, -1, 1]), distance\n if direction == \"R\":\n return np.array([1, 0, 1]), distance\n if direction == \"L\":\n return np.array([-1, 0, 1]), distance\n\n raise ValueError(\"Unexpected move\", move)\n\n\ndef wire_to_points(turns):\n \"\"\"Convert a series of turns into a list of points on a lattice.\n\n >>> wire_to_points(\"R8,U5,L5,D3\")\n [(0, 0, 0), (1, 0, 1), (2, 0, 2), (3, 0, 3), (4, 0, 4), (5, 0, 5), (6, 0, 6), (7, 0, 7), 
(8, 0, 8), (8, 1, 9), (8, 2, 10), (8, 3, 11), (8, 4, 12), (8, 5, 13), (7, 5, 14), (6, 5, 15), (5, 5, 16), (4, 5, 17), (3, 5, 18), (3, 4, 19), (3, 3, 20), (3, 2, 21)]\n >>> wire_to_points(\"U7,R6,D4,L4\")\n [(0, 0, 0), (0, 1, 1), (0, 2, 2), (0, 3, 3), (0, 4, 4), (0, 5, 5), (0, 6, 6), (0, 7, 7), (1, 7, 8), (2, 7, 9), (3, 7, 10), (4, 7, 11), (5, 7, 12), (6, 7, 13), (6, 6, 14), (6, 5, 15), (6, 4, 16), (6, 3, 17), (5, 3, 18), (4, 3, 19), (3, 3, 20), (2, 3, 21)]\n \"\"\"\n moves = turns.split(\",\")\n current_point = np.array([0, 0, 0])\n points = [tuple(current_point)]\n for move in moves:\n direction, distance = parse_move(move)\n for _ in range(distance):\n current_point += direction\n points.append(tuple(current_point))\n\n return points\n\n\ndef find_intersections(turns1, turns2):\n points1 = wire_to_points(turns1)\n points2 = wire_to_points(turns2)\n\n distance_mapping1 = {}\n for x_val, y_val, steps in points1:\n key = (x_val, y_val)\n if key in distance_mapping1:\n distance_mapping1[key] = min(steps, distance_mapping1[key])\n else:\n distance_mapping1[key] = steps\n\n common_points = {}\n for x_val, y_val, steps in points2:\n key = (x_val, y_val)\n if key == (0, 0):\n continue\n\n if key not in distance_mapping1:\n continue\n\n if key in common_points:\n steps1, steps2 = common_points[key]\n steps2 = min(steps, steps2)\n common_points[key] = steps1, steps2\n else:\n common_points[key] = distance_mapping1[key], steps\n\n if len(common_points) == 0:\n raise RuntimeError(\"Expected at least 1 intersection\", turns1, turns2)\n\n return common_points\n\n\ndef minimal_distance(turns1, turns2):\n \"\"\"Compute the minimal Manhattan distance to a point where wires cross.\n\n >>> minimal_distance(\"R8,U5,L5,D3\", \"U7,R6,D4,L4\")\n 6\n >>> minimal_distance(\n ... \"R75,D30,R83,U83,L12,D49,R71,U7,L72\",\n ... \"U62,R66,U55,R34,D71,R55,D58,R83\",\n ... )\n 159\n >>> minimal_distance(\n ... \"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\",\n ... \"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7\",\n ... )\n 135\n \"\"\"\n common_points = find_intersections(turns1, turns2)\n common_iter = iter(common_points.keys())\n x_val, y_val = next(common_iter)\n min_distance = abs(x_val) + abs(y_val)\n for x_val, y_val in common_iter:\n min_distance = min(min_distance, abs(x_val) + abs(y_val))\n\n return min_distance\n\n\ndef minimal_distance_by_steps(turns1, turns2):\n \"\"\"Compute the minimal sum of steps to a point where wires cross.\n\n >>> minimal_distance_by_steps(\"R8,U5,L5,D3\", \"U7,R6,D4,L4\")\n 30\n >>> minimal_distance_by_steps(\n ... \"R75,D30,R83,U83,L12,D49,R71,U7,L72\",\n ... \"U62,R66,U55,R34,D71,R55,D58,R83\",\n ... )\n 610\n >>> minimal_distance_by_steps(\n ... \"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51\",\n ... \"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7\",\n ... 
)\n 410\n \"\"\"\n common_points = find_intersections(turns1, turns2)\n common_iter = iter(common_points.values())\n steps1, steps2 = next(common_iter)\n min_distance = steps1 + steps2\n for steps1, steps2 in common_iter:\n min_distance = min(min_distance, steps1 + steps2)\n\n return min_distance\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n turns1, turns2 = content.strip().split(\"\\n\")\n distance_manhattan = minimal_distance(turns1, turns2)\n print(f\"Minimal Manhattan Distance: {distance_manhattan}\")\n distance_by_steps = minimal_distance_by_steps(turns1, turns2)\n print(f\"Minimal Total Steps: {distance_by_steps}\")\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n main()\n", "id": "8259035", "language": "Python", "matching_score": 1.3192130327224731, "max_stars_count": 0, "path": "day03/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport itertools\nimport pathlib\n\nimport numpy as np\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nMOON_NAMES = (\"Io\", \"Europa\", \"Ganymede\", \"Callisto\")\nMAX_CYCLE_LENGTH = 1000000\n\n\nclass Moon:\n def __init__(self, name, position):\n self.name = name\n self.position = np.array(position, dtype=int)\n self.num_values, = self.position.shape\n self.velocity = np.zeros([self.num_values], dtype=int)\n\n @classmethod\n def from_line_3d(cls, name, line):\n pre, value = line.split(\"<\")\n assert pre == \"\"\n value, post = value.split(\">\")\n assert post == \"\"\n x_eq, y_eq, z_eq = value.split(\", \")\n pre, x_val = x_eq.split(\"x=\")\n assert pre == \"\"\n pre, y_val = y_eq.split(\"y=\")\n assert pre == \"\"\n pre, z_val = z_eq.split(\"z=\")\n assert pre == \"\"\n position = int(x_val), int(y_val), int(z_val)\n return cls(name, position)\n\n def update_position(self):\n self.position += self.velocity\n\n def potential_energy(self):\n return np.sum(np.abs(self.position))\n\n def kinetic_energy(self):\n return np.sum(np.abs(self.velocity))\n\n def total_energy(self):\n return self.potential_energy() * self.kinetic_energy()\n\n def as_tuple(self):\n assert self.position.ndim == 1\n assert self.velocity.ndim == 1\n return tuple(self.position) + tuple(self.velocity)\n\n def prune(self, index):\n name = f\"{self.name}:{index}\"\n return Moon(name, [self.position[index]])\n\n\ndef update_velocities(moon1, moon2):\n assert moon1.num_values == moon2.num_values\n for i in range(moon1.num_values):\n if moon1.position[i] < moon2.position[i]:\n moon1.velocity[i] += 1\n moon2.velocity[i] += -1\n elif moon1.position[i] > moon2.position[i]:\n moon1.velocity[i] += -1\n moon2.velocity[i] += 1\n\n\ndef perform_timestep(moons):\n for moon1, moon2 in itertools.combinations(moons, 2):\n update_velocities(moon1, moon2)\n\n for moon in moons:\n moon.update_position()\n\n\ndef total_energy(moons):\n return sum(moon.total_energy() for moon in moons)\n\n\ndef prune_moons(moons, index):\n return [moon.prune(index) for moon in moons]\n\n\ndef 
as_tuple(moons):\n result = ()\n for moon in moons:\n result += moon.as_tuple()\n return result\n\n\ndef cycle_length(moons):\n seen = set()\n state0 = as_tuple(moons)\n seen.add(state0)\n for step in range(2, MAX_CYCLE_LENGTH):\n perform_timestep(moons)\n seen.add(as_tuple(moons))\n if len(seen) != step:\n assert as_tuple(moons) == state0\n return len(seen)\n\n raise RuntimeError(\"Did not complete\", [moon.name for moon in moons])\n\n\ndef part1(moons, num_steps):\n moons = copy.deepcopy(moons)\n for _ in range(num_steps):\n perform_timestep(moons)\n result = total_energy(moons)\n print(f\"Total energy: {result}\")\n\n\ndef part2(moons):\n moons = copy.deepcopy(moons)\n just_x = prune_moons(moons, 0)\n just_y = prune_moons(moons, 1)\n just_z = prune_moons(moons, 2)\n cycle_lengths = []\n for moons_reduced in (just_x, just_y, just_z):\n cycle_lengths.append(cycle_length(moons_reduced))\n print(f\"Universe repeats after: {np.lcm.reduce(cycle_lengths)}\")\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n lines = content.strip().split(\"\\n\")\n assert len(lines) == len(MOON_NAMES)\n\n moons = []\n for name, line in zip(MOON_NAMES, lines):\n moon = Moon.from_line_3d(name, line)\n moons.append(moon)\n\n part1(moons, 1000)\n part2(moons)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11319015", "language": "Python", "matching_score": 1.8908741474151611, "max_stars_count": 0, "path": "day12/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Modify the ``/ID [<...> <...>]`` line in a PDF file.\n\nThis is provided to make generated manuscripts (via ``pdflatex``) be\nbitwise identical across runs.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\n\n\nSPLIT_TEXT = b\"\\n/ID [<\"\nID_LINE = u\"{0}> <{0}>]\"\n\n\ndef verify_id_snippet(id_snippet):\n \"\"\"Verifies an ID snippet being replaced.\n\n ``id_snippet`` is expected to be of the form\n ``'{X}> <{X}>]'`` where ``X`` is 32 characters.\n \"\"\"\n if len(id_snippet) != 69:\n raise ValueError(id_snippet, \"Invalid length\")\n\n actual_id = id_snippet[:32].decode(\"ascii\")\n if not (set(actual_id) <= set(\"0123456789ABCDEF\")):\n raise ValueError(actual_id, \"ID is not composed of hex characters.\")\n\n expected_line = ID_LINE.format(actual_id).encode(\"ascii\")\n if id_snippet != expected_line:\n raise ValueError(\n id_snippet, \"Unexpected ID line. 
Expected\", expected_line\n )\n\n\ndef do_replace(path, new_id):\n with open(path, \"rb\") as file_obj:\n contents = file_obj.read()\n\n # Assert that there is exactly one match.\n pre, post = contents.split(SPLIT_TEXT)\n # ID line expected to be of the form:\n # /ID [<...> <...>]\n id_snippet, post = post.split(b\"\\n\", 1)\n verify_id_snippet(id_snippet)\n # which would just leave `<...> <...>]` after the split.\n new_id_line = ID_LINE.format(new_id).encode(\"ascii\")\n\n new_contents = pre + SPLIT_TEXT + new_id_line + b\"\\n\" + post\n with open(path, \"wb\") as file_obj:\n file_obj.write(new_contents)\n\n print(\"Updated {}\".format(path))\n\n\ndef main():\n description = \"Modify the `/ID` property in a PDF generated by `pdflatex`.\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\n \"--base\", required=True, help=\"Base path for PDF file to be modified.\"\n )\n parser.add_argument(\n \"--id\", dest=\"id_\", required=True, help=\"The prescribed ID to add.\"\n )\n\n args = parser.parse_args()\n filename = \"{}.pdf\".format(args.base)\n do_replace(filename, args.id_)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "8378487", "language": "Python", "matching_score": 0.6214022040367126, "max_stars_count": 2, "path": "scripts/modify_pdf_id.py" }, { "content": "from __future__ import unicode_literals\n\nimport os\n\nimport setuptools\n\n\nFAKE_DIRS = (\n os.path.join(\n 'example',\n '.lib',\n 'libexample.dylib.dSYM',\n 'Contents',\n 'Resources',\n 'DWARF',\n ),\n)\nFAKE_FILES = (\n 'example.mod',\n os.path.join('example', '.lib', 'libexample.dylib'),\n os.path.join(\n 'example',\n '.lib',\n 'libexample.dylib.dSYM',\n 'Contents',\n 'Info.plist',\n ),\n os.path.join(\n 'example',\n '.lib',\n 'libexample.dylib.dSYM',\n 'Contents',\n 'Resources',\n 'DWARF',\n 'libexample.dylib',\n ),\n os.path.join('example', 'example.o'),\n)\n\n\ndef fake_compile():\n for dir_name in FAKE_DIRS:\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n\n for filename in FAKE_FILES:\n with open(filename, 'w') as file_obj:\n file_obj.write('GENERATED\\n')\n\n\ndef main():\n fake_compile()\n\n setuptools.setup(\n name='example',\n version='0.0.1',\n description='Frotz',\n author='<NAME>',\n author_email='<EMAIL>',\n long_description='Did you read me? Get it? README?',\n url='https://github.com/dhermes/foreign-fortran',\n packages=['example'],\n include_package_data=True,\n zip_safe=False,\n )\n\n\nif __name__ == '__main__':\n main()\n", "id": "1640442", "language": "Python", "matching_score": 0.7809553146362305, "max_stars_count": 0, "path": "setup.py" }, { "content": "\"\"\"Custom Sphinx domain for the Go programming language.\"\"\"\n\n\nimport sphinx.domains\nimport sphinx.locale\nimport sphinx.roles\nimport sphinx.util.logging\n\n\n_LOGGER = sphinx.util.logging.getLogger(__name__)\n\n\nclass GoNotImplemented:\n # TODO\n pass\n\n\nclass GoCallable:\n # TODO\n pass\n\n\nclass GoPackage:\n # TODO\n pass\n\n\nclass GoXRefRole(sphinx.roles.XRefRole):\n def process_link(self, env, refnode, has_explicit_title, title, target):\n \"\"\"Process a cross reference, e.g. 
for relative references.\n\n Args:\n env (sphinx.environment.BuildEnvironment): The current build\n environment.\n refnode (docutils.nodes.Element): The node being referred to.\n has_explicit_title (bool): Flag indicating if the reference has\n an explicit title.\n title (str): Title of the reference.\n target (str): Target for the reference\n\n Returns:\n Tuple[str, str]: Pair of\n\n * Title (for display purposes) of the cross referenced item\n * Actual target (after processing)\n \"\"\"\n refnode[\"go:interface\"] = env.ref_context.get(\"go:interface\")\n refnode[\"go:struct\"] = env.ref_context.get(\"go:struct\")\n refnode[\"go:package\"] = env.ref_context.get(\"go:package\")\n if not has_explicit_title:\n title = title.lstrip(\".\")\n target = target.lstrip(\"~\")\n if title[:1] == \"~\":\n title = title[1:]\n dot = title.rfind(\".\")\n if dot != -1:\n title = title[dot + 1 :]\n\n if target[:1] == \".\":\n target = target[1:]\n refnode[\"refspecific\"] = True\n\n return title, target\n\n\nclass GoDomain(sphinx.domains.Domain):\n \"\"\"Go language domain.\"\"\"\n\n name = \"go\"\n label = \"Go\"\n\n object_types = {\n \"alias-func\": sphinx.domains.ObjType(\n sphinx.locale._(\"alias-func\"), \"type\"\n ),\n \"const\": sphinx.domains.ObjType(sphinx.locale._(\"constant\"), \"value\"),\n \"constructor\": sphinx.domains.ObjType(\n sphinx.locale._(\"constructor\"), \"func\"\n ),\n \"field\": sphinx.domains.ObjType(sphinx.locale._(\"field\"), \"field\"),\n # NOTE: `file` will not be rendered\n \"func\": sphinx.domains.ObjType(sphinx.locale._(\"function\"), \"func\"),\n \"interface\": sphinx.domains.ObjType(\n sphinx.locale._(\"interface\"), \"type\"\n ),\n \"interface-method\": sphinx.domains.ObjType(\n sphinx.locale._(\"interface-method\"), \"method\"\n ),\n \"method\": sphinx.domains.ObjType(sphinx.locale._(\"method\"), \"method\"),\n \"package\": sphinx.domains.ObjType(\n sphinx.locale._(\"package\"), \"package\"\n ),\n \"struct\": sphinx.domains.ObjType(sphinx.locale._(\"struct\"), \"type\"),\n \"var\": sphinx.domains.ObjType(sphinx.locale._(\"variable\"), \"value\"),\n }\n directives = {\n \"alias-func\": GoNotImplemented,\n \"const\": GoNotImplemented,\n \"constructor\": GoNotImplemented,\n \"field\": GoNotImplemented,\n \"file\": GoNotImplemented,\n \"func\": GoCallable,\n \"interface\": GoNotImplemented,\n \"interface-method\": GoCallable,\n \"method\": GoCallable,\n \"package\": GoPackage,\n \"struct\": GoNotImplemented,\n \"var\": GoNotImplemented,\n }\n roles = {\n \"field\": GoXRefRole(),\n \"func\": GoXRefRole(fix_parens=True),\n \"method\": GoXRefRole(fix_parens=True),\n \"package\": GoXRefRole(),\n \"type\": GoXRefRole(),\n \"value\": GoXRefRole(),\n }\n initial_data = {\n \"objects\": {}, # fullname -> docname, node_id, objtype\n \"packages\": {}, # pkgname -> docname, node_id\n }\n\n @property\n def objects(self):\n \"\"\"Return the objects associated with the domain.\n\n Returns:\n Dict[str, Tuple[str, str, str]]: A mapping where keys are the\n full name of an object and the values are a triple of\n\n * document name\n * node ID\n * object type\n \"\"\"\n return self.data.setdefault(\"objects\", {})\n\n def note_object(self, fullname, objtype, node_id, location=None):\n \"\"\"Keep track / take note of a new object.\n\n Args:\n fullname (str): The full name of the object.\n objtype (str): The type of object being noted.\n node_id (str): The ID of the document node.\n location (Optional[Any]): Extra optional context to be passed to\n the logger.\n \"\"\"\n if fullname in 
self.objects:\n docname = self.objects[fullname][0]\n _LOGGER.warning(\n sphinx.locale.__(\n \"duplicate %s description of %s, other %s in %s\"\n ),\n objtype,\n fullname,\n objtype,\n docname,\n location=location,\n )\n\n self.objects[fullname] = (self.env.docname, node_id, objtype)\n\n @property\n def packages(self):\n \"\"\"Return the packages associated with the domain.\n\n Returns:\n Dict[str, Tuple[List[str], str]]: A mapping where keys are the\n package import path and the values are a pair of\n\n * filenames in the package\n * node ID\n \"\"\"\n return self.data.setdefault(\"packages\", {})\n\n def note_package(self, pkgname, node_id):\n \"\"\"Keep track / take note of a new package.\n\n Args:\n pkgname (str): The import name of the package.\n node_id (str): The ID of the document node.\n \"\"\"\n # TODO: This ma not make sense because a package is composed of lots\n # of files (i.e. a single `.docname` doesn't make sense).\n # Need to verify that `self.env.docname` is a Sphinx document not\n # a Go document.\n self.packages[pkgname] = (self.env.docname, node_id)\n\n def clear_doc(self, docname):\n \"\"\"Remove all members defined in a Sphinx document.\n\n Args:\n docname (str): The name of a Sphinx document.\n \"\"\"\n for fullname, (pkg_docname, _, _) in list(self.objects.items()):\n if pkg_docname == docname:\n del self.objects[fullname]\n\n for pkgname, (pkg_docname, _) in list(self.packages.items()):\n if pkg_docname == docname:\n del self.packages[pkgname]\n\n\ndef setup(app):\n \"\"\"Register this domain and return metadata about the domain.\n\n Args:\n app (sphinx.application.Sphinx): The root application object.\n\n Returns:\n dict: Metadata about the domain\n \"\"\"\n app.add_domain(GoDomain)\n\n return {\n \"version\": \"0.0.1\",\n \"env_version\": 1,\n \"parallel_read_safe\": False,\n \"parallel_write_safe\": False,\n }\n", "id": "10554148", "language": "Python", "matching_score": 1.1142772436141968, "max_stars_count": 0, "path": "src/python/sphinx_go/__init__.py" }, { "content": "import os\nimport glob\nimport json\n\nfrom BeautifulSoup import BeautifulSoup\n\nimport get_bracket_pool\nimport utils\n\n\ndef get_links(links, filename):\n \"\"\"Updates links with those found in filename.\"\"\"\n with open(filename, 'r') as fh:\n soup = BeautifulSoup(fh.read())\n\n entry_links = soup.findAll('a', {'class': 'entry'})\n for anchor in entry_links:\n bracket_name = anchor.text\n if bracket_name in links:\n raise KeyError(bracket_name, 'already exists')\n\n bracket_link = anchor['href']\n before, entry_id = bracket_link.split('entry?entryID=', 1)\n if before != '':\n raise ValueError('Expected link to begin with entry?...')\n entry_id = int(entry_id)\n links[bracket_name] = entry_id\n\n\ndef get_all_bracket_links():\n all_filenames = glob.glob(get_bracket_pool.BASE_FILENAME + '*')\n links = {}\n for filename in all_filenames:\n get_links(links, filename)\n\n # check_unique_entries\n if len(set(links.values())) != len(links):\n raise ValueError('Link entry IDs not unique.')\n\n return links\n\n\nif __name__ == '__main__':\n links = get_all_bracket_links()\n with open(utils.BRACKET_LINKS_FILE, 'w') as fh:\n json.dump(links, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n", "id": "10406073", "language": "Python", "matching_score": 1.1312676668167114, "max_stars_count": 1, "path": "parse_bracket_links.py" }, { "content": "# Copyright 2017 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper to convert a JSON key file into a PEM PKCS#1 key.\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport json\nimport subprocess\nimport sys\n\ntry:\n import py\nexcept ImportError:\n py = None\n\n\nENV_VAR = 'GOOGLE_APPLICATION_CREDENTIALS'\n\n\ndef _require_env():\n json_filename = os.environ.get(ENV_VAR)\n if json_filename is None:\n msg = '{} is unset'.format(ENV_VAR)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n return json_filename\n\n\ndef _require_file(json_filename):\n if not os.path.isfile(json_filename):\n msg = '{}={} is not a file.'.format(ENV_VAR, json_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef _require_json(json_filename):\n with open(json_filename, 'r') as file_obj:\n try:\n return json.load(file_obj)\n except:\n msg = '{}={} does not contain valid JSON.'.format(\n ENV_VAR, json_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef _require_private_key(key_json):\n pkcs8_pem = key_json.get('private_key')\n if pkcs8_pem is None:\n msg = '``private_key`` missing in JSON key file'\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n return pkcs8_pem\n\n\ndef _require_email(key_json):\n client_email = key_json.get('client_email')\n if client_email is None:\n msg = '``client_email`` missing in JSON key file'\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n return client_email\n\n\ndef get_key_json():\n json_filename = _require_env()\n _require_file(json_filename)\n key_json = _require_json(json_filename)\n return key_json, json_filename\n\n\ndef _require_py():\n if py is None:\n msg = 'py (https://pypi.org/project/py/) must be installed.'\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef _require_openssl():\n \"\"\"Check that ``openssl`` is on the PATH.\n\n Assumes :func:`_require_py` has been checked.\n \"\"\"\n if py.path.local.sysfind('openssl') is None:\n msg = '``openssl`` command line tool must be installed.'\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef _pkcs8_filename(pkcs8_pem, base):\n \"\"\"Create / check a PKCS#8 file.\n\n Exits with 1 if the file already exists and differs from\n ``pkcs8_pem``. 
If the file does not exists, creates it with\n ``pkcs8_pem`` as contents and sets permissions to 0400.\n\n Args:\n pkcs8_pem (str): The contents to be stored (or checked).\n base (str): The base file path (without extension).\n\n Returns:\n str: The filename that was checked / created.\n \"\"\"\n pkcs8_filename = '{}-PKCS8.pem'.format(base)\n if os.path.exists(pkcs8_filename):\n with open(pkcs8_filename, 'r') as file_obj:\n contents = file_obj.read()\n\n if contents != pkcs8_pem:\n msg = 'PKCS#8 file {} already exists.'.format(pkcs8_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n else:\n with open(pkcs8_filename, 'w') as file_obj:\n file_obj.write(pkcs8_pem)\n # Protect the file from being read by other users..\n os.chmod(pkcs8_filename, 0o400)\n\n return pkcs8_filename\n\n\ndef _pkcs1_verify(pkcs8_filename, pkcs1_filename):\n \"\"\"Verify the contents of an existing PKCS#1 file.\n\n Does so by using ``openssl rsa`` to print to stdout and\n then checking against contents.\n\n Exits with 1 if:\n\n * The ``openssl`` command fails\n * The ``pkcs1_filename`` contents differ from what was produced\n by ``openssl``\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to check against.\n \"\"\"\n cmd = (\n 'openssl',\n 'rsa',\n '-in',\n pkcs8_filename,\n )\n process = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return_code = process.wait()\n\n if return_code != 0:\n msg = 'Failed checking contents of {} against openssl.'.format(\n pkcs1_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n cmd_output = process.stdout.read().decode('utf-8')\n with open(pkcs1_filename, 'r') as file_obj:\n expected_contents = file_obj.read()\n\n if cmd_output != expected_contents:\n msg = 'PKCS#1 file {} already exists.'.format(pkcs1_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef _pkcs1_create(pkcs8_filename, pkcs1_filename):\n \"\"\"Create a existing PKCS#1 file from a PKCS#8 file.\n\n Does so by using ``openssl rsa -in * -out *``.\n\n Exits with 1 if the ``openssl`` command fails.\n\n Args:\n pkcs8_filename (str): The PKCS#8 file to be converted.\n pkcs1_filename (str): The PKCS#1 file to be created.\n \"\"\"\n cmd = (\n 'openssl',\n 'rsa',\n '-in',\n pkcs8_filename,\n '-out',\n pkcs1_filename,\n )\n process = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return_code = process.wait()\n if return_code != 0:\n msg = 'Failed to convert {} to {} with openssl.'.format(\n pkcs8_filename, pkcs1_filename)\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef convert_key(pkcs8_pem, json_filename):\n _require_py()\n _require_openssl()\n\n base, _ = os.path.splitext(json_filename)\n pkcs8_filename = _pkcs8_filename(pkcs8_pem, base)\n\n pkcs1_filename = '{}-PKCS1.pem'.format(base)\n if os.path.exists(pkcs1_filename):\n _pkcs1_verify(pkcs8_filename, pkcs1_filename)\n else:\n _pkcs1_create(pkcs8_filename, pkcs1_filename)\n\n return pkcs1_filename\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description='Convert a JSON keyfile to dev_appserver values.')\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\n '--email', action='store_true',\n help='Requests that the email address be returned.')\n pkcs1_help = (\n 'Requests that a filename for the converted PKCS#1 file be returned.')\n group.add_argument(\n '--pkcs1', action='store_true', help=pkcs1_help)\n group.add_argument(\n '--clean', action='store_true',\n help='Clean up any created 
files.')\n\n return parser.parse_args()\n\n\ndef _clean(json_filename):\n base, _ = os.path.splitext(json_filename)\n pkcs1_filename = '{}-PKCS1.pem'.format(base)\n pkcs8_filename = '{}-PKCS8.pem'.format(base)\n\n for filename in (pkcs1_filename, pkcs8_filename):\n try:\n os.remove(filename)\n print('Removed {}'.format(filename))\n except OSError:\n pass\n\n\ndef main():\n args = get_args()\n\n key_json, json_filename = get_key_json()\n if args.email:\n print(_require_email(key_json))\n elif args.pkcs1:\n pkcs8_pem = _require_private_key(key_json)\n pkcs1_filename = convert_key(pkcs8_pem, json_filename)\n print(pkcs1_filename)\n elif args.clean:\n _clean(json_filename)\n else:\n raise RuntimeError('Options not set', args)\n\n\nif __name__ == '__main__':\n main()\n", "id": "3842327", "language": "Python", "matching_score": 2.3903844356536865, "max_stars_count": 0, "path": "convert_key.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\n\n\ntry:\n _PROC = subprocess.Popen(['git', '--help'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n HAS_GIT = _PROC.wait() == 0\n del _PROC\nexcept OSError: # pragma: NO COVER\n HAS_GIT = False\n", "id": "2664750", "language": "Python", "matching_score": 0.6561452150344849, "max_stars_count": 5, "path": "tests/utils.py" }, { "content": "import os\nimport subprocess\nimport sys\n\n\nLIB_DIRS = ('apiclient', 'httplib2', 'oauth2client', 'uritemplate',\n 'pytz', 'icalendar')\ndirectory_missing = False\nfor directory in LIB_DIRS:\n if not os.path.isdir(directory):\n directory_missing = True\n break\n\nif not directory_missing:\n print 'All dependencies already downloaded. 
Doing nothing and exiting.'\n sys.exit(0)\n\n\nGOOGLE_API_CLIENT_ZIP = ('https://google-api-python-client.googlecode.com'\n '/files/google-api-python-client-gae-1.2.zip')\nGOOGLE_API_CLIENT_FILENAME = os.path.split(GOOGLE_API_CLIENT_ZIP)[-1]\n\nPYTZ_ZIP = ('https://pypi.python.org/packages/source/g/gaepytz/'\n 'gaepytz-2011h.zip#md5=0f130ef491509775b5ed8c5f62bf66fb')\nPYTZ_FILENAME = 'gaepytz-2011h.zip'\nPYTZ_SUBDIR = 'gaepytz-2011h/pytz'\nPYTZ_MAINDIR = 'gaepytz-2011h/'\nPYTZ_CODE_DIR = 'pytz'\n\nICALENDAR_ZIP = ('https://pypi.python.org/packages/source/i/icalendar/'\n 'icalendar-3.6.2.zip#md5=e815c0bbef1097713555925235af0630')\nICALENDAR_FILENAME = 'icalendar-3.6.2.zip'\nICALENDAR_SUBDIR = 'icalendar-3.6.2/src/icalendar'\nICALENDAR_MAINDIR = 'icalendar-3.6.2/'\nICALENDAR_CODE_DIR = 'icalendar'\n\n\ndef get_git_root():\n \"\"\"Retrieves the current root of the git repository.\n\n Returns:\n String containing the current git root, if in a repository.\n \"\"\"\n return subprocess.check_output(\n ['git', 'rev-parse', '--show-toplevel']).strip()\n\n\nGIT_ROOT = get_git_root()\nprint 'Changing directory to', GIT_ROOT\nos.chdir(GIT_ROOT)\n\nprint '=' * 60\n\nif os.path.exists(GOOGLE_API_CLIENT_FILENAME):\n print 'google-api-python-client file already exists, please remove'\n sys.exit(1)\n\nprint 'Downloading google-api-python-client library'\nsubprocess.call(['wget', GOOGLE_API_CLIENT_ZIP])\nprint 'Unzipping google-api-python-client library'\nsubprocess.call(['unzip', '-oq', GOOGLE_API_CLIENT_FILENAME])\nprint 'Removing google-api-python-client library zip'\nsubprocess.call(['rm', '-f', GOOGLE_API_CLIENT_FILENAME])\n\nprint '=' * 60\n\nif os.path.exists(PYTZ_FILENAME):\n print 'gae-pytz file already exists, please remove'\n sys.exit(1)\n\nprint 'Downloading gae-pytz library'\nsubprocess.call(['wget', PYTZ_ZIP])\nprint 'Unzipping gae-pytz library'\nsubprocess.call(['unzip', '-oq', PYTZ_FILENAME])\nprint 'Removing existing gae-pytz code'\nsubprocess.call(['rm', '-fr', PYTZ_CODE_DIR])\nprint 'Moving library to project root'\nsubprocess.call(['mv', PYTZ_SUBDIR, GIT_ROOT])\nprint 'Removing gae-pytz unused files'\nsubprocess.call(['rm', '-fr', PYTZ_MAINDIR])\nprint 'Removing gae-pytz library zip'\nsubprocess.call(['rm', '-f', PYTZ_FILENAME])\n\nprint '=' * 60\n\nprint 'Downloading icalendar library'\nsubprocess.call(['wget', ICALENDAR_ZIP])\nprint 'Unzipping icalendar library'\nsubprocess.call(['unzip', '-oq', ICALENDAR_FILENAME])\n\nprint 'Removing existing icalendar code'\nsubprocess.call(['rm', '-fr', ICALENDAR_CODE_DIR])\n\nprint 'Moving library to project root'\nsubprocess.call(['mv', ICALENDAR_SUBDIR, GIT_ROOT])\nprint 'Removing icalendar unused files'\nsubprocess.call(['rm', '-fr', ICALENDAR_MAINDIR])\nprint 'Removing icalendar library zip'\nsubprocess.call(['rm', '-f', ICALENDAR_FILENAME])\n\nprint '=' * 60\n\nprint 'Updating iCal with App Engine specific hacks'\nsubprocess.call(['git', 'apply', 'ical.patch'])\n", "id": "11989527", "language": "Python", "matching_score": 1.4329513311386108, "max_stars_count": 1, "path": "setup_dependencies.py" }, { "content": "#!/usr/bin/env python\n\n# Libraries\nimport os\n\n# Local imports\nimport constants\n\n\nCURR_DIR = os.path.dirname(__file__)\n\n\ndef main():\n old_data_dir_path = os.path.join(CURR_DIR, constants.OLD_DATA_DIR)\n if not os.path.exists(old_data_dir_path):\n print 'Making directory:', old_data_dir_path\n os.makedirs(old_data_dir_path)\n elif not os.path.isdir(old_data_dir_path):\n raise OSError('%s is not a directory.' 
% old_data_dir_path)\n else:\n print 'Directory', old_data_dir_path, 'exists, doing nothing.'\n\n new_data_dir_path = os.path.join(CURR_DIR, constants.NEW_DATA_DIR)\n if not os.path.exists(new_data_dir_path):\n print 'Making directory:', new_data_dir_path\n os.makedirs(new_data_dir_path)\n elif not os.path.isdir(new_data_dir_path):\n raise OSError('%s is not a directory.' % new_data_dir_path)\n else:\n print 'Directory', new_data_dir_path, 'exists, doing nothing.'\n\n\nif __name__ == '__main__':\n main()\n\n", "id": "11856417", "language": "Python", "matching_score": 1.3372187614440918, "max_stars_count": 0, "path": "step2_make_data_folders.py" }, { "content": "#!/usr/bin/env python\n\n# In the 20 x 20 grid below, four numbers along a diagonal line have been\n# marked in red.\n\n# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08\n# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00\n# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65\n# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91\n# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80\n# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50\n# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70\n# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21\n# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72\n# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95\n# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92\n# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57\n# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58\n# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40\n# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66\n# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69\n# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36\n# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16\n# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54\n# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48\n\n# The product of these numbers is 26 63 78 14 = 1788696.\n# What is the greatest product of four adjacent numbers in any\n# direction (up, down, left, right, or diagonally) in\n# the 20 x 20 grid?\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef make_path(point, step, length):\n return [(point[0] + i * step[0], point[1] + i * step[1])\n for i in range(length)]\n\n\ndef convert(path, data):\n # Assumes path is made of points (x,y) where data[x][y] exists\n return reduce(operator.mul, [data[x][y] for x, y in path])\n\n\ndef main(verbose=False):\n DATA = get_data(11)\n DATA = [[int(entry) for entry in row.split()]\n for row in DATA.split(\"\\n\") if row]\n\n # UP/DOWN goes from DATA[x][y] to DATA[x+3][y] where 0 <= x, x+3, y <= 19\n vert = max(convert(make_path((x, y), (1, 0), 4), DATA) for x\n in range(0, 16 + 1) for y in range(19 + 1))\n\n # LEFT/RIGHT goes from DATA[x][y] to DATA[x][y+3]\n # where 0 <= x, y, y+3 <= 19\n horiz = max(convert(make_path((x, y), (0, 1), 4), DATA) for x\n in range(0, 19 + 1) for y in range(16 + 1))\n\n # DIAGONAL L->R goes from DATA[x][y] to DATA[x+3][y+3] via +[1,1]\n diag_l_r = max(convert(make_path((x, y), (1, 1), 4), DATA) for x\n in range(0, 16 + 1) for y in range(16 + 1))\n\n # DIAGONAL R->L goes from DATA[x][y] to DATA[x-3][y+3] via +[-1,1]\n diag_r_l = max(convert(make_path((x, y), (-1, 1), 4), DATA) for x\n in range(3, 19 + 1) for y in range(16 + 1))\n\n return max(vert, horiz, 
diag_l_r, diag_r_l)\n\nif __name__ == '__main__':\n print euler_timer(11)(main)(verbose=True)\n", "id": "8753358", "language": "Python", "matching_score": 1.081207036972046, "max_stars_count": 7, "path": "python/complete/no011.py" }, { "content": "#!/usr/bin/env python\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_permutations\nfrom python.functions import get_data\n\n\ndef main(verbose=False):\n data = [[int(dig) for dig in row] for row\n in get_data(79).split(\"\\r\\n\") if row]\n\n all_values = list(set(reduce(operator.add, data)))\n for password in all_permutations(all_values):\n correct = True\n for left, middle, right in data:\n left_index = password.index(left)\n middle_index = password.index(middle)\n right_index = password.index(right)\n if not (left_index < middle_index < right_index):\n correct = False\n break\n if correct:\n break\n if not correct:\n raise Exception(\"No match found\")\n return ''.join(str(key) for key in password)\n\nif __name__ == '__main__':\n print euler_timer(79)(main)(verbose=True)\n", "id": "6345409", "language": "Python", "matching_score": 0.8952817320823669, "max_stars_count": 7, "path": "python/complete/no079.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of the distinct squarefree numbers in the first 51\n# rows of Pascal's triangle.\n\nfrom python.decorators import euler_timer\nfrom python.functions import first_prime_divisor\nfrom python.functions import robust_divide\n\n\ndef pascal_next(row):\n if len(row) < 2:\n raise ValueError(\"Don't pass this, breaks algorithm\")\n\n result = [1]\n result.extend([row[i] + row[i + 1] for i in range(len(row) - 1)])\n # n = row[1], when n is odd the regular pascal row has\n # n + 1 = even number of elements, hence there is a repeat\n # in the middle\n if row[1] % 2 == 1:\n result.append(row[-1] * 2)\n return result\n\n\ndef unique_in_pascal(rows):\n if rows < 1:\n raise ValueError(\"Rows should be positive\")\n elif rows < 3:\n return [1]\n\n vals = set([1, 2])\n curr = [1, 2]\n for i in range(4, rows + 1):\n curr = pascal_next(curr)\n vals.update(curr)\n return sorted(list(vals))\n\n\ndef is_squarefree(n):\n if n == 1:\n return True\n\n quotient = n\n count = 1\n while count == 1:\n prime, _ = first_prime_divisor(quotient)\n quotient, count = robust_divide(quotient, prime, include_count=True)\n if quotient == 1:\n return (count == 1)\n return False\n\n\ndef main(verbose=False):\n NUM_ROWS = 51\n pascal_vals = unique_in_pascal(NUM_ROWS)\n return sum(val for val in pascal_vals if is_squarefree(val))\n\nif __name__ == '__main__':\n print euler_timer(203)(main)(verbose=True)\n", "id": "3043321", "language": "Python", "matching_score": 2.2383456230163574, "max_stars_count": 7, "path": "python/complete/no203.py" }, { "content": "#!/usr/bin/env python\n\n# The radical of n, rad(n), is the product of distinct prime factors of n.\n# rad(12) = 2*3, rad(343) = 7\n\n# If we calculate rad(n) for 1 <= n <= M, then sort them on rad(n), and\n# sorting on n if the radical values are equal, we get a sequence\n# E(k) for 1 <= k <= N\n\n# n r E\n# 1 1 1\n# 2 2 2\n# 4 2 3\n# 8 2 4\n# 3 3 5\n# 9 3 6\n# 5 5 7\n# 6 6 8\n# 7 7 9\n# 10 10 10\n\n# Given this sequence E when M = 10**5,\n# find E(10**4)\n\nfrom python.decorators import euler_timer\nfrom python.functions import first_prime_divisor\nfrom python.functions import sieve\n\n\ndef all_radicals(n):\n PRIMES = sieve(n)\n result = {1: 1}\n for i in range(2, n + 1):\n if i in PRIMES:\n result[i] = i\n prime, quotient 
= first_prime_divisor(i, PRIMES)\n if quotient % prime == 0:\n result[i] = result[quotient]\n else:\n result[i] = result[quotient] * prime\n return result\n\n\ndef sorted_radicals(n):\n rad_dict = all_radicals(n)\n rad_vals = {}\n # since we go in order, we have\n # no need to sort within each radical value\n for i in range(1, n + 1):\n value = rad_dict[i]\n # sets value to [] if not set, returns value at key\n rad_vals.setdefault(value, []).append(i)\n\n result = []\n for value in sorted(rad_vals):\n result.extend(rad_vals[value])\n\n return result\n\n\ndef main(verbose=False):\n MAX_n = 10 ** 5\n index = 10 ** 4\n return sorted_radicals(MAX_n)[index - 1]\n\nif __name__ == '__main__':\n print euler_timer(124)(main)(verbose=True)\n", "id": "2674625", "language": "Python", "matching_score": 1.3962047100067139, "max_stars_count": 7, "path": "python/complete/no124.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_factors\n\n\ndef main(verbose=False):\n n = 10000\n factors = all_factors(n - 1)\n\n # sum of proper divisors\n first_pass = [sum(factors[i]) - i for i in range(1, n)]\n max_out = max(first_pass)\n\n factors = all_factors(max_out, factors)\n\n # applying sum of divisors twice to i we have\n # s_1 = func(i), s_2 = func(s_1)\n # s_1 = sum(factors[i]) - i, s_2 = sum(factors[s_1]) - s_1\n # i == s_2 <==>\n # i == sum(factors[sum(factors[i]) - i]) - (sum(factors[i]) - i) <==>\n # sum(factors[sum(factors[i]) - i]) == sum(factors[i])\n # Similarly s_1 != i <==> sum(factors[i]) != 2*i\n result = [i for i in range(2, n) if\n sum(factors[sum(factors[i]) - i]) == sum(factors[i]) and\n sum(factors[i]) != 2 * i]\n\n if verbose:\n return '%s.\\nThe full list of such amicable numbers is %s.' 
% (\n sum(result), ', '.join(str(elt) for elt in result))\n else:\n return sum(result)\n\nif __name__ == '__main__':\n print euler_timer(21)(main)(verbose=True)\n", "id": "12593300", "language": "Python", "matching_score": 1.370092511177063, "max_stars_count": 7, "path": "python/complete/no021.py" }, { "content": "#!/usr/bin/env python\n\n# A number n is called abundant if the sum of its proper divisors exceeds n.\n\n# By mathematical analysis, it can be shown that all integers greater than\n# 28123 can be written as the sum of two abundant numbers.\n\n# Find the sum of all the positive integers which cannot be written as\n# the sum of two abundant numbers.\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_factors\n\n\ndef abundant_numbers(n):\n factor_hash = all_factors(n)\n # sum of proper divisors\n return [i for i in range(2, n + 1) if i < sum(factor_hash[i]) - i]\n\n\ndef main(verbose=False):\n abundants = abundant_numbers(28123)\n sums = [False] * (28123 + 1)\n\n length = len(abundants)\n for index in range(length):\n for second_index in range(index, length):\n val1 = abundants[index]\n val2 = abundants[second_index]\n if (val1 + val2 <= 28123):\n sums[val1 + val2] = True\n\n # those with indices set to false are the ones which can't be written\n # as the sum of two abundant numbers, so we sum them\n return sum(i for i, bool_val in enumerate(sums) if not bool_val)\n\nif __name__ == '__main__':\n print euler_timer(23)(main)(verbose=True)\n", "id": "1021130", "language": "Python", "matching_score": 1.4938303232192993, "max_stars_count": 7, "path": "python/complete/no023.py" }, { "content": "#!/usr/bin/env python\n\n# We use a sieve type method to calculate\n# the sum of all proper divisors up to n\n# by looping through all possible factors\n# and adding to the sum for each number\n# that the factor divides into\n\nfrom python.decorators import euler_timer\n\n\ndef proper_divisor_sums(n):\n result = [0] * (n + 1)\n # loop over all possible divisors\n for divisor in xrange(1, n + 1):\n # loop over all numbers that\n # i divides properly (we want\n # the sum of proper divisors)\n for parent in xrange(2 * divisor, n + 1, divisor):\n result[parent] += divisor\n return result\n\n\ndef amicable_cycle(n, cycle_hash, divisors, break_point):\n if n in cycle_hash:\n return cycle_hash[n][1]\n\n cycle = [n]\n next = divisors[n]\n while (next not in cycle_hash and\n next not in cycle and\n next <= break_point):\n cycle.append(next)\n next = divisors[next]\n\n if next > break_point:\n set_val = [None]\n elif next in cycle_hash:\n set_val = cycle_hash[next][1]\n elif next in cycle:\n start = cycle.index(next)\n set_val = cycle[start:]\n else:\n raise Exception(\"Cycle should've occurred, check algorithm\")\n for val in cycle:\n cycle_hash[val] = (divisors[val], set_val[:])\n return cycle_hash[n][1]\n\n\ndef main(verbose=False):\n MAX_n = 10 ** 6\n divisors = proper_divisor_sums(MAX_n)\n chains = {1: (0, [0]),\n 2: (1, [0]),\n 3: (1, [0])}\n\n best_length = 1\n longest_chain = [0]\n for i in range(4, MAX_n + 1):\n chain = amicable_cycle(i, chains, divisors, MAX_n)\n if len(chain) > best_length:\n best_length = len(chain)\n longest_chain = chain[:]\n\n return min(longest_chain)\n\nif __name__ == '__main__':\n print euler_timer(95)(main)(verbose=True)\n", "id": "8768558", "language": "Python", "matching_score": 1.7587333917617798, "max_stars_count": 7, "path": "python/complete/no095.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import 
euler_timer\nfrom python.functions import is_prime\nfrom python.functions import sieve\n\n\ndef prime_concat_partners(list_, primes, failure_point):\n result = {}\n\n length = len(list_)\n for first in range(length - 1):\n n1 = list_[first]\n for second in range(first + 1, length):\n n2 = list_[second]\n cand1 = int(str(n1) + str(n2))\n cand2 = int(str(n2) + str(n1))\n if is_prime(cand1, primes=primes, failure_point=failure_point):\n if is_prime(cand2, primes=primes, failure_point=failure_point):\n # sets value to [] if not set, returns value at key\n result.setdefault(n1, []).append(n2)\n result.setdefault(n2, []).append(n1)\n return result\n\n\ndef possible_pairings(partner_hash, length):\n # length = 1\n result = [[key] for key in partner_hash]\n for size in range(2, length + 1):\n next_iteration = []\n for subset in result:\n possible_additions = partner_hash[subset[0]]\n for val in subset:\n possible_additions = [entry for entry in possible_additions\n if entry in partner_hash[val]]\n next_iteration.extend([subset[:] + [candidate]\n for candidate in possible_additions])\n result = next_iteration\n return result\n\n\ndef main(verbose=False):\n MAX_n = 10 ** 4\n PRIMES = sieve(MAX_n)\n partner_hash = prime_concat_partners(PRIMES, PRIMES, MAX_n ** 2)\n valid = possible_pairings(partner_hash, 5)\n\n min_sum = 10 ** 10\n min_set = None\n for subset in valid:\n if sum(subset) < min_sum:\n min_sum = sum(subset)\n min_set = subset\n\n min_set = [str(prime) for prime in sorted(min_set)]\n if verbose:\n return '%s.\\nThis is obtained with the primes %s.' % (\n min_sum, ', '.join(min_set))\n else:\n return min_sum\n\nif __name__ == '__main__':\n print euler_timer(60)(main)(verbose=True)\n", "id": "674564", "language": "Python", "matching_score": 1.648804783821106, "max_stars_count": 7, "path": "python/too_slow/no060.py" }, { "content": "#!/usr/bin/env python\n\n# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms\n# increases by 3330, is unusual in two ways: (i) each of the three\n# terms are prime, and, (ii) each of the 4-digit numbers are\n# permutations of one another.\n\n# There are no arithmetic sequences made up of three 1-, 2-, or 3-digit\n# primes, exhibiting this property, but there is one other 4-digit\n# increasing sequence.\n\n# What 12-digit number do you form by concatenating the three terms\n# in this sequence?\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_subsets\nfrom python.functions import sieve\n\n\ndef find_arithmetic(list_):\n if len(list_) < 3:\n raise ValueError(\"List wrong size.\")\n\n candidates = all_subsets(list_, 3)\n for cand in candidates:\n if cand[0] + cand[2] == 2 * cand[1]:\n return cand\n return []\n\n\ndef main(verbose=False):\n primes = [prime for prime in sieve(10000) if prime > 999]\n primes_by_digits = {}\n for prime in primes:\n key = \"\".join(sorted(digit for digit in str(prime)))\n # sets value to [] if not set, returns value at key\n primes_by_digits.setdefault(key, []).append(prime)\n\n result = []\n for key in primes_by_digits:\n candidate = primes_by_digits[key]\n if len(candidate) >= 3:\n soln = find_arithmetic(candidate)\n if soln:\n result.append(\"\".join(str(num) for num in sorted(soln)))\n return result[0]\n\nif __name__ == '__main__':\n print euler_timer(49)(main)(verbose=True)\n", "id": "8636998", "language": "Python", "matching_score": 1.735528588294983, "max_stars_count": 7, "path": "python/complete/no049.py" }, { "content": "#!/usr/bin/env python\n\n# an n-digit number is >= 
10^(n-1)\n# n*m >= 10^(n+m-2), must have at least n + m - 1 digits\n\n# subsets of cardinality 5,6\n\nfrom python.decorators import euler_timer\n\n\ndef all_orderings(list_):\n if len(list_) == 1:\n return [list_]\n\n result = []\n for elt in list_:\n sublist = list_[:]\n sublist.remove(elt)\n result.extend([[elt] + ordering\n for ordering in all_orderings(sublist)])\n\n return result\n\n\n# Will take a list and break it at various places, returning\n# the product of the integers formed\ndef possible_products(list_):\n result = []\n\n for i in range(1, len(list_)):\n left = list_[:i]\n left = int(\"\".join(str(elt) for elt in left))\n right = list_[i:]\n right = int(\"\".join(str(elt) for elt in right))\n result.append(left * right)\n\n return result\n\n\ndef main(verbose=False):\n products = set()\n candidates = all_orderings(range(1, 10))\n for candidate in candidates:\n prods = possible_products(candidate[:5])\n last4 = candidate[-4:]\n last4 = int(\"\".join(str(elt) for elt in last4))\n if last4 in prods:\n products.add(last4)\n\n prods = possible_products(candidate[:6])\n last3 = candidate[-3:]\n last3 = int(\"\".join(str(elt) for elt in last3))\n if last3 in prods:\n products.add(last3)\n\n return sum(products)\n\nif __name__ == '__main__':\n print euler_timer(32)(main)(verbose=True)\n", "id": "4373307", "language": "Python", "matching_score": 1.2353025674819946, "max_stars_count": 7, "path": "python/complete/no032.py" }, { "content": "#!/usr/bin/env python\n\n# What is the largest 1 to 9 pandigital 9-digit number that can be formed as\n# the concatenated product of an integer with (1,2, ... , n) where n > 1?\n\nfrom python.decorators import euler_timer\n\n\ndef is_pandigital_9(str_):\n for dig in [str(elt) for elt in range(1, 10)]:\n if str_.count(dig) != 1:\n return False\n return True\n\n\ndef all_pandigitals_1_to_n(n):\n to_mult = range(1, n + 1)\n multiplier = 1\n result = []\n\n curr = \"\".join(str(multiplier * elt) for elt in to_mult)\n while len(curr) < 10:\n if is_pandigital_9(curr):\n result.append(curr)\n multiplier += 1\n curr = \"\".join(str(multiplier * elt) for elt in to_mult)\n\n return result\n\n\ndef main(verbose=False):\n result = []\n for n in range(2, 10):\n result.extend(all_pandigitals_1_to_n(n))\n return max(int(elt) for elt in result)\n\nif __name__ == '__main__':\n print euler_timer(38)(main)(verbose=True)\n", "id": "6026468", "language": "Python", "matching_score": 1.5379005670547485, "max_stars_count": 7, "path": "python/complete/no038.py" }, { "content": "#!/usr/bin/env python\n\n# Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x,\n# and 6x, contain the same digits.\n\n# NOTE:\n# In order for x and 6x to have same digits, they must have same\n# num of digits\n\n# 10^(k - 1) <= x < 10^k has k digits\n# Also need 10^(k - 1) <= 6x < 10^k\n# Combining 10^(k - 1) <= x <= (10^k/6) <--- integer division\n\nfrom python.decorators import euler_timer\n\n\ndef same_digs(n, multiplier):\n candidates = [n * mult for mult in range(1, multiplier + 1)]\n cand_digs = [sorted(int(dig) for dig in str(element))\n for element in candidates]\n # we sort the digits so only the content of the digit list matters\n return (cand_digs.count(cand_digs[0]) == len(cand_digs))\n\n\ndef find_sequence_same_digs(digs, multiplier):\n for n in range(10 ** (digs - 1), 10 ** digs / multiplier + 1):\n if same_digs(n, multiplier):\n return (True, n)\n return (False, -1)\n\n\ndef find_sequence_same(multiplier):\n digits = 1\n found = False\n\n while not found:\n found, val = 
find_sequence_same_digs(digits, multiplier)\n digits += 1\n return val\n\n\ndef main(verbose=False):\n return find_sequence_same(6)\n\nif __name__ == '__main__':\n print euler_timer(52)(main)(verbose=True)\n", "id": "8404217", "language": "Python", "matching_score": 1.7026888132095337, "max_stars_count": 7, "path": "python/complete/no052.py" }, { "content": "#!/usr/bin/env python\n\n# The 5-digit number, 16807=7**5, is also a fifth power. Similarly, the 9-digit\n# number, 134217728=8**9, is a ninth power.\n\n# How many n-digit positive integers exist which are also an nth power?\n\n#######################################################\n# 10**(n - 1) <= N < 10**n have n digits\n# So if N = k**n for some k we have\n# 10**(1 - 1/n) <= k < 10, for k integer\n# if 9 < 10**(1 - 1/n), there are no such k\n# This occurs when ln(9) < (1 - 1/n)ln(10)\n# 1/(1 - ln(9)/ln(10)) < n\n\nfrom math import ceil\nfrom math import log\n\nfrom python.decorators import euler_timer\n\n\ndef num_n_digits(n):\n # 10**(1 - 1/n) <= k < 10, for k integer\n return 10 - int(ceil(10 ** (1 - 1.0 / n)))\n\n\ndef main(verbose=False):\n MAX_n = int((1 - log(9) / log(10)) ** (-1))\n return sum(num_n_digits(i) for i in range(1, MAX_n + 1))\n\nif __name__ == '__main__':\n print euler_timer(63)(main)(verbose=True)\n", "id": "5510844", "language": "Python", "matching_score": 1.4067240953445435, "max_stars_count": 7, "path": "python/complete/no063.py" }, { "content": "#!/usr/bin/env python\n\n# How many, not necessarily distinct, values of n_C_r, for 1 <= n <= 100,\n# are greater than one-million?\n\n# Witness\n# 1 3 3 1\n# When n is odd, the maximum occurs at r = (n - 1)/2 and (n + 1)/2\n# When n is odd there are (n + 1)/2 values that occur twice\n# 1 4 6 4 1\n# When n is even, the maximum occurs at r = n/2\n# When n is even there are (n - 1)/2 values that occur twice, and\n# one that occurs once\n\n# In either case we go up to n/2 (integer division) and then add on the\n# next integer, in the odd case, it is double, in the even case only single\n\n# n_C_r --> n_C_(r + 1), n!/(r!(n-r-1)!(n-r)) --> n!/((r+1) r!(n-r-1)!)\n\nfrom python.decorators import euler_timer\n\n\ndef num_over_limit(n, limit):\n \"\"\"\n Returns the number of values for n C r that are greater than limit\n \"\"\"\n if n == 1:\n if 1 > limit:\n return 2\n else:\n return 0\n\n prod = 1\n for r in range(n / 2 - 1):\n if prod > limit:\n return (n / 2 - r) * 2 + (n % 2) + 1\n prod = prod * (n - r) / (r + 1)\n if prod > limit:\n return 2 + (n % 2) + 1\n prod = prod * (n - n / 2 + 1) / (n / 2)\n if prod > limit:\n return (n % 2) + 1\n return 0\n\n\ndef all_over_limit(n_max, limit):\n \"\"\"\n Returns the number of values for n C r that are greater than limit\n as n goes from 1 up to n_max\n \"\"\"\n return sum(num_over_limit(n, limit) for n in range(1, n_max + 1))\n\n\ndef main(verbose=False):\n return all_over_limit(100, 10 ** 6)\n\nif __name__ == '__main__':\n print euler_timer(53)(main)(verbose=True)\n", "id": "4451837", "language": "Python", "matching_score": 0.7500569224357605, "max_stars_count": 7, "path": "python/complete/no053.py" }, { "content": "#!/usr/bin/env python\n\n# It turns out that 12 cm is the smallest length of wire that can be\n# bent to form an integer sided right angle triangle in exactly one\n# way, but there are many more examples.\n\n# 12 cm: (3,4,5)\n# 24 cm: (6,8,10)\n# 30 cm: (5,12,13)\n# 36 cm: (9,12,15)\n# 40 cm: (8,15,17)\n# 48 cm: (12,16,20)\n\n# In contrast, some lengths of wire, like 20 cm, cannot be bent to form\n# an integer 
sided right angle triangle, and other lengths allow more\n# than one solution to be found; for example, using 120 cm it is possible\n# to form exactly three different integer sided right angle triangles.\n\n# 120 cm: (30,40,50), (20,48,52), (24,45,51)\n\n# Given that L is the length of the wire, for how many values of L <= 1,500,000\n# can exactly one integer sided right angle triangle be formed?\n\n################################\n\n# All pythagorean triples can be represented:\n# k(m**2 - n**2), k*(2*m*n), k(m**2 + n**2)\n# With m > n and (m, n) == 1\n# Hence L = 2*k*m*(m + n)\n\n# m*(m + n) = L/(2k), m > n > 0 requires\n# m**2 < m*(m + n) < L/(2k)\n\n# Letting k = 1 gives a primitive solution\n\n# If m is even, then n must be odd, but\n# if m is odd, n may be odd as well and\n# (n, m) == 1 can still be satisfied\n\n# However, consider a primitive triple\n# (a, b, c). If both a and b are even,\n# then 2 | (a, b), which contradicts\n# primitive. If both a and b are odd,\n# then c**2 == a**2 + b**2 == 2 mod 4,\n# which is impossible. Hence WLOG\n# a is odd and b is even.\n\n# Applying this to (m, n), the triple\n# (m**2 - n**2, 2*m*n, m**2 + n**2)\n# we have 2*m*n even, hence b = 2*m*n\n# and we need m**2 - n**2 = a to be\n# odd, else the triple is not primitive.\n# Hence m and n need opposite parity.\n\nfrom fractions import gcd\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n MAX_n = 1500000\n number_solutions = {}\n\n max_m = int(sqrt(0.5 * MAX_n))\n for m in xrange(2, max_m + 1):\n # m, n need opposite parity\n n_parity = 0 if m % 2 else 1\n for n in xrange(n_parity, m, 2):\n if gcd(m, n) == 1:\n primitive = 2 * m * (m + n)\n # m is fixed, so as n\n # increases, primitive will\n # and we break the inner loop when\n # it exceeds the MAX_n\n if primitive > MAX_n:\n break\n # Once we have the perimeter of\n # the primitive triangle, we also\n # have 2*perimiter, 3*perimeter, 4*perimeter, etc.\n for perimeter in range(primitive, MAX_n + 1, primitive):\n # if perimeter is not in number_solutions, sets to 1\n # (default 0 returned by get)\n number_solutions[perimeter] = number_solutions.get(\n perimeter, 0) + 1\n\n return len([val for val in number_solutions.values() if val == 1])\n\nif __name__ == '__main__':\n print euler_timer(75)(main)(verbose=True)\n", "id": "10813287", "language": "Python", "matching_score": 1.5546743869781494, "max_stars_count": 7, "path": "python/complete/no075.py" }, { "content": "#!/usr/bin/env python\n\n# For p, we seek k, m, n > 0 such that n < m and 2*k*m*(m + n) = p\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_factors\n\n\ndef all_triples(p, factors_hash=None):\n if factors_hash is None:\n factors_hash = {}\n\n if p % 2 == 1 or p < 2 or not isinstance(p, int):\n return []\n\n if p / 2 in factors_hash:\n choices_k = factors_hash[p / 2]\n else:\n choices_k = all_factors(p / 2, factors_hash)[p / 2]\n\n result = []\n for k in choices_k:\n if p / (2 * k) in factors_hash:\n choices_m = factors_hash[p / (2 * k)]\n else:\n choices_m = all_factors(p / (2 * k), factors_hash)[p / (2 * k)]\n\n # 2*k*m*(m + n) = p\n for m in choices_m:\n n = p / (2 * k * m) - m\n if n > 0 and m > n:\n result.append((k, m, n))\n\n return result\n\n\ndef convert_to_triangle(triple):\n k, m, n = triple\n a = k * (m ** 2 - n ** 2)\n b = k * (2 * m * n)\n c = k * (m ** 2 + n ** 2)\n return tuple(sorted((a, b, c)))\n\n\ndef all_triangles(p, factors_hash=None):\n triples = all_triples(p, factors_hash)\n return 
list(set(convert_to_triangle(triple) for triple in triples))\n\n\ndef all_triangles_up_to_n(n):\n factors_hash = all_factors(n)\n result = {}\n for p in range(2, n + 1, 2):\n result[p] = all_triangles(p, factors_hash)\n return result\n\n\ndef main(verbose=False):\n all_tri = all_triangles_up_to_n(1000)\n lengths = {}\n max_val = -1\n max_keys = []\n for key, value in all_tri.iteritems():\n curr_length = len(value)\n if curr_length > max_val:\n max_keys = [key]\n max_val = curr_length\n elif curr_length == max_val:\n max_keys.append(key)\n\n if len(max_keys) != 1:\n raise(\"Keys are not unique\")\n\n return max_keys[0]\n\nif __name__ == '__main__':\n print euler_timer(39)(main)(verbose=True)\n", "id": "12404344", "language": "Python", "matching_score": 1.446420431137085, "max_stars_count": 7, "path": "python/complete/no039.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef translate(message, key):\n len_key = len(key)\n result = message[:]\n\n for i in range(len_key):\n for j in range(i, len(result), len_key):\n result[j] = result[j] ^ key[i]\n\n result = ''.join(chr(val) for val in result)\n return result\n\n\ndef main(verbose=False):\n message = get_data(59).split(',')\n\n message = [int(char) for char in message]\n\n possible_keys = []\n for ascii1 in range(97, 123):\n for ascii2 in range(97, 123):\n for ascii3 in range(97, 123):\n possible_keys.append([ascii1, ascii2, ascii3])\n\n for key in possible_keys:\n curr = translate(message, key)\n if (curr.upper().find('THE') != -1\n and curr.upper().find('IS') != -1\n and curr.upper().find('AND') != -1\n and curr.upper().find('OF') != -1\n and curr.upper().find('ARE') != -1):\n break\n\n key_as_word = ''.join(chr(val) for val in key)\n result = '\\n\\nActual Message:\\n%s\\n\\nThe key is: %s or %s.' % (\n curr, key_as_word, key)\n\n if verbose:\n return '%s%s' % (sum(ord(letter) for letter in curr), result)\n else:\n return sum(ord(letter) for letter in curr)\n\nif __name__ == '__main__':\n print euler_timer(59)(main)(verbose=True)\n", "id": "6749732", "language": "Python", "matching_score": 1.226428508758545, "max_stars_count": 7, "path": "python/complete/no059.py" }, { "content": "#!/usr/bin/env python\n\n# By converting each letter in a word to a number corresponding to\n# its alphabetical position and adding these values we form a word\n# value. For example, the word value for SKY is\n# 19 + 11 + 25 = 55 = t_(10). 
If the word value is a triangle number\n# then we shall call the word a triangle word.\n\n# Using words.txt (right click and 'Save Link/Target As...'), a 16K\n# text file containing nearly two-thousand common English words,\n# how many are triangle words?\n\n# I've renamed words.txt as no042.txt\n\nimport string\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\nfrom python.functions import reverse_polygonal_number\n\n\ndef word_to_value(word):\n letters = string.uppercase\n return sum(letters.find(letter) + 1 for letter in word)\n\n\ndef num_triangle():\n # Assumes file is \"A\",\"ABILITIY\",\"ABLE\",...\n words = get_data(42).strip('\"').split('\",\"')\n vals = [word_to_value(word) for word in words]\n triangle_hash = {}\n count = 0\n for val in vals:\n if reverse_polygonal_number(3, val, triangle_hash) != -1:\n count += 1\n return count\n\n\ndef main(verbose=False):\n return num_triangle()\n\nif __name__ == '__main__':\n print euler_timer(42)(main)(verbose=True)\n", "id": "11995741", "language": "Python", "matching_score": 1.2800109386444092, "max_stars_count": 7, "path": "python/complete/no042.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef same_signature(n, word):\n digits = [dig for dig in str(n)]\n dig_set = set(digits)\n letter_set = set(word)\n if len(dig_set) != len(letter_set):\n return (False, None)\n\n first_found = [word.find(letter) for letter in letter_set]\n translated = word\n for index in first_found:\n translated = translated.replace(word[index], digits[index])\n\n if translated == str(n):\n return (True, [(word[index], digits[index]) for index\n in first_found])\n else:\n return (False, None)\n\n\ndef strings_by_length(data):\n result = {}\n for val in data:\n length = len(str(val))\n # sets value to [] if not set, returns value at key\n result.setdefault(length, []).append(val)\n return result\n\n\ndef main(verbose=False):\n data = get_data(98)[1:-1].split('\",\"')\n words = strings_by_length(data)\n\n max_len = int(sqrt(10) ** max(words))\n squares = strings_by_length(\n [i ** 2 for i in range(1, int(sqrt(10) ** max(words)))])\n\n max_val = 0\n for word_length in sorted(words.keys())[::-1]:\n total_words = len(words[word_length])\n for first in range(total_words - 1):\n for second in range(first + 1, total_words):\n first_word = words[word_length][first]\n second_word = words[word_length][second]\n # check anagrams\n if sorted(first_word) == sorted(second_word):\n for square in squares[word_length]:\n val, translation = same_signature(square, first_word)\n if val:\n translated = second_word\n for letter, digit in translation:\n translated = translated.replace(letter, digit)\n new_number = int(translated)\n if new_number in squares[word_length]:\n to_add = max(square, new_number)\n if to_add > max_val:\n max_val = to_add\n if max_val > 0:\n return max_val\n raise Exception(\"Program failed to find solution\")\n\nif __name__ == '__main__':\n print euler_timer(98)(main)(verbose=True)\n", "id": "12131591", "language": "Python", "matching_score": 1.6621519327163696, "max_stars_count": 7, "path": "python/complete/no098.py" }, { "content": "#!/usr/bin/env python\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_permutations\nfrom python.functions import all_subsets\n\n\n# a D b D c D d, for D in {+,-,*,/}\n# = {operator.add, operator.sub, operator.mul, operator.div}\n\n# ALL 
POSSIBLE PARENTHESIS PLACEMENTS\n# (a b) c d\n# a b (c d)\n# (a b) (c d)\n# (a b c) d\n# ((a b) c) d\n# (a (b c)) d\n# a (b c d)\n# a ((b c) d)\n# a (b (c d))\n\ndef do_operations_no_paren(operators, numbers):\n if len(operators) + 1 != len(numbers):\n raise Exception(\"MISDEED\")\n\n if len(numbers) == 1:\n return numbers[0]\n\n for i, op in enumerate(operators):\n if op in [operator.mul, operator.div]:\n new_number = op(numbers[i], numbers[i + 1])\n new_numbers = numbers[:i] + [new_number] + numbers[i + 2:]\n new_operators = operators[:i] + operators[i + 1:]\n return do_operations_no_paren(new_operators, new_numbers)\n\n # no mul or div found\n new_number = op(numbers[0], numbers[1])\n new_numbers = [new_number] + numbers[2:]\n new_operators = operators[1:]\n return do_operations_no_paren(new_operators, new_numbers)\n\n\ndef results(signs, numbers):\n # parentheses first\n # multiply or divide before add or subtract\n # left to right after all\n a, b, c, d = numbers\n s1, s2, s3 = signs\n\n result = []\n try:\n val = do_operations_no_paren([s2, s3], [s1(a, b), c, d])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s1, s2], [a, b, s3(c, d)])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s2], [s1(a, b), s3(c, d)])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren(\n [s3],\n [do_operations_no_paren([s1, s2], [a, b, c]), d])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s3], [s2(s1(a, b), c), d])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s3], [s1(a, s2(b, c)), d])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren(\n [s1],\n [a, do_operations_no_paren([s2, s3], [b, c, d])])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s1], [a, s3(s2(b, c), d)])\n result.append(val)\n except ZeroDivisionError:\n pass\n try:\n val = do_operations_no_paren([s1], [a, s2(b, s3(c, d))])\n result.append(val)\n except ZeroDivisionError:\n pass\n\n return [int(n) for n in result if int(n) == n]\n\n\ndef most_consecutive(dig_cands, sign_cands):\n all_encountered = []\n for perm in all_permutations(dig_cands):\n for sign_set in sign_cands:\n for number in results(sign_set, perm):\n if number > 0 and number not in all_encountered:\n all_encountered.append(number)\n biggest = 1\n while biggest + 1 in all_encountered:\n biggest = biggest + 1\n return biggest\n\n\ndef main(verbose=False):\n SIGNS = [operator.add, operator.sub, operator.mul, operator.div]\n SIGN_CANDS = []\n for sign1 in SIGNS:\n for sign2 in SIGNS:\n for sign3 in SIGNS:\n SIGN_CANDS.append([sign1, sign2, sign3])\n special_range = [n * 1.0 for n in range(1, 10)]\n DIG_CANDS = all_subsets(special_range, 4)\n\n max_tuple = None\n max_val = 0\n for dig_cand in DIG_CANDS:\n length = most_consecutive(dig_cand, SIGN_CANDS)\n if length > max_val:\n max_val = length\n max_tuple = dig_cand\n return ''.join(str(int(n)) for n in max_tuple)\n\nif __name__ == '__main__':\n print euler_timer(93)(main)(verbose=True)\n", "id": "11997087", "language": "Python", "matching_score": 1.0535227060317993, "max_stars_count": 7, "path": "python/complete/no093.py" }, { "content": "#!/usr/bin/env python\n\n# A particular school offers cash rewards to children with good attendance\n# and punctuality. 
If they are absent for three consecutive days or late\n# on more than one occasion then they forfeit their prize.\n\n# During an n-day period a trinary string is formed for each child\n# consisting of L's (late), O's (on time), and A's (absent).\n\n# Although there are eighty-one trinary strings for a 4-day period\n# that can be formed, exactly forty-three strings would lead to a prize:\n\n# How many \"prize\" strings exist over a 30-day period?\n\n#######################################\n# Let T{n} be the total number of such days\n# We split this into several sequences\n# O_N{n} - most recent day is on time, no L encountered\n# O_L{n} - most recent day is on time, L encountered\n# L_L{n} - most recent day is late, L encountered\n# A_N{n} - most recent day is absent, day prior was not, no L encountered\n# A_L{n} - most recent day is absent, day prior was not, L encountered\n# T_N{n} - most recent two days are absent, day prior was not, no L encountered\n# T_L{n} - most recent two days are absent, day prior was not, L encountered\n\nfrom python.decorators import euler_timer\n\n\ndef prize_strings(n):\n index = 1\n O_N = 1\n O_L = 0\n L_L = 1\n A_N = 1\n A_L = 0\n T_N = 0\n T_L = 0\n while index < n:\n # after O, next can be anything:\n # O_L-->O_L,A_L; O_N-->O_N,A_N,L_L\n # after one A, next can be anything\n # A_L-->T_L,O_L; A_N-->O_N,T_N,L_L\n # after two A's, next can not be A\n # T_L-->O_L; T_N-->O_N,L_L\n # after L is encountered, no more L\n # L_L-->O_L,A_L\n index += 1\n O_N, O_L, L_L, A_N, A_L, T_N, T_L = (O_N + A_N + T_N,\n O_L + A_L + T_L + L_L,\n O_N + A_N + T_N,\n O_N,\n O_L + L_L,\n A_N,\n A_L)\n return O_N + O_L + L_L + A_N + A_L + T_N + T_L\n\n\ndef main(verbose=False):\n return prize_strings(30)\n\nif __name__ == '__main__':\n print euler_timer(191)(main)(verbose=True)\n", "id": "10076446", "language": "Python", "matching_score": 1.1119890213012695, "max_stars_count": 7, "path": "python/complete/no191.py" }, { "content": "#!/usr/bin/env python\n\n# Find the greatest product of five consecutive\n# digits in the 1000-digit number.\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef product_consec_digits(number, consecutive):\n \"\"\"\n Returns the largest product of \"consecutive\"\n consecutive digits from number\n \"\"\"\n digits = [int(dig) for dig in str(number)]\n max_start = len(digits) - consecutive\n return [reduce(operator.mul, digits[i:i + consecutive])\n for i in range(max_start + 1)]\n\n\ndef main(verbose=False):\n n = int(\"\".join(line.strip() for line in get_data(8).split(\"\\n\")))\n\n return max(product_consec_digits(n, 5))\n\nif __name__ == '__main__':\n print euler_timer(8)(main)(verbose=True)\n", "id": "109293", "language": "Python", "matching_score": 1.653878092765808, "max_stars_count": 7, "path": "python/complete/no008.py" }, { "content": "#!/usr/bin/env python\n\n# Work out the first ten digits of the sum of the following\n# one-hundred 50-digit numbers. 
(In data)\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef main(verbose=False):\n number = get_data(13)\n total = sum(int(line) for line in number.split(\"\\n\") if line)\n return str(total)[:10]\n\nif __name__ == '__main__':\n print euler_timer(13)(main)(verbose=True)\n", "id": "748000", "language": "Python", "matching_score": 0.6345264315605164, "max_stars_count": 7, "path": "python/complete/no013.py" }, { "content": "#!/usr/bin/env python\n\n# no089.txt contains one thousand numbers written in valid,\n# but not necessarily minimal, Roman numerals; that is, they\n# are arranged in descending units and obey the subtractive\n# pair rule (see FAQ for the definitive rules for this problem).\n\n# Find the number of characters saved by writing each of these\n# in their minimal form.\n\n# I < V,X\n# X < L,C\n# C < D,M\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\nVALUES = {'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000}\n\n\ndef pos_neg(val, next):\n if val not in ('I', 'X', 'C') or not next:\n return 1\n\n if val == 'I' and next in ('V', 'X'):\n return -1\n elif val == 'X' and next in ('L', 'C'):\n return -1\n elif val == 'C' and next in ('D', 'M'):\n return -1\n return 1\n\n\ndef actual(numeral):\n pairs = ([(numeral[ind], numeral[ind + 1]) for\n ind in range(len(numeral) - 1)] +\n [(numeral[-1], None)])\n\n return sum(VALUES[pair[0]] * pos_neg(*pair) for pair in pairs)\n\n\ndef to_roman(n):\n ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)\n nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L',\n 'XL', 'X', 'IX', 'V', 'IV', 'I')\n result = \"\"\n for i in range(len(ints)):\n count = int(n / ints[i])\n result += nums[i] * count\n n -= ints[i] * count\n return result\n\n\ndef main(verbose=False):\n data = [num for num in get_data(89).split(\"\\n\") if num]\n original_digits = len(\"\".join(data))\n best = [to_roman(actual(numeral)) for numeral in data]\n return original_digits - len(\"\".join(best))\n\nif __name__ == '__main__':\n print euler_timer(89)(main)(verbose=True)\n", "id": "8509896", "language": "Python", "matching_score": 1.7225655317306519, "max_stars_count": 7, "path": "python/complete/no089.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all the numbers that can be written\n# as the sum of fifth powers of their digits.\n\n# n = d-digits, sum(n) <= d*9^5 = 59049d, n >= 10^(d-1),\n# so sum(n) = n implies 10*(9**5 d) >= 10**d,\n# ln(10 * 9**5) + ln(d) >= d ln(10), so d <= 6\n\nfrom python.decorators import euler_timer\n\n\ndef sum_of_digits_powers(n, power):\n return sum(int(dig) ** power for dig in str(n))\n\n\ndef main(verbose=False):\n valid = [i for i in xrange(2, 999999 + 1)\n if sum_of_digits_powers(i, 5) == i]\n\n if verbose:\n return '%s.\\nThe numbers satisfying this property are: %s.' 
% (\n sum(valid), ', '.join(str(num) for num in valid))\n else:\n return sum(valid)\n\nif __name__ == '__main__':\n print euler_timer(30)(main)(verbose=True)\n", "id": "9931024", "language": "Python", "matching_score": 1.1492751836776733, "max_stars_count": 7, "path": "python/complete/no030.py" }, { "content": "#!/usr/bin/env python\n\n# An irrational decimal fraction is created by concatenating\n# the positive integers:\n# 0.123456789101112131415161718192021...\n\n# It can be seen that the 12th digit of the fractional part is 1.\n\n# If d_n represents the nth digit of the fractional part, find the\n# value of the following expression.\n# d_1 X d_10 X d_100 X d_1000 X d_10000 X d_100000 X d_1000000\n\nimport operator\n\nfrom python.decorators import euler_timer\n\n\ndef num_digs_with_up_to_d_digits(d):\n # The smallest number with d + 1 digits is 10**d\n # S = sum_(i = 1)^d i*9*10**(i - 1) = 0.9 * sum_(i = 1)^d i 10**i\n # 10S - S = 0.9 sum_(i = 1)^d i 10**(i + 1) - 0.9 sum_(i = 1)^d i 10**i\n # 10S = sum_(i = 2)^(d + 1) (i - 1) 10**i - sum_(i = 1)^d i 10**i\n # 10S = d*10**(d + 1) - sum_(i = 1)^d 10**i\n # 90S = 9*d*10**(d + 1) - 10**(d + 1) + 10\n # 9S = (9*d - 1)*10**d + 1\n return ((9 * d - 1) * 10 ** d + 1) / 9\n\n\ndef nth_digit_of_frac_part(n):\n num_digits = 1\n while num_digs_with_up_to_d_digits(num_digits) < n:\n num_digits += 1\n\n # We know the nth digit occurs in the block of integers with num_digits\n # digits. We want to determine which digit in the block it is\n place_in_digits = n - num_digs_with_up_to_d_digits(num_digits - 1)\n digit_place_in_number = (place_in_digits - 1) % num_digits + 1\n # intended to be integer division\n numbers_prior = (place_in_digits - 1) / num_digits\n\n # Since there are numbers_prior numbers of num_digits digits prior to\n # the number we are interested in, we need to calculate which number it is\n # The smallest number with num_digits digits is 10**(num_digits - 1)\n num_of_interest = str(10 ** (num_digits - 1) + numbers_prior)\n return int(num_of_interest[digit_place_in_number - 1])\n\n\ndef main(verbose=False):\n # d_1 X d_10 X d_100 X d_1000 X d_10000 X d_100000 X d_1000000\n result = [nth_digit_of_frac_part(10 ** exponent) for exponent in range(7)]\n digit_display = ['d_%s = %s' % (10 ** i, digit)\n for i, digit in enumerate(result)]\n if verbose:\n return '%s.\\nThe digits are as follows: %s' % (\n reduce(operator.mul, result), ', '.join(digit_display))\n else:\n return reduce(operator.mul, result)\n\nif __name__ == '__main__':\n print euler_timer(40)(main)(verbose=True)\n", "id": "10647499", "language": "Python", "matching_score": 1.4299291372299194, "max_stars_count": 7, "path": "python/complete/no040.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all numbers which are equal to the sum\n# of the factorial of their digits.\n\n# We know if n has d digits, then sum(n) <= d*9! = 362880d and n >= 10*(d-1)\n# hence sum(n) = n implies 3628800d >= 10^d. 
We must have d <= 7.\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\nfrom python.functions import ascending\n\n\ndef main(verbose=False):\n result = []\n # We have at most 7 digits, so we consider all ascending\n # lists of digits with a digit sum between 1 and 63.\n # Since ascending requires the first element of the digit lists\n # is *equal to* the minimum, we allow an 8th digit which is\n # simply a padded zero\n for digit_sum in range(1, 63 + 1):\n for choice in ascending(8, digit_sum, 0, 9):\n choice = choice[1:]\n non_zero = [digit for digit in choice if digit != 0]\n factorial_sum = sum(factorial(digit) for digit in non_zero)\n possible_zeros = 7 - len(non_zero)\n\n # Can fill out the number with zeros (up to 7 digits)\n for zeros_add in range(possible_zeros + 1):\n factorial_digits = [int(digit) for digit in str(factorial_sum)]\n if (sorted(factorial_digits) ==\n sorted(non_zero + [0] * zeros_add)):\n result.append(factorial_sum)\n\n factorial_sum += 1 # Add factorial(0)\n\n result = [val for val in result if val not in [1, 2]]\n\n if verbose:\n return \"%s.\\nThe full list of numbers is as follows: %s.\" % (\n sum(result), \", \".join(str(number) for number in result))\n else:\n return sum(result)\n\nif __name__ == '__main__':\n print euler_timer(34)(main)(verbose=True)\n", "id": "5580731", "language": "Python", "matching_score": 1.5690516233444214, "max_stars_count": 7, "path": "python/complete/no034.py" }, { "content": "#!/usr/bin/env python\n\n# a_n ... a_2 a_1\n# Gives s_1 s_2 ... s_2 s_1\n# where s_1 = a_n + a_1, s_k = a_k + a_{n + 1 - k}\n# We have a choice s_k even, odd and s_k > 10, < 10\n# With these choices, we can solve the entire problem\n\n# For n < 10**9, we need to consider digits 1,...,9\n# We use the fact that s_k = s_{n + 1 - k}\n\n# We represent each sum by the possibility that\n# it is >9 or <10 and even or odd\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_subsets\n\n\ndef all_choices(n):\n result = []\n number_digit_sums = (n + 1) / 2\n\n candidates = [('>=10', 1), ('>=10', 0), ('<10', 1), ('<10', 0)]\n for subset in all_subsets(candidates, number_digit_sums, unique=False):\n if n % 2 == 1:\n even_index = (n + 1) / 2\n signature = subset[even_index - 1]\n # parity\n if signature[1] == 1:\n continue\n\n to_add = {}\n for k in range(1, n + 1):\n digit_sum_index = min(k, n + 1 - k)\n to_add[k] = subset[digit_sum_index - 1]\n result.append(to_add)\n\n return result\n\n\ndef valid_choice(choice, n):\n # all indices except final\n if choice[1][1] == 0:\n return False\n\n for k in range(2, n + 1):\n previous = choice[k - 1]\n curr = choice[k]\n\n parity = curr[1]\n if previous[0] == '>=10':\n parity = (parity + 1) % 2\n\n if parity == 0:\n return False\n\n return True\n\n\ndef choice_to_count(choice, n):\n number_digit_sums = (n + 1) / 2\n\n result = 1\n for sum_index in range(1, number_digit_sums + 1):\n signature = choice[sum_index]\n if signature == ('<10', 0):\n if sum_index == 1:\n # 0 not allowed as a lead digit\n result *= 16\n elif n % 2 == 1 and sum_index == number_digit_sums:\n # if the final digit sum, it is 2*a_k\n result *= 5\n else:\n result *= 25\n elif signature == ('<10', 1):\n if sum_index == 1:\n # 0 not allowed as a lead digit\n result *= 20\n else:\n result *= 30\n elif signature == ('>=10', 0):\n if n % 2 == 1 and sum_index == number_digit_sums:\n # if the final digit sum, it is 2*a_k\n result *= 4\n else:\n result *= 25\n elif signature == ('>=10', 1):\n result *= 20\n else:\n raise 
Exception(\"Signature not recognized\")\n\n return result\n\n\ndef main(verbose=False):\n running_sum = 0\n for digits in range(1, 9 + 1):\n for choice in all_choices(digits):\n if valid_choice(choice, digits):\n running_sum += choice_to_count(choice, digits)\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(145)(main)(verbose=True)\n", "id": "5446465", "language": "Python", "matching_score": 1.41804838180542, "max_stars_count": 7, "path": "python/complete/no145.py" }, { "content": "#!/usr/bin/env python\n\n# Increasing number:\n# For up to n digits, each number can be\n# a_1...a_n\n# d_1 = a_1, d_2 = a_2 - a_1, etc. where\n# d_1 + d_2 + ... + d_j = a_j <= 9\n# We need to allow the sum to be less than 9,\n# so we have some other B such that\n# d_1 + d_2 + ... + d_n + B = 9, then\n# we have (n + 1) bins for 9 objects. To\n# organize n dividers and 9 objects, we need\n# a total of (n + 9 C 9) increasing sequences\n# We must subtract 1 since 0,0,...,0 is allowed here\n\n# Decreasing number:\n# We consider each length separately, for a\n# number of length L, we have\n# a_L...a_1\n# similarly we have d_1 = a_1, d_2 = a_2 - a_1, etc.\n# And there are (L + 9 C 9) - 1 that are valid\n# (the all zero again does not produce a number)\n# Using the identity (k C k) + ... (n C k) = ((n + 1) C (k + 1))\n# and the fact that (a C k) = 0 for a < k, summing\n# over L = 1 to n, we have\n# (n + 10 C 10) - n that are valid\n\n# For each number of digits, the number k*(11...1) for\n# 1 <= k <= 9 is both incr. and decr., so we have\n# overcounted 9*n numbers\n\n# Also, the length 1 case a_1 = 0 is overcounted\n\n# All together we have\n# (n + 10 C 10) - n + [(n + 9 C 9) - 1] - 9*n - 1\n\nfrom python.decorators import euler_timer\nfrom python.functions import choose\n\n\ndef main(verbose=False):\n n = 100\n return choose(n + 10, 10) + choose(n + 9, 9) - 10 * n - 2\n\nif __name__ == '__main__':\n print euler_timer(113)(main)(verbose=True)\n", "id": "6215304", "language": "Python", "matching_score": 1.4717657566070557, "max_stars_count": 7, "path": "python/complete/no113.py" }, { "content": "#!/usr/bin/env python\n\n# Let X = 2**t, then X**2 = 4**t = X + k\n# 4X**2 - 4X + 1 = 4k + 1\n# 4*k + 1 must be a perfect square\n# k = n(n + 1) for some n > 0 (since k > 0)\n\n# t is an integer if X is a power of 2\n# but X = (1 + sqrt(4k + 1))/2\n# but X = (1 + 2*n + 1)/2 = n + 1\n# So we need n + 1 = 2**p for a power of p\n# in order for the count to change\n\n# When n = 2**L - 1, we have L total perfect\n# <= n, hence P(n) = L/(2**L - 1)\n# We seek the first such (n, L) with P(n) < 1/12345\n# i.e. 12345*L + 1 < 2**L, so the ratio makes\n# its first switch over between 2**(L - 1) - 1\n# and 2**L - 1. 
So we set the initial value\n# to L - 1 and loop from 2**(L - 1) to 2**L - 1\n# to find where the threshold breaks\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n L = 1\n while 12345 * L + 1 >= 2 ** L:\n L += 1\n\n count = L - 1\n for n in range(2 ** (L - 1), 2 ** L):\n if 12345 * count < n:\n return n * (n + 1)\n raise Exception(\"Program failed to find solution\")\n\nif __name__ == '__main__':\n print euler_timer(207)(main)(verbose=True)\n", "id": "9531684", "language": "Python", "matching_score": 0.8941459059715271, "max_stars_count": 7, "path": "python/complete/no207.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport doctest\nimport pathlib\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODE_ADD = 1\nOPCODE_MULTIPLY = 2\nOPCODE_HALT = 99\nEXPECTED_OUTPUT = 19690720\n\n\ndef validate_index(value, length):\n if value != int(value):\n raise ValueError(\"Non-integer encountered\", value)\n\n if value < 0 or value >= length:\n raise ValueError(\"Index outside of range\", value, length)\n\n\ndef binary_operation_info(instruction, running_program):\n if len(instruction) != 4:\n raise ValueError(\"Invalid program\", running_program)\n\n input_index1 = instruction[1]\n input_index2 = instruction[2]\n output_index = instruction[3]\n\n length = len(running_program)\n validate_index(input_index1, length)\n validate_index(input_index2, length)\n validate_index(output_index, length)\n\n return (\n running_program[input_index1],\n running_program[input_index2],\n output_index,\n )\n\n\ndef run_intcode(program):\n \"\"\"Run a program as a series of quartet instructions.\n\n The first value in the quartet is the opcode (ADD, MULTIPLY or HALT),\n the second and third values are the input locations and the fourth value\n is the output location.\n\n >>> run_intcode([1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50])\n [3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50]\n >>> run_intcode([1, 0, 0, 0, 99])\n [2, 0, 0, 0, 99]\n >>> run_intcode([2, 3, 0, 3, 99])\n [2, 3, 0, 6, 99]\n >>> run_intcode([2, 4, 4, 5, 99, 0])\n [2, 4, 4, 5, 99, 9801]\n >>> run_intcode([1, 1, 1, 4, 99, 5, 6, 0, 99])\n [30, 1, 1, 4, 2, 5, 6, 0, 99]\n \"\"\"\n running_program = copy.deepcopy(program)\n length = len(program)\n\n for start in range(0, length, 4):\n instruction = running_program[start : start + 4]\n opcode = instruction[0]\n if opcode == OPCODE_HALT:\n return running_program\n\n input_value1, input_value2, output_index = binary_operation_info(\n instruction, running_program\n )\n if opcode == OPCODE_ADD:\n running_program[output_index] = input_value1 + input_value2\n elif opcode == OPCODE_MULTIPLY:\n running_program[output_index] = input_value1 * input_value2\n else:\n raise RuntimeError(\"Invalid program\", program)\n\n return running_program\n\n\ndef run_parameterized_program(program, noun, verb):\n # NOTE: This modifies `program` but probably doesn't need to.\n program[1] = noun\n program[2] = verb\n program_output = run_intcode(program)\n return 
program_output[0]\n\n\ndef inputs_search(program, expected_output):\n for noun in range(100):\n for verb in range(100):\n output = run_parameterized_program(program, noun, verb)\n if output == expected_output:\n return noun, verb\n\n raise RuntimeError(\"No match found\")\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = [int(value) for value in content.split(\",\")]\n output1202 = run_parameterized_program(program, 12, 2)\n print(f\"Program output at position 0: {output1202}\")\n\n noun, verb = inputs_search(program, EXPECTED_OUTPUT)\n print(f\"{noun:02}{verb:02} produces {EXPECTED_OUTPUT}\")\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n main()\n", "id": "6084071", "language": "Python", "matching_score": 1.1061269044876099, "max_stars_count": 0, "path": "day02/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport copy\nimport pathlib\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nMAX_ITERATIONS = 10000\nFUEL = \"FUEL\"\nORE = \"ORE\"\nDEBUG = False\nTRILLION = 1000000000000\nEXAMPLE_CONTENT1 = \"\"\"\\\n10 ORE => 10 A\n1 ORE => 1 B\n7 A, 1 B => 1 C\n7 A, 1 C => 1 D\n7 A, 1 D => 1 E\n7 A, 1 E => 1 FUEL\"\"\"\nEXAMPLE_CONTENT2 = \"\"\"\\\n9 ORE => 2 A\n8 ORE => 3 B\n7 ORE => 5 C\n3 A, 4 B => 1 AB\n5 B, 7 C => 1 BC\n4 C, 1 A => 1 CA\n2 AB, 3 BC, 4 CA => 1 FUEL\"\"\"\nEXAMPLE_CONTENT3 = \"\"\"\\\n157 ORE => 5 NZVS\n165 ORE => 6 DCFZ\n44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL\n12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ\n179 ORE => 7 PSHF\n177 ORE => 5 HKGWZ\n7 DCFZ, 7 PSHF => 2 XJWVT\n165 ORE => 2 GPVTF\n3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT\"\"\"\nEXAMPLE_CONTENT4 = \"\"\"\\\n2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG\n17 NVRVD, 3 JNWZP => 8 VPVL\n53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL\n22 VJHF, 37 MNCFX => 5 FWMGM\n139 ORE => 4 NVRVD\n144 ORE => 7 JNWZP\n5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC\n5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV\n145 ORE => 6 MNCFX\n1 NVRVD => 8 CXFTF\n1 VJHF, 6 MNCFX => 4 RFSQX\n176 ORE => 6 VJHF\"\"\"\nEXAMPLE_CONTENT5 = \"\"\"\\\n171 ORE => 8 CNZTR\n7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL\n114 ORE => 4 BHXH\n14 VRPVC => 6 BMBT\n6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL\n6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT\n15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW\n13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW\n5 BMBT => 4 WPTQ\n189 ORE => 9 KTJDG\n1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP\n12 VRPVC, 27 CNZTR => 2 XDBXC\n15 KTJDG, 12 BHXH => 5 XCVML\n3 BHXH, 2 VRPVC => 7 MZWV\n121 ORE => 7 VRPVC\n7 XCVML => 6 RJRHP\n5 BHXH, 4 VRPVC => 5 LTCX\"\"\"\n\n\ndef debug(value, **kwargs):\n if not DEBUG:\n return\n print(value, **kwargs)\n\n\ndef parse_reagent(reagent):\n count, compound = reagent.split(\" \")\n return int(count), compound\n\n\ndef parse_line(line):\n inputs_str, result = 
line.split(\" => \")\n inputs = tuple(\n parse_reagent(command) for command in inputs_str.split(\", \")\n )\n return inputs, parse_reagent(result)\n\n\ndef unparse_reagent(pair):\n count, compound = pair\n return f\"{count} {compound}\"\n\n\ndef unparse_reaction(inputs, output):\n output_str = unparse_reagent(output)\n input_str = \", \".join(unparse_reagent(pair) for pair in inputs)\n return f\"{output_str} <= {input_str}\"\n\n\ndef update_compounds(all_compounds, extra, reactions_by_output):\n all_compounds_next = collections.defaultdict(int)\n extra_next = copy.deepcopy(extra)\n for compound, count in all_compounds.items():\n if compound == ORE:\n all_compounds_next[compound] += count\n continue\n\n inputs, output = reactions_by_output[compound]\n debug(\n f\"{unparse_reaction(inputs, output)} to satisfy \"\n f\"({count} {compound})\"\n )\n output_count, output_compound = output\n assert output_compound == compound\n reaction_multiple, remainder = divmod(count, output_count)\n if remainder != 0:\n reaction_multiple += 1\n leftover = output_count - remainder\n assert reaction_multiple * output_count - leftover == count\n extra_next[output_compound] += leftover\n debug(f\"++: extra_next : {dict(extra_next)}\")\n\n for input_count, input_compound in inputs:\n all_compounds_next[input_compound] += (\n input_count * reaction_multiple\n )\n debug(f\"++: all_compounds_next: {dict(all_compounds_next)}\")\n\n # Convert to list so we can modify `extra_next` in the loop.\n extra_pairs = list(extra_next.items())\n for compound, count in extra_pairs:\n if compound not in all_compounds_next:\n continue\n to_remove = min(all_compounds_next[compound], count)\n all_compounds_next[compound] -= to_remove\n if all_compounds_next[compound] == 0:\n all_compounds_next.pop(compound)\n extra_next[compound] -= to_remove\n debug(f\"--: all_compounds_next: {dict(all_compounds_next)}\")\n debug(f\"--: extra_next : {dict(extra_next)}\")\n\n return all_compounds_next, extra_next\n\n\ndef just_ore(all_compounds):\n if len(all_compounds) != 1:\n return False\n\n return ORE in all_compounds\n\n\ndef determine_ore(reactions_by_output, fuel):\n all_compounds = collections.defaultdict(int)\n all_compounds[FUEL] = fuel\n extra = collections.defaultdict(int)\n debug(\"================\")\n debug(f\"-1: all_compounds : {dict(all_compounds)}\")\n debug(f\"-1: extra : {dict(extra)}\")\n for i in range(MAX_ITERATIONS):\n debug(\"================\")\n all_compounds, extra = update_compounds(\n all_compounds, extra, reactions_by_output\n )\n debug(f\"{i:02}: all_compounds : {dict(all_compounds)}\")\n debug(f\"{i:02}: extra : {dict(extra)}\")\n if just_ore(all_compounds):\n break\n\n return all_compounds[ORE]\n\n\ndef binary_search(predicate, start, end):\n assert predicate(start)\n if start == end:\n return start\n\n assert start < end\n assert not predicate(end)\n if start + 1 == end:\n return start\n\n midpoint = (start + end) // 2\n if predicate(midpoint):\n debug(f\"new_start: {midpoint} | new_end: {end}\")\n return binary_search(predicate, midpoint, end)\n\n debug(f\"new_start: {start} | new_end: {midpoint}\")\n return binary_search(predicate, start, midpoint)\n\n\nclass EnoughFuelPredicate:\n def __init__(self, reactions_by_output, total_ore):\n self.reactions_by_output = reactions_by_output\n self.total_ore = total_ore\n\n def __call__(self, fuel):\n ore_for_fuel = determine_ore(self.reactions_by_output, fuel)\n return ore_for_fuel <= self.total_ore\n\n\ndef how_much_fuel_under(reactions_by_output, total_ore):\n predicate = 
EnoughFuelPredicate(reactions_by_output, total_ore)\n min_fuel = 1\n max_fuel = 2\n # Find an upper bound on how much fuel can be produced, doubling the\n # interval at every step.\n while predicate(max_fuel):\n min_fuel = max_fuel\n max_fuel *= 2\n # Perform a binary search.\n debug(f\"min_fuel: {min_fuel} | max_fuel: {max_fuel}\")\n return binary_search(predicate, min_fuel, max_fuel)\n\n\ndef parse_content(content):\n lines = content.strip().split(\"\\n\")\n reactions_by_output = {}\n for line in lines:\n inputs, output = parse_line(line)\n _, compound = output\n if compound in reactions_by_output:\n raise KeyError(\n f\"Expected exactly one reaction to produce {compound}\"\n )\n reactions_by_output[compound] = inputs, output\n\n return reactions_by_output\n\n\ndef test():\n reactions_by_output1 = parse_content(EXAMPLE_CONTENT1)\n assert determine_ore(reactions_by_output1, 1) == 31\n\n reactions_by_output2 = parse_content(EXAMPLE_CONTENT2)\n assert determine_ore(reactions_by_output2, 1) == 165\n\n reactions_by_output3 = parse_content(EXAMPLE_CONTENT3)\n assert determine_ore(reactions_by_output3, 1) == 13312\n assert how_much_fuel_under(reactions_by_output3, TRILLION) == 82892753\n\n reactions_by_output4 = parse_content(EXAMPLE_CONTENT4)\n assert determine_ore(reactions_by_output4, 1) == 180697\n assert how_much_fuel_under(reactions_by_output4, TRILLION) == 5586022\n\n reactions_by_output5 = parse_content(EXAMPLE_CONTENT5)\n assert determine_ore(reactions_by_output5, 1) == 2210736\n assert how_much_fuel_under(reactions_by_output5, TRILLION) == 460664\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n reactions_by_output = parse_content(content)\n part1 = determine_ore(reactions_by_output, 1)\n print(f\"ORE for 1 Fuel: {part1}\")\n part2 = how_much_fuel_under(reactions_by_output, TRILLION)\n part2_ore = determine_ore(reactions_by_output, part2)\n part2_ore_over = determine_ore(reactions_by_output, part2 + 1)\n print(f\"{part2_ore:,} ORE is needed for {part2} Fuel,\")\n print(f\" {part2_ore_over:,} ORE is needed for {part2 + 1} Fuel\")\n\n\nif __name__ == \"__main__\":\n test()\n main()\n", "id": "11576945", "language": "Python", "matching_score": 1.546745777130127, "max_stars_count": 0, "path": "day14/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport doctest\nimport pathlib\n\n\nHERE = pathlib.Path(__file__).resolve().parent\n\n\ndef fuel_needed(mass):\n \"\"\"Compute fuel required for a module given the mass.\n\n Fuel required to launch a given module is based on its mass. 
Specifically,\n to find the fuel required for a module, take its mass, divide by three,\n round down, and subtract 2.\n\n For example:\n\n * For a mass of 12, divide by 3 and round down to get 4, then subtract 2\n to get 2.\n * For a mass of 14, dividing by 3 and rounding down still yields 4, so the\n fuel required is also 2.\n * For a mass of 1969, the fuel required is 654.\n * For a mass of 100756, the fuel required is 33583.\n\n >>> fuel_needed(12)\n 2\n >>> fuel_needed(14)\n 2\n >>> fuel_needed(1969)\n 654\n >>> fuel_needed(100756)\n 33583\n \"\"\"\n return max(mass // 3 - 2, 0)\n\n\ndef fuel_needed_account_for_fuel(mass):\n \"\"\"Compute fuel required for a module (and its fuel) given the mass.\n\n So, for each module mass, calculate its fuel and add it to the total.\n Then, treat the fuel amount you just calculated as the input mass and\n repeat the process, continuing until a fuel requirement is zero or\n negative. For example:\n\n * A module of mass 14 requires 2 fuel. This fuel requires no further fuel\n (2 divided by 3 and rounded down is 0, which would call for a negative\n fuel), so the total fuel required is still just 2.\n * At first, a module of mass 1969 requires 654 fuel. Then, this fuel\n requires 216 more fuel (654 / 3 - 2). 216 then requires 70 more fuel,\n which requires 21 fuel, which requires 5 fuel, which requires no further\n fuel. So, the total fuel required for a module of mass 1969 is\n 654 + 216 + 70 + 21 + 5 = 966.\n * The fuel required by a module of mass 100756 and its fuel is:\n 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.\n\n >>> fuel_needed_account_for_fuel(14)\n 2\n >>> fuel_needed_account_for_fuel(1969)\n 966\n >>> fuel_needed_account_for_fuel(100756)\n 50346\n \"\"\"\n total_fuel = 0\n last_used = fuel_needed(mass)\n while last_used > 0:\n total_fuel += last_used\n last_used = fuel_needed(last_used)\n\n return total_fuel\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n total_fuel = 0\n total_fuel_with_accounting = 0\n for line in content.strip().split(\"\\n\"):\n mass = int(line)\n total_fuel += fuel_needed(mass)\n total_fuel_with_accounting += fuel_needed_account_for_fuel(mass)\n\n print(f\"Total fuel: {total_fuel}\")\n print(f\"Total fuel with accounting: {total_fuel_with_accounting}\")\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n main()\n", "id": "7205363", "language": "Python", "matching_score": 0.6055516004562378, "max_stars_count": 0, "path": "day01/main.py" }, { "content": "# Copyright 2017 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport __builtin__\nimport _pyio\nimport errno\nimport imp\nimport os\nimport sys\n\nfrom google.appengine.ext import vendor\ntry:\n from google.appengine.tools import devappserver2\n from google.appengine.tools.devappserver2.python.runtime import stubs\nexcept ImportError:\n devappserver2 = None\n stubs = None\n\n\nBUILTIN_OPEN = __builtin__.open\nPYIO_OPEN = _pyio.open\n\n\ndef stub_replace(mod_name):\n \"\"\"Replace a module from the SDK/Rutime.\n\n Replaces it with a stub provided in ``stubs/``.\n\n Used for\n\n * ``subprocess``: Needed by concurrency primitives. We don't actually\n use these primitives in most of our libraries, but we do use the\n interface.\n * ``_multiprocessing``: Imported un-protected in ``__init__.py`` for\n ``multiprocessing`` but not provided in GAE SDK.\n * ``ctypes``: Imported by ``setuptools.windows_support`` (gets imported\n when ``setuptools`` does).\n \"\"\"\n sys.modules.pop(mod_name, None)\n file_obj, filename, details = imp.find_module(mod_name, ['stubs'])\n sys.modules[mod_name] = imp.load_module(\n mod_name, file_obj, filename, details)\n\n\ndef _open_avoid_devnull(filename, mode='r', **kwargs):\n \"\"\"Replacement for the ``open`` builtin.\n\n Helper for :func:`patch_open_for_devnull`.\n\n Works exactly the same as ``open`` unless ``filename`` is ``os.devnull``\n (e.g. ``'/dev/null'``). In that case, just opens a dummy file (the\n ``requirements.txt`` in the current directory).\n \"\"\"\n if filename == os.devnull:\n mode = 'r'\n filename = 'requirements.txt'\n\n return BUILTIN_OPEN(filename, mode, **kwargs)\n\n\ndef _io_open_avoid_devnull(filename, mode='r', **kwargs):\n \"\"\"Replacement for the ``_pyio.open`` helper.\n\n Helper for :func:`patch_open_for_devnull`.\n\n Works exactly the same as ``_pyio.open`` unless ``filename`` is\n ``os.devnull`` (e.g. ``'/dev/null'``). In that case, just opens a\n dummy file (the ``requirements.txt`` in the current directory).\n \"\"\"\n if filename == os.devnull:\n mode = 'r'\n filename = 'requirements.txt'\n\n return PYIO_OPEN(filename, mode, **kwargs)\n\n\ndef patch_open_for_devnull():\n \"\"\"Patch the ``open`` builtin to avoid opening ``os.devnull``.\n\n On **import** ``dill`` calls::\n\n f = open(os.devnull, 'rb', buffering=0)\n FileType = type(f)\n f.close()\n\n so it has a type it can re-use. 
This is a problem on GAE, where the\n file pointed to by ``os.devnull`` cannot be accessed.\n \"\"\"\n __builtin__.open = _open_avoid_devnull\n _pyio.open = _io_open_avoid_devnull\n\n\n\ndef _fake_file_init(self, filename, mode='r', buffering=-1, **kwargs):\n \"\"\"Replacement for constructor in ``FakeFile`` class.\n\n Helper for :func:`patch_dev_fake_file`.\n\n Used as a stub because ``FakeFile`` incorrectly uses ``bufsize``.\n \"\"\"\n if stubs is None:\n raise RuntimeError(\n 'Expected stubs to import successfully on dev_appserver')\n\n if mode not in stubs.FakeFile.ALLOWED_MODES:\n raise IOError(errno.EROFS, 'Read-only file system', filename)\n\n visible = stubs.FakeFile.is_file_accessible(filename)\n if visible != stubs.FakeFile.Visibility.OK:\n stubs.log_access_check_fail(filename, visible)\n raise IOError(errno.EACCES, 'file not accessible', filename)\n\n super(stubs.FakeFile, self).__init__(\n filename, mode, buffering, **kwargs)\n\n\ndef patch_dev_fake_file():\n \"\"\"Workaround for the ``devappserver`` file stub.\n\n .. _docs: https://docs.python.org/2/library/functions.html#file\n\n The ``FakeFile`` class (only present on ``dev``, not prod) incorrectly\n thinks that the third argument to file is ``bufsize``. The Python `docs`_\n note that this is ``buffering`` and ``dill`` (correctly) uses this as\n a keyword argument when determining the ``FileType``.\n\n See :func:`patch_open_for_devnull` for details on why\n ``FileType`` is used.\n \"\"\"\n if devappserver2 is None:\n # NOTE: This means we are running in production.\n return\n\n stubs.FakeFile.__init__ = _fake_file_init\n\n\n\ndef clear_imports(mod_name):\n \"\"\"Remove cached imports for a module.\n\n We may want to do this if we provide an over-ride in ``lib/`` for an\n out-of-date package that comes with the SDK (or accidentally comes\n in the environment running the ``dev_appserver``).\n \"\"\"\n for key in sys.modules.keys():\n if key.startswith(mod_name):\n del sys.modules[key]\n\n\ndef all_updates():\n vendor.add('lib')\n stub_replace('subprocess')\n stub_replace('_multiprocessing')\n stub_replace('ctypes')\n patch_open_for_devnull()\n patch_dev_fake_file()\n clear_imports('google.protobuf')\n clear_imports('pkg_resources')\n clear_imports('setuptools')\n clear_imports('six')\n\n\nall_updates()\n", "id": "4807021", "language": "Python", "matching_score": 2.838635206222534, "max_stars_count": 0, "path": "language-app/appengine_config.py" }, { "content": "# Copyright 2017 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport _multiprocessing\nimport functools\nimport imp\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\n\nimport boltons.tbutils\nimport flask\nimport google.auth\nimport google.protobuf\ntry:\n import grpc\n grpc_info = None\nexcept ImportError as exc:\n grpc = None\n grpc_info = sys.exc_info()\nimport pkg_resources\nimport setuptools\nimport six\n\nfrom google.appengine.api import app_identity\n\n\napp = flask.Flask(__name__)\n\nMAIN_HTML = \"\"\"\\\n<html>\n <ul>\n <li><a href=\"/info\">Environment Info</a></li>\n <li><a href=\"/auth-check\">Auth Info</a></li>\n <li><a href=\"/import\">Package Import Check</a></li>\n <li><a href=\"/unit-tests\">Unit Test Output</a></li>\n <li><a href=\"/system-tests\">System Test Output</a></li>\n </ul>\n</html>\n\"\"\"\n\n\ndef code_block(*lines):\n html_lines = ['<pre>']\n for line in lines:\n html_lines.append(flask.escape(line))\n html_lines.append('</pre>')\n return '\\n'.join(html_lines)\n\n\nclass PrettyErrors(object):\n\n def __init__(self, callable_):\n self.callable_ = callable_\n functools.update_wrapper(self, self.callable_)\n\n def __call__(self, *args, **kwargs):\n try:\n return self.callable_(*args, **kwargs)\n except:\n exc_info = boltons.tbutils.ExceptionInfo.from_current()\n exc_str = exc_info.get_formatted()\n return code_block(exc_str)\n\n\n@app.route('/')\ndef main():\n return MAIN_HTML\n\n\n@app.route('/info')\n@PrettyErrors\ndef info():\n if grpc is None:\n exc_info = boltons.tbutils.ExceptionInfo.from_exc_info(*grpc_info)\n grpc_msg = exc_info.get_formatted()\n else:\n try:\n dist = pkg_resources.get_distribution('grpcio')\n grpc_msg = '\\n'.join([\n '>>> grpc',\n repr(grpc),\n '>>> dist = pkg_resources.get_distribution(\\'grpcio\\')',\n '>>> dist',\n repr(dist),\n ])\n except pkg_resources.DistributionNotFound:\n exc_info = boltons.tbutils.ExceptionInfo.from_current()\n grpc_msg = '\\n'.join([\n '>>> grpc',\n repr(grpc),\n '>>> dist = pkg_resources.get_distribution(\\'grpcio\\')',\n exc_info.get_formatted(),\n ])\n\n return code_block(\n '>>> import sys',\n '>>> sys',\n repr(sys),\n '>>> sys.executable',\n repr(sys.executable),\n '>>> sys.prefix',\n repr(sys.prefix),\n '>>> getattr(sys, \\'real_prefix\\', None)',\n repr(getattr(sys, 'real_prefix', None)),\n '>>> import subprocess',\n '>>> subprocess',\n repr(subprocess),\n '>>> import _multiprocessing',\n '>>> _multiprocessing',\n repr(_multiprocessing),\n '>>> import os',\n '>>> os',\n repr(os),\n '>>> os.devnull',\n repr(os.devnull),\n '>>> os.path.exists(os.devnull)',\n repr(os.path.exists(os.devnull)),\n '>>> import six',\n '>>> six',\n repr(six),\n '>>> six.__version__',\n repr(six.__version__),\n '>>> import setuptools',\n '>>> setuptools',\n repr(setuptools),\n '>>> import pkg_resources',\n '>>> pkg_resources',\n repr(pkg_resources),\n '>>> import google.protobuf',\n '>>> google.protobuf',\n repr(google.protobuf),\n '>>> google.protobuf.__version__',\n 
repr(google.protobuf.__version__),\n '>>> import grpc',\n grpc_msg,\n )\n\n\ndef load_module(path):\n dirname, basename = os.path.split(path)\n mod_name, extension = os.path.splitext(basename)\n assert extension == '.py'\n file_obj, filename, details = imp.find_module(mod_name, [dirname])\n return imp.load_module(\n mod_name, file_obj, filename, details)\n\n\n@app.route('/unit-tests')\n@PrettyErrors\ndef unit_tests():\n test_mods = []\n for dirpath, _, filenames in os.walk('unit-tests'):\n for filename in filenames:\n if not filename.endswith('.py'):\n continue\n if filename == '__init__.py':\n continue\n test_mods.append(os.path.join(dirpath, filename))\n\n mod_objs = [load_module(path) for path in test_mods]\n suite = unittest.TestSuite()\n for mod_obj in mod_objs:\n tests = unittest.defaultTestLoader.loadTestsFromModule(mod_obj)\n suite.addTest(tests)\n\n stream = io.BytesIO()\n test_result = unittest.TextTestRunner(\n stream=stream, verbosity=2).run(suite)\n\n return code_block(\n '>>> import imp',\n '>>> import os',\n '>>> import unittest',\n '>>>',\n '>>> test_mods = []',\n '>>> for dirpath, _, filenames in os.walk(\\'unit-tests\\'):',\n '... for filename in filenames:',\n '... if not filename.endswith(\\'.py\\'):',\n '... continue',\n '... if filename == \\'__init__.py\\':',\n '... continue',\n '... test_mods.append(os.path.join(dirpath, filename))',\n '...',\n '>>> for path in test_mods:',\n '... print(path)',\n '...',\n '\\n'.join(test_mods),\n '>>>',\n '>>> def load_module(path):',\n '... dirname, basename = os.path.split(path)',\n '... mod_name, extension = os.path.splitext(basename)',\n '... assert extension == \\'.py\\'',\n '... file_obj, filename, details = imp.find_module(mod_name, [dirname])',\n '... return imp.load_module(',\n '... mod_name, file_obj, filename, details)',\n '...',\n '>>>',\n '>>> mod_objs = [load_module(path) for path in test_mods]',\n '>>>',\n '>>> suite = unittest.TestSuite()',\n '>>> for mod_obj in mod_objs:',\n '... tests = unittest.defaultTestLoader.loadTestsFromModule(mod_obj)',\n '... 
suite.addTest(tests)',\n '...',\n '>>> unittest.TextTestRunner(verbosity=2).run(suite)',\n stream.getvalue(),\n )\n\n\n@app.route('/import')\n@PrettyErrors\ndef import_():\n from google.cloud import language\n\n return code_block(\n '>>> from google.cloud import language',\n '>>> language',\n repr(language),\n )\n\n\n@app.route('/auth-check')\n@PrettyErrors\ndef auth_check():\n credentials, project = google.auth.default()\n key_name, signature = app_identity.sign_blob(b'abc')\n scope = 'https://www.googleapis.com/auth/userinfo.email'\n token, expiry = app_identity.get_access_token(scope)\n return code_block(\n '>>> import google.auth',\n '>>> credentials, project = google.auth.default()',\n '>>> credentials',\n repr(credentials),\n '>>> project',\n repr(project),\n '>>> credentials.__dict__',\n repr(credentials.__dict__),\n '>>> from google.appengine.api import app_identity',\n '>>> app_identity',\n repr(app_identity),\n # ALSO: get_access_token_uncached\n # (scopes, service_account_id=None)\n '>>> scope = \\'https://www.googleapis.com/auth/userinfo.email\\'',\n '>>> token, expiry = app_identity.get_access_token(scope)',\n '>>> token',\n repr(token[:6] + b'...'),\n '>>> expiry',\n repr(expiry),\n '>>> app_identity.get_application_id()',\n repr(app_identity.get_application_id()),\n '>>> app_identity.get_default_gcs_bucket_name()',\n repr(app_identity.get_default_gcs_bucket_name()),\n '>>> app_identity.get_default_version_hostname()',\n repr(app_identity.get_default_version_hostname()),\n '>>> app_identity.get_public_certificates()',\n repr(app_identity.get_public_certificates()),\n '>>> app_identity.get_service_account_name()',\n repr(app_identity.get_service_account_name()),\n '>>> key_name, signature = app_identity.sign_blob(b\\'abc\\')',\n '>>> key_name',\n repr(key_name),\n '>>> signature',\n repr(signature[:16] + b'...'),\n )\n\n\n@app.route('/system-tests')\n@PrettyErrors\ndef system_tests():\n # NOTE: We intentionally import at run-time.\n from google.cloud import language_v1\n from google.cloud.language_v1 import enums\n\n scopes = language_v1.LanguageServiceClient._ALL_SCOPES\n credentials, _ = google.auth.default(scopes=scopes)\n logging.info('credentials: %r', credentials)\n client = language_v1.LanguageServiceClient(credentials=credentials)\n logging.info('client: %r', client)\n content = 'Hello, world!'\n type_ = enums.Document.Type.PLAIN_TEXT\n document = {'content': content, 'type': type_}\n logging.info('document: %r', document)\n response = client.analyze_sentiment(document)\n logging.info('response: %r', response)\n\n return code_block(\n '>>> from google.cloud import language_v1',\n '>>> from google.cloud.language_v1 import enums',\n '>>>',\n '>>> scopes = language_v1.LanguageServiceClient._ALL_SCOPES',\n '>>> scopes',\n repr(scopes),\n '>>> credentials, _ = google.auth.default(scopes=scopes)',\n '>>> credentials',\n repr(credentials),\n '>>>',\n '>>> client = language_v1.LanguageServiceClient(credentials=credentials)',\n '>>> client',\n repr(client),\n '>>>',\n '>>> content = \\'Hello, world!\\'',\n '>>> type_ = enums.Document.Type.PLAIN_TEXT',\n '>>> document = {\\'content\\': content, \\'type\\': type_}',\n '>>> response = client.analyze_sentiment(document)',\n '>>>',\n '>>> response',\n repr(response)\n )\n\n\n@app.errorhandler(500)\ndef server_error(exc):\n # Log the error and stacktrace (``logging.exception`` will\n # automatically add the stacktrace).\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500\n", "id": 
"7171355", "language": "Python", "matching_score": 2.9516868591308594, "max_stars_count": 0, "path": "language-app/main.py" }, { "content": "# Copyright 2017 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport flask\nimport google.auth\nimport google.auth.transport.requests\n\n\napp = flask.Flask(__name__)\n\nSCOPE = 'https://www.googleapis.com/auth/userinfo.email'\nMAIN_HTML = \"\"\"\\\n<html>\n <pre>\n >>> import google.auth\n >>> scope = {scope}\n >>> credentials, _ = google.auth.default(scopes=(scope,))\n >>> credentials\n {credentials}\n >>> credentials.token is None\n {is_none}\n >>>\n >>> import google.auth.transport.requests\n >>> request = google.auth.transport.requests.Request()\n >>> credentials.refresh(request)\n >>> credentials.token\n {token!r}\n</html>\n\"\"\"\n\n\n@app.route('/')\ndef main():\n credentials, _ = google.auth.default(scopes=(SCOPE,))\n is_none = credentials.token is None\n request = google.auth.transport.requests.Request()\n credentials.refresh(request)\n\n if credentials.token is None:\n token_str = ''\n else:\n token_str = credentials.token[:15] + '...'\n\n return MAIN_HTML.format(\n scope=SCOPE,\n credentials=flask.escape(repr(credentials)),\n is_none=is_none,\n token=token_str,\n )\n", "id": "4365596", "language": "Python", "matching_score": 0.17803214490413666, "max_stars_count": 0, "path": "main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport queue\nimport struct\nimport time\n\n\n# Block a `get()` from the queue for 2 seconds.\nQUEUE_GET_TIMEOUT = 2.0\nQUEUE_EMPTY = object() # Sentinel\n\n\ndef _queue_get(queue_, timeout):\n try:\n return queue_.get(block=True, timeout=timeout)\n except queue.Empty:\n return QUEUE_EMPTY\n\n\ndef save_log_worker(filename, log_queue, done_event):\n \"\"\"Worker to save log messages from a queue to a file.\n\n This is intended to be launched in a thread to avoid blocking socket\n I/O with the requisite file I/O.\n\n Args:\n filename (pathlib.Path): The file where the replay log will be written.\n log_queue (queue.Queue): The queue where log lines will be pushed.\n done_event (threading.Event): An event indicating the proxy that is\n generating log lines is done.\n \"\"\"\n with open(filename, \"wb\") as file_obj:\n while True:\n if done_event.is_set() and log_queue.empty():\n return\n\n value = _queue_get(log_queue, QUEUE_GET_TIMEOUT)\n if value is QUEUE_EMPTY:\n continue\n\n # NOTE: We assume items in the queue are of type\n # `Tuple[int, bytes, 
socket.socket, socket.socket]`.\n time_ns, tcp_chunk, client_socket, server_socket = value\n client_ip, client_port = client_socket.getpeername()\n server_ip, server_port = server_socket.getpeername()\n\n ts_bytes = struct.pack(\">Q\", time_ns)\n chunk_length = struct.pack(\">I\", len(tcp_chunk))\n description = (\n f\"{client_ip}:{client_port:d} {server_ip}:{server_port:d}\"\n )\n log_line = (\n ts_bytes\n + description.encode(\"ascii\")\n + b\" \"\n + chunk_length\n + tcp_chunk\n )\n file_obj.write(log_line)\n", "id": "12182255", "language": "Python", "matching_score": 1.9594016075134277, "max_stars_count": 0, "path": "server/_save_replay_log.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\n\nfrom tcp_h2_describe._serve import serve_proxy\n\n\nDESCRIPTION = \"\"\"\\\nRun `tcp-h2-describe` reverse proxy server.\n\nThis will forward traffic to a proxy port along to an already running\nHTTP/2 server. For each HTTP/2 frame forwarded (either client->server or\nserver->client) a description will be printed to the console explaining what\neach byte in the frame means.\n\"\"\"\n\n\ndef get_args():\n \"\"\"Get the command line arguments for ``tcp-h2-describe``.\n\n Returns:\n Tuple[int, int, Optional[str]]: A triple of\n * The port for the \"describe\" proxy\n * The port for the server that is being proxied\n * The hostname for the server that is being proxied (or :data:`None` if\n not provided)\n \"\"\"\n parser = argparse.ArgumentParser(\n description=DESCRIPTION,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"tcp-h2-describe\",\n )\n parser.add_argument(\n \"--proxy-port\",\n dest=\"proxy_port\",\n type=int,\n default=24909,\n help='The port that will be used for running the \"describe\" proxy.',\n )\n parser.add_argument(\n \"--server-host\",\n dest=\"server_host\",\n help=\"The hostname for the server that is being proxied.\",\n )\n parser.add_argument(\n \"--server-port\",\n dest=\"server_port\",\n type=int,\n default=80,\n help=\"The port for the server that is being proxied.\",\n )\n\n args = parser.parse_args()\n return args.proxy_port, args.server_port, args.server_host\n\n\ndef main():\n proxy_port, server_port, server_host = get_args()\n kwargs = {}\n if server_host is not None:\n kwargs[\"server_host\"] = server_host\n\n serve_proxy(proxy_port, server_port, **kwargs)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "4750957", "language": "Python", "matching_score": 2.8380661010742188, "max_stars_count": 0, "path": "src/tcp_h2_describe/__main__.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"Display a system notification on macOS.\n\nosascript -e 'display notification \"Lorem ipsum dolor sit amet\" with title \"Title\"\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport subprocess\nimport sys\n\n\ndef notify(title, message):\n apple_script = \"display notification {} with title {}\".format(\n json.dumps(message), json.dumps(title)\n )\n subprocess.check_call([\"osascript\", \"-e\", 
apple_script])\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"Display a system notification on macOS.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"title\", nargs=\"?\", default=\"Interruption\", help=\"Notification title.\"\n )\n parser.add_argument(\n \"messages\", metavar=\"message\", nargs=1, help=\"Notification message.\"\n )\n\n args = parser.parse_args()\n message, = args.messages\n return args.title, message\n\n\ndef main():\n if sys.platform != \"darwin\":\n print(\"This tool is intended for macOS\", file=sys.stderr)\n sys.exit(1)\n\n title, message = get_args()\n notify(title, message)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "595391", "language": "Python", "matching_score": 1.2927111387252808, "max_stars_count": 1, "path": "annoy.py" }, { "content": "from __future__ import print_function\n\nimport argparse\nimport json\nimport os\n\nimport get_brackets_html\nimport utils\n\n\nwith open(utils.TEAM_MAP_FILENAME, 'r') as fh:\n TEAM_MAP = json.load(fh)\n# Convert string keys to integers.\nTEAM_MAP = {int(key): val for key, val in TEAM_MAP.items()}\nROUND_BREAKS = frozenset([64, 96, 112, 120, 124, 126, 127])\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Check correctness of parse_brackets_html.py')\n parser.add_argument('--entry-id', dest='entry_id', required=True)\n args = parser.parse_args()\n loc_filename = '{}.json'.format(args.entry_id)\n bracket_filename = os.path.join(\n get_brackets_html.BRACKETS_DIR, loc_filename)\n\n with open(bracket_filename, 'r') as fh:\n winners = json.load(fh)\n winners = {int(key): val for key, val in winners.items()}\n\n for game_id in sorted(winners.keys()):\n if game_id in ROUND_BREAKS:\n print('=' * 60)\n winner_of = winners[game_id]\n winner_name = TEAM_MAP[winner_of]\n msg = '{:3d} -> {}'.format(game_id, winner_name)\n print(msg)\n\n\nif __name__ == '__main__':\n main()\n", "id": "11583921", "language": "Python", "matching_score": 1.1244282722473145, "max_stars_count": 1, "path": "check_parse_brackets.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport numpy as np\nimport time\n\n\nfrom butterfly_algorithm import load_whale\nfrom butterfly_algorithm import solve\n\n\ndef dft_data(N):\n N_vals = np.arange(N, dtype=np.float64)\n t = 2 * np.pi * N_vals\n s = N_vals / N\n return t, s\n\n\ndef main(M=11, L=None):\n _, data = load_whale()\n N = len(data)\n if L is None:\n L = int(np.floor(np.log2(N)))\n print 'N = %d points, M = %d truncated terms, L = %d refinements' % (\n N, M, L)\n t, s = dft_data(N)\n\n start = time.time()\n f_hat = solve(t, s, data, L, M=M)\n duration = time.time() - start\n print('total computation time: %g' % (duration,))\n\n fft_f_hat = np.fft.fft(data, n=N)\n print('2-norm: %g' % (np.linalg.norm(f_hat - fft_f_hat, ord=2),))\n print('sup-norm: %g' % (np.linalg.norm(f_hat - fft_f_hat, ord=np.inf),))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Run Butterfly on whale test data.')\n parser.add_argument('--M', dest='M', type=int, default=11,\n help='Size of Taylor series truncation.')\n parser.add_argument('--L', dest='L', type=int,\n help='Number of grid refinement levels.')\n args = parser.parse_args()\n main(M=args.M, L=args.L)\n", "id": "9043672", "language": "Python", "matching_score": 1.721379280090332, "max_stars_count": 0, "path": "butterfly_on_whale.py" }, { "content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport time\n\nparser = 
argparse.ArgumentParser()\nparser.add_argument('--iterations', metavar='N', type=int,\n help='number of times to run the benchmark')\nparser.add_argument('--sum', action='store_true',\n help='sum the times (default: find the average)')\nparser.add_argument('--fast', action='store_true',\n help='just execute the short running problems')\nargs = parser.parse_args()\niterations = 1 if args.iterations is None else args.iterations\n\nif args.fast:\n problem_list = [1, 2, 3, 5, 6, 8, 13, 15, 16, 18, 19, 20, 24, 28, 40, 45]\nelse:\n problem_list = range(1, 50 + 1)\n\nfor problem_number in problem_list:\n path = 'complete/no%03d.py' % problem_number\n if os.path.exists(path):\n module = 'complete.no%03d' % problem_number\n else:\n module = 'too_slow.no%03d' % problem_number\n\n main = __import__(module, fromlist=['main'])\n begin = time.time()\n for i in range(iterations):\n result = main.main()\n end = time.time()\n\n total = end - begin\n if not args.sum:\n total = total * 1.0 / iterations\n\n print '%s: %s, %sms' % (problem_number, result, int(1000 * total))\n", "id": "11218811", "language": "Python", "matching_score": 0.729081928730011, "max_stars_count": 7, "path": "python/benchmark.py" }, { "content": "import time\nfrom math import floor\nfrom math import log10\n\n\ndef euler_timer(problem_number):\n\n def result_timer(method):\n def timed_method(*args, **kwargs):\n result = ('The answer to Euler Project,'\n ' question %s is:' % problem_number)\n\n start_time = time.time()\n method_result = method(*args, **kwargs)\n finish_time = time.time()\n\n runtime = finish_time - start_time\n exponent = int(floor(log10(runtime)))\n time_statement = 'This solution ran in %sE%s seconds.' % (\n round(10 ** (-exponent) * runtime, 3), exponent)\n\n result = '%s %s\\n\\n%s' % (result, method_result, time_statement)\n\n return result\n\n return timed_method\n\n return result_timer\n", "id": "7727433", "language": "Python", "matching_score": 0.4370669722557068, "max_stars_count": 7, "path": "python/decorators.py" }, { "content": "#!/usr/bin/env python\n\n# The fraction 49/98 is a curious fraction, as an inexperienced mathematician\n# in attempting to simplify it may incorrectly believe that 49/98 = 4/8,\n# which is correct, is obtained by cancelling the 9s.\n\n# We shall consider fractions like, 30/50 = 3/5, to be trivial examples.\n\n# There are exactly four non-trivial examples of this type of fraction, less\n# than one in value, and containing two digits in the numerator\n# and denominator.\n\n# If the product of these four fractions is given in its lowest common\n# terms, find the value of the denominator.\n\nimport operator\n\nfrom fractions import gcd\n\nfrom python.decorators import euler_timer\n\n\ndef canceled_pair(numer, denom):\n shared = set(str(numer)).intersection(set(str(denom)))\n result_n = [dig for dig in str(numer)]\n result_d = [dig for dig in str(denom)]\n for dig in shared:\n result_n.remove(dig) # Only removes first instance\n result_d.remove(dig) # Only removes first instance\n result_n = int(\"\".join(result_n)) if result_n else 0\n result_d = int(\"\".join(result_d)) if result_d else 0\n return result_n, result_d\n\n\ndef equals_canceled_pair(numer, denom):\n c_num, c_denom = canceled_pair(numer, denom)\n if c_num == numer and c_denom == denom:\n return False\n elif 10 * c_num == numer:\n return False\n elif c_num == 0 and c_denom == 0:\n return False\n return (c_num * denom == c_denom * numer)\n\n\ndef main(verbose=False):\n pairs = [(numer, denom)\n for numer in range(10, 99)\n for 
denom in range(numer + 1, 100)\n if equals_canceled_pair(numer, denom)]\n num = reduce(operator.mul, [pair[0] for pair in pairs])\n denom = reduce(operator.mul, [pair[1] for pair in pairs])\n return denom / (gcd(num, denom))\n\nif __name__ == '__main__':\n print euler_timer(33)(main)(verbose=True)\n", "id": "479705", "language": "Python", "matching_score": 1.7209852933883667, "max_stars_count": 7, "path": "python/complete/no033.py" }, { "content": "#!/usr/bin/env python\n\n# What is the smallest positive number that is evenly divisible\n# by all of the numbers from 1 to 20?\n\nfrom fractions import gcd\n\nfrom python.decorators import euler_timer\n\n\ndef min_product(n):\n if n < 2:\n return 1\n\n prod = min_product(n - 1)\n shared = gcd(prod, n)\n return prod * n / shared\n\n\ndef main(verbose=False):\n return min_product(20)\n\nif __name__ == '__main__':\n print euler_timer(5)(main)(verbose=True)\n", "id": "6779231", "language": "Python", "matching_score": 0.3579101264476776, "max_stars_count": 7, "path": "python/complete/no005.py" }, { "content": "#!/usr/bin/env python\n\n# How many Sundays fell on the first of the month during\n# the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n\n# A leap year occurs on any year evenly divisible by 4, but not\n# on a century unless it is divisible by 400.\n\nfrom python.decorators import euler_timer\n\n\ndef leap_year(year):\n if year % 4 == 0:\n if year % 100 == 0:\n return year % 400 == 0\n else:\n return True\n else:\n return False\n\n\ndef month_lengths(year):\n result = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if leap_year(year):\n result[1] = 29\n return result\n\n\ndef main(verbose=False):\n # We call the days of the week 0 - Sunday,...,6 - Saturday modulo 7\n # 1 Jan 1900 was a Monday. i.e. equal to 1\n jan_1_1901 = (1 + sum(month_lengths(1900))) % 7\n\n date = jan_1_1901\n count = 0 if date else 1\n for year in range(1901, 2001):\n months = month_lengths(year)\n for month in months:\n date = (date + month) % 7\n if date == 0:\n count += 1\n\n # The final date will be Jan 1 2001, so we need to\n # disallow it if it was bad\n if date == 0:\n count -= 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(19)(main)(verbose=True)\n", "id": "10924269", "language": "Python", "matching_score": 1.32723069190979, "max_stars_count": 7, "path": "python/complete/no019.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\n\n\ndef inc_or_dec(n):\n digs = [dig for dig in str(n)]\n if sorted(digs) == digs:\n return True\n elif sorted(digs) == digs[::-1]:\n return True\n else:\n return False\n\n\ndef main(verbose=False):\n n = 21780\n B = 19602 # 90%\n while 100 * B != 99 * n:\n n += 1\n if not inc_or_dec(n):\n B += 1\n return n\n\nif __name__ == '__main__':\n print euler_timer(112)(main)(verbose=True)\n", "id": "11637010", "language": "Python", "matching_score": 0.898737370967865, "max_stars_count": 7, "path": "python/complete/no112.py" }, { "content": "#!/usr/bin/env python\n\n# If all the numbers from 1 to 1000 (one thousand) inclusive were written\n# out in words, how many letters would be used?\n\n# NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and\n# forty-two) contains 23 letters and 115 (one hundred and fifteen) contains\n# 20 letters. 
The use of \"and\" when writing out numbers is in compliance\n# with British usage.\n\n# Assume less than or equal to 1000\n\nfrom python.decorators import euler_timer\n\n\ndef words(n):\n tens = {2: \"twenty\", 3: \"thirty\", 4: \"forty\", 5: \"fifty\", 6: \"sixty\",\n 7: \"seventy\", 8: \"eighty\", 9: \"ninety\"}\n teens = {10: \"ten\", 11: \"eleven\", 12: \"twelve\", 13: \"thirteen\",\n 14: \"fourteen\", 15: \"fifteen\", 16: \"sixteen\", 17: \"seventeen\",\n 18: \"eighteen\", 19: \"nineteen\"}\n ones = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\"}\n\n if n == 1000:\n return \"one thousand\"\n\n result = []\n digs = [int(dig) for dig in str(n).zfill(3)]\n\n if digs[0] != 0:\n if n != 100 * digs[0]:\n result.extend([ones[digs[0]], \"hundred\", \"and\"])\n else:\n return \"%s hundred\" % ones[digs[0]]\n\n if digs[1] == 1:\n result.append(teens[10 * digs[1] + digs[2]])\n return \" \".join(result)\n elif digs[1] != 0:\n result.append(tens[digs[1]])\n\n # Here we can safely ignore teens since we return in that loop\n if digs[2] != 0:\n result.append(ones[digs[2]])\n\n return \" \".join(result)\n\n\ndef num_letters_in_word(n):\n result = words(n)\n result = \"\".join(result.split())\n result = \"\".join(result.split(\"-\"))\n return len(result)\n\n\ndef main(verbose=False):\n return sum(num_letters_in_word(i) for i in range(1, 1001))\n\nif __name__ == '__main__':\n print euler_timer(17)(main)(verbose=True)\n", "id": "4682202", "language": "Python", "matching_score": 0.552334725856781, "max_stars_count": 7, "path": "python/complete/no017.py" }, { "content": "#!/usr/bin/env python\n\n# P = 28433 x 2^7830457 + 1 is a big prime number\n\n# Find the last ten digits of this prime number.\n\n# P mod 10**10 is relevant to us\n# From CRT, we can find this in 2**10 and 5**10\n# P == 1 mod 2**10 since 2**7830457 == 0\n\n# Since 2 is a unit in integers mod 5**10, we know it has\n# order dividing phi(5**10) = 4(5**9), which has 3*10 = 30 factors\n# 1, 5,...,1*(5**9)\n# 2,10,...,2*(5**9)\n# 4,20,...,4*(5**9)\n\nfrom python.decorators import euler_timer\nfrom python.functions import order_mod_n\n\n\ndef unit_a_null_b(a, b):\n k = 1\n while (k * a + 1) % b != 0:\n k += 1\n return (k * a + 1) % (a * b)\n\n\ndef main(verbose=False):\n # We need to find the residue of P modulo 5**10\n # since we already know the residue modulo 2**10\n actual_exponent = 7830457 % order_mod_n(2, 5 ** 10)\n # want to find 2 raised to this exponent\n power_of_two = 1\n for i in range(actual_exponent):\n power_of_two = (2 * power_of_two) % 5 ** 10\n residue = (28433 * power_of_two + 1) % 5 ** 10\n\n unit_two_null_five = unit_a_null_b(2 ** 10, 5 ** 10)\n unit_five_null_two = unit_a_null_b(5 ** 10, 2 ** 10)\n return (1 * unit_two_null_five + residue * unit_five_null_two) % 10 ** 10\n\nif __name__ == '__main__':\n print euler_timer(97)(main)(verbose=True)\n", "id": "10792203", "language": "Python", "matching_score": 1.8663642406463623, "max_stars_count": 7, "path": "python/complete/no097.py" }, { "content": "#!/usr/bin/env python\n\n# P = f_1*...*f_k, where (f_i, f_j) = 1 if i != j\n# This forces (f_i, P/f_i) = 1, hence there\n# exist s, r that satisfy:\n# s (P/f_i) + r f_i = 1\n# Set e_i = s (P/f_i)\n# Then e_i + 0 == 1 mod f_i\n# and by definition e_i = s*(P/f_i) = s*0 mod f_j for all j != i\n\n# For any x**3 == 1 mod n, we know (x, n) = 1 since x has finite\n# order mod n. 
Thus by CRT, x = (x_1 mod f_1, ..., x_k mod f_k)\n# for the coprime factors f_1*...*f_k = n. Given units as\n# above, the sum X = e_1*x_1 + ... + e_k*x_k, necessarily has\n# X mod f_i = 0*x_1 + ... + 1*x_i + ... + 0*x_k = x_i, hence X = x.\n\n# Thus we need to find all cube roots in the factors dividing our n,\n# all of which happen to be prime. We can then combine all possiblities\n# to reconstruct X and then sum the X values\n\nfrom itertools import product as i_product\n\nfrom python.decorators import euler_timer\nfrom python.functions import extended_euclid\nfrom python.functions import prime_factors\n\n\ndef find_cube_roots(prime):\n # Won't check, but assumes prime is prime\n # in a prime field x^3 == 1 implies x == 1 or x^2 + x + 1 == 0\n # since a domain, the latter is satisfied if\n # (2x + 1)**2 == -3 mod prime (so we handle 2 and 3 differently)\n if prime in [2, 3]:\n return [1]\n\n # The inverse of 2 is (prime + 1)/2\n # If L(q,p) is the legendre symbol, for p != 2 or 3 we know\n # L(-3, p) = L(-1, p)*L(3, p) = (-1)**(floor((p+1)/6)+(p-1)/2)\n if (-1) ** ((prime + 1) / 6 + (prime - 1) / 2) == -1:\n return [1]\n\n for i in xrange(1, prime):\n if (i ** 2 + 3) % prime == 0:\n break\n # So we know i and prime - i are the square roots of 3\n return sorted([1,\n ((prime + 1) * (i - 1) / 2) % prime,\n ((prime + 1) * (prime - i - 1)) / 2 % prime])\n\n\ndef main(verbose=False):\n product = 13082761331670030\n factors = prime_factors(product)\n\n candidate_lists = []\n for factor in factors:\n candidate_lists.append([(factor, root)\n for root in find_cube_roots(factor)])\n\n result = list(i_product(*candidate_lists))\n\n coprime_units = {}\n for factor in factors:\n _, multiplier = extended_euclid(factor, product / factor)\n coprime_units[factor] = multiplier * (product / factor)\n\n vals = []\n for pairing in result:\n count = 0\n for prime, residue in pairing:\n count += residue * coprime_units[prime]\n count = count % product\n vals.append(count)\n\n return sum(vals) - 1 # 1 is in there as (1,1,...,1)\n\nif __name__ == '__main__':\n print euler_timer(271)(main)(verbose=True)\n", "id": "12366813", "language": "Python", "matching_score": 2.0009896755218506, "max_stars_count": 7, "path": "python/complete/no271.py" }, { "content": "#!/usr/bin/env python\n\n# We begin with a sorted list of values to choose from.\n# If the value is not found, we log our biggest value and\n# save the number of digits. We then start with 10**d\n# and 2*(10**d), the next biggest values with only digits\n# less than 3. We wish to find some x*(10**d) + y, where\n# both x and y have only 1s and 2s and y has d digits\n# or less. 
We want to x*(10**d) + y == 0 mod n\n\n# Since we have all possible values y with d digits or less,\n# we want to find the smallest match x such that\n# x*(10**d) is in the set of residues (-y) % n\n\nfrom itertools import product as i_product\n\nfrom python.decorators import euler_timer\n\n\ndef find(n, value_list):\n for value in value_list:\n if value % n == 0:\n return value\n\n digs = len(str(max(value_list)))\n needed_residues = sorted(set((-value) % n for value in value_list))\n\n residue = (10 ** digs) % n\n actual_list = [1, 2]\n residue_form = [(residue * val) % n for val in actual_list]\n while set(residue_form).intersection(needed_residues) == set():\n next = []\n for val in actual_list:\n next.extend([10 * val, 10 * val + 1, 10 * val + 2])\n actual_list = next\n residue_form = [(residue * val) % n for val in actual_list]\n\n best_match = min(val for val in actual_list\n if (residue * val) % n in needed_residues)\n best_opposites = [val for val in value_list\n if val % n == (-(best_match * residue)) % n]\n return (10 ** digs) * best_match + min(best_opposites)\n\n\ndef main(verbose=False):\n MAX_DIGITS = 12\n candidate_lists = [['0', '1', '2']] * MAX_DIGITS\n\n values = list(i_product(*candidate_lists))\n values = [int(''.join(value)) for value in values][1:]\n\n running_sum = 0\n for n in range(1, 10000 + 1):\n running_sum += find(n, values) / n\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(303)(main)(verbose=True)\n", "id": "1745342", "language": "Python", "matching_score": 1.319130301475525, "max_stars_count": 7, "path": "python/complete/no303.py" }, { "content": "#!/usr/bin/env python\n\n# The \"Pentagonal Number Theorem\" (a recurrence for the\n# partition function) states:\n\n# p(k) = p(k - 1) + p(k - 2) - p(k - 5) - p(k - 7) + p(k - 12) + p(k - 15)\n\n# where p(0) is taken to equal 1, p(k) is zero for negative k, and the sum is\n# taken over all generalized pentagonal numbers of the form (n*(3*n-1))/2\n# for n running over positive and negative integers (successively taking\n# n = 1, -1, 2, -2, 3, -3, ...) 
generates the values 1, 2, 5, 7, 12, 15, ...\n# The signs in the summation continue to alternate +, +, -, -, +, +\n\n# for 1 <= k < 2, p(k) = p(k - 1)\n# for 2 <= k < 5, p(k) = p(k - 1) + p(k - 2)\n# for 5 <= k < 7, p(k) = p(k - 1) + p(k - 2) - p(k - 5)\n# etc.\n# With this in mind, we calculate the partition values for each\n# segment until we find one congruent to 0 mod 10**6\n\nfrom python.decorators import euler_timer\nfrom python.functions import polygonal_number\n\n\ndef find_residue(residue):\n p = {0: 1}\n\n pentagonal = []\n pent_index = 1\n found_match = False\n while not found_match:\n if pent_index > 0:\n next_index = -pent_index\n else:\n next_index = abs(pent_index) + 1\n\n begin_val = polygonal_number(5, pent_index)\n end_val = polygonal_number(5, next_index)\n pentagonal.append(begin_val)\n for n in range(begin_val, end_val):\n # doesn't include end_val\n p[n] = 0\n for index, val in enumerate(pentagonal):\n if (index / 2) % 2 == 0:\n p[n] = (p[n] + p[n - val]) % residue\n else:\n p[n] = (p[n] - p[n - val]) % residue\n\n if p[n] == 0:\n found_match = True\n return n\n pent_index = next_index\n\n raise Exception(\"Should not reach this line\")\n\n\ndef main(verbose=False):\n return find_residue(10 ** 6)\n\nif __name__ == '__main__':\n print euler_timer(78)(main)(verbose=True)\n", "id": "3441054", "language": "Python", "matching_score": 1.660668969154358, "max_stars_count": 7, "path": "python/complete/no078.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all the even-valued terms\n# in the sequence which do not exceed four million.\n\n# f_0 = 0, f_1 = 1, f_(n+2) = f_(n+1) + f_n mod 2 generates\n# 0, 1, 1, 0, 1, 1, 0, 1, 1, ... mod 2\n# The even terms are f_(3k)\n# f_(3k+6) = f_(3k+5) + f_(3k+4)\n# = f_(3k+3) + 2*f_(3k+4)\n# = 3*f_(3k+3) + 2*f_(3k+2)\n# = 4*f_(3k+3) + 2*f_(3k+2) - f_(3k+3)\n# = 4*f_(3k+3) + f_(3k)\n\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef main(verbose=False):\n a, b = 0, 2\n running_sum = 0\n while b <= 4000000:\n running_sum += b\n a, b = recurrence_next([1, 4], [a, b])\n return running_sum\n\nif __name__ == '__main__':\n print euler_timer(2)(main)(verbose=True)\n", "id": "2777235", "language": "Python", "matching_score": 0.5969381928443909, "max_stars_count": 7, "path": "python/complete/no002.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import recurrence_next\n\n\ndef zero_absent(relation, initial_values, modulus):\n initial = [value % modulus for value in initial_values]\n curr = initial[:]\n\n if 0 in initial:\n return False\n\n curr = [value % modulus for value in recurrence_next(relation, curr)]\n while curr != initial:\n if 0 in curr:\n return False\n curr = [value % modulus for value in recurrence_next(relation, curr)]\n return True\n\n\ndef main(verbose=False):\n relation = [1, 1, 1]\n initial_values = [1, 1, 1]\n NUMBER_SUCCESSES = 124\n\n found = [27]\n modulus = 29\n while len(found) < NUMBER_SUCCESSES:\n if zero_absent(relation, initial_values, modulus):\n found.append(modulus)\n modulus += 2\n return found[NUMBER_SUCCESSES - 1]\n\nif __name__ == '__main__':\n print euler_timer(225)(main)(verbose=True)\n", "id": "8568589", "language": "Python", "matching_score": 0.8941404819488525, "max_stars_count": 7, "path": "python/complete/no225.py" }, { "content": "#!/usr/bin/env python\n\n# Algorithm:\n# Each pair of points defines a line\n# Of these three lines, exactly two will cross the origin at an\n# x value 
between the points defining the line\n# The two lines that cross the origin define the range at y = 0\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef get_points(line):\n result = [int(val) for val in line.split(\",\")]\n return [(result[2 * i], result[2 * i + 1]) for i in range(3)]\n\n\ndef cross_zero(p, q):\n if p[0] == q[0]:\n if 0 <= max(p[1], q[1]) and 0 >= min(p[1], q[1]):\n return (p[0], True)\n else:\n return (-10 ** 10, False)\n m = ((p[1] - q[1]) * 1.0) / (p[0] - q[0])\n b = p[1] - m * p[0]\n if m == 0:\n if p[1] != 0:\n return (-10 ** 10, False)\n else:\n return ((p[0], q[0]), True)\n zero_val = (-b * 1.0) / m\n if zero_val <= max(p[0], q[0]) and zero_val >= min(p[0], q[0]):\n return (zero_val, True)\n else:\n return (-10 ** 10, False)\n\n\ndef zero_in(points):\n a, b, c = points\n crosses = [cross_zero(a, b), cross_zero(b, c), cross_zero(c, a)]\n crosses = [cross[0] for cross in crosses if cross[1]]\n if len(crosses) == 0:\n return False\n elif len(crosses) == 1:\n M = max(crosses[0])\n m = min(crosses[0])\n return (0 >= m and 0 <= M)\n return (0 >= min(crosses) and 0 <= max(crosses))\n\n\ndef main(verbose=False):\n data = [get_points(line) for line in get_data(102).split(\"\\n\") if line]\n count = 0\n for points in data:\n if zero_in(points):\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(102)(main)(verbose=True)\n", "id": "1420731", "language": "Python", "matching_score": 1.0059449672698975, "max_stars_count": 7, "path": "python/complete/no102.py" }, { "content": "#!/usr/bin/env python\n\n# Beginning with the trivial [[0,1]], we can\n# loop through all primes and elect to add\n# a new prime or not, branching the recursion\n# at each step and updating the final sum\n\n# Example:\n# sum = 0\n\n# Branch 1 (pairs=[[0,1]], primes = [5,13]):\n# 5 out\n# pairs = [[0,1]], primes = [13] --> 2a\n# don't contribute to sum\n# or\n# 5 in\n# pairs = [[1,2]], primes = [13] --> 2b\n# add sum([1]) to sum, sum = 1\n\n# Branch 2a (pairs=[[0,1]], primes = [13]):\n# 13 out\n# pairs = [[0,1]], primes = [] --> No branch\n# don't contribute to sum\n# or\n# 13 in\n# pairs = [[2,3]], primes = [] --> No branch\n# add sum([2]) to sum, sum = 3\n\n# Branch 2b (pairs=[[1,2]], primes = [13]):\n# 13 out\n# pairs = [[1,2]], primes = [] --> No branch\n# don't contribute to sum\n# or\n# 13 in\n# pairs = [[1,8], [4,7]], primes = [] --> No branch\n# add sum([1,4]) to sum, sum = 8\n\n# No branches remaining, so final value is 8.\n\n# As a check, we have 5, 13, and 65 which can be decomposed\n# as 5 = 1**2 + 2**2, 13 = 2**2 + 3**2, and\n# 65 = 1**2 + 8**2 = 4**2 + 7**2, hence 1 + 2 + 1 + 4 = 8 is correct\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef find_raw_solution(prime):\n max_n = int(sqrt(prime))\n for x in range(1, max_n + 1):\n y = int(sqrt(prime - x ** 2))\n if x ** 2 + y ** 2 == prime:\n return [x, y]\n return None\n\n\n# Using\n# (a**2 + b**2)*(c**2 + d**2) = (a*c - b*d)**2 + (a*d + b*c)**2\n# = (a*c + b*d)**2 + (a*d - b*c)**2\n# Clearly, combining two nontrivial (sorted) pairs gives rise to\n# two more sorted pairs\ndef multiply_pair(pair1, pair2):\n a, b = pair1\n c, d = pair2\n\n first_pair = sorted([abs(a * c - b * d), a * d + b * c])\n second_pair = sorted([a * c + b * d, abs(a * d - b * c)])\n if first_pair == second_pair:\n return [first_pair]\n else:\n return [first_pair, second_pair]\n\n\ndef squarefree_sum(pairs, primes):\n result = 0\n # Here is the 
branch point, add the next prime or don't\n # If we don't, there is no need to sum the values\n if primes == []:\n return result\n\n next_pairs = []\n prime_pair = find_raw_solution(primes[0])\n remaining_primes = primes[1:]\n for pair in pairs:\n new_pairs = multiply_pair(pair, prime_pair)\n result += sum(new_pair[0] for new_pair in new_pairs)\n next_pairs.extend(new_pairs)\n # next_pairs has the prime added\n result += squarefree_sum(next_pairs, remaining_primes)\n # Don't add the prime\n result += squarefree_sum(pairs, remaining_primes)\n return result\n\n\ndef main(verbose=False):\n primes = [prime for prime in sieve(150)\n if prime % 4 == 1]\n initial = [[0, 1]]\n return squarefree_sum(initial, primes)\n\nif __name__ == '__main__':\n print euler_timer(273)(main)(verbose=True)\n", "id": "11959800", "language": "Python", "matching_score": 1.776479959487915, "max_stars_count": 7, "path": "python/complete/no273.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import log\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef hamming_type(max_n, primes):\n # assumes primes is sorted\n if primes == []:\n return 1\n\n count = 0\n prime = primes[0]\n max_power = int(log(max_n) / log(prime))\n for power in range(max_power + 1):\n # each prime can contribute as few as zero\n # and as many as max_power factors\n # by removing prime from the list, we count\n # all such numbers with exactly power factors\n # of prime\n count += hamming_type(max_n / (prime ** power), primes[1:])\n return count\n\n\ndef main(verbose=False):\n PRIMES = sieve(100)\n return hamming_type(10 ** 9, PRIMES)\n\nif __name__ == '__main__':\n print euler_timer(204)(main)(verbose=True)\n", "id": "12378075", "language": "Python", "matching_score": 1.4959638118743896, "max_stars_count": 7, "path": "python/complete/no204.py" }, { "content": "#!/usr/bin/env python\n\n# p_1^2 + p_2^3 + p_3^4 = n\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef max_p(k, n):\n return int(n ** (1.0 / k))\n\n\ndef relevant_triples(n):\n result = []\n top_sieve = sieve(max_p(2, n))\n next3 = [prime for prime in top_sieve\n if prime <= max_p(4, n)]\n for p3 in next3:\n next2 = [prime for prime in top_sieve\n if prime <= max_p(3, n - p3 ** 4)]\n for p2 in next2:\n next1 = [prime for prime in top_sieve\n if prime <= max_p(2, n - p3 ** 4 - p2 ** 3)]\n for p1 in next1:\n if p1 ** 2 + p2 ** 3 + p3 ** 4 < n:\n result.append(p1 ** 2 + p2 ** 3 + p3 ** 4)\n return set(result)\n\n\ndef main(verbose=False):\n return len(relevant_triples(5 * 10 ** 7))\n\nif __name__ == '__main__':\n print euler_timer(87)(main)(verbose=True)\n", "id": "11747658", "language": "Python", "matching_score": 1.117214560508728, "max_stars_count": 7, "path": "python/complete/no087.py" }, { "content": "#!/usr/bin/env python\n\n# D: x --> x/3 ==> D**(-1): y --> 3*y\n# U: x --> (4*x + 2)/3 ==> U**(-1): y --> (3*y - 2)/4\n# d: x --> (2*x - 1)/3 ==> d**(-1): y --> (3*y + 1)/2\n\n# Starting with a_1, if we have any sequence (x_1,x_2,...,x_n)=s\n# then a_{n + 1} = s(a_1) = x_n(...(x_2(x_1(a_1)))),\n# forcing a_1 = x_1**(-1)(x_2**(-1)(...(x_n**(-1)(a_{n + 1}))))\n# Clearly from composing D**(-1), U**(-1), and d**(-1), we'll get\n# a function which f:y --> ((3**A)*y + B)/C for some values\n# A, B, C. Initially with y, A = 0, B = 0 and C = 1\n# NOTE: C will always be a power of 2\n\n# Hence given s = (x_1,...) 
we can construct x**(-1) and easily\n# update A, B and C based on the choice of each x_1\n\nfrom python.decorators import euler_timer\nfrom python.functions import inverse_mod_n\n\n\ndef sequence(letters, p_3, c, P_2):\n if letters == '':\n return p_3, c, P_2\n\n to_apply = letters[-1]\n if to_apply == 'D':\n return sequence(letters[:-1], p_3 + 1, 3 * c, P_2)\n elif to_apply == 'U':\n return sequence(letters[:-1], p_3 + 1, 3 * c - 2 * P_2, 4 * P_2)\n elif to_apply == 'd':\n return sequence(letters[:-1], p_3 + 1, 3 * c + P_2, 2 * P_2)\n\n\ndef main(verbose=False):\n p_3, c, P_2 = sequence('UDDDUdddDDUDDddDdDddDDUDDdUUDd', 0, 0, 1)\n # Here a_1 = s**(-1)(y) = ((3**(p_3))*y + c)/P_2\n # Since we need a_1 > 10**15, (3**(p_3))*y > (10**15)*P_2 - c\n min_y = ((10 ** 15) * P_2 - c) / (3 ** p_3)\n\n # We also need (3**(p_3))*y == -c mod P_2\n y_residue = inverse_mod_n(3 ** (p_3), P_2) * (-c) % P_2\n\n # integer division intended to find nearest multiple of P_2\n y = P_2 * (min_y / P_2) + y_residue\n if y < min_y:\n y += P_2\n\n return ((3 ** p_3) * y + c) / P_2\n\nif __name__ == '__main__':\n print euler_timer(277)(main)(verbose=True)\n", "id": "9176646", "language": "Python", "matching_score": 1.3440245389938354, "max_stars_count": 7, "path": "python/complete/no277.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import floor\nfrom math import log\n\nfrom python.decorators import euler_timer\nfrom python.functions import extended_euclid\nfrom python.functions import inverse_mod_n\nfrom python.functions import robust_divide\n\n\ndef unit_a_zero_b(a, b):\n _, multiplier = extended_euclid(a, b)\n return (multiplier * b) % (a * b)\n\n\ndef num_factors_fact(n, factor):\n result = 0\n power = factor\n while n >= power:\n result += n / power\n power = factor * power\n return result\n\n\ndef last5(n):\n if n < 8:\n # The remaining part is always divisible by\n # 2**5 for n > 7, these are special cases\n solutions = {0: 1,\n 1: 1,\n 2: 2,\n 3: 6,\n 4: 24,\n 5: 12,\n 6: 72,\n 7: 504}\n return solutions[n]\n\n if n <= 5 ** 5:\n residues = {}\n for i in range(1, n + 1):\n to_add = robust_divide(i, 5)\n # if to_add is not in residues, sets to 1\n # (default 0 returned by get)\n residues[to_add] = residues.get(to_add, 0) + 1\n else:\n residues = {}\n for residue in range(1, 5 ** 5):\n if residue % 5 != 0:\n residues[residue] = (n - residue) / (5 ** 5) + 1\n max_power = int(floor(log(n) / log(5)))\n for power in range(1, max_power + 1):\n biggest_quotient = n / (5 ** power)\n for residue in range(1, 5 ** 5):\n if residue % 5 != 0:\n residues[residue] += ((biggest_quotient - residue) /\n (5 ** 5) + 1)\n\n product = 1\n for residue, power in residues.items():\n power_apply = power % (4 * (5 ** 4)) # PHI(5**5)\n product = (product * (residue ** power_apply)) % (5 ** 5)\n fives = num_factors_fact(n, 5) % (4 * (5 ** 4)) # PHI(5**5)\n inverse = inverse_mod_n(2, 5 ** 5)\n product = (product * (inverse ** fives)) % (5 ** 5)\n\n return (product * unit_a_zero_b(5 ** 5, 2 ** 5)) % 10 ** 5\n\n\ndef main(verbose=False):\n if last5(9) != 36288:\n raise Exception(\"Fails for n = 9\")\n elif last5(10) != 36288:\n raise Exception(\"Fails for n = 10\")\n elif last5(20) != 17664:\n raise Exception(\"Fails for n = 20\")\n\n return last5(10 ** 12)\n\nif __name__ == '__main__':\n print euler_timer(160)(main)(verbose=True)\n", "id": "2842734", "language": "Python", "matching_score": 1.417214274406433, "max_stars_count": 7, "path": "python/complete/no160.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators 
import euler_timer\n\n\ndef special_perms(num_2, num_3):\n if num_3 == 0:\n return [[2] * num_2]\n elif num_2 == 0:\n return [[3] * num_3]\n\n result = ([[2] + perm for perm in special_perms(num_2 - 1, num_3)] +\n [[3] + perm for perm in special_perms(num_2, num_3 - 1)])\n return result\n\n\ndef cumulative_sum(list_):\n result = [list_[0]]\n for entry in list_[1:]:\n result.append(result[-1] + entry)\n return result\n\n\n# 2*x + 3*y = 32\n# implies y = 2*y_star, x = 16 - 3*y_star for 0 <= y_star <= 5\n\ndef main(verbose=False):\n break_rows = []\n for y_star in range(5 + 1):\n x = 16 - 3 * y_star\n y = 2 * y_star\n for perm in special_perms(x, y):\n to_add = cumulative_sum(perm)\n if to_add[-1] != 32:\n raise ValueError(\"Unexpected output from cumulative_sum\")\n break_rows.append(to_add[:-1])\n\n acceptable_next = {}\n for i, row in enumerate(break_rows):\n to_add = []\n for j, onto_row in enumerate(break_rows):\n # No matching breakpoints\n if set(row).intersection(onto_row) == set():\n to_add.append(j)\n acceptable_next[i] = to_add\n\n # for each key, this will list the number of acceptable\n # walls that end with the given key. Hence only 1\n # wall of height 1 ends with each key\n num_blocks_ending = {}\n for key in acceptable_next:\n num_blocks_ending[key] = {1: 1}\n\n blocks = 1\n while blocks < 10:\n blocks += 1\n for key, value in acceptable_next.items():\n for onto in value:\n to_add = num_blocks_ending[key][blocks - 1]\n # if blocks is not in num_blocks_ending[onto], sets to to_add\n # (default 0 returned by get)\n new_val = num_blocks_ending[onto].get(blocks, 0) + to_add\n num_blocks_ending[onto][blocks] = new_val\n\n # we finally add together all walls of height 10 ending\n # with any key (so we loop over all keys)\n result = 0\n for key in num_blocks_ending:\n result += num_blocks_ending[key][10]\n return result\n\nif __name__ == '__main__':\n print euler_timer(215)(main)(verbose=True)\n", "id": "2778498", "language": "Python", "matching_score": 1.5121567249298096, "max_stars_count": 7, "path": "python/complete/no215.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef column_by_column(data):\n result = {}\n size = len(data)\n # First column is set\n for row in range(size):\n result[(row, 0)] = data[row][0]\n\n for column in range(1, size):\n for row in range(size):\n set_val = result[(row, column - 1)] + data[row][column]\n for under in range(row):\n val = (result[(under, column - 1)] +\n sum(data[ind][column] for ind in range(under, row + 1)))\n if val < set_val:\n set_val = val\n for over in range(row + 1, size):\n val = (result[(over, column - 1)] +\n sum(data[ind][column] for ind in range(row, over + 1)))\n if val < set_val:\n set_val = val\n result[(row, column)] = set_val\n\n return min(result[(row, size - 1)] for row in range(size))\n\n\ndef main(verbose=False):\n data = [[int(entry) for entry in row.split(\",\")]\n for row in get_data(82).split(\"\\n\") if row]\n\n return column_by_column(data)\n\nif __name__ == '__main__':\n print euler_timer(82)(main)(verbose=True)\n", "id": "8884644", "language": "Python", "matching_score": 1.4706350564956665, "max_stars_count": 7, "path": "python/complete/no082.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import log\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\n\n\ndef main(verbose=False):\n data = [row.split(\",\") for row in get_data(99).split(\"\\n\") if row]\n\n max_val = -1\n winner = None\n 
for i, row in enumerate(data):\n log_val = int(row[1]) * log(int(row[0]))\n if log_val > max_val:\n max_val = log_val\n winner = i\n\n return winner + 1 # account for 0 vs. 1 initial index\n\nif __name__ == '__main__':\n print euler_timer(99)(main)(verbose=True)\n", "id": "3285462", "language": "Python", "matching_score": 0.7542092204093933, "max_stars_count": 7, "path": "python/complete/no099.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import exp\nfrom math import log\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\n\n\ndef is_1_9_pandigital(n):\n digs = [int(dig) for dig in str(n)]\n if len(digs) != 9:\n return False\n return sorted(digs) == range(1, 10)\n\n\ndef log_fib(n):\n root_plus = 0.5 * (1 + sqrt(5))\n root_ratio = 0.5 * (sqrt(5) - 3)\n return n * log(root_plus) - 0.5 * log(5) + log(1 - root_ratio ** n)\n\n\ndef main(verbose=False):\n # 10**(d - 1) <= N < 10**d\n # d <= log(N)/log(10) + 1 < d + 1\n k = 2\n a, b = 1, 1\n solution_found = False\n while not solution_found:\n if is_1_9_pandigital(b):\n log_val = log_fib(k)\n digits = int(log_val / log(10) + 1)\n log_last_9 = log_val - (digits - 9) * log(10)\n last_9 = int(exp(log_last_9))\n if is_1_9_pandigital(last_9):\n solution_found = True\n a, b = b, (a + b) % (10 ** 9)\n if not solution_found:\n k += 1\n return k\n\nif __name__ == '__main__':\n print euler_timer(104)(main)(verbose=True)\n", "id": "4832433", "language": "Python", "matching_score": 1.7783674001693726, "max_stars_count": 7, "path": "python/complete/no104.py" }, { "content": "#!/usr/bin/env python\n\n# Find the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000.\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n result = 0\n for i in range(1, 1000 + 1):\n result = (result + pow(i, i, 10 ** 10)) % 10 ** 10\n return result\n\nif __name__ == '__main__':\n print euler_timer(48)(main)(verbose=True)\n", "id": "8786674", "language": "Python", "matching_score": 1.6131007671356201, "max_stars_count": 7, "path": "python/complete/no048.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of the digits in the number 100!\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n return sum(int(digit) for digit in str(factorial(100)))\n\nif __name__ == '__main__':\n print euler_timer(20)(main)(verbose=True)\n", "id": "10178424", "language": "Python", "matching_score": 1.1545748710632324, "max_stars_count": 7, "path": "python/complete/no020.py" }, { "content": "#!/usr/bin/env python\n\nimport operator\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n n = 50\n count = 0\n for red in range(0, n + 1, 2):\n for green in range(0, n + 1 - red, 3):\n for blue in range(0, n + 1 - red - green, 4):\n black = n - red - green - blue\n blocks = black + red / 2 + green / 3 + blue / 4\n denominator = reduce(operator.mul, [factorial(black),\n factorial(red / 2),\n factorial(green / 3),\n factorial(blue / 4)])\n count += factorial(blocks) / denominator\n return count\n\nif __name__ == '__main__':\n print euler_timer(117)(main)(verbose=True)\n", "id": "3273170", "language": "Python", "matching_score": 2.3807311058044434, "max_stars_count": 7, "path": "python/complete/no117.py" }, { "content": "#!/usr/bin/env python\n\nfrom math import factorial\n\nfrom python.decorators import euler_timer\n\n\ndef main(verbose=False):\n n = 50\n count = 0\n for red in range(2, n + 1, 2):\n black = n - red\n blocks = black + red / 
2\n count += factorial(blocks) / (factorial(black) * factorial(red / 2))\n for green in range(3, n + 1, 3):\n black = n - green\n blocks = black + green / 3\n count += factorial(blocks) / (factorial(black) * factorial(green / 3))\n for blue in range(4, n + 1, 4):\n black = n - blue\n blocks = black + blue / 4\n count += factorial(blocks) / (factorial(black) * factorial(blue / 4))\n return count\n\nif __name__ == '__main__':\n print euler_timer(116)(main)(verbose=True)\n", "id": "10185975", "language": "Python", "matching_score": 0.21701794862747192, "max_stars_count": 7, "path": "python/complete/no116.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Evaluates a polynomial in Bernstein form via the `VS method`_.\n\n.. _VS method: https://doi.org/10.1016/0167-8396(86)90018-X\n.. _survey: http://dx.doi.org/10.1016/j.amc.2015.08.086\n\nThis method has been described in the 1D case in a `survey`_\npaper that followed the original formulation.\n\"\"\"\n\nimport fractions\nimport math\n\nimport eft\n\n\ndef binomial(n, k):\n numerator = math.factorial(n)\n denominator = math.factorial(k) * math.factorial(n - k)\n result = fractions.Fraction(numerator, denominator)\n if float(result) != result:\n raise ValueError(n, k)\n return float(result)\n\n\ndef basic(s, coeffs):\n n = len(coeffs) - 1\n r = 1.0 - s\n\n result = coeffs[0]\n\n s_pow = 1.0\n for j in range(1, n + 1):\n s_pow = s * s_pow\n binom_val = binomial(n, j)\n result = r * result + binom_val * s_pow * coeffs[j]\n\n return result\n\n\ndef compensated(s, coeffs):\n n = len(coeffs) - 1\n r, rho = eft.add_eft(1.0, -s)\n\n pk = coeffs[0]\n dpk = 0.0\n\n s_pow = 1.0\n ds = 0.0\n for j in range(1, n + 1):\n # Update ``s^k``, using\n # s^k = s_pow + ds ==> s^{k + 1} = s(s_pow) + s(ds)\n s_pow, pi_s = eft.multiply_eft(s, s_pow)\n ds = s * ds + pi_s\n # Now, update ``pk`` and ``dpk``.\n P1, pi1 = eft.multiply_eft(r, pk)\n local_err = pi1 + rho * pk\n P2, pi2 = eft.multiply_eft(coeffs[j], binomial(n, j))\n local_err += P2 * ds + pi2 * s_pow\n P3, pi3 = eft.multiply_eft(P2, s_pow)\n local_err += pi3\n pk, sigma4 = eft.add_eft(P1, P3)\n local_err += sigma4\n dpk = r * dpk + local_err\n\n return pk + dpk\n", "id": "8184072", "language": "Python", "matching_score": 3.081354856491089, "max_stars_count": 2, "path": "src/vs_method.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Evaluates a polynomial in Bernstein form via the `VS method`_.\n\n.. 
_VS method: https://doi.org/10.1016/0167-8396(86)90018-X\n.. _survey: http://dx.doi.org/10.1016/j.amc.2015.08.086\n\nThis method has been described in the 1D case in a `survey`_\npaper that followed the original formulation.\n\"\"\"\n\nimport utils\n\n\ndef basic(s, coeffs):\n n = len(coeffs) - 1\n r = 1.0 - s\n if s >= 0.5:\n sigma = r / s\n multiplier = s\n else:\n sigma = s / r\n multiplier = r\n coeffs = coeffs[::-1]\n\n result = coeffs[0]\n for j in range(1, n + 1):\n binom_val = utils.binomial(n, j)\n modified_coeff = binom_val * coeffs[j]\n result = result * sigma + modified_coeff\n\n for _ in range(n):\n result = multiplier * result\n\n return result\n", "id": "6946691", "language": "Python", "matching_score": 0.6107862591743469, "max_stars_count": 1, "path": "src/vs_method.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport utils\n\n\nF = fractions.Fraction\nGAMMA15 = utils.gamma(15)\nALPHA = 0.5\n\n\ndef bound4(N):\n r\"\"\"Compute an a priori error bound.\n\n Will be :math:`\\gamma_{15} \\widetilde{p}(s) / p(s)` where\n :math:`p(s) = (1 - 4s)^5 = [(1 - s) - 3s]^n` and\n :math:`\\widetilde{p}(s) = [(1 - s) + 3s]^n = (1 + 2s)^5`\n\n Evaluations at the point :math:`s = a_N = 1/4 + 6/(16N)`.\n \"\"\"\n aN = F(1, 4) + F(6, 16 * N)\n p_exact = (1 - 4 * aN) ** 5\n ptilde_exact = (1 + 2 * aN) ** 5\n bound = GAMMA15 * ptilde_exact / p_exact\n return float(abs(bound))\n\n\ndef error4(N):\n aN = F(1, 4) + F(6, 16 * N)\n coeffs = (1, -3, 9, -27, 81, -243)\n p_exact = de_casteljau.basic(aN, coeffs)\n p_computed = de_casteljau.basic(float(aN), coeffs)\n err_exact = abs((F(p_computed) - p_exact) / p_exact)\n return float(err_exact)\n\n\ndef bound5(N):\n r\"\"\"Compute an a priori error bound.\n\n Will be :math:`\\gamma_{15} \\widetilde{p}(s) / p(s)` where\n :math:`p(s) = (1 - 5s)^5 = [(1 - s) - 4s]^n` and\n :math:`\\widetilde{p}(s) = [(1 - s) + 4s]^n = (1 + 3s)^5`\n\n Evaluations at the point :math:`s = b_N = 1/5 + 8/(25N)`.\n \"\"\"\n bN = F(1, 5) + F(8, 25 * N)\n p_exact = (1 - 5 * bN) ** 5\n ptilde_exact = (1 + 3 * bN) ** 5\n bound = GAMMA15 * ptilde_exact / p_exact\n return float(abs(bound))\n\n\ndef error5(N):\n bN = F(1, 5) + F(8, 25 * N)\n coeffs = (1, -4, 16, -64, 256, -1024)\n p_exact = de_casteljau.basic(bN, coeffs)\n p_computed = de_casteljau.basic(float(bN), coeffs)\n err_exact = abs((F(p_computed) - p_exact) / p_exact)\n return float(err_exact)\n\n\ndef bound6(N):\n r\"\"\"Compute an a priori error bound.\n\n Will be :math:`\\gamma_{15} \\widetilde{p}(s) / p(s)` where\n :math:`p(s) = (1 - 6s)^5 = [(1 - s) - 5s]^n` and\n :math:`\\widetilde{p}(s) = [(1 - s) + 5s]^n = (1 + 4s)^5`\n\n Evaluations at the point :math:`s = c_N = 1/6 + 10/(36N)`.\n \"\"\"\n cN = F(1, 6) + F(10, 36 * N)\n p_exact = (1 - 6 * cN) ** 5\n ptilde_exact = (1 + 4 * cN) ** 5\n bound = GAMMA15 * ptilde_exact / p_exact\n return float(abs(bound))\n\n\ndef error6(N):\n cN = F(1, 6) + F(10, 36 * N)\n 
coeffs = (1, -5, 25, -125, 625, -3125)\n p_exact = de_casteljau.basic(cN, coeffs)\n p_computed = de_casteljau.basic(float(cN), coeffs)\n err_exact = abs((F(p_computed) - p_exact) / p_exact)\n return float(err_exact)\n\n\ndef main(filename=None):\n figure = plt.figure()\n ax = figure.gca()\n\n bounds = []\n for exponent in range(1, 45 + 1):\n N = 2.1 ** exponent\n fN = F(N)\n bounds.append(\n (\n N,\n bound4(fN),\n bound5(fN),\n bound6(fN),\n error4(fN),\n error5(fN),\n error6(fN),\n )\n )\n\n bounds = np.array(bounds)\n ax.loglog(\n bounds[:, 0], bounds[:, 1], alpha=ALPHA, color=\"black\", label=\"Bound\"\n )\n # NOTE: We intentionally omit bound5() and bound6() since they are\n # essentially identical.\n ax.loglog(\n bounds[:, 0],\n bounds[:, 4],\n marker=\"o\",\n linestyle=\"none\",\n markersize=7,\n markeredgewidth=1,\n markerfacecolor=\"none\",\n label=\"$u(s)$\",\n )\n ax.loglog(\n bounds[:, 0],\n bounds[:, 5],\n marker=\"d\",\n linestyle=\"none\",\n label=\"$v(s)$\",\n )\n ax.loglog(\n bounds[:, 0],\n bounds[:, 6],\n marker=\"o\",\n linestyle=\"none\",\n markersize=4,\n color=\"black\",\n zorder=2,\n label=\"$w(s)$\",\n )\n # Label the axes.\n ax.set_xlabel(\"$N$\")\n ax.set_ylabel(\"Relative Forward Error\")\n # Add the legend.\n ax.legend(framealpha=1.0, frameon=True)\n # Set the major x- and y-ticks.\n ax.set_xticks([1e0, 1e3, 1e6, 1e9, 1e12])\n ax.set_yticks([1e-15, 1e0, 1e15, 1e30, 1e45, 1e60])\n\n if filename is None:\n plt.show()\n else:\n path = utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n utils.set_styles()\n main(filename=\"against_a_priori.pdf\")\n", "id": "3727368", "language": "Python", "matching_score": 4.8612236976623535, "max_stars_count": 1, "path": "scripts/curious_intro.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fractions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport de_casteljau\nimport utils\n\n\nF = fractions.Fraction\nGAMMA3 = utils.gamma(3)\nALPHA = 0.5\n\n\ndef custom_de_casteljau(s, n):\n \"\"\"Evaluate :math:`(1 - 5s)^n` in a \"special\" way.\n\n Does so via ``p{n} = 1``, ``p{k} = (1 - s) p{k+1} - 4s p{k + 1}``.\n \"\"\"\n p = 1\n r = 1 - s\n scaled = 4 * s\n for _ in range(n):\n p = r * p - scaled * p\n return p\n\n\ndef abs_phi(s):\n s = F(s)\n return abs((1 + 3 * s) / (1 - 5 * s))\n\n\ndef bounds_curbed(s, n):\n phi = abs_phi(s)\n bound1 = utils.gamma(3 * n) * phi ** n\n bound2 = (1 + phi * GAMMA3) ** n - 1\n if bound1 <= 0:\n raise ValueError(bound1, float(bound1))\n if bound2 <= 0:\n raise ValueError(bound2, float(bound2))\n\n computed_p = custom_de_casteljau(s, n)\n exact_p = custom_de_casteljau(F(s), n)\n if not isinstance(exact_p, F):\n raise TypeError(exact_p)\n observed_err = abs((computed_p - exact_p) / exact_p)\n\n return float(bound1), float(bound2), float(observed_err)\n\n\ndef main(filename=None):\n bound_vals = []\n\n for exponent in range(1, 45 + 1):\n N = 2.1 ** 
exponent\n bN = F(1, 5) + F(8, 25 * F(N))\n bound1, bound2, observed_err = bounds_curbed(float(bN), 5)\n bound_vals.append((N, bound1, bound2, observed_err))\n\n bound_vals = np.array(bound_vals)\n\n figure = plt.figure()\n ax = figure.gca()\n # Add the \"curbed\" plot.\n ax.loglog(\n bound_vals[:, 0],\n bound_vals[:, 1],\n color=\"black\",\n alpha=ALPHA,\n linestyle=\":\",\n label=r\"Na\\\"ive Bound\",\n )\n ax.loglog(\n bound_vals[:, 0],\n bound_vals[:, 2],\n color=\"black\",\n alpha=ALPHA,\n label=\"Improved Bound\",\n )\n ax.loglog(\n bound_vals[:, 0],\n bound_vals[:, 3],\n marker=\"o\",\n linestyle=\"none\",\n markersize=5,\n label=\"Observed Error\",\n )\n ax.legend(framealpha=1.0, frameon=True)\n # Label the axes.\n ax.set_xlabel(\"$N$\")\n ax.set_ylabel(\"Relative Forward Error\")\n # Set the major x- and y-ticks.\n ax.set_xticks([1e0, 1e3, 1e6, 1e9, 1e12])\n ax.set_yticks([1e-15, 1e0, 1e15, 1e30, 1e45, 1e60])\n\n if filename is None:\n plt.show()\n else:\n path = utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n utils.set_styles()\n main(filename=\"curbed_condition.pdf\")\n", "id": "4706312", "language": "Python", "matching_score": 1.399492621421814, "max_stars_count": 1, "path": "scripts/curbed_errors.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fractions\n\nimport matplotlib.pyplot as plt\n\nimport de_casteljau\nimport vs_method\nimport utils\n\n\nF = fractions.Fraction\n# f(s) = (s - 1/20)(s - 2/20) ... (s - 19/20)(s - 20/20)\nWILKINSON1 = (\n float.fromhex(\"0x1.8e9b4e661311ep-26\"),\n float.fromhex(\"-0x1.02de7c6051641p-24\"),\n float.fromhex(\"0x1.1e770f8cd0529p-23\"),\n float.fromhex(\"-0x1.1423646be98bbp-22\"),\n float.fromhex(\"0x1.d65c5a1952526p-22\"),\n float.fromhex(\"-0x1.654ea80cca589p-21\"),\n float.fromhex(\"0x1.e74f07afab5fdp-21\"),\n float.fromhex(\"-0x1.2b909657bbd25p-20\"),\n float.fromhex(\"0x1.4cd9eaa240078p-20\"),\n float.fromhex(\"-0x1.4e9588fcf799ep-20\"),\n float.fromhex(\"0x1.302ad9a026e8fp-20\"),\n float.fromhex(\"-0x1.f346dff3600b4p-21\"),\n float.fromhex(\"0x1.70b1f41d35ef2p-21\"),\n float.fromhex(\"-0x1.e74f07afab5fdp-22\"),\n float.fromhex(\"0x1.1dd88670a1e07p-22\"),\n float.fromhex(\"-0x1.25f9b84fd3738p-23\"),\n float.fromhex(\"0x1.03e5133863565p-24\"),\n float.fromhex(\"-0x1.7df414bbc06e1p-26\"),\n float.fromhex(\"0x1.b3fd7328f4de7p-28\"),\n float.fromhex(\"-0x1.3ee2a51e75a7fp-30\"),\n float.fromhex(\"0x0.0p+0\"),\n)\n# g(s) = (s - 2/2)(s - 2/4) ... 
(s - 2/2^{19})(s - 2/2^{20})\nWILKINSON2 = (\n float.fromhex(\"0x1.0000000000000p-190\"),\n float.fromhex(\"-0x1.9997800000000p-175\"),\n float.fromhex(\"0x1.cbe01cc2d7943p-160\"),\n float.fromhex(\"-0x1.5e5b5d5f563cfp-145\"),\n float.fromhex(\"0x1.5faf54aece13bp-131\"),\n float.fromhex(\"-0x1.c5acfc21c2483p-118\"),\n float.fromhex(\"0x1.70884aaeef9a9p-105\"),\n float.fromhex(\"-0x1.731dccc8a7da7p-93\"),\n float.fromhex(\"0x1.c9d249cd6378bp-82\"),\n float.fromhex(\"-0x1.57076b1c416fcp-71\"),\n float.fromhex(\"0x1.3678531b90eb4p-61\"),\n float.fromhex(\"-0x1.525677d4ef30bp-52\"),\n float.fromhex(\"0x1.bb4180365b8a2p-44\"),\n float.fromhex(\"-0x1.5cd503454aebdp-36\"),\n float.fromhex(\"0x1.49772c71e764ap-29\"),\n float.fromhex(\"-0x1.741a90baf536fp-23\"),\n float.fromhex(\"0x1.f16d53866846bp-18\"),\n float.fromhex(\"-0x1.7f99def2a0b19p-13\"),\n float.fromhex(\"0x1.401687e02e12fp-9\"),\n float.fromhex(\"-0x1.d926bcbd9b881p-7\"),\n float.fromhex(\"0x0.0p+0\"),\n)\n# h(s) = (s - 1/2)^{20} = [-1/2(1 - s) + 1/2s]^{20}\nMULTIPLE_ROOT = (\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n -0.5 ** 20,\n 0.5 ** 20,\n)\n\n\ndef do_plot(ax, s_vals, coeffs, title, add_legend=False, add_ylabel=False):\n bounds = []\n rel_errors1 = []\n rel_errors2 = []\n n = len(coeffs) - 1\n gamma3n = utils.gamma(3 * n)\n exact_coeffs = tuple(map(F, coeffs))\n abs_coeffs = tuple(map(abs, exact_coeffs))\n\n for s in s_vals:\n exact_s = F(s)\n exact_p = de_casteljau.basic(exact_s, exact_coeffs)\n if not isinstance(exact_p, F):\n raise TypeError(exact_p)\n\n exact_p_tilde = de_casteljau.basic(exact_s, abs_coeffs)\n a_priori_bound = gamma3n * exact_p_tilde / abs(exact_p)\n bounds.append(float(a_priori_bound))\n\n error1 = F(de_casteljau.basic(s, coeffs)) - exact_p\n rel_errors1.append(float(abs(error1 / exact_p)))\n\n error2 = F(vs_method.basic(s, coeffs)) - exact_p\n rel_errors2.append(float(abs(error2 / exact_p)))\n\n size = 5\n ax.semilogy(\n s_vals,\n bounds,\n marker=\"o\",\n markersize=size,\n linestyle=\":\",\n alpha=0.5,\n color=\"black\",\n label=\"Bound\",\n )\n ax.semilogy(\n s_vals,\n rel_errors1,\n marker=\"o\",\n markersize=size,\n label=r\"$\\mathtt{DeCasteljau}$\",\n )\n ax.semilogy(\n s_vals,\n rel_errors2,\n marker=\"s\",\n markersize=size,\n label=r\"$\\mathtt{VS}$\",\n )\n if add_legend:\n ax.legend(\n loc=\"lower center\",\n framealpha=1.0,\n frameon=True,\n markerscale=1.25,\n fontsize=16,\n )\n # Label the axes.\n ax.set_xlabel(\"$s$\", fontsize=20)\n if add_ylabel:\n ax.set_ylabel(\"Relative Forward Error\", fontsize=20)\n # Set the axis title.\n ax.set_title(title, fontsize=20)\n\n\ndef main(filename=None):\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3)\n\n s_vals1 = [(2 * i + 1) / 72.0 for i in range(35 + 1)]\n do_plot(\n ax1, s_vals1, WILKINSON1, \"$f(s)$\", add_legend=True, add_ylabel=True\n )\n s_vals2 = [i / 39.0 for i in range(1, 38 + 1)]\n do_plot(ax2, s_vals2, WILKINSON2, \"$g(s)$\")\n s_vals3 = [4 * i / 100.0 for i in range(1, 24 + 1)]\n do_plot(ax3, s_vals3, MULTIPLE_ROOT, \"$h(s)$\")\n\n if filename is None:\n plt.show()\n else:\n figure.set_size_inches(12.99, 5.33)\n figure.subplots_adjust(\n left=0.06,\n bottom=0.09,\n right=0.98,\n top=0.96,\n wspace=0.15,\n hspace=0.20,\n )\n path = utils.get_path(filename)\n figure.savefig(path, bbox_inches=\"tight\")\n print(\"Saved 
{}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n utils.set_styles()\n main(filename=\"compare_dp15.pdf\")\n", "id": "10758684", "language": "Python", "matching_score": 3.2057528495788574, "max_stars_count": 1, "path": "scripts/dp15.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Make a scatter plot of complex roots.\n\nThis will display the roots of :math:`p(s) = (1 - 5s)^n + 2^{d} (1 - 3s)^n`\n:math:`d = 30` and :math:`n` odd.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport mpmath\n\nimport plot_utils\n\n\ndef add_plot(ax, ctx, n, d):\n # (1 - 5s) = 2^d (3s - 1) = (1 + w) (3s - 1)\n w_vals = [ctx.root(2.0 ** d, n, k=k) - 1 for k in range(n)]\n x_vals = []\n y_vals = []\n for w in w_vals:\n root = (2 + w) / (8 + 3 * w)\n x_vals.append(plot_utils.to_float(root.real))\n y_vals.append(plot_utils.to_float(root.imag))\n\n ax.plot(x_vals, y_vals, marker=\"o\", markersize=2.0, linestyle=\"none\")\n ax.set_title(\"$n = {}$\".format(n), fontsize=plot_utils.TEXT_SIZE)\n\n\ndef main():\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3)\n\n ctx = mpmath.MPContext()\n ctx.prec = 500\n add_plot(ax1, ctx, 5, 30)\n add_plot(ax2, ctx, 15, 30)\n add_plot(ax3, ctx, 25, 30)\n\n for ax in (ax1, ax2, ax3):\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n\n filename = \"root_plots.pdf\"\n figure.set_size_inches(6.4, 2.4)\n figure.subplots_adjust(\n left=0.08, bottom=0.11, right=0.99, top=0.92, wspace=0.29, hspace=0.2\n )\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "973278", "language": "Python", "matching_score": 3.915269613265991, "max_stars_count": 2, "path": "scripts/compensated-newton/root_plots.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\ndef main():\n # u_t + u_x = 0\n # u(x, t) = u(x - t, 0)\n min_x = -1.5\n max_x = 3.5\n x_vals = np.linspace(min_x, max_x, 2048 + 1)\n # u(x, 0) = x^3\n u0 = x_vals * x_vals * x_vals\n u2 = (x_vals - 2.0) * (x_vals - 2.0) * (x_vals - 2.0)\n\n figure = plt.figure()\n ax = figure.gca()\n\n ax.plot(x_vals, u0, label=\"$u(x, 0)$\", color=plot_utils.BLUE)\n ax.plot(x_vals, u2, label=\"$u(x, 2)$\", color=plot_utils.GREEN)\n ax.annotate(\n \"\",\n xy=(2.875, 0.75),\n xytext=(1.0, 0.75),\n arrowprops={\"arrowstyle\": \"->\", \"linewidth\": 2.0, 
\"color\": \"black\"},\n )\n\n ax.legend(loc=\"upper left\", fontsize=plot_utils.TEXT_SIZE)\n ax.axis(\"scaled\")\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(-1.1, 1.1)\n ax.set_xlabel(\"$x$\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_ylabel(\"$u$\", rotation=0, fontsize=plot_utils.TEXT_SIZE)\n ax.xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n figure.set_size_inches(4.8, 2.4)\n figure.subplots_adjust(\n left=0.12, bottom=0.1, right=0.99, top=1.04, wspace=0.2, hspace=0.2\n )\n filename = \"simple_transport.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "9215313", "language": "Python", "matching_score": 2.6539382934570312, "max_stars_count": 2, "path": "scripts/solution-transfer/simple_transport.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fractions\n\nimport bezier\nimport matplotlib.patches\nimport matplotlib.path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shapely.geometry\n\nimport plot_utils\n\n\nF = fractions.Fraction\n\n\ndef shoelace_for_area(nodes):\n _, num_nodes = nodes.shape\n if num_nodes == 3:\n shoelace = ((2, 0, 1), (1, 0, 2), (2, 1, 2))\n scale_factor = 6.0\n else:\n raise NotImplementedError\n\n result = 0.0\n for multiplier, index1, index2 in shoelace:\n result += multiplier * (\n nodes[0, index1] * nodes[1, index2]\n - nodes[1, index1] * nodes[0, index2]\n )\n\n return result / scale_factor\n\n\ndef compute_area(*edges):\n area = 0.0\n for edge in edges:\n area += shoelace_for_area(edge.nodes)\n return area\n\n\ndef edges_to_polygon(edge1, edge2, edge3, s_vals):\n # NOTE: This assumes ``s_vals[0] == 0.0`` and ``s_vals[-1] < 1.0``.\n N = len(s_vals)\n polygon_nodes = np.empty((2, 3 * N), order=\"F\")\n polygon_nodes[:, :N] = edge1.evaluate_multi(s_vals)\n polygon_nodes[:, N : 2 * N] = edge2.evaluate_multi(s_vals)\n polygon_nodes[:, 2 * N :] = edge3.evaluate_multi(s_vals)\n return polygon_nodes\n\n\ndef bezier_triangle_area():\n nodes = np.asfortranarray(\n [\n [0.0, 0.5, 1.0, 0.125, 0.375, 0.25],\n [0.0, 0.0, 0.25, 0.5, 0.375, 1.0],\n ]\n )\n surface = bezier.Surface.from_nodes(nodes)\n exact_area = F(37, 96)\n edge1, edge2, edge3 = surface.edges\n\n figure, all_axes = plt.subplots(2, 3)\n all_axes = all_axes.flatten()\n surface.plot(256, ax=all_axes[0])\n all_axes[0].set_title(\"Curved\", fontsize=plot_utils.TEXT_SIZE)\n\n error_vals = []\n for n in range(1, 20 + 1):\n N = 2 ** n\n s_vals = np.linspace(0.0, 1.0, N + 1)[:-1]\n polygon_nodes = edges_to_polygon(edge1, edge2, edge3, s_vals)\n polygon = shapely.geometry.Polygon(polygon_nodes.T)\n # Compute the relative error.\n poly_area = F(polygon.area)\n rel_error = abs(poly_area - exact_area) / exact_area\n error_vals.append((N, float(rel_error)))\n\n if n in (1, 2, 3, 4):\n ax = all_axes[n]\n # Wrap-around the first node so the 
polygon is closed.\n polygon_nodes = np.hstack([polygon_nodes, polygon_nodes[:, :1]])\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(polygon_nodes.T), alpha=0.625\n )\n ax.add_patch(patch)\n ax.plot(\n polygon_nodes[0, :],\n polygon_nodes[1, :],\n marker=\"o\",\n markersize=4,\n )\n ax.set_title(\"$N = {:d}$\".format(N), fontsize=plot_utils.TEXT_SIZE)\n\n for ax in all_axes[:5]:\n ax.axis(\"equal\")\n ax.set_xlim(-0.05, 1.05)\n ax.set_ylim(-0.05, 1.05)\n for ax in all_axes[:3]:\n ax.set_xticklabels([])\n for ax in all_axes[(1, 2, 4),]:\n ax.set_yticklabels([])\n\n error_vals = np.array(error_vals)\n ax = all_axes[5]\n ax.loglog(\n error_vals[:, 0],\n error_vals[:, 1],\n basex=2,\n marker=\"o\",\n markersize=4,\n label=\"Polygonal\",\n )\n surface_area = F(compute_area(edge1, edge2, edge3))\n curved_rel_error = float(abs(exact_area - surface_area) / exact_area)\n ax.loglog(\n [error_vals[0, 0], error_vals[-1, 0]],\n [curved_rel_error, curved_rel_error],\n basex=2,\n color=\"black\",\n linestyle=\"dashed\",\n label=\"Curved\",\n )\n ax.legend(loc=\"lower left\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_title(\"Area Estimates\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_xlabel(\n \"Line Segments per Side ($N$)\", fontsize=plot_utils.TEXT_SIZE\n )\n ax.set_ylabel(\"Relative Error\", fontsize=plot_utils.TEXT_SIZE)\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n\n all_axes[0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[5].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[4].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[5].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n all_axes[3].set_xticks([0.1, 0.5, 0.9])\n all_axes[4].set_xticks([0.1, 0.5, 0.9])\n all_axes[5].set_xticks(\n [2.0 ** 4, 2.0 ** 8, 2.0 ** 12, 2.0 ** 16, 2.0 ** 20]\n )\n\n figure.set_size_inches(6.0, 4.2)\n figure.subplots_adjust(\n left=0.06, bottom=0.11, right=0.9, top=0.95, wspace=0.03, hspace=0.15\n )\n filename = \"polygon_vs_curved.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef contains_column(vec, mat):\n d, = vec.shape\n rows, cols = mat.shape\n if rows != d:\n raise ValueError(mat.shape, mat, vec)\n broadcast_eq = vec.reshape((d, 1)) == mat\n eq_by_col = np.all(broadcast_eq, axis=0)\n return np.any(eq_by_col)\n\n\ndef intersection_area():\n # NOTE: These are surfaces 30Q and 31Q from the ``bezier`` project.\n nodes1 = np.asfortranarray(\n [\n [-0.25, 0.1875, 0.625, -0.25, 0.1875, -0.25],\n [-0.25, -0.25, -0.25, 0.1875, 0.1875, 0.625],\n ]\n )\n surface1 = bezier.Surface.from_nodes(nodes1)\n edge11, edge12, edge13 = surface1.edges\n nodes2 = np.asfortranarray(\n [\n [0.0, 0.5, 1.0, 0.0625, 0.4375, -0.125],\n [-1.0, -0.5, 0.0, -0.5, 0.0, 0.0],\n ]\n )\n surface2 = bezier.Surface.from_nodes(nodes2)\n edge21, edge22, edge23 = surface2.edges\n\n exact_area = F(7, 48)\n\n figure, all_axes = plt.subplots(2, 3)\n all_axes = all_axes.flatten()\n\n ax = all_axes[0]\n intersection, = surface1.intersect(surface2)\n surface1.plot(256, ax=ax, color=plot_utils.BLUE)\n surface2.plot(256, ax=ax, color=plot_utils.GREEN)\n intersection.plot(256, ax=ax, color=plot_utils.RED)\n ax.set_title(\"Curved\", fontsize=plot_utils.TEXT_SIZE)\n\n error_vals = []\n for n in range(1, 20 + 1):\n N = 2 ** n\n s_vals = 
np.linspace(0.0, 1.0, N + 1)[:-1]\n polygon_nodes1 = edges_to_polygon(edge11, edge12, edge13, s_vals)\n polygon1 = shapely.geometry.Polygon(polygon_nodes1.T)\n polygon_nodes2 = edges_to_polygon(edge21, edge22, edge23, s_vals)\n polygon2 = shapely.geometry.Polygon(polygon_nodes2.T)\n\n poly_intersect = polygon1.intersection(polygon2)\n if not isinstance(poly_intersect, shapely.geometry.Polygon):\n raise TypeError(poly_intersect)\n # Compute the relative error.\n approximate_area = F(poly_intersect.area)\n rel_error = abs(approximate_area - exact_area) / exact_area\n error_vals.append((N, float(rel_error)))\n # Plot the \"approximate\" surfaces as well as their\n # polygonal intersection.\n if n in (1, 2, 3, 4):\n ax = all_axes[n]\n for polygon_nodes, color in (\n (polygon_nodes1, plot_utils.BLUE),\n (polygon_nodes2, plot_utils.GREEN),\n ):\n # Wrap-around the first node so the polygon is closed.\n polygon_nodes = np.hstack(\n [polygon_nodes, polygon_nodes[:, :1]]\n )\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(polygon_nodes.T),\n alpha=0.625,\n color=color,\n )\n ax.add_patch(patch)\n ax.plot(\n polygon_nodes[0, :],\n polygon_nodes[1, :],\n color=color,\n marker=\"o\",\n markersize=4,\n )\n boundary_x, boundary_y = poly_intersect.exterior.coords.xy\n boundary = np.empty((len(boundary_x), 2))\n boundary[:, 0] = boundary_x\n boundary[:, 1] = boundary_y\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(boundary),\n alpha=0.625,\n color=plot_utils.RED,\n )\n new_nodes = []\n for i, node in enumerate(boundary):\n if not (\n contains_column(node, polygon_nodes1)\n or contains_column(node, polygon_nodes2)\n ):\n new_nodes.append(i)\n\n if new_nodes:\n ax.plot(\n boundary[new_nodes, 0],\n boundary[new_nodes, 1],\n color=plot_utils.RED,\n marker=\"o\",\n markersize=4,\n linestyle=\"none\",\n )\n ax.plot(\n boundary[:-1, 0],\n boundary[:-1, 1],\n color=plot_utils.RED,\n marker=\"o\",\n markersize=4,\n markeredgewidth=1,\n markerfacecolor=\"none\",\n linestyle=\"none\",\n )\n ax.add_patch(patch)\n ax.set_title(\"$N = {:d}$\".format(N), fontsize=plot_utils.TEXT_SIZE)\n\n for ax in all_axes[:5]:\n ax.axis(\"equal\")\n ax.set_xlim(-0.3125, 1.0625)\n ax.set_ylim(-1.08125, 0.70625)\n for ax in all_axes[:3]:\n ax.set_xticklabels([])\n for ax in all_axes[(1, 2, 4),]:\n ax.set_yticklabels([])\n\n error_vals = np.array(error_vals)\n ax = all_axes[5]\n ax.loglog(\n error_vals[:, 0],\n error_vals[:, 1],\n basex=2,\n marker=\"o\",\n markersize=4,\n label=\"Polygonal\",\n )\n curved_poly_area = F(compute_area(*intersection._edges))\n curved_rel_error = float(abs(exact_area - curved_poly_area) / exact_area)\n ax.loglog(\n [error_vals[0, 0], error_vals[-1, 0]],\n [curved_rel_error, curved_rel_error],\n basex=2,\n color=\"black\",\n linestyle=\"dashed\",\n label=\"Curved\",\n )\n ax.legend(loc=\"lower left\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_title(\"Intersection Area Estimates\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_xlabel(\n \"Line Segments per Side ($N$)\", fontsize=plot_utils.TEXT_SIZE\n )\n ax.set_ylabel(\"Relative Error\", fontsize=plot_utils.TEXT_SIZE)\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n\n all_axes[0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[5].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[4].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n 
all_axes[5].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n all_axes[3].set_xticks([-0.4, 0.1, 0.6, 1.1])\n all_axes[4].set_xticks([-0.4, 0.1, 0.6, 1.1])\n all_axes[5].set_xticks(\n [2.0 ** 4, 2.0 ** 8, 2.0 ** 12, 2.0 ** 16, 2.0 ** 20]\n )\n\n figure.set_size_inches(6.0, 4.2)\n figure.subplots_adjust(\n left=0.08, bottom=0.11, right=0.9, top=0.95, wspace=0.03, hspace=0.15\n )\n filename = \"polygon_vs_curved_intersection.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n bezier_triangle_area()\n intersection_area()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "8460608", "language": "Python", "matching_score": 6.756405353546143, "max_stars_count": 2, "path": "scripts/solution-transfer/polygon_vs_curved.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is mostly copied from elsewhere.\n\nIn particular: ``scripts/solution-transfer/polygon_vs_curved.py``.\n\"\"\"\n\nimport fractions\n\nimport bezier\nimport matplotlib.patches\nimport matplotlib.path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shapely.geometry\n\nimport plot_utils\n\n\nF = fractions.Fraction\n\n\ndef shoelace_for_area(nodes):\n _, num_nodes = nodes.shape\n if num_nodes == 3:\n shoelace = ((2, 0, 1), (1, 0, 2), (2, 1, 2))\n scale_factor = 6.0\n else:\n raise NotImplementedError\n\n result = 0.0\n for multiplier, index1, index2 in shoelace:\n result += multiplier * (\n nodes[0, index1] * nodes[1, index2]\n - nodes[1, index1] * nodes[0, index2]\n )\n\n return result / scale_factor\n\n\ndef compute_area(*edges):\n area = 0.0\n for edge in edges:\n area += shoelace_for_area(edge.nodes)\n return area\n\n\ndef edges_to_polygon(edge1, edge2, edge3, s_vals):\n # NOTE: This assumes ``s_vals[0] == 0.0`` and ``s_vals[-1] < 1.0``.\n N = len(s_vals)\n polygon_nodes = np.empty((2, 3 * N), order=\"F\")\n polygon_nodes[:, :N] = edge1.evaluate_multi(s_vals)\n polygon_nodes[:, N : 2 * N] = edge2.evaluate_multi(s_vals)\n polygon_nodes[:, 2 * N :] = edge3.evaluate_multi(s_vals)\n return polygon_nodes\n\n\ndef bezier_triangle_area(slide_num):\n nodes = np.asfortranarray(\n [\n [0.0, 0.5, 1.0, 0.125, 0.375, 0.25],\n [0.0, 0.0, 0.25, 0.5, 0.375, 1.0],\n ]\n )\n surface = bezier.Surface.from_nodes(nodes)\n exact_area = F(37, 96)\n edge1, edge2, edge3 = surface.edges\n\n figure, all_axes = plt.subplots(2, 3)\n all_axes = all_axes.flatten()\n surface.plot(256, ax=all_axes[0])\n all_axes[0].set_title(\"Curved\", fontsize=plot_utils.TEXT_SIZE)\n\n error_vals = []\n for n in range(1, 20 + 1):\n N = 2 ** n\n s_vals = np.linspace(0.0, 1.0, N + 1)[:-1]\n polygon_nodes = edges_to_polygon(edge1, edge2, edge3, s_vals)\n polygon = shapely.geometry.Polygon(polygon_nodes.T)\n # Compute the relative error.\n poly_area = F(polygon.area)\n rel_error = abs(poly_area - exact_area) / exact_area\n error_vals.append((N, float(rel_error)))\n\n if n in (1, 2, 3, 4):\n ax 
= all_axes[n]\n # Wrap-around the first node so the polygon is closed.\n polygon_nodes = np.hstack([polygon_nodes, polygon_nodes[:, :1]])\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(polygon_nodes.T), alpha=0.625\n )\n ax.add_patch(patch)\n ax.plot(\n polygon_nodes[0, :],\n polygon_nodes[1, :],\n marker=\"o\",\n markersize=4,\n )\n ax.set_title(\"$N = {:d}$\".format(N), fontsize=plot_utils.TEXT_SIZE)\n\n for ax in all_axes[:5]:\n ax.axis(\"equal\")\n ax.set_xlim(-0.05, 1.05)\n ax.set_ylim(-0.05, 1.05)\n for ax in all_axes[:3]:\n ax.set_xticklabels([])\n for ax in all_axes[(1, 2, 4),]:\n ax.set_yticklabels([])\n\n error_vals = np.array(error_vals)\n ax = all_axes[5]\n line1, = ax.loglog(\n error_vals[:, 0],\n error_vals[:, 1],\n basex=2,\n color=plot_utils.BLUE,\n marker=\"o\",\n markersize=4,\n label=\"Polygonal\",\n )\n if slide_num < 5:\n line1.set_visible(False)\n ax.loglog(\n error_vals[:slide_num, 0],\n error_vals[:slide_num, 1],\n basex=2,\n color=plot_utils.BLUE,\n marker=\"o\",\n markersize=4,\n label=\"Polygonal\",\n )\n\n surface_area = F(compute_area(edge1, edge2, edge3))\n curved_rel_error = float(abs(exact_area - surface_area) / exact_area)\n line2, = ax.loglog(\n [error_vals[0, 0], error_vals[-1, 0]],\n [curved_rel_error, curved_rel_error],\n basex=2,\n color=\"black\",\n linestyle=\"dashed\",\n label=\"Curved\",\n )\n if slide_num != 6:\n line2.set_visible(False)\n\n ax.set_title(\"Area Estimates\", fontsize=plot_utils.TEXT_SIZE)\n ax.set_xlabel(\n \"Line Segments per Side ($N$)\", fontsize=plot_utils.TEXT_SIZE\n )\n ax.set_ylabel(\"Relative Error\", fontsize=plot_utils.TEXT_SIZE)\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n\n all_axes[0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[5].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[3].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[4].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[5].xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n all_axes[3].set_xticks([0.1, 0.5, 0.9])\n all_axes[4].set_xticks([0.1, 0.5, 0.9])\n all_axes[5].set_xticks(\n [2.0 ** 4, 2.0 ** 8, 2.0 ** 12, 2.0 ** 16, 2.0 ** 20]\n )\n\n if slide_num < 6:\n all_axes[0].set_visible(False)\n if slide_num < 4:\n all_axes[4].set_visible(False)\n if slide_num < 3:\n all_axes[3].set_visible(False)\n if slide_num < 2:\n all_axes[2].set_visible(False)\n\n figure.set_size_inches(6.0, 4.2)\n figure.subplots_adjust(\n left=0.06, bottom=0.11, right=0.9, top=0.95, wspace=0.03, hspace=0.15\n )\n filename = \"polygon_vs_curved{}.pdf\".format(slide_num)\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n bezier_triangle_area(1)\n bezier_triangle_area(2)\n bezier_triangle_area(3)\n bezier_triangle_area(4)\n bezier_triangle_area(5)\n bezier_triangle_area(6)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "6600163", "language": "Python", "matching_score": 4.388790607452393, "max_stars_count": 2, "path": "scripts/slides/polygon_vs_curved.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# 
distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bezier\nimport matplotlib.patches\nimport matplotlib.path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\nALPHA = 0.375\n# b(s, t) = [(1 - s - t)^2 + t^2, s^2 + t^2]\nNODES = np.asfortranarray(\n [[1.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 1.0]]\n)\n\n\ndef boundary_leak(slide_num):\n bez_triangle = bezier.Surface(NODES, degree=2, _copy=False)\n figure, ((ax1, ax2), (ax3, ax4)) = plt.subplots(\n 2, 2, sharex=True, sharey=True\n )\n # Add reference triangle to both top subplots.\n for ax in (ax1, ax2):\n if slide_num == 1:\n ax.plot([0, 1], [0, 0], color=plot_utils.BLUE)\n ax.plot([1, 0], [0, 1], color=\"black\", alpha=ALPHA)\n ax.plot([0, 0], [1, 0], color=\"black\", alpha=ALPHA)\n elif slide_num == 2:\n ax.plot([0, 1], [0, 0], color=\"black\", alpha=ALPHA)\n ax.plot([1, 0], [0, 1], color=plot_utils.BLUE)\n ax.plot([0, 0], [1, 0], color=\"black\", alpha=ALPHA)\n elif slide_num == 3:\n ax.plot([0, 1], [0, 0], color=\"black\", alpha=ALPHA)\n ax.plot([1, 0], [0, 1], color=\"black\", alpha=ALPHA)\n ax.plot([0, 0], [1, 0], color=plot_utils.BLUE)\n else:\n ax.plot([0, 1], [0, 0], color=\"black\", alpha=ALPHA)\n ax.plot([1, 0], [0, 1], color=\"black\", alpha=ALPHA)\n ax.plot([0, 0], [1, 0], color=\"black\", alpha=ALPHA)\n\n # Add the \"wrong\" triangle to both bottom subplots.\n edge1, edge2, edge3 = bez_triangle.edges\n for ax in (ax3, ax4):\n bez_triangle.plot(256, ax=ax, color=\"black\")\n ax.patches[-1].set_alpha(ALPHA)\n # Remove the black boundary and add colored.\n ax.lines.pop()\n if slide_num == 1:\n edge1.plot(256, ax=ax, color=plot_utils.BLUE)\n elif slide_num == 2:\n edge2.plot(256, ax=ax, color=plot_utils.BLUE)\n elif slide_num == 3:\n edge3.plot(256, ax=ax, color=plot_utils.BLUE)\n\n # det(J) = 4[t^2 + (s - 1) t + (s - s^2)]\n sv = np.linspace(0.0, 1.0 / 5.0, 250)\n sqrt_part = np.sqrt((1.0 - sv) * (1.0 - 5.0 * sv))\n jacobian_zero = np.empty((499, 2))\n jacobian_zero[:250, 0] = sv\n jacobian_zero[:250, 1] = 0.5 * (1.0 - sv - sqrt_part)\n jacobian_zero[250:, 0] = sv[-2::-1]\n jacobian_zero[250:, 1] = 0.5 * (1.0 - sv[-2::-1] + sqrt_part[-2::-1])\n ax2.plot(\n jacobian_zero[:, 0],\n jacobian_zero[:, 1],\n color=\"black\",\n linestyle=\"dashed\",\n )\n # Now, compute the image of the det(J) = 0 curve under b(s, t).\n poly4 = np.empty((598, 2))\n sv = jacobian_zero[::-1, 0]\n tv = jacobian_zero[::-1, 1]\n poly4[:499, 0] = (1.0 - sv - tv) * (1.0 - sv - tv) + sv * sv\n poly4[:499, 1] = sv * sv + tv * tv\n ax4.plot(poly4[:499, 0], poly4[:499, 1], color=\"black\", linestyle=\"dashed\")\n # Combine this with the image b(0, t)\n tv = np.linspace(0.0, 1.0, 100)[1:]\n poly4[499:, 0] = (1.0 - tv) * (1.0 - tv)\n poly4[499:, 1] = tv * tv\n if slide_num == 6:\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly4), color=\"black\", alpha=1.5 * ALPHA\n )\n ax4.add_patch(patch)\n\n for ax in (ax1, ax2, ax3, ax4):\n ax.set_aspect(\"equal\")\n # One axis sets all axis\n ax1.set_xticklabels([])\n ax1.set_yticklabels([])\n\n if slide_num < 5:\n ax4.set_visible(False)\n if slide_num < 4:\n ax2.set_visible(False)\n\n figure.set_size_inches(3.2, 3.3)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.0\n )\n filename = 
\"boundary_leak{}.pdf\".format(slide_num)\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef inverted_element(num_columns):\n bez_triangle = bezier.Surface(NODES, degree=2, _copy=False)\n figure, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(\n 2, 3, sharex=True, sharey=True\n )\n for ax in (ax1, ax2, ax3):\n ax.plot([0, 1, 0, 0], [0, 0, 1, 0], color=\"black\")\n\n # The \"left\" edge b(0, t) = [(1 - t)^2 + t^2, t^2] lies on the algebraic\n # curve 4x = (1 + x - y)^2. Plugging b(s, t) into this algebraic curve\n # we find interior points as well.\n sv = np.linspace(0.0, 2.0 / 5.0, 250)\n sqrt_part = np.sqrt((2.0 - 5.0 * sv) / (2.0 - sv))\n poly1 = np.empty((501, 2))\n poly1[:250, 0] = sv\n poly1[:250, 1] = 0.5 * (1.0 - sv + sqrt_part)\n poly1[250:499, 0] = sv[-2::-1]\n poly1[250:499, 1] = 0.5 * (1.0 - sv[-2::-1] - sqrt_part[-2::-1])\n poly1[499, :] = 1.0, 0.0\n poly1[500, :] = 0.0, 1.0\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly1), color=plot_utils.GREEN\n )\n ax1.add_patch(patch)\n for ax in (ax2, ax3):\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly1), color=plot_utils.GREEN, alpha=ALPHA\n )\n ax.add_patch(patch)\n\n for ax in (ax4, ax5, ax6):\n bez_triangle.plot(256, ax=ax, color=plot_utils.GREEN)\n for edge in bez_triangle.edges:\n edge.plot(256, ax=ax, color=\"black\")\n ax4.patches[-1].set_alpha(1.0)\n ax5.patches[-1].set_alpha(ALPHA)\n ax6.patches[-1].set_alpha(ALPHA)\n\n # det(J) = 4[t^2 + (s - 1) t + (s - s^2)]\n sv = np.linspace(0.0, 1.0 / 5.0, 250)\n sqrt_part = np.sqrt((1.0 - sv) * (1.0 - 5.0 * sv))\n poly2 = np.empty((500, 2))\n poly2[:250, 0] = sv\n poly2[:250, 1] = 0.5 * (1.0 - sv - sqrt_part)\n poly2[250:499, 0] = sv[-2::-1]\n poly2[250:499, 1] = 0.5 * (1.0 - sv[-2::-1] + sqrt_part[-2::-1])\n poly2[499, :] = 0.0, 0.0\n for ax in (ax1, ax2, ax3):\n ax.plot(\n poly2[:499, 0], poly2[:499, 1], color=\"black\", linestyle=\"dashed\"\n )\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly2), color=plot_utils.RED\n )\n ax3.add_patch(patch)\n for ax in (ax1, ax2):\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly2), color=\"black\", alpha=ALPHA\n )\n ax.add_patch(patch)\n\n # Shared slice in between two curves.\n poly3 = np.empty((997, 2))\n poly3[:499, :] = poly2[498::-1, :]\n poly3[499:, :] = poly1[497::-1, :]\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly3), color=plot_utils.BLUE\n )\n ax2.add_patch(patch)\n for ax in (ax1, ax3):\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly3), color=\"black\", alpha=ALPHA\n )\n ax.add_patch(patch)\n\n # Now, compute the image of the det(J) = 0 curve under b(s, t).\n poly4 = np.empty((598, 2))\n sv = poly2[:499, 0][::-1]\n tv = poly2[:499, 1][::-1]\n poly4[:499, 0] = (1.0 - sv - tv) * (1.0 - sv - tv) + sv * sv\n poly4[:499, 1] = sv * sv + tv * tv\n for ax in (ax4, ax5, ax6):\n ax.plot(\n poly4[:499, 0], poly4[:499, 1], color=\"black\", linestyle=\"dashed\"\n )\n # Combine this with the image b(0, t)\n tv = np.linspace(0.0, 1.0, 100)[1:]\n poly4[499:, 0] = (1.0 - tv) * (1.0 - tv)\n poly4[499:, 1] = tv * tv\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly4), color=\"black\", alpha=ALPHA\n )\n ax4.add_patch(patch)\n for ax, color in ((ax5, plot_utils.BLUE), (ax6, plot_utils.RED)):\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(poly4), color=color\n )\n ax.add_patch(patch)\n\n for ax in (ax1, ax2, ax3, 
ax4, ax5, ax6):\n ax.set_aspect(\"equal\")\n # One axis sets all axis\n ax1.set_xticklabels([])\n ax1.set_yticklabels([])\n\n ax2.text(\n 0.3,\n 0.35,\n r\"$+$\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=16,\n fontweight=\"bold\",\n color=\"black\",\n )\n ax2.add_patch(\n matplotlib.patches.Circle(\n (0.3, 0.35),\n radius=0.06,\n fill=False,\n edgecolor=\"black\",\n linewidth=1.0,\n )\n )\n ax3.text(\n 0.1,\n 0.42,\n r\"$-$\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=16,\n fontweight=\"bold\",\n color=\"black\",\n )\n ax3.add_patch(\n matplotlib.patches.Circle(\n (0.1, 0.422),\n radius=0.06,\n fill=False,\n edgecolor=\"black\",\n linewidth=1.0,\n )\n )\n\n if num_columns < 3:\n ax3.set_visible(False)\n ax6.set_visible(False)\n if num_columns < 2:\n ax2.set_visible(False)\n ax5.set_visible(False)\n\n figure.set_size_inches(4.8, 3.3)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.0\n )\n filename = \"inverted_element{}.pdf\".format(num_columns)\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n boundary_leak(1)\n boundary_leak(2)\n boundary_leak(3)\n boundary_leak(4)\n boundary_leak(5)\n boundary_leak(6)\n inverted_element(1)\n inverted_element(2)\n inverted_element(3)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "6267112", "language": "Python", "matching_score": 4.250737190246582, "max_stars_count": 2, "path": "scripts/slides/inverted_element.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bezier\nimport bezier._geometric_intersection\nimport bezier._helpers\nimport matplotlib.patches\nimport matplotlib.path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\ndef simple_axis(ax):\n ax.axis(\"scaled\")\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n\ndef image1():\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)\n nodes = np.asfortranarray([[0.0, 1.0, 2.0, 4.0], [0.0, 4.0, 0.0, 3.0]])\n curve = bezier.Curve(nodes, degree=2)\n left, right = curve.subdivide()\n curve.plot(256, ax=ax1, alpha=0.25, color=\"black\")\n left.plot(256, ax=ax1)\n curve.plot(256, ax=ax2)\n curve.plot(256, ax=ax3, alpha=0.25, color=\"black\")\n right.plot(256, ax=ax3)\n ax1.text(\n 2.5,\n 0.25,\n r\"$\\left[0, \\frac{1}{2}\\right]$\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=plot_utils.TEXT_SIZE,\n )\n ax2.text(\n 2.5,\n 0.25,\n r\"$\\left[0, 1\\right]$\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=plot_utils.TEXT_SIZE,\n )\n ax3.text(\n 2.5,\n 0.25,\n r\"$\\left[\\frac{1}{2}, 1\\right]$\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=plot_utils.TEXT_SIZE,\n )\n\n for ax in (ax1, ax2, ax3):\n simple_axis(ax)\n\n figure.set_size_inches(6.0, 1.5)\n figure.subplots_adjust(\n left=0.01, 
bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.2\n )\n filename = \"subdivide_curve.pdf\"\n path = plot_utils.get_path(\"bezier-intersection\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef add_patch(curve, ax, color):\n left, right, bottom, top = bezier._helpers.bbox(curve.nodes)\n polygon = np.array(\n [\n [left, bottom],\n [right, bottom],\n [right, top],\n [left, top],\n [left, bottom],\n ]\n )\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(polygon), color=color, alpha=0.625\n )\n ax.add_patch(patch)\n\n\ndef plot_with_bbox(curve, ax, color):\n curve.plot(256, color=color, ax=ax)\n add_patch(curve, ax, color)\n\n\ndef bbox_intersect(curve1, curve2):\n enum_val = bezier._geometric_intersection.bbox_intersect(\n curve1.nodes, curve2.nodes\n )\n return enum_val != 2\n\n\ndef refine_candidates(left, right):\n new_left = []\n for curve in left:\n new_left.extend(curve.subdivide())\n\n new_right = []\n for curve in right:\n new_right.extend(curve.subdivide())\n\n keep_left = []\n keep_right = []\n for curve1 in new_left:\n for curve2 in new_right:\n if bbox_intersect(curve1, curve2):\n keep_left.append(curve1)\n if curve2 not in keep_right:\n keep_right.append(curve2)\n\n return keep_left, keep_right\n\n\ndef image2():\n nodes15 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])\n curve15 = bezier.Curve(nodes15, degree=2)\n nodes25 = np.asfortranarray([[0.0, 0.25, 0.75, 1.0], [0.5, 1.0, 1.5, 0.5]])\n curve25 = bezier.Curve(nodes25, degree=3)\n\n figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)\n ax1, ax2, ax3, ax4, ax5, ax6 = all_axes.flatten()\n\n color1 = plot_utils.BLUE\n color2 = plot_utils.GREEN\n plot_with_bbox(curve15, ax1, color1)\n plot_with_bbox(curve25, ax1, color2)\n\n left, right = refine_candidates([curve15], [curve25])\n for curve in left:\n plot_with_bbox(curve, ax2, color1)\n for curve in right:\n plot_with_bbox(curve, ax2, color2)\n\n for ax in (ax3, ax4, ax5, ax6):\n left, right = refine_candidates(left, right)\n curve15.plot(256, color=color1, alpha=0.5, ax=ax)\n for curve in left:\n plot_with_bbox(curve, ax, color=color1)\n curve25.plot(256, color=color2, alpha=0.5, ax=ax)\n for curve in right:\n plot_with_bbox(curve, ax, color2)\n\n for ax in (ax1, ax2, ax3, ax4, ax5, ax6):\n simple_axis(ax)\n ax.set_xlim(-0.05, 1.05)\n ax.set_ylim(0.4, 1.15)\n\n figure.set_size_inches(6.0, 2.8)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.04\n )\n filename = \"subdivision_process.pdf\"\n path = plot_utils.get_path(\"bezier-intersection\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef image3():\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3)\n\n control_pts1a = np.asfortranarray([[0.0, 0.375, 1.0], [0.0, 0.5, 0.125]])\n curve1a = bezier.Curve(control_pts1a, degree=2)\n control_pts1b = np.asfortranarray(\n [[0.25, -0.125, 0.5], [-0.125, 0.375, 1.0]]\n )\n curve1b = bezier.Curve(control_pts1b, degree=2)\n plot_with_bbox(curve1a, ax1, plot_utils.BLUE)\n plot_with_bbox(curve1b, ax1, plot_utils.GREEN)\n\n control_pts2a = np.asfortranarray([[0.0, 0.75, 1.0], [1.0, 0.75, 0.0]])\n curve2a = bezier.Curve(control_pts2a, degree=2)\n control_pts2b = np.asfortranarray(\n [[0.375, 0.625, 1.375], [1.375, 0.625, 0.375]]\n )\n curve2b = bezier.Curve(control_pts2b, degree=2)\n plot_with_bbox(curve2a, ax2, plot_utils.BLUE)\n plot_with_bbox(curve2b, ax2, plot_utils.GREEN)\n\n control_pts3a = 
np.asfortranarray([[0.0, 0.25, 1.0], [-0.25, 0.25, -0.75]])\n curve3a = bezier.Curve(control_pts3a, degree=2)\n control_pts3b = np.asfortranarray([[1.0, 1.5, 2.0], [-1.0, -1.5, -1.0]])\n curve3b = bezier.Curve(control_pts3b, degree=2)\n plot_with_bbox(curve3a, ax3, plot_utils.BLUE)\n plot_with_bbox(curve3b, ax3, plot_utils.GREEN)\n\n for ax in (ax1, ax2, ax3):\n simple_axis(ax)\n\n ax1.set_xlim(-0.2, 1.1)\n ax1.set_ylim(-0.2, 1.1)\n ax1.set_title(\"MAYBE\", fontsize=plot_utils.TEXT_SIZE)\n ax2.set_xlim(-0.1, 1.5)\n ax2.set_ylim(-0.1, 1.5)\n ax2.set_title(\"MAYBE\", fontsize=plot_utils.TEXT_SIZE)\n ax3.set_xlim(-0.1, 2.1)\n ax3.set_ylim(-1.7, 0.5)\n ax3.set_title(\"NO\", fontsize=plot_utils.TEXT_SIZE)\n\n figure.set_size_inches(6.0, 2.2)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.9, wspace=0.04, hspace=0.2\n )\n filename = \"bbox_check.pdf\"\n path = plot_utils.get_path(\"bezier-intersection\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef image4():\n figure, all_axes = plt.subplots(2, 7)\n all_axes = all_axes.flatten()\n\n nodes15 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])\n curve15 = bezier.Curve(nodes15, degree=2)\n nodes25 = np.asfortranarray([[0.0, 0.25, 0.75, 1.0], [0.5, 1.0, 1.5, 0.5]])\n curve25 = bezier.Curve(nodes25, degree=3)\n\n color1 = plot_utils.BLUE\n curve15.plot(256, ax=all_axes[0], color=color1)\n color2 = plot_utils.GREEN\n curve25.plot(256, ax=all_axes[0], color=color2)\n\n choices1 = [1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1]\n choices2 = [1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1]\n first = curve15\n second = curve25\n for i in range(13):\n ax = all_axes[i + 1]\n index1 = choices1[i]\n index2 = choices2[i]\n first = first.subdivide()[index1]\n second = second.subdivide()[index2]\n first.plot(256, ax=ax, color=color1)\n second.plot(256, ax=ax, color=color2)\n # After splitting, put the bounding box on the previous axis.\n prev_ax = all_axes[i]\n add_patch(first, prev_ax, color1)\n add_patch(second, prev_ax, color2)\n\n for ax in all_axes:\n ax.axis(\"equal\")\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n figure.set_size_inches(6.4, 2.4)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.06, hspace=0.04\n )\n filename = \"subdivision_linearized.pdf\"\n path = plot_utils.get_path(\"bezier-intersection\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n image1()\n image2()\n image3()\n image4()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "2038828", "language": "Python", "matching_score": 4.502538204193115, "max_stars_count": 2, "path": "scripts/bezier-intersection/subdivision.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bezier\nimport bezier._helpers\nimport matplotlib.patches\nimport matplotlib.path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\ndef add_patch(triangle, ax, color):\n 
# NOTE: This is mostly copy-pasta from ``subdivision.py``.\n left, right, bottom, top = bezier._helpers.bbox(triangle.nodes)\n polygon = np.array(\n [\n [left, bottom],\n [right, bottom],\n [right, top],\n [left, top],\n [left, bottom],\n ]\n )\n patch = matplotlib.patches.PathPatch(\n matplotlib.path.Path(polygon), facecolor=color, alpha=0.625\n )\n ax.add_patch(patch)\n\n\ndef image1():\n nodes = np.asfortranarray(\n [[0.0, 1.5, 3.0, 0.75, 2.25, 0.0], [0.0, -0.5, 0.0, 1.0, 1.5, 2.0]]\n )\n triangle = bezier.Surface(nodes, degree=2, _copy=False)\n point = triangle.evaluate_cartesian(0.25, 0.125)\n xv, yv = point.flatten()\n sub_triangles = triangle.subdivide()\n edges = triangle.edges\n\n figure, all_axes = plt.subplots(2, 2, sharex=True, sharey=True)\n all_axes = all_axes.flatten()\n for ax, sub_triangle in zip(all_axes, sub_triangles):\n # Add the bounding box for the sub triangle.\n add_patch(sub_triangle, ax, plot_utils.GREEN)\n # Add the triangle boundary to each subplot.\n for edge in edges:\n edge.plot(256, ax=ax, color=plot_utils.BLUE)\n # Add the sub triangle.\n sub_triangle.plot(256, ax=ax, color=plot_utils.BLUE)\n # Add the point to be found.\n ax.plot(\n [xv],\n [yv],\n color=\"black\",\n marker=\"o\",\n markersize=3,\n linestyle=\"none\",\n )\n\n for ax in all_axes:\n ax.set_aspect(\"equal\")\n # One axis sets all axis\n all_axes[0].set_xticklabels([])\n all_axes[0].set_yticklabels([])\n\n figure.set_size_inches(4.0, 3.0)\n figure.subplots_adjust(\n left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.03, hspace=0.04\n )\n filename = \"locate_in_triangle.pdf\"\n path = plot_utils.get_path(\"bezier-intersection\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n image1()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "4456799", "language": "Python", "matching_score": 3.0549392700195312, "max_stars_count": 2, "path": "scripts/bezier-intersection/locate_in_triangle.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is mostly copied from ``scripts/solution-transfer/distort.py``.\"\"\"\n\nimport bezier\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\nALPHA = 0.375\n\n\ndef point_on_characteristic(xv, yv, t):\n yt = yv + t\n xt = xv + (yt * yt * yt - yv * yv * yv) / 3\n return xt, yt\n\n\ndef get_title(t):\n if t == int(t):\n return \"$t = {:d}.0$\".format(int(t))\n else:\n return \"$t = {:g}$\".format(t)\n\n\ndef distort_cubic_tri(num_columns):\n node1 = np.array([-0.75, 0.0])\n node2 = np.array([2.25, -1.5])\n node3 = np.array([1.5, 1.5])\n control_points = np.array(\n [\n node1,\n 0.5 * (node1 + node2),\n node2,\n 0.5 * (node1 + node3),\n 0.5 * (node2 + node3),\n node3,\n ]\n )\n\n figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)\n min_y = -1.65\n max_y = 2.8\n control_x = control_points[:, 0]\n control_y = control_points[:, 1]\n bezier_nodes = np.empty((2, len(control_x)), order=\"F\")\n\n # First add 
characteristic curves to the top row of axes.\n for i, xv in enumerate(control_x):\n yv = control_y[i]\n min_t = min_y - yv\n max_t = max_y - yv\n t_vals = np.linspace(min_t, max_t, 100)\n to_plot = point_on_characteristic(xv, yv, t_vals)\n for index, ax in enumerate(all_axes[0, :]):\n if index == num_columns:\n break\n ax.plot(\n to_plot[0], to_plot[1], color=plot_utils.GREEN, alpha=ALPHA\n )\n\n for index, ax_top in enumerate(all_axes[0, :]):\n if index == num_columns:\n break\n t = 0.5 * index\n xt, yt = point_on_characteristic(control_x, control_y, t)\n\n corner_x = xt[(0, 2, 5, 0),]\n corner_y = yt[(0, 2, 5, 0),]\n ax_top.plot(corner_x, corner_y)\n\n title = get_title(t)\n ax_top.set_title(title, fontsize=plot_utils.TEXT_SIZE)\n\n # Now plot the curved element in the \"below\" axis\".\n ax_below = all_axes[1, index]\n # NOTE: This assumes quadratic nodes.\n bezier_nodes[:, 0] = xt[0], yt[0]\n bezier_nodes[:, 1] = (\n 2.0 * xt[1] - 0.5 * xt[0] - 0.5 * xt[2],\n 2.0 * yt[1] - 0.5 * yt[0] - 0.5 * yt[2],\n )\n bezier_nodes[:, 2] = xt[2], yt[2]\n bezier_nodes[:, 3] = (\n 2.0 * xt[3] - 0.5 * xt[0] - 0.5 * xt[5],\n 2.0 * yt[3] - 0.5 * yt[0] - 0.5 * yt[5],\n )\n bezier_nodes[:, 4] = (\n 2.0 * xt[4] - 0.5 * xt[2] - 0.5 * xt[5],\n 2.0 * yt[4] - 0.5 * yt[2] - 0.5 * yt[5],\n )\n bezier_nodes[:, 5] = xt[5], yt[5]\n surface = bezier.Surface.from_nodes(bezier_nodes)\n surface.plot(256, ax=ax_below)\n\n # Add \"nodes\" to both plots.\n for ax in (ax_top, ax_below):\n ax.plot(\n xt,\n yt,\n color=\"black\",\n marker=\"o\",\n linestyle=\"none\",\n markersize=4,\n )\n # Add shadow \"nodes\" to top row for \"next\" plots.\n for next_index in range(index + 1, 3):\n if next_index == num_columns:\n break\n ax = all_axes[0, next_index]\n ax.plot(\n xt,\n yt,\n color=\"black\",\n alpha=0.5 - 0.25 * (next_index - index - 1),\n marker=\"o\",\n linestyle=\"none\",\n markersize=4,\n )\n\n for ax in all_axes.flatten():\n ax.axis(\"scaled\")\n # One axis, all axes (since sharex/sharey).\n ax1 = all_axes[0, 0]\n ax1.set_xlim(-1.0, 5.9)\n ax1.set_ylim(min_y, max_y)\n ax1.set_xticks([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\n ax1.set_xticklabels([\"$0.0$\", \"$1.0$\", \"$2.0$\", \"$3.0$\", \"$4.0$\", \"$5.0$\"])\n ax1.set_yticks([-1.5, -0.5, 0.5, 1.5, 2.5])\n ax1.set_yticklabels([\"$-1.5$\", \"$-0.5$\", \"$0.5$\", \"$1.5$\", \"$2.5$\"])\n ax1.yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[1, 0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n for ax in all_axes[1, :]:\n ax.xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n for ax in all_axes[:, num_columns:].flatten():\n ax.axis(\"off\")\n\n figure.set_size_inches(6.0, 2.9)\n figure.subplots_adjust(\n left=0.07, bottom=0.05, right=0.99, top=0.97, wspace=0.04, hspace=-0.1\n )\n filename = \"element_distortion{}.pdf\".format(num_columns)\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n distort_cubic_tri(1)\n distort_cubic_tri(2)\n distort_cubic_tri(3)\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "8841726", "language": "Python", "matching_score": 7.232850551605225, "max_stars_count": 2, "path": "scripts/slides/element_distortion.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or 
agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bezier\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.spatial.qhull\nimport shapely.geometry\n\nimport plot_utils\n\n\nALPHA = 0.375\nNODES_X = np.array(\n [\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n ]\n)\nNODES_Y = np.array(\n [\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -0.5,\n -0.5,\n -0.5,\n -0.5,\n -0.5,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n ]\n)\nTRIANGLES = np.array(\n [\n [0, 1, 6],\n [0, 6, 5],\n [1, 2, 7],\n [1, 7, 6],\n [2, 3, 8],\n [2, 8, 7],\n [3, 4, 9],\n [3, 9, 8],\n [5, 6, 11],\n [5, 11, 10],\n [6, 7, 12],\n [6, 12, 11],\n [7, 8, 13],\n [7, 13, 12],\n [8, 9, 14],\n [8, 14, 13],\n [10, 11, 16],\n [10, 16, 15],\n [11, 12, 17],\n [11, 17, 16],\n [12, 13, 18],\n [12, 18, 17],\n [13, 14, 19],\n [13, 19, 18],\n [15, 16, 21],\n [15, 21, 20],\n [16, 17, 22],\n [16, 22, 21],\n [17, 18, 23],\n [17, 23, 22],\n [18, 19, 24],\n [18, 24, 23],\n ],\n dtype=np.int32,\n)\n# NOTE: These must be in order along the edge.\nBOUNDARY_INDICES = (0, 1, 2, 3, 4, 9, 14, 19, 24, 23, 22, 21, 20, 15, 10, 5)\n\n\ndef point_on_characteristic(xv, yv, t):\n yt = yv + t\n xt = xv + (yt * yt * yt - yv * yv * yv) / 3\n return xt, yt\n\n\ndef get_title(t):\n if t == int(t):\n return \"$t = {:d}.0$\".format(int(t))\n else:\n return \"$t = {:g}$\".format(t)\n\n\ndef plot_distorted():\n figure, all_axes = plt.subplots(1, 5, sharex=True, sharey=True)\n all_axes = all_axes.flatten()\n for index in range(5):\n ax = all_axes[index]\n t = index / 4.0\n xt, yt = point_on_characteristic(NODES_X, NODES_Y, t)\n ax.triplot(xt, yt, TRIANGLES, color=plot_utils.BLUE, linewidth=0.9)\n\n title = get_title(t)\n ax.set_title(title, fontsize=plot_utils.TEXT_SIZE)\n # Set the axis.\n ax.axis(\"scaled\")\n ax.set_xlim(-1.35, 3.6)\n ax.set_ylim(-1.35, 2.35)\n\n all_axes[0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n for ax in all_axes:\n ax.xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n all_axes[0].set_yticks([-1.0, 0.5, 2.0])\n all_axes[0].set_xticks([-1.0, 1.0, 3.0])\n all_axes[0].set_xticklabels([\"$-1.0$\", \"$1.0$\", \"$3.0$\"])\n figure.set_size_inches(6.4, 1.45)\n figure.subplots_adjust(\n left=0.07, bottom=0.02, right=0.99, top=0.98, wspace=0.07, hspace=0.2\n )\n filename = \"mesh_distortion.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef distort_cubic_tri():\n node1 = np.array([-0.75, 0.0])\n node2 = np.array([2.25, -1.5])\n node3 = np.array([1.5, 1.5])\n control_points = np.array(\n [\n node1,\n 0.5 * (node1 + node2),\n node2,\n 0.5 * (node1 + node3),\n 0.5 * (node2 + node3),\n node3,\n ]\n )\n\n figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)\n min_y = -1.65\n max_y = 2.8\n control_x = control_points[:, 0]\n control_y = control_points[:, 1]\n bezier_nodes = np.empty((2, len(control_x)), order=\"F\")\n\n # First add characteristic curves to the top row of axes.\n for i, xv in enumerate(control_x):\n yv = control_y[i]\n min_t = min_y - yv\n max_t = max_y - yv\n 
t_vals = np.linspace(min_t, max_t, 100)\n to_plot = point_on_characteristic(xv, yv, t_vals)\n for ax in all_axes[0, :]:\n ax.plot(\n to_plot[0], to_plot[1], color=plot_utils.GREEN, alpha=ALPHA\n )\n\n for index, ax_top in enumerate(all_axes[0, :]):\n t = 0.5 * index\n xt, yt = point_on_characteristic(control_x, control_y, t)\n\n corner_x = xt[(0, 2, 5, 0),]\n corner_y = yt[(0, 2, 5, 0),]\n ax_top.plot(corner_x, corner_y)\n\n title = get_title(t)\n ax_top.set_title(title, fontsize=plot_utils.TEXT_SIZE)\n\n # Now plot the curved element in the \"below\" axis\".\n ax_below = all_axes[1, index]\n # NOTE: This assumes quadratic nodes.\n bezier_nodes[:, 0] = xt[0], yt[0]\n bezier_nodes[:, 1] = (\n 2.0 * xt[1] - 0.5 * xt[0] - 0.5 * xt[2],\n 2.0 * yt[1] - 0.5 * yt[0] - 0.5 * yt[2],\n )\n bezier_nodes[:, 2] = xt[2], yt[2]\n bezier_nodes[:, 3] = (\n 2.0 * xt[3] - 0.5 * xt[0] - 0.5 * xt[5],\n 2.0 * yt[3] - 0.5 * yt[0] - 0.5 * yt[5],\n )\n bezier_nodes[:, 4] = (\n 2.0 * xt[4] - 0.5 * xt[2] - 0.5 * xt[5],\n 2.0 * yt[4] - 0.5 * yt[2] - 0.5 * yt[5],\n )\n bezier_nodes[:, 5] = xt[5], yt[5]\n surface = bezier.Surface.from_nodes(bezier_nodes)\n surface.plot(256, ax=ax_below)\n\n # Add \"nodes\" to both plots.\n for ax in (ax_top, ax_below):\n ax.plot(\n xt,\n yt,\n color=\"black\",\n marker=\"o\",\n linestyle=\"none\",\n markersize=4,\n )\n # Add shadow \"nodes\" to top row for \"next\" plots.\n for next_index in range(index + 1, 3):\n ax = all_axes[0, next_index]\n ax.plot(\n xt,\n yt,\n color=\"black\",\n alpha=0.5 - 0.25 * (next_index - index - 1),\n marker=\"o\",\n linestyle=\"none\",\n markersize=4,\n )\n\n for ax in all_axes.flatten():\n ax.axis(\"scaled\")\n # One axis, all axes (since sharex/sharey).\n ax1 = all_axes[0, 0]\n ax1.set_xlim(-1.0, 5.9)\n ax1.set_ylim(min_y, max_y)\n ax1.set_xticks([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\n ax1.set_xticklabels([\"$0.0$\", \"$1.0$\", \"$2.0$\", \"$3.0$\", \"$4.0$\", \"$5.0$\"])\n ax1.set_yticks([-1.5, -0.5, 0.5, 1.5, 2.5])\n ax1.set_yticklabels([\"$-1.5$\", \"$-0.5$\", \"$0.5$\", \"$1.5$\", \"$2.5$\"])\n ax1.yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n all_axes[1, 0].yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n for ax in all_axes[1, :]:\n ax.xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n figure.set_size_inches(6.0, 2.9)\n figure.subplots_adjust(\n left=0.07, bottom=0.05, right=0.99, top=0.97, wspace=0.04, hspace=-0.1\n )\n filename = \"element_distortion.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef remesh():\n figure, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)\n\n # Update to 1-second ahead in time.\n xt, yt = point_on_characteristic(NODES_X, NODES_Y, 1.0)\n nodes = np.empty((len(NODES_X), 2))\n nodes[:, 0] = xt\n nodes[:, 1] = yt\n for ax in (ax1, ax2):\n ax.triplot(\n nodes[:, 0],\n nodes[:, 1],\n TRIANGLES,\n color=plot_utils.BLUE,\n linewidth=1.0,\n )\n\n # Do a Delaunay triangulation and discard exterior triangles.\n tessellation = scipy.spatial.qhull.Delaunay(nodes)\n polygon1 = shapely.geometry.Polygon(nodes[BOUNDARY_INDICES, :])\n to_keep = []\n for i, tri in enumerate(tessellation.simplices):\n polygon2 = shapely.geometry.Polygon(nodes[tri, :])\n intersection = polygon1.intersection(polygon2)\n int_area = intersection.area\n if int_area == polygon2.area:\n to_keep.append(i)\n elif int_area != 0:\n raise NotImplementedError\n\n triangles_new = tessellation.simplices[to_keep, :]\n for ax in 
(ax2, ax3):\n ax.triplot(\n nodes[:, 0],\n nodes[:, 1],\n triangles_new,\n color=plot_utils.GREEN,\n linewidth=1.0,\n )\n\n ax1.set_yticks([-0.5, 1.0, 2.5])\n for ax in (ax1, ax2, ax3):\n ax.axis(\"scaled\")\n ax.set_xlim(-1.3, 3.6)\n ax.set_ylim(-0.75, 2.75)\n ax.set_xticks([-1.0, 1.0, 3.0])\n ax.set_xticklabels([\"$-1.0$\", \"$1.0$\", \"$3.0$\"])\n ax.xaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.yaxis.set_tick_params(labelsize=plot_utils.TICK_SIZE)\n\n ax1.set_title(\"Before Remeshing\", fontsize=plot_utils.TEXT_SIZE)\n ax3.set_title(\"After Remeshing\", fontsize=plot_utils.TEXT_SIZE)\n\n figure.set_size_inches(6.0, 1.8)\n figure.subplots_adjust(\n left=0.06, bottom=0.01, right=0.99, top=1.0, wspace=0.04, hspace=0.2\n )\n filename = \"distortion_remesh.pdf\"\n path = plot_utils.get_path(\"solution-transfer\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n plot_distorted()\n distort_cubic_tri()\n remesh()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "3177111", "language": "Python", "matching_score": 6.669266700744629, "max_stars_count": 2, "path": "scripts/solution-transfer/distort.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is mostly copied from ``scripts/solution-transfer/distort.py``.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\nLABEL_SIZE = 14.0\nFONT_SIZE = 20.0\nNODES_X = np.array(\n [\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n -1.0,\n -0.5,\n 0.0,\n 0.5,\n 1.0,\n ]\n)\nNODES_Y = np.array(\n [\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -0.5,\n -0.5,\n -0.5,\n -0.5,\n -0.5,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n ]\n)\nTRIANGLES = np.array(\n [\n [0, 1, 6],\n [0, 6, 5],\n [1, 2, 7],\n [1, 7, 6],\n [2, 3, 8],\n [2, 8, 7],\n [3, 4, 9],\n [3, 9, 8],\n [5, 6, 11],\n [5, 11, 10],\n [6, 7, 12],\n [6, 12, 11],\n [7, 8, 13],\n [7, 13, 12],\n [8, 9, 14],\n [8, 14, 13],\n [10, 11, 16],\n [10, 16, 15],\n [11, 12, 17],\n [11, 17, 16],\n [12, 13, 18],\n [12, 18, 17],\n [13, 14, 19],\n [13, 19, 18],\n [15, 16, 21],\n [15, 21, 20],\n [16, 17, 22],\n [16, 22, 21],\n [17, 18, 23],\n [17, 23, 22],\n [18, 19, 24],\n [18, 24, 23],\n ],\n dtype=np.int32,\n)\n\n\ndef point_on_characteristic(xv, yv, t):\n yt = yv + t\n xt = xv + (yt * yt * yt - yv * yv * yv) / 3\n return xt, yt\n\n\ndef get_title(t):\n if t == int(t):\n return \"$t = {:d}.0$\".format(int(t))\n else:\n return \"$t = {:g}$\".format(t)\n\n\ndef plot_distorted():\n figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)\n all_axes = all_axes.flatten()\n for index in range(6):\n ax = all_axes[index]\n t = index / 5.0\n xt, yt = point_on_characteristic(NODES_X, NODES_Y, t)\n ax.triplot(xt, yt, TRIANGLES, color=plot_utils.BLUE)\n\n title = get_title(t)\n ax.set_title(title, 
fontsize=FONT_SIZE)\n # Set the axis.\n ax.axis(\"scaled\")\n ax.set_xlim(-1.35, 3.6)\n ax.set_ylim(-1.35, 2.35)\n\n for ax in all_axes:\n ax.tick_params(labelsize=LABEL_SIZE, which=\"both\")\n\n all_axes[0].set_yticks([-1.0, 0.5, 2.0])\n all_axes[0].set_xticks([-1.0, 1.0, 3.0])\n all_axes[0].set_xticklabels([\"$-1.0$\", \"$1.0$\", \"$3.0$\"])\n\n figure.set_size_inches(8.74, 4.8)\n figure.subplots_adjust(\n left=0.06, bottom=0.06, right=0.99, top=0.93, wspace=0.0, hspace=0.2\n )\n filename = \"mesh_distortion.pdf\"\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n plot_distorted()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "6925306", "language": "Python", "matching_score": 4.139621257781982, "max_stars_count": 2, "path": "scripts/slides/distort.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport bezier\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport plot_utils\n\n\nLABEL_SIZE = 14.0\nFONT_SIZE = 20.0\n\n\ndef image1():\n figure, (ax1, ax2) = plt.subplots(1, 2)\n nodes1 = np.asfortranarray([[0.0, 3.0, 7.0], [5.0, 0.0, 8.0]])\n triangle1 = bezier.Surface(nodes1, degree=1)\n triangle1.plot(256, ax=ax1)\n\n nodes2 = np.asfortranarray(\n [[0.0, 1.0, 2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 1.0, 2.0, 2.0]]\n )\n triangle2 = bezier.Surface(nodes2, degree=2)\n triangle2.plot(256, ax=ax2)\n\n params = np.asfortranarray([[0.125, 0.125], [0.125, 0.75]])\n points1 = triangle1.evaluate_cartesian_multi(params)\n ax1.plot(points1[0, :], points1[1, :], marker=\"o\", color=\"black\")\n points2 = triangle2.evaluate_cartesian_multi(params)\n ax2.plot(points2[0, :], points2[1, :], marker=\"o\", color=\"black\")\n\n for ax in (ax1, ax2):\n ax.tick_params(labelsize=LABEL_SIZE, which=\"both\")\n ax.axis(\"equal\")\n\n ax1.set_title(\"Convex\", fontsize=FONT_SIZE)\n ax2.set_title(\"Not (Necessarily) Convex\", fontsize=FONT_SIZE)\n\n figure.set_size_inches(8.74, 4.8)\n figure.subplots_adjust(\n left=0.05, bottom=0.06, right=0.99, top=0.93, wspace=0.12, hspace=0.2\n )\n filename = \"not_convex.pdf\"\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef image2():\n figure, (ax1, ax2) = plt.subplots(1, 2)\n\n nodes1a = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n nodes2a = np.asfortranarray([[-0.125, 1.0, 0.125], [-0.0625, 0.5, 0.375]])\n nodes1b = np.asfortranarray(\n [[0.0, 0.375, 1.0, 0.25, 0.75, 0.5], [0.0, 0.375, 0.0, 0.5, 0.5, 1.0]]\n )\n nodes2b = np.asfortranarray(\n [\n [1.0, 0.625, 0.0, 0.75, 0.25, 0.5],\n [0.375, -0.125, 0.375, -0.1875, -0.1875, -0.75],\n ]\n )\n info = ((nodes1a, nodes2a, ax1), (nodes1b, nodes2b, ax2))\n\n for nodes1, nodes2, ax in info:\n triangle1 = bezier.Surface.from_nodes(nodes1)\n triangle2 = bezier.Surface.from_nodes(nodes2)\n intersections = triangle1.intersect(triangle2)\n\n triangle1.plot(256, ax=ax, 
color=plot_utils.BLUE)\n triangle2.plot(256, ax=ax, color=plot_utils.GREEN)\n for intersection in intersections:\n intersection.plot(256, ax=ax, color=plot_utils.RED)\n\n for ax in (ax1, ax2):\n ax.tick_params(labelsize=LABEL_SIZE, which=\"both\")\n ax.axis(\"equal\")\n\n ax1.set_title(\"Convex Intersection\", fontsize=FONT_SIZE)\n ax2.set_title(\"Multiple Intersections\", fontsize=FONT_SIZE)\n\n figure.set_size_inches(8.74, 4.8)\n figure.subplots_adjust(\n left=0.06, bottom=0.06, right=0.99, top=0.93, wspace=0.18, hspace=0.2\n )\n filename = \"split_intersection.pdf\"\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n image1()\n image2()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "11127592", "language": "Python", "matching_score": 3.7724175453186035, "max_stars_count": 2, "path": "scripts/slides/curved_vs_straight.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fractions\n\nimport bezier\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport newton_bezier\nimport plot_utils\n\n\nCOEFFS1 = np.asfortranarray([[-2.0, -2.0, 6.0], [2.0, 0.0, 2.0]])\nCOEFFS2 = np.asfortranarray([[-4.0, -4.0, 12.0], [5.0, -3.0, 5.0]])\nF = fractions.Fraction\nU = 0.5 ** 53\n\n\ndef image1():\n figure = plt.figure()\n ax = figure.gca()\n\n curve1 = bezier.Curve(COEFFS1, degree=2, _copy=False)\n curve2 = bezier.Curve(COEFFS2, degree=2, _copy=False)\n\n curve1.plot(256, ax=ax, color=plot_utils.BLUE)\n ax.lines[-1].set_label(\"$b_1(s)$\")\n curve2.plot(256, ax=ax, color=plot_utils.GREEN)\n ax.lines[-1].set_label(\"$b_2(t)$\")\n ax.plot(\n [0.0],\n [1.0],\n marker=\"o\",\n markersize=4.0,\n color=\"black\",\n linestyle=\"none\",\n )\n\n ax.legend(fontsize=plot_utils.TEXT_SIZE)\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.axis(\"scaled\")\n\n figure.set_size_inches(5.4, 1.6)\n figure.subplots_adjust(\n left=0.05, bottom=0.09, right=0.99, top=0.99, wspace=0.2, hspace=0.2\n )\n filename = \"tangent_intersection.pdf\"\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef image2():\n expected_s = 0.5\n expected_t = 0.5\n s0 = 1.0 - 0.5 ** 40\n t0 = 0.75 + 0.5 ** 20\n\n iterates1 = newton_bezier.newton(\n s0, COEFFS1, t0, COEFFS2, newton_bezier.standard_residual\n )\n iterates2 = newton_bezier.newton(\n s0, COEFFS1, t0, COEFFS2, newton_bezier.compensated_residual\n )\n errors1 = []\n errors2 = []\n for iterates, errors in ((iterates1, errors1), (iterates2, errors2)):\n for n, (s_val, t_val) in enumerate(iterates):\n rel_error_s = F(s_val) / F(expected_s) - 1\n if rel_error_s <= 0:\n raise ValueError(s_val, rel_error_s)\n rel_error_t = F(t_val) / F(expected_t) - 1\n if rel_error_t <= 0:\n raise ValueError(t_val, rel_error_t)\n errors.append((n, float(rel_error_s), float(rel_error_t)))\n\n figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, 
sharey=True)\n min_x = -2.0\n max_x = 51.0\n errors1 = np.array(errors1)\n errors2 = np.array(errors2)\n for index, ax in ((1, ax1), (2, ax2)):\n ax.semilogy(\n errors1[:, 0],\n errors1[:, index],\n marker=\"o\",\n linestyle=\"none\",\n markersize=7,\n markeredgewidth=1,\n markerfacecolor=\"none\",\n label=\"Standard\",\n )\n ax.semilogy(\n errors2[:, 0],\n errors2[:, index],\n color=\"black\",\n marker=\"o\",\n linestyle=\"none\",\n markersize=3,\n label=\"Compensated\",\n )\n ax.semilogy(\n [min_x, max_x],\n [np.cbrt(U), np.cbrt(U)],\n linestyle=\"dashed\",\n color=\"black\",\n )\n ax.semilogy(\n [min_x, max_x],\n [np.cbrt(U * U), np.cbrt(U * U)],\n linestyle=\"dashed\",\n color=\"black\",\n )\n\n ax.set_yscale(\"log\", basey=2)\n ax.set_xlabel(\"Iteration\", fontsize=plot_utils.TEXT_SIZE)\n\n ax1.set_ylabel(\"Relative Error\", fontsize=plot_utils.TEXT_SIZE)\n ax1.set_title(\"$s$\", fontsize=plot_utils.TEXT_SIZE)\n ax1.set_xlim(min_x, max_x)\n ax2.set_title(\"$t$\", fontsize=plot_utils.TEXT_SIZE)\n ax2.legend(loc=\"upper right\", fontsize=plot_utils.TEXT_SIZE)\n\n ax2.set_yticks([np.cbrt(U), np.cbrt(U * U)], minor=True)\n ax2.set_yticklabels(\n [r\"$\\sqrt[3]{\\mathbf{u}}$\", r\"$\\sqrt[3]{\\mathbf{u}^2}$\"], minor=True\n )\n plt.setp(ax1.get_yticklabels(minor=True), visible=False)\n ax2.tick_params(\n axis=\"y\",\n which=\"minor\",\n direction=\"out\",\n left=0,\n right=1,\n labelleft=0,\n labelright=1,\n )\n\n ax1.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax2.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax2.tick_params(labelsize=plot_utils.TEXT_SIZE, which=\"minor\")\n\n figure.set_size_inches(6.4, 2.6)\n figure.subplots_adjust(\n left=0.09, bottom=0.16, right=0.93, top=0.9, wspace=0.04, hspace=0.2\n )\n filename = \"newton_linear_converge.pdf\"\n path = plot_utils.get_path(\"compensated-newton\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n image1()\n image2()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "11893434", "language": "Python", "matching_score": 6.775042533874512, "max_stars_count": 2, "path": "scripts/compensated-newton/tangent_intersection.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is mostly copied from elsewhere.\n\nIn particular, ``scripts/compensated-newton/tangent_intersection.py``.\n\nThe only change is ``b_1 --> b_0`` and ``b_2 --> b_1``.\n\"\"\"\n\nimport fractions\n\nimport bezier\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport newton_bezier\nimport plot_utils\n\n\nCOEFFS1 = np.asfortranarray([[-2.0, -2.0, 6.0], [2.0, 0.0, 2.0]])\nCOEFFS2 = np.asfortranarray([[-4.0, -4.0, 12.0], [5.0, -3.0, 5.0]])\nF = fractions.Fraction\nU = 0.5 ** 53\n\n\ndef image1():\n figure = plt.figure()\n ax = figure.gca()\n\n curve1 = bezier.Curve(COEFFS1, degree=2, _copy=False)\n curve2 = bezier.Curve(COEFFS2, degree=2, _copy=False)\n\n curve1.plot(256, ax=ax, color=plot_utils.BLUE)\n ax.lines[-1].set_label(\"$b_0(s)$\")\n 
curve2.plot(256, ax=ax, color=plot_utils.GREEN)\n ax.lines[-1].set_label(\"$b_1(t)$\")\n ax.plot(\n [0.0],\n [1.0],\n marker=\"o\",\n markersize=4.0,\n color=\"black\",\n linestyle=\"none\",\n )\n\n ax.legend(fontsize=plot_utils.TEXT_SIZE)\n ax.tick_params(labelsize=plot_utils.TICK_SIZE)\n ax.axis(\"scaled\")\n\n figure.set_size_inches(5.4, 1.6)\n figure.subplots_adjust(\n left=0.05, bottom=0.09, right=0.99, top=0.99, wspace=0.2, hspace=0.2\n )\n filename = \"tangent_intersection.pdf\"\n path = plot_utils.get_path(\"slides\", filename)\n figure.savefig(path)\n print(\"Saved {}\".format(filename))\n plt.close(figure)\n\n\ndef main():\n image1()\n\n\nif __name__ == \"__main__\":\n plot_utils.set_styles()\n main()\n", "id": "4731246", "language": "Python", "matching_score": 1.660910725593567, "max_stars_count": 2, "path": "scripts/slides/tangent_intersection.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pure Python helper methods for intersecting B |eacute| zier shapes.\n\n.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE\n :trim:\n\"\"\"\n\nimport enum\n\nimport numpy as np\n\nfrom bezier.hazmat import curve_helpers\nfrom bezier.hazmat import helpers as _py_helpers\n\n\n# For ``full_newton()``.\nZERO_THRESHOLD = 0.5 ** 10 # ~1e-3\n\"\"\"float: The bound below values are considered to be \"too close\" to ``0``.\"\"\"\nMAX_NEWTON_ITERATIONS = 10\n\"\"\"int: The maximum number of iterations for Newton's method to converge.\"\"\"\nNEWTON_ERROR_RATIO = 0.5 ** 36\n\"\"\"float: Cap on error ratio during Newton's method\n\nEqual to :math:`2^{-36}`. See :func:`.newton_iterate` for more details.\n\"\"\"\nNEWTON_NO_CONVERGE = \"\"\"\\\nUnsupported multiplicity.\n\nNewton's method failed to converge to a solution under the\nfollowing assumptions:\n\n- The starting ``s-t`` values were already near a solution\n- The root / solution has multiplicity 1 or 2\n - 1: The root is \"simple\", i.e. the curves are not tangent\n and have no self-intersections at the point of intersection.\n - 2: The root is a double root, i.e. the curves are tangent\n but have different curvatures at the point of intersection.\n\nThe failure to converge may have been caused by one of:\n\n- The root was of multiplicity greater than 2\n- The curves don't actually intersect, though they come very close\n- Numerical issues caused the iteration to leave the region\n of convergence\n\"\"\"\n\n\ndef newton_refine(s, nodes1, t, nodes2):\n r\"\"\"Apply one step of 2D Newton's method.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n We want to use Newton's method on the function\n\n .. math::\n\n F(s, t) = B_1(s) - B_2(t)\n\n to refine :math:`\\left(s_{\\ast}, t_{\\ast}\\right)`. Using this,\n and the Jacobian :math:`DF`, we \"solve\"\n\n .. 
math::\n\n \\left[\\begin{array}{c}\n 0 \\\\ 0 \\end{array}\\right] \\approx\n F\\left(s_{\\ast} + \\Delta s, t_{\\ast} + \\Delta t\\right) \\approx\n F\\left(s_{\\ast}, t_{\\ast}\\right) +\n \\left[\\begin{array}{c c}\n B_1'\\left(s_{\\ast}\\right) &\n - B_2'\\left(t_{\\ast}\\right) \\end{array}\\right]\n \\left[\\begin{array}{c}\n \\Delta s \\\\ \\Delta t \\end{array}\\right]\n\n and refine with the component updates :math:`\\Delta s` and\n :math:`\\Delta t`.\n\n .. note::\n\n This implementation assumes the curves live in\n :math:`\\mathbf{R}^2`.\n\n For example, the curves\n\n .. math::\n\n \\begin{align*}\n B_1(s) &= \\left[\\begin{array}{c} 0 \\\\ 0 \\end{array}\\right] (1 - s)^2\n + \\left[\\begin{array}{c} 2 \\\\ 4 \\end{array}\\right] 2s(1 - s)\n + \\left[\\begin{array}{c} 4 \\\\ 0 \\end{array}\\right] s^2 \\\\\n B_2(t) &= \\left[\\begin{array}{c} 2 \\\\ 0 \\end{array}\\right] (1 - t)\n + \\left[\\begin{array}{c} 0 \\\\ 3 \\end{array}\\right] t\n \\end{align*}\n\n intersect at the point\n :math:`B_1\\left(\\frac{1}{4}\\right) = B_2\\left(\\frac{1}{2}\\right) =\n \\frac{1}{2} \\left[\\begin{array}{c} 2 \\\\ 3 \\end{array}\\right]`.\n\n However, starting from the wrong point we have\n\n .. math::\n\n \\begin{align*}\n F\\left(\\frac{3}{8}, \\frac{1}{4}\\right) &= \\frac{1}{8}\n \\left[\\begin{array}{c} 0 \\\\ 9 \\end{array}\\right] \\\\\n DF\\left(\\frac{3}{8}, \\frac{1}{4}\\right) &=\n \\left[\\begin{array}{c c}\n 4 & 2 \\\\ 2 & -3 \\end{array}\\right] \\\\\n \\Longrightarrow \\left[\\begin{array}{c} \\Delta s \\\\ \\Delta t\n \\end{array}\\right] &= \\frac{9}{64} \\left[\\begin{array}{c}\n -1 \\\\ 2 \\end{array}\\right].\n \\end{align*}\n\n .. image:: ../../images/newton_refine1.png\n :align: center\n\n .. testsetup:: newton-refine1, newton-refine2, newton-refine3\n\n import numpy as np\n import bezier\n from bezier.hazmat.intersection_helpers import newton_refine\n\n machine_eps = np.finfo(np.float64).eps\n\n def realroots(*coeffs):\n all_roots = np.roots(coeffs)\n return all_roots[np.where(all_roots.imag == 0.0)].real\n\n .. doctest:: newton-refine1\n\n >>> import bezier\n >>> import numpy as np\n >>> nodes1 = np.asfortranarray([\n ... [0.0, 2.0, 4.0],\n ... [0.0, 4.0, 0.0],\n ... ])\n >>> nodes2 = np.asfortranarray([\n ... [2.0, 0.0],\n ... [0.0, 3.0],\n ... ])\n >>> s, t = 0.375, 0.25\n >>> new_s, new_t = newton_refine(s, nodes1, t, nodes2)\n >>> 64.0 * (new_s - s)\n -9.0\n >>> 64.0 * (new_t - t)\n 18.0\n\n .. testcleanup:: newton-refine1\n\n import make_images\n curve1 = bezier.Curve(nodes1, degree=2)\n curve2 = bezier.Curve(nodes2, degree=1)\n make_images.newton_refine1(s, new_s, curve1, t, new_t, curve2)\n\n For \"typical\" curves, we converge to a solution quadratically.\n This means that the number of correct digits doubles every\n iteration (until machine precision is reached).\n\n .. image:: ../../images/newton_refine2.png\n :align: center\n\n .. doctest:: newton-refine2\n\n >>> nodes1 = np.asfortranarray([\n ... [0.0, 0.25, 0.5, 0.75, 1.0],\n ... [0.0, 2.0 , -2.0, 2.0 , 0.0],\n ... ])\n >>> nodes2 = np.asfortranarray([\n ... [0.0, 0.25, 0.5, 0.75, 1.0],\n ... [1.0, 0.5 , 0.5, 0.5 , 0.0],\n ... 
])\n >>> # The expected intersection is the only real root of\n >>> # 28 s^3 - 30 s^2 + 9 s - 1.\n >>> expected, = realroots(28, -30, 9, -1)\n >>> s_vals = [0.625, None, None, None, None]\n >>> t = 0.625\n >>> np.log2(abs(expected - s_vals[0]))\n -4.399...\n >>> s_vals[1], t = newton_refine(s_vals[0], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[1]))\n -7.901...\n >>> s_vals[2], t = newton_refine(s_vals[1], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[2]))\n -16.010...\n >>> s_vals[3], t = newton_refine(s_vals[2], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[3]))\n -32.110...\n >>> s_vals[4], t = newton_refine(s_vals[3], nodes1, t, nodes2)\n >>> np.allclose(s_vals[4], expected, rtol=6 * machine_eps, atol=0.0)\n True\n\n .. testcleanup:: newton-refine2\n\n import make_images\n curve1 = bezier.Curve(nodes1, degree=4)\n curve2 = bezier.Curve(nodes2, degree=4)\n make_images.newton_refine2(s_vals, curve1, curve2)\n\n However, when the intersection occurs at a point of tangency,\n the convergence becomes linear. This means that the number of\n correct digits added each iteration is roughly constant.\n\n .. image:: ../../images/newton_refine3.png\n :align: center\n\n .. doctest:: newton-refine3\n\n >>> nodes1 = np.asfortranarray([\n ... [0.0, 0.5, 1.0],\n ... [0.0, 1.0, 0.0],\n ... ])\n >>> nodes2 = np.asfortranarray([\n ... [0.0, 1.0],\n ... [0.5, 0.5],\n ... ])\n >>> expected = 0.5\n >>> s_vals = [0.375, None, None, None, None, None]\n >>> t = 0.375\n >>> np.log2(abs(expected - s_vals[0]))\n -3.0\n >>> s_vals[1], t = newton_refine(s_vals[0], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[1]))\n -4.0\n >>> s_vals[2], t = newton_refine(s_vals[1], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[2]))\n -5.0\n >>> s_vals[3], t = newton_refine(s_vals[2], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[3]))\n -6.0\n >>> s_vals[4], t = newton_refine(s_vals[3], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[4]))\n -7.0\n >>> s_vals[5], t = newton_refine(s_vals[4], nodes1, t, nodes2)\n >>> np.log2(abs(expected - s_vals[5]))\n -8.0\n\n .. testcleanup:: newton-refine3\n\n import make_images\n curve1 = bezier.Curve(nodes1, degree=2)\n curve2 = bezier.Curve(nodes2, degree=1)\n make_images.newton_refine3(s_vals, curve1, curve2)\n\n Unfortunately, the process terminates with an error that is not close\n to machine precision :math:`\\varepsilon` when\n :math:`\\Delta s = \\Delta t = 0`.\n\n .. testsetup:: newton-refine3-continued\n\n import numpy as np\n import bezier\n from bezier.hazmat.intersection_helpers import newton_refine\n\n nodes1 = np.asfortranarray([\n [0.0, 0.5, 1.0],\n [0.0, 1.0, 0.0],\n ])\n nodes2 = np.asfortranarray([\n [0.0, 1.0],\n [0.5, 0.5],\n ])\n\n .. doctest:: newton-refine3-continued\n\n >>> s1 = t1 = 0.5 - 0.5**27\n >>> np.log2(0.5 - s1)\n -27.0\n >>> s2, t2 = newton_refine(s1, nodes1, t1, nodes2)\n >>> s2 == t2\n True\n >>> np.log2(0.5 - s2)\n -28.0\n >>> s3, t3 = newton_refine(s2, nodes1, t2, nodes2)\n >>> s3 == t3 == s2\n True\n\n Due to round-off near the point of tangency, the final error\n resembles :math:`\\sqrt{\\varepsilon}` rather than machine\n precision as expected.\n\n .. note::\n\n The following is not implemented in this function. It's just\n an exploration on how the shortcomings might be addressed.\n\n However, this can be overcome. At the point of tangency, we want\n :math:`B_1'(s) \\parallel B_2'(t)`. This can be checked numerically via\n\n .. 
math::\n\n B_1'(s) \\times B_2'(t) = 0.\n\n For the last example (the one that converges linearly), this is\n\n .. math::\n\n 0 = \\left[\\begin{array}{c} 1 \\\\ 2 - 4s \\end{array}\\right] \\times\n \\left[\\begin{array}{c} 1 \\\\ 0 \\end{array}\\right] = 4 s - 2.\n\n With this, we can modify Newton's method to find a zero of the\n over-determined system\n\n .. math::\n\n G(s, t) = \\left[\\begin{array}{c} B_0(s) - B_1(t) \\\\\n B_1'(s) \\times B_2'(t) \\end{array}\\right] =\n \\left[\\begin{array}{c} s - t \\\\ 2 s (1 - s) - \\frac{1}{2} \\\\\n 4 s - 2\\end{array}\\right].\n\n Since :math:`DG` is :math:`3 \\times 2`, we can't invert it. However,\n we can find a least-squares solution:\n\n .. math::\n\n \\left(DG^T DG\\right) \\left[\\begin{array}{c}\n \\Delta s \\\\ \\Delta t \\end{array}\\right] = -DG^T G.\n\n This only works if :math:`DG` has full rank. In this case, it does\n since the submatrix containing the first and last rows has rank two:\n\n .. math::\n\n DG = \\left[\\begin{array}{c c} 1 & -1 \\\\\n 2 - 4 s & 0 \\\\\n 4 & 0 \\end{array}\\right].\n\n Though this avoids a singular system, the normal equations have a\n condition number that is the square of the condition number of the matrix.\n\n Starting from :math:`s = t = \\frac{3}{8}` as above:\n\n .. testsetup:: newton-refine4\n\n import numpy as np\n from bezier.hazmat import helpers\n\n def modified_update(s, t):\n minus_G = np.asfortranarray([\n [t - s],\n [0.5 - 2.0 * s * (1.0 - s)],\n [2.0 - 4.0 * s],\n ])\n DG = np.asfortranarray([\n [1.0, -1.0],\n [2.0 - 4.0 * s, 0.0],\n [4.0, 0.0],\n ])\n DG_t = np.asfortranarray(DG.T)\n\n LHS = helpers.matrix_product(DG_t, DG)\n RHS = helpers.matrix_product(DG_t, minus_G)\n delta_params = np.linalg.solve(LHS, RHS)\n delta_s, delta_t = delta_params.flatten()\n return s + delta_s, t + delta_t\n\n .. 
doctest:: newton-refine4\n\n >>> s0, t0 = 0.375, 0.375\n >>> np.log2(0.5 - s0)\n -3.0\n >>> s1, t1 = modified_update(s0, t0)\n >>> s1 == t1\n True\n >>> 1040.0 * s1\n 519.0\n >>> np.log2(0.5 - s1)\n -10.022...\n >>> s2, t2 = modified_update(s1, t1)\n >>> s2 == t2\n True\n >>> np.log2(0.5 - s2)\n -31.067...\n >>> s3, t3 = modified_update(s2, t2)\n >>> s3 == t3 == 0.5\n True\n\n Args:\n s (float): Parameter of a near-intersection along the first curve.\n nodes1 (numpy.ndarray): Nodes of first curve forming intersection.\n t (float): Parameter of a near-intersection along the second curve.\n nodes2 (numpy.ndarray): Nodes of second curve forming intersection.\n\n Returns:\n Tuple[float, float]: The refined parameters from a single Newton\n step.\n\n Raises:\n ValueError: If the Jacobian is singular at ``(s, t)``.\n \"\"\"\n # NOTE: We form -F(s, t) since we want to solve -DF^{-1} F(s, t).\n func_val = curve_helpers.evaluate_multi(\n nodes2, np.asfortranarray([t])\n ) - curve_helpers.evaluate_multi(nodes1, np.asfortranarray([s]))\n if np.all(func_val == 0.0):\n # No refinement is needed.\n return s, t\n\n # NOTE: This assumes the curves are 2D.\n jac_mat = np.empty((2, 2), order=\"F\")\n jac_mat[:, :1] = curve_helpers.evaluate_hodograph(s, nodes1)\n jac_mat[:, 1:] = -curve_helpers.evaluate_hodograph(t, nodes2)\n # Solve the system.\n singular, delta_s, delta_t = _py_helpers.solve2x2(jac_mat, func_val[:, 0])\n if singular:\n raise ValueError(\"Jacobian is singular.\")\n\n return s + delta_s, t + delta_t\n\n\nclass NewtonSimpleRoot: # pylint: disable=too-few-public-methods\n r\"\"\"Callable object that facilitates Newton's method.\n\n This is meant to be used to compute the Newton update via:\n\n .. math::\n\n DF(s, t) \\left[\\begin{array}{c}\n \\Delta s \\\\ \\Delta t \\end{array}\\right] = -F(s, t).\n\n Args:\n nodes1 (numpy.ndarray): Control points of the first curve.\n first_deriv1 (numpy.ndarray): Control points of the curve\n :math:`B_1'(s)`.\n nodes2 (numpy.ndarray): Control points of the second curve.\n first_deriv2 (numpy.ndarray): Control points of the curve\n :math:`B_2'(t)`.\n \"\"\"\n\n def __init__(self, nodes1, first_deriv1, nodes2, first_deriv2):\n self.nodes1 = nodes1\n self.first_deriv1 = first_deriv1\n self.nodes2 = nodes2\n self.first_deriv2 = first_deriv2\n\n def __call__(self, s, t):\n r\"\"\"This computes :math:`F = B_1(s) - B_2(t)` and :math:`DF(s, t)`.\n\n .. note::\n\n There is **almost** identical code in :func:`.newton_refine`, but\n that code can avoid computing the ``first_deriv1`` and\n ``first_deriv2`` nodes in cases that :math:`F(s, t) = 0` whereas\n this function assumes they have been given.\n\n In the case that :math:`DF(s, t)` is singular, the assumption is that\n the intersection has a multiplicity higher than one (i.e. the root is\n non-simple). **Near** a simple root, it must be the case that\n :math:`DF(s, t)` has non-zero determinant, so due to continuity, we\n assume the Jacobian will be invertible nearby.\n\n Args:\n s (float): The parameter where we'll compute :math:`B_1(s)` and\n :math:`DF(s, t)`.\n t (float): The parameter where we'll compute :math:`B_2(t)` and\n :math:`DF(s, t)`.\n\n Returns:\n Tuple[Optional[numpy.ndarray], numpy.ndarray]: Pair of\n\n * The LHS matrix ``DF``, a ``2 x 2`` array. 
If ``F == 0`` then\n this matrix won't be computed and :data:`None` will be returned.\n * The RHS vector ``F``, a ``2 x 1`` array.\n \"\"\"\n s_vals = np.asfortranarray([s])\n b1_s = curve_helpers.evaluate_multi(self.nodes1, s_vals)\n t_vals = np.asfortranarray([t])\n b2_t = curve_helpers.evaluate_multi(self.nodes2, t_vals)\n func_val = b1_s - b2_t\n if np.all(func_val == 0.0):\n return None, func_val\n\n else:\n jacobian = np.empty((2, 2), order=\"F\")\n jacobian[:, :1] = curve_helpers.evaluate_multi(\n self.first_deriv1, s_vals\n )\n jacobian[:, 1:] = -curve_helpers.evaluate_multi(\n self.first_deriv2, t_vals\n )\n return jacobian, func_val\n\n\nclass NewtonDoubleRoot: # pylint: disable=too-few-public-methods\n r\"\"\"Callable object that facilitates Newton's method for double roots.\n\n This is an augmented version of :class:`NewtonSimpleRoot`.\n\n For non-simple intersections (i.e. multiplicity greater than 1),\n the curves will be tangent, which forces :math:`B_1'(s) \\times B_2'(t)`\n to be zero. Unfortunately, that quantity is also equal to the\n determinant of the Jacobian, so :math:`DF` will not be full rank.\n\n In order to produce a system that **can** be solved, an\n an augmented function is computed:\n\n .. math::\n\n G(s, t) = \\left[\\begin{array}{c}\n F(s, t) \\\\ \\hline\n B_1'(s) \\times B_2'(t)\n \\end{array}\\right]\n\n The use of :math:`B_1'(s) \\times B_2'(t)` (with lowered degree in\n :math:`s` and :math:`t`) means that the rank deficiency in\n :math:`DF` can be fixed in:\n\n .. math::\n\n DG(s, t) = \\left[\\begin{array}{c | c}\n B_1'(s) & -B_2'(t) \\\\ \\hline\n B_1''(s) \\times B_2'(t) & B_1'(s) \\times B_2''(t)\n \\end{array}\\right]\n\n (This may not always be full rank, but in the double root / multiplicity\n 2 case it will be full rank near a solution.)\n\n Rather than finding a least squares solution to the overdetermined system\n\n .. math::\n\n DG(s, t) \\left[\\begin{array}{c}\n \\Delta s \\\\ \\Delta t \\end{array}\\right] = -G(s, t)\n\n we find a solution to the square (and hopefully full rank) system:\n\n .. math::\n\n DG^T DG \\left[\\begin{array}{c}\n \\Delta s \\\\ \\Delta t \\end{array}\\right] = -DG^T G.\n\n Forming :math:`DG^T DG` squares the condition number, so it would be\n \"better\" to use :func:`~numpy.linalg.lstsq` (which wraps the LAPACK routine\n ``dgelsd``). However, using :func:`.solve2x2` is **much** more\n straightforward and in practice this is just as accurate.\n\n Args:\n nodes1 (numpy.ndarray): Control points of the first curve.\n first_deriv1 (numpy.ndarray): Control points of the curve\n :math:`B_1'(s)`.\n second_deriv1 (numpy.ndarray): Control points of the curve\n :math:`B_1''(s)`.\n nodes2 (numpy.ndarray): Control points of the second curve.\n first_deriv2 (numpy.ndarray): Control points of the curve\n :math:`B_2'(t)`.\n second_deriv2 (numpy.ndarray): Control points of the curve\n :math:`B_2''(t)`.\n \"\"\"\n\n def __init__(\n self,\n nodes1,\n first_deriv1,\n second_deriv1,\n nodes2,\n first_deriv2,\n second_deriv2,\n ):\n self.nodes1 = nodes1\n self.first_deriv1 = first_deriv1\n self.second_deriv1 = second_deriv1\n self.nodes2 = nodes2\n self.first_deriv2 = first_deriv2\n self.second_deriv2 = second_deriv2\n\n def __call__(self, s, t):\n r\"\"\"This computes :math:`DG^T G` and :math:`DG^T DG`.\n\n If :math:`DG^T DG` is not full rank, this means either :math:`DG`\n was not full rank or that it was, but with a relatively high condition\n number. 
So, in the case that :math:`DG^T DG` is singular, the\n assumption is that the intersection has a multiplicity higher than two.\n\n Args:\n s (float): The parameter where we'll compute :math:`G(s, t)` and\n :math:`DG(s, t)`.\n t (float): The parameter where we'll compute :math:`G(s, t)` and\n :math:`DG(s, t)`.\n\n Returns:\n Tuple[Optional[numpy.ndarray], Optional[numpy.ndarray]]: Pair of\n\n * The LHS matrix ``DG^T DG``, a ``2 x 2`` array. If ``G == 0`` then\n this matrix won't be computed and :data:`None` will be returned.\n * The RHS vector ``DG^T G``, a ``2 x 1`` array.\n \"\"\"\n s_vals = np.asfortranarray([s])\n b1_s = curve_helpers.evaluate_multi(self.nodes1, s_vals)\n b1_ds = curve_helpers.evaluate_multi(self.first_deriv1, s_vals)\n t_vals = np.asfortranarray([t])\n b2_t = curve_helpers.evaluate_multi(self.nodes2, t_vals)\n b2_dt = curve_helpers.evaluate_multi(self.first_deriv2, t_vals)\n func_val = np.empty((3, 1), order=\"F\")\n func_val[:2, :] = b1_s - b2_t\n func_val[2, :] = _py_helpers.cross_product(b1_ds[:, 0], b2_dt[:, 0])\n if np.all(func_val == 0.0):\n return None, func_val[:2, :]\n\n else:\n jacobian = np.empty((3, 2), order=\"F\")\n jacobian[:2, :1] = b1_ds\n jacobian[:2, 1:] = -b2_dt\n if self.second_deriv1.size == 0:\n jacobian[2, 0] = 0.0\n else:\n jacobian[2, 0] = _py_helpers.cross_product(\n curve_helpers.evaluate_multi(self.second_deriv1, s_vals)[\n :, 0\n ],\n b2_dt[:, 0],\n )\n if self.second_deriv2.size == 0:\n jacobian[2, 1] = 0.0\n else:\n jacobian[2, 1] = _py_helpers.cross_product(\n b1_ds[:, 0],\n curve_helpers.evaluate_multi(self.second_deriv2, t_vals)[\n :, 0\n ],\n )\n modified_lhs = _py_helpers.matrix_product(jacobian.T, jacobian)\n modified_rhs = _py_helpers.matrix_product(jacobian.T, func_val)\n return modified_lhs, modified_rhs\n\n\ndef newton_iterate(evaluate_fn, s, t):\n \"\"\"Perform a Newton iteration.\n\n .. warning::\n\n In this function, we assume that :math:`s` and :math:`t` are nonzero,\n this makes convergence easier to detect since \"relative error\" at\n ``0.0`` is not a useful measure.\n\n There are several tolerance / threshold quantities used below:\n\n * :math:`10` (:attr:`MAX_NEWTON_ITERATIONS`) iterations will be done before\n \"giving up\". This is based on the assumption that we are already starting\n near a root, so quadratic convergence should terminate quickly.\n * :math:`\\\\tau = \\\\frac{1}{4}` is used as the boundary between linear\n and superlinear convergence. So if the current error\n :math:`\\\\|p_{n + 1} - p_n\\\\|` is not smaller than :math:`\\\\tau` times\n the previous error :math:`\\\\|p_n - p_{n - 1}\\\\|`, then convergence\n is considered to be linear at that point.\n * :math:`\\\\frac{2}{3}` of all iterations must be converging linearly\n for convergence to be stopped (and moved to the next regime). This\n will only be checked after 4 or more updates have occurred.\n * :math:`\\\\tau = 2^{-36}` (:attr:`NEWTON_ERROR_RATIO`) is used to\n determine that an update is sufficiently small to stop iterating. So if\n the error :math:`\\\\|p_{n + 1} - p_n\\\\|` smaller than :math:`\\\\tau` times\n size of the term being updated :math:`\\\\|p_n\\\\|`, then we\n exit with the \"correct\" answer.\n\n It is assumed that ``evaluate_fn`` will use a Jacobian return value of\n :data:`None` to indicate that :math:`F(s, t)` is exactly ``0.0``. We\n **assume** that if the function evaluates to exactly ``0.0``, then we are\n at a solution. 
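The bottom row of the augmented Jacobian assembled above comes from differentiating the cross product B1'(s) x B2'(t) with respect to s and t. A small standalone check of that identity, reusing the tangent example discussed earlier (B1(s) = [s, 2s(1 - s)] against the horizontal line y = 1/2) and a centered finite difference with an arbitrarily chosen step, looks like this:

import numpy as np

def cross2d(u, v):
    # z-component of the 3D cross product of two planar vectors.
    return u[0] * v[1] - u[1] * v[0]

def b1_prime(s):
    return np.array([1.0, 2.0 - 4.0 * s])  # hodograph of B1(s) = [s, 2s(1 - s)]

def b2_prime(t):
    return np.array([1.0, 0.0])            # hodograph of B2(t) = [t, 1/2]

b1_second = np.array([0.0, -4.0])           # B1''(s), constant for a quadratic
b2_second = np.array([0.0, 0.0])            # B2''(t) is zero for a line

s, t, h = 0.375, 0.375, 1e-6
c = lambda s_, t_: cross2d(b1_prime(s_), b2_prime(t_))

# Analytic last row of DG vs. a centered finite difference of the cross product.
dc_ds = cross2d(b1_second, b2_prime(t))   # B1''(s) x B2'(t), expected 4.0
dc_dt = cross2d(b1_prime(s), b2_second)   # B1'(s) x B2''(t), expected 0.0
fd_ds = (c(s + h, t) - c(s - h, t)) / (2 * h)
fd_dt = (c(s, t + h) - c(s, t - h)) / (2 * h)
print(dc_ds, fd_ds)  # both approximately 4.0
print(dc_dt, fd_dt)  # both approximately 0.0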
It is possible however, that badly parameterized curves\n can evaluate to exactly ``0.0`` for inputs that are relatively far away\n from a solution (see issue\n `#21 <https://github.com/dhermes/bezier/issues/21>`__).\n\n Args:\n evaluate_fn (Callable[Tuple[float, float], \\\n Tuple[Optional[numpy.ndarray], numpy.ndarray]]): A callable\n which takes :math:`s` and :math:`t` and produces the (optional)\n Jacobian matrix (``2 x 2``) and an evaluated function (``2 x 1``)\n value.\n s (float): The (first) parameter where the iteration will start.\n t (float): The (second) parameter where the iteration will start.\n\n Returns:\n Tuple[bool, float, float]: The triple of\n\n * Flag indicating if the iteration converged.\n * The current :math:`s` value when the iteration stopped.\n * The current :math:`t` value when the iteration stopped.\n \"\"\"\n # Several quantities will be tracked throughout the iteration:\n # * norm_update_prev: ||p{n} - p{n-1}|| = ||dp{n-1}||\n # * norm_update : ||p{n+1} - p{n} || = ||dp{n} ||\n # * linear_updates : This is a count on the number of times that\n # ``dp{n}`` \"looks like\" ``dp{n-1}`` (i.e.\n # is within a constant factor of it).\n norm_update_prev = None\n norm_update = None\n linear_updates = 0 # Track the number of \"linear\" updates.\n current_s = s\n current_t = t\n for index in range(MAX_NEWTON_ITERATIONS):\n jacobian, func_val = evaluate_fn(current_s, current_t)\n if jacobian is None:\n return True, current_s, current_t\n\n singular, delta_s, delta_t = _py_helpers.solve2x2(\n jacobian, func_val[:, 0]\n )\n if singular:\n break\n\n norm_update_prev = norm_update\n norm_update = np.linalg.norm([delta_s, delta_t], ord=2)\n # If ||p{n} - p{n-1}|| > 0.25 ||p{n-1} - p{n-2}||, then that means\n # our convergence is acting linear at the current step.\n if index > 0 and norm_update > 0.25 * norm_update_prev:\n linear_updates += 1\n # If ``>=2/3`` of the updates have been linear, we are near a\n # non-simple root. (Make sure at least 5 updates have occurred.)\n if index >= 4 and 3 * linear_updates >= 2 * index:\n break\n\n # Determine the norm of the \"old\" solution before updating.\n norm_soln = np.linalg.norm([current_s, current_t], ord=2)\n current_s -= delta_s\n current_t -= delta_t\n if norm_update < NEWTON_ERROR_RATIO * norm_soln:\n return True, current_s, current_t\n\n return False, current_s, current_t\n\n\ndef full_newton_nonzero(s, nodes1, t, nodes2):\n r\"\"\"Perform a Newton iteration until convergence to a solution.\n\n This is the \"implementation\" for :func:`full_newton`. In this\n function, we assume that :math:`s` and :math:`t` are nonzero.\n\n Args:\n s (float): The parameter along the first curve where the iteration\n will start.\n nodes1 (numpy.ndarray): Control points of the first curve.\n t (float): The parameter along the second curve where the iteration\n will start.\n nodes2 (numpy.ndarray): Control points of the second curve.\n\n Returns:\n Tuple[float, float]: The pair of :math:`s` and :math:`t` values that\n Newton's method converged to.\n\n Raises:\n NotImplementedError: If Newton's method doesn't converge in either the\n multiplicity 1 or 2 cases.\n \"\"\"\n # NOTE: We somewhat replicate code in ``evaluate_hodograph()``\n # here. 
This is so we don't re-compute the nodes for the first\n # (and possibly second) derivatives every time they are evaluated.\n _, num_nodes1 = np.shape(nodes1)\n first_deriv1 = (num_nodes1 - 1) * (nodes1[:, 1:] - nodes1[:, :-1])\n _, num_nodes2 = np.shape(nodes2)\n first_deriv2 = (num_nodes2 - 1) * (nodes2[:, 1:] - nodes2[:, :-1])\n evaluate_fn = NewtonSimpleRoot(nodes1, first_deriv1, nodes2, first_deriv2)\n converged, current_s, current_t = newton_iterate(evaluate_fn, s, t)\n if converged:\n return current_s, current_t\n\n # If Newton's method did not converge, then assume the root is not simple.\n second_deriv1 = (num_nodes1 - 2) * (\n first_deriv1[:, 1:] - first_deriv1[:, :-1]\n )\n second_deriv2 = (num_nodes2 - 2) * (\n first_deriv2[:, 1:] - first_deriv2[:, :-1]\n )\n evaluate_fn = NewtonDoubleRoot(\n nodes1,\n first_deriv1,\n second_deriv1,\n nodes2,\n first_deriv2,\n second_deriv2,\n )\n converged, current_s, current_t = newton_iterate(\n evaluate_fn, current_s, current_t\n )\n if converged:\n return current_s, current_t\n\n raise NotImplementedError(NEWTON_NO_CONVERGE)\n\n\ndef full_newton(s, nodes1, t, nodes2):\n r\"\"\"Perform a Newton iteration until convergence to a solution.\n\n This assumes :math:`s` and :math:`t` are sufficiently close to an\n intersection. It **does not** govern the maximum distance away\n that the solution can lie, though the subdivided intervals that contain\n :math:`s` and :math:`t` could be used.\n\n To avoid round-off issues near ``0.0``, this reverses the direction\n of a curve and replaces the parameter value :math:`\\nu` with\n :math:`1 - \\nu` whenever :math:`\\nu < \\tau` (here we use a threshold\n :math:`\\tau` equal to :math:`2^{-10}`, i.e. :attr:`ZERO_THRESHOLD`).\n\n Args:\n s (float): The parameter along the first curve where the iteration\n will start.\n nodes1 (numpy.ndarray): Control points of the first curve.\n t (float): The parameter along the second curve where the iteration\n will start.\n nodes2 (numpy.ndarray): Control points of the second curve.\n\n Returns:\n Tuple[float, float]: The pair of :math:`s` and :math:`t` values that\n Newton's method converged to.\n \"\"\"\n if s < ZERO_THRESHOLD:\n reversed1 = np.asfortranarray(nodes1[:, ::-1])\n if t < ZERO_THRESHOLD:\n reversed2 = np.asfortranarray(nodes2[:, ::-1])\n refined_s, refined_t = full_newton_nonzero(\n 1.0 - s, reversed1, 1.0 - t, reversed2\n )\n return 1.0 - refined_s, 1.0 - refined_t\n\n else:\n refined_s, refined_t = full_newton_nonzero(\n 1.0 - s, reversed1, t, nodes2\n )\n return 1.0 - refined_s, refined_t\n\n else:\n if t < ZERO_THRESHOLD:\n reversed2 = np.asfortranarray(nodes2[:, ::-1])\n refined_s, refined_t = full_newton_nonzero(\n s, nodes1, 1.0 - t, reversed2\n )\n return refined_s, 1.0 - refined_t\n\n else:\n return full_newton_nonzero(s, nodes1, t, nodes2)\n\n\nclass IntersectionClassification(enum.Enum):\n \"\"\"Enum classifying the \"interior\" curve in an intersection.\n\n Provided as the output values for :func:`.classify_intersection`.\n \"\"\"\n\n FIRST = 0\n \"\"\"The first curve is on the interior.\"\"\"\n SECOND = 1\n \"\"\"The second curve is on the interior.\"\"\"\n OPPOSED = 2\n \"\"\"Tangent intersection with opposed interiors.\"\"\"\n TANGENT_FIRST = 3\n \"\"\"Tangent intersection, first curve is on the interior.\"\"\"\n TANGENT_SECOND = 4\n \"\"\"Tangent intersection, second curve is on the interior.\"\"\"\n IGNORED_CORNER = 5\n \"\"\"Intersection at a corner, interiors don't intersect.\"\"\"\n TANGENT_BOTH = 6\n \"\"\"Tangent intersection, both 
curves are interior from some perspective.\"\"\"\n COINCIDENT = 7\n \"\"\"Intersection is actually an endpoint of a coincident segment.\"\"\"\n COINCIDENT_UNUSED = 8\n \"\"\"Unused because the edges are moving in opposite directions.\"\"\"\n\n\nclass Intersection: # pylint: disable=too-few-public-methods\n \"\"\"Representation of a curve-curve intersection.\n\n Args:\n index_first (int): The index of the first curve within a list of\n curves. Expected to be used to index within the three edges of\n a triangle.\n s (float): The parameter along the first curve where the\n intersection occurs.\n index_second (int): The index of the second curve within a list of\n curves. Expected to be used to index within the three edges of\n a triangle.\n t (float): The parameter along the second curve where the\n intersection occurs.\n interior_curve (Optional[ \\\n ~bezier.hazmat.intersection_helpers.IntersectionClassification]):\n The classification of the intersection.\n \"\"\"\n\n __slots__ = (\"index_first\", \"s\", \"index_second\", \"t\", \"interior_curve\")\n\n def __init__(self, index_first, s, index_second, t, interior_curve=None):\n self.index_first = index_first\n \"\"\"int: Index of the first curve within a list of edges.\"\"\"\n self.s = s\n \"\"\"float: The intersection parameter for the first curve.\"\"\"\n self.index_second = index_second\n \"\"\"int: Index of the second curve within a list of edges.\"\"\"\n self.t = t\n \"\"\"float: The intersection parameter for the second curve.\"\"\"\n self.interior_curve = interior_curve\n \"\"\"IntersectionClassification: Which of the curves is on the interior.\n\n See :func:`.classify_intersection` for more details.\n \"\"\"\n\n @property\n def __dict__(self):\n \"\"\"dict: Dictionary of current intersection's property namespace.\n\n This is just a stand-in property for the usual ``__dict__``. 
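The reversal step in full_newton above leans on the Bernstein symmetry B_{i,n}(1 - nu) = B_{n-i,n}(nu): reversing the order of the control points gives a curve that traces the same points with the parameter flipped, so values of nu near 0.0 can be handled as 1 - nu instead. A quick standalone check on an arbitrary cubic, using a local de Casteljau evaluator rather than the library's, is:

import numpy as np

def de_casteljau(nodes, u):
    # Evaluate a Bezier curve at parameter u via de Casteljau (nodes is 2 x N).
    work = nodes.astype(float)
    _, num_nodes = work.shape
    for step in range(1, num_nodes):
        work[:, : num_nodes - step] = (
            (1.0 - u) * work[:, : num_nodes - step]
            + u * work[:, 1 : num_nodes - step + 1]
        )
    return work[:, 0]

nodes = np.asfortranarray([[0.0, 1.0, 3.0, 4.0], [0.0, 2.0, -1.0, 1.0]])  # arbitrary cubic
reversed_nodes = np.asfortranarray(nodes[:, ::-1])

nu = 0.0004  # a parameter below the 2^{-10} threshold
# Same point either way, but the second evaluation happens far from 0.0.
print(np.allclose(de_casteljau(nodes, nu), de_casteljau(reversed_nodes, 1.0 - nu)))  # True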
This\n class defines ``__slots__`` so by default would not provide a\n ``__dict__``.\n\n This also means that the current object can't be modified by the\n returned dictionary.\n \"\"\"\n return {\n \"index_first\": self.index_first,\n \"s\": self.s,\n \"index_second\": self.index_second,\n \"t\": self.t,\n \"interior_curve\": self.interior_curve,\n }\n\n\nclass IntersectionStrategy(enum.Enum):\n \"\"\"Enum determining the type of intersection algorithm to use.\"\"\"\n\n GEOMETRIC = 0\n \"\"\"Geometric approach to intersection (via subdivision).\"\"\"\n ALGEBRAIC = 1\n \"\"\"Algebraic approach to intersection (via implicitization).\"\"\"\n", "id": "2712714", "language": "Python", "matching_score": 6.067283630371094, "max_stars_count": 165, "path": "src/python/bezier/hazmat/intersection_helpers.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pure Python generic geometry and floating point helpers.\"\"\"\n\nimport bisect\n\nimport numpy as np\n\n\n_EPS = 0.5 ** 40\n\n\ndef vector_close(vec1, vec2, eps=_EPS):\n r\"\"\"Checks that two vectors are equal to some threshold.\n\n Does so by computing :math:`s_1 = \\|v_1\\|_2` and\n :math:`s_2 = \\|v_2\\|_2` and then checking if\n\n .. math::\n\n \\|v_1 - v_2\\|_2 \\leq \\varepsilon \\min(s_1, s_2)\n\n where :math:`\\varepsilon = 2^{-40} \\approx 10^{-12}` is a fixed\n threshold. In the rare case that one of ``vec1`` or ``vec2`` is\n the zero vector (i.e. when :math:`\\min(s_1, s_2) = 0`) instead\n checks that the other vector is close enough to zero:\n\n .. math::\n\n \\|v_1\\|_2 = 0 \\Longrightarrow \\|v_2\\|_2 \\leq \\varepsilon\n\n .. note::\n\n This function assumes that both vectors have finite values,\n i.e. that no NaN or infinite numbers occur. NumPy provides\n :func:`numpy.allclose` for coverage of **all** cases.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n vec1 (numpy.ndarray): First vector (1D) for comparison.\n vec2 (numpy.ndarray): Second vector (1D) for comparison.\n eps (float): Error threshold. Defaults to :math:`2^{-40}`.\n\n Returns:\n bool: Flag indicating if they are close to precision.\n \"\"\"\n # NOTE: This relies on ``vec1`` and ``vec2`` being one-dimensional\n # vectors so NumPy doesn't try to use a matrix norm.\n size1 = np.linalg.norm(vec1, ord=2)\n size2 = np.linalg.norm(vec2, ord=2)\n if size1 == 0:\n return size2 <= eps\n\n elif size2 == 0:\n return size1 <= eps\n\n else:\n upper_bound = eps * min(size1, size2)\n return np.linalg.norm(vec1 - vec2, ord=2) <= upper_bound\n\n\ndef in_interval(value, start, end):\n \"\"\"Checks if a ``value`` is an interval (inclusive).\n\n .. note::\n\n The current implementation does the most basic check,\n however, in the future, a more generic check may be desired\n that allows wiggle room around the endpoints to account\n for round-off.\n\n .. 
note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n value (float): The value to check.\n start (float): The (inclusive) start of the interval.\n end (float): The (inclusive) end of the interval.\n\n Returns:\n bool: Indicating if the value is in the interval.\n \"\"\"\n return start <= value <= end\n\n\ndef bbox(nodes):\n \"\"\"Get the bounding box for set of points.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n nodes (numpy.ndarray): A set of points.\n\n Returns:\n Tuple[float, float, float, float]: The left, right,\n bottom and top bounds for the box.\n \"\"\"\n left, bottom = np.min(nodes, axis=1)\n right, top = np.max(nodes, axis=1)\n return left, right, bottom, top\n\n\ndef contains_nd(nodes, point):\n r\"\"\"Predicate indicating if a point is within a bounding box.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n nodes (numpy.ndarray): A set of points.\n point (numpy.ndarray): A 1D NumPy array representing a point\n in the same dimension as ``nodes``.\n\n Returns:\n bool: Indicating containment.\n \"\"\"\n min_vals = np.min(nodes, axis=1)\n if not np.all(min_vals <= point):\n return False\n\n max_vals = np.max(nodes, axis=1)\n if not np.all(point <= max_vals):\n return False\n\n return True\n\n\ndef cross_product(vec0, vec1):\n r\"\"\"Compute the cross product of vectors in :math:`\\mathbf{R}^2`.\n\n Utilizes the fact that\n\n .. math::\n\n \\left[\\begin{array}{c} A \\\\ B \\\\ 0 \\end{array}\\right] \\times\n \\left[\\begin{array}{c} C \\\\ D \\\\ 0 \\end{array}\\right] =\n \\left[\\begin{array}{c} 0 \\\\ 0 \\\\ AD - BC \\end{array}\\right]\n\n and just returns the :math:`z` component.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n vec0 (numpy.ndarray): A vector as a 1D NumPy array with two values.\n vec1 (numpy.ndarray): A vector as a 1D NumPy array with two values.\n\n Returns:\n float: The cross product (or rather, its :math:`z` component).\n \"\"\"\n return vec0[0] * vec1[1] - vec0[1] * vec1[0]\n\n\ndef matrix_product(mat1, mat2):\n \"\"\"Compute the product of two Fortran contiguous matrices.\n\n This is to avoid the overhead of NumPy converting to C-contiguous\n before computing a matrix product.\n\n Does so via ``A B = (B^T A^T)^T`` since ``B^T`` and ``A^T`` will be\n C-contiguous without a copy, then the product ``P = B^T A^T`` will\n be C-contiguous and we can return the view ``P^T`` without a copy.\n\n Args:\n mat1 (numpy.ndarray): The left-hand side matrix.\n mat2 (numpy.ndarray): The right-hand side matrix.\n\n Returns:\n numpy.ndarray: The product of the two matrices.\n \"\"\"\n return np.dot(mat2.T, mat1.T).T # pylint: disable=no-member\n\n\ndef wiggle_interval(value, wiggle=0.5 ** 44):\n r\"\"\"Check if ``value`` is in :math:`\\left[0, 1\\right]`.\n\n Allows a little bit of wiggle room outside the interval. A value\n within ``wiggle`` of ``0.0`` will be converted to ``0.0`` and similar\n for ``1.0``.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n value (float): Value to check in interval.\n wiggle (Optional[float]): The amount of wiggle room around the\n the endpoints ``0.0`` and ``1.0``. 
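The transpose identity behind matrix_product (A B = (B^T A^T)^T) is easy to check in isolation. In the sketch below the operand values are arbitrary; whether the underlying BLAS call actually skips a copy on a given NumPy build is an implementation detail, so the contiguity flags are only printed, not asserted:

import numpy as np

# Two small Fortran-contiguous operands, as used throughout these helpers.
mat1 = np.asfortranarray([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # 3 x 2
mat2 = np.asfortranarray([[7.0, 8.0, 9.0], [0.5, 1.5, 2.5]])    # 2 x 3

# The transposes are plain views and are C-contiguous, so no copy is needed
# before handing them to the matrix-multiply routine.
print(mat1.T.flags.c_contiguous, mat2.T.flags.c_contiguous)  # True True

product = np.dot(mat2.T, mat1.T).T         # (B^T A^T)^T == A B
print(np.allclose(product, mat1 @ mat2))   # True
print(product.flags.f_contiguous)          # True: the result is again Fortran-ordered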
Defaults to :math:`2^{-44}`.\n\n Returns:\n Tuple[float, bool]: Pair of\n\n * The ``value`` if it's in the interval, or ``0.0`` or ``1.0``\n if the value lies slightly outside. If the ``value`` is\n too far outside the unit interval, will be NaN.\n * Boolean indicating if the ``value`` is inside the unit interval.\n \"\"\"\n if -wiggle < value < wiggle:\n return 0.0, True\n\n elif wiggle <= value <= 1.0 - wiggle:\n return value, True\n\n elif 1.0 - wiggle < value < 1.0 + wiggle:\n return 1.0, True\n\n else:\n return np.nan, False\n\n\ndef cross_product_compare(start, candidate1, candidate2):\n \"\"\"Compare two relative changes by their cross-product.\n\n This is meant to be a way to determine which vector is more \"inside\"\n relative to ``start``.\n\n .. note::\n\n This is a helper for :func:`simple_convex_hull`.\n\n Args:\n start (numpy.ndarray): The start vector (as 1D NumPy array with\n 2 elements).\n candidate1 (numpy.ndarray): The first candidate vector (as 1D\n NumPy array with 2 elements).\n candidate2 (numpy.ndarray): The second candidate vector (as 1D\n NumPy array with 2 elements).\n\n Returns:\n float: The cross product of the two differences.\n \"\"\"\n delta1 = candidate1 - start\n delta2 = candidate2 - start\n return cross_product(delta1, delta2)\n\n\ndef in_sorted(values, value):\n \"\"\"Checks if a value is in a sorted list.\n\n Uses the :mod:`bisect` builtin to find the insertion point for\n ``value``.\n\n Args:\n values (List[int]): Integers sorted in ascending order.\n value (int): Value to check if contained in ``values``.\n\n Returns:\n bool: Indicating if the value is contained.\n \"\"\"\n index = bisect.bisect_left(values, value)\n if index >= len(values):\n return False\n\n return values[index] == value\n\n\ndef simple_convex_hull(points):\n r\"\"\"Compute the convex hull for a set of points.\n\n .. _wikibooks: https://en.wikibooks.org/wiki/Algorithm_Implementation/\\\n Geometry/Convex_hull/Monotone_chain\n\n This uses Andrew's monotone chain convex hull algorithm and this code\n used a `wikibooks`_ implementation as motivation. The code there\n is licensed CC BY-SA 3.0.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built. Note that ``scipy.spatial.ConvexHull``\n can do this as well (via Qhull), but that would require a hard\n dependency on ``scipy`` and that helper computes much more than we need.\n\n .. note::\n\n This computes the convex hull in a \"naive\" way. It's expected that\n internal callers of this function will have a small number of points\n so ``n log n`` vs. ``n^2`` vs. 
``n`` aren't that relevant.\n\n Args:\n points (numpy.ndarray): A ``2 x N`` array (``float64``) of points.\n\n Returns:\n numpy.ndarray: The ``2 x N`` array (``float64``) of ordered points in\n the polygonal convex hull.\n \"\"\"\n # NOTE: There is no corresponding \"enable\", but the disable only applies\n # in this lexical scope.\n # pylint: disable=too-many-branches\n if points.size == 0:\n return points\n\n # First, drop duplicates.\n unique_points = np.unique(points, axis=1)\n _, num_points = unique_points.shape\n if num_points < 2:\n return unique_points\n\n # Then sort the data in left-to-right order (and break ties by y-value).\n points = np.empty((2, num_points), order=\"F\")\n for index, xy_val in enumerate(\n sorted(tuple(column) for column in unique_points.T)\n ):\n points[:, index] = xy_val\n # After sorting, if there are only 2 points, return.\n if num_points < 3:\n return points\n\n # Build lower hull\n lower = [0, 1]\n for index in range(2, num_points):\n point2 = points[:, index]\n while len(lower) >= 2:\n point0 = points[:, lower[-2]]\n point1 = points[:, lower[-1]]\n if cross_product_compare(point0, point1, point2) > 0:\n break\n\n lower.pop()\n\n lower.append(index)\n # Build upper hull\n upper = [num_points - 1]\n for index in range(num_points - 2, -1, -1):\n # Don't consider indices from the lower hull (other than the ends).\n if index > 0 and in_sorted(lower, index):\n continue\n\n point2 = points[:, index]\n while len(upper) >= 2:\n point0 = points[:, upper[-2]]\n point1 = points[:, upper[-1]]\n if cross_product_compare(point0, point1, point2) > 0:\n break\n\n upper.pop()\n\n upper.append(index)\n # **Both** corners are double counted.\n size_polygon = len(lower) + len(upper) - 2\n polygon = np.empty((2, size_polygon), order=\"F\")\n for index, column in enumerate(lower[:-1]):\n polygon[:, index] = points[:, column]\n index_start = len(lower) - 1\n for index, column in enumerate(upper[:-1]):\n polygon[:, index + index_start] = points[:, column]\n return polygon\n\n\ndef is_separating(direction, polygon1, polygon2):\n \"\"\"Checks if a given ``direction`` is a separating line for two polygons.\n\n .. note::\n\n This is a helper for :func:`polygon_collide`.\n\n Args:\n direction (numpy.ndarray): A 1D ``2``-array (``float64``) of a\n potential separating line for the two polygons.\n polygon1 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\n points in a polygon.\n polygon2 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\n points in a polygon.\n\n Returns:\n bool: Flag indicating if ``direction`` is a separating line.\n \"\"\"\n # NOTE: We assume throughout that ``norm_squared != 0``. If it **were**\n # zero that would mean the ``direction`` corresponds to an\n # invalid edge.\n norm_squared = direction[0] * direction[0] + direction[1] * direction[1]\n params = []\n vertex = np.empty((2,), order=\"F\")\n for polygon in (polygon1, polygon2):\n _, polygon_size = polygon.shape\n min_param = np.inf\n max_param = -np.inf\n for index in range(polygon_size):\n vertex[:] = polygon[:, index]\n param = cross_product(direction, vertex) / norm_squared\n min_param = min(min_param, param)\n max_param = max(max_param, param)\n params.append((min_param, max_param))\n # NOTE: The indexing is based on:\n # params[0] = (min_param1, max_param1)\n # params[1] = (min_param2, max_param2)\n return params[0][0] > params[1][1] or params[0][1] < params[1][0]\n\n\ndef polygon_collide(polygon1, polygon2):\n \"\"\"Determines if two **convex** polygons collide.\n\n .. 
_SAT: https://en.wikipedia.org/wiki/Hyperplane_separation_theorem\n .. _see also: https://hackmd.io/s/ryFmIZrsl\n\n This code uses the Separating axis theorem (`SAT`_) to quickly\n determine if the polygons intersect. `See also`_.\n\n .. note::\n\n There is also a Fortran implementation of this function, which\n will be used if it can be built.\n\n Args:\n polygon1 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\n points in a polygon.\n polygon2 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\n points in a polygon.\n\n Returns:\n bool: Flag indicating if the two polygons collide.\n \"\"\"\n direction = np.empty((2,), order=\"F\")\n for polygon in (polygon1, polygon2):\n _, polygon_size = polygon.shape\n for index in range(polygon_size):\n # NOTE: When ``index == 0`` this will \"wrap around\" and refer\n # to index ``-1``.\n direction[:] = polygon[:, index] - polygon[:, index - 1]\n if is_separating(direction, polygon1, polygon2):\n return False\n\n return True\n\n\ndef solve2x2(lhs, rhs):\n \"\"\"Solve a square 2 x 2 system via LU factorization.\n\n This is meant to be a stand-in for LAPACK's ``dgesv``, which just wraps\n two calls to ``dgetrf`` and ``dgetrs``. We wrap for two reasons:\n\n * We seek to avoid exceptions as part of the control flow (which is\n what :func:`numpy.linalg.solve` does).\n * We seek to avoid excessive type- and size-checking, since this\n special case is already known.\n\n Args:\n lhs (numpy.ndarray): A ``2 x 2`` array of real numbers.\n rhs (numpy.ndarray): A 1D array of 2 real numbers.\n\n Returns:\n Tuple[bool, float, float]: A triple of\n\n * A flag indicating if ``lhs`` is a singular matrix.\n * The first component of the solution.\n * The second component of the solution.\n \"\"\"\n # A <--> lhs[0, 0]\n # B <--> lhs[0, 1]\n # C <--> lhs[1, 0]\n # D <--> lhs[1, 1]\n # E <--> rhs[0]\n # F <--> rhs[1]\n if np.abs(lhs[1, 0]) > np.abs(lhs[0, 0]):\n # NOTE: We know there is no division by zero here since ``C``\n # is **strictly** bigger than **some** value (in magnitude).\n # [A | B][x] = [E]\n # [C | D][y] [F]\n ratio = lhs[0, 0] / lhs[1, 0]\n # r = A / C\n # [A - rC | B - rD][x] [E - rF]\n # [C | D ][y] = [F ]\n # ==> 0x + (B - rD) y = E - rF\n denominator = lhs[0, 1] - ratio * lhs[1, 1]\n if denominator == 0.0:\n return True, None, None\n\n y_val = (rhs[0] - ratio * rhs[1]) / denominator\n # Cx + Dy = F ==> x = (F - Dy) / C\n x_val = (rhs[1] - lhs[1, 1] * y_val) / lhs[1, 0]\n return False, x_val, y_val\n\n else:\n if lhs[0, 0] == 0.0:\n return True, None, None\n\n # [A | B][x] = [E]\n # [C | D][y] [F]\n ratio = lhs[1, 0] / lhs[0, 0]\n # r = C / A\n # [A | B ][x] = [E ]\n # [C - rA | D - rB][y] [F - rE]\n # ==> 0x + (D - rB) y = F - rE\n denominator = lhs[1, 1] - ratio * lhs[0, 1]\n if denominator == 0.0:\n return True, None, None\n\n y_val = (rhs[1] - ratio * rhs[0]) / denominator\n # Ax + By = E ==> x = (E - B y) / A\n x_val = (rhs[0] - lhs[0, 1] * y_val) / lhs[0, 0]\n return False, x_val, y_val\n\n\nclass UnsupportedDegree(NotImplementedError):\n \"\"\"Custom exception to indicate the given degree is unsupported.\n\n This is intentionally a subclass of a :exc:`NotImplementedError`\n since it's intended to indicate a lack of an implementation. For\n example, :meth:`.Curve.reduce_` uses hard-coded matrices for\n a small subset of possible degrees, so the implementation is\n **degree-specific**:\n\n .. 
doctest:: unsupported-degree\n :options: +NORMALIZE_WHITESPACE\n\n >>> import bezier\n >>> import numpy as np\n >>> degree = 5\n >>> nodes = np.empty((2, degree + 1), order=\"F\")\n >>> curve = bezier.Curve(nodes, degree=degree)\n >>> curve.reduce_()\n Traceback (most recent call last):\n ...\n bezier.hazmat.helpers.UnsupportedDegree: The only degrees supported at\n this time are 1, 2, 3 and\n 4 (degree=5)\n\n Args:\n degree (int): The degree that is not possible to support.\n supported (Tuple[int, ...]): The degrees that are\n actually supported by the failing method.\n \"\"\"\n\n def __init__(self, degree, supported=()):\n super().__init__()\n self.degree = degree\n \"\"\"int: The degree that the caller attempted to use.\"\"\"\n self.supported = supported\n \"\"\"Tuple[int, ...]: The degrees supported by the failing method.\"\"\"\n\n def __str__(self):\n num_supported = len(self.supported)\n if num_supported == 0:\n return f\"degree={self.degree}\"\n\n degrees_str = [str(degree) for degree in self.supported]\n if num_supported == 1:\n msg = \"The only degree supported at this time is \" + degrees_str[0]\n else:\n msg = (\n \"The only degrees supported at this time are \"\n + \", \".join(degrees_str[:-1])\n + \" and \"\n + degrees_str[-1]\n )\n return f\"{msg} (degree={self.degree})\"\n", "id": "8960382", "language": "Python", "matching_score": 2.1735055446624756, "max_stars_count": 165, "path": "src/python/bezier/hazmat/helpers.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\ntry:\n import bezier\nexcept ImportError: # pragma: NO COVER\n bezier = None\n\n\nWRONG_FLAGS_TEMPLATE = \"\"\"\\\nArrays are not Fortran contiguous\narray1 flags =\n{}\narray2 flags =\n{}\n\"\"\"\nWRONG_TYPE_TEMPLATE = \"\"\"\\\nArrays have different types\narray1({}) =\n{!r}\narray2({}) =\n{!r}\n\"\"\"\nWRONG_SHAPE_TEMPLATE = \"\"\"\\\nArrays have different shapes\narray1{} =\n{!r}\narray2{} =\n{!r}\n\"\"\"\nNOT_EQ_TEMPLATE = \"\"\"\\\nArrays not equal\narray1 =\n{!r}\narray2 =\n{!r}\n\"\"\"\n\n\ndef get_random(seed):\n import numpy as np\n\n return np.random.RandomState(seed=seed) # pylint: disable=no-member\n\n\ndef binary_round(value, num_bits):\n # NOTE: This assumes ``value`` is not Inf/-Inf/NaN or\n # a subnormal number.\n hex_val = value.hex()\n # NOTE: `pre` is either \"\" or \"-\".\n pre, hex_digits = hex_val.split(\"0x1.\")\n hex_digits, post = hex_digits.split(\"p\")\n assert len(hex_digits) == 13\n all_bits = f\"{int(hex_digits, 16):052b}\"\n assert len(all_bits) == 52\n truncated_bits = all_bits[:num_bits] + \"0\" * (52 - num_bits)\n truncated_hex = f\"{int(truncated_bits, 2):013x}\"\n python_hex = pre + \"0x1.\" + truncated_hex + \"p\" + post\n return float.fromhex(python_hex)\n\n\ndef get_random_nodes(shape, seed, num_bits):\n import functools\n import numpy as np\n\n random_state = get_random(seed)\n nodes = np.asfortranarray(random_state.random_sample(shape))\n # Round the nodes to ``num_bits`` bits to avoid round-off.\n to_vectorize = 
functools.partial(binary_round, num_bits=num_bits)\n return np.vectorize(to_vectorize)(nodes)\n\n\ndef ref_triangle_uniform_nodes(pts_exponent):\n import numpy as np\n\n # Using the exponent means that we will divide by\n # 2**exp, which can be done without roundoff (for small\n # enough exponents).\n pts_per_side = 2 ** pts_exponent + 1\n total = ((pts_per_side + 1) * pts_per_side) // 2\n result = np.zeros((total, 2), order=\"F\")\n index = 0\n for y_val in range(pts_per_side):\n remaining = pts_per_side - y_val\n for x_val in range(remaining):\n result[index, :] = x_val, y_val\n index += 1\n result /= pts_per_side - 1.0\n return result\n\n\ndef check_plot_call(test_case, call, expected, **kwargs):\n import numpy as np\n\n # Unpack the call as name, positional args, keyword args\n _, positional, keyword = call\n test_case.assertEqual(keyword, kwargs)\n test_case.assertEqual(len(positional), 2)\n test_case.assertEqual(\n np.asfortranarray(positional[0]), np.asfortranarray(expected[0, :])\n )\n test_case.assertEqual(\n np.asfortranarray(positional[1]), np.asfortranarray(expected[1, :])\n )\n\n\ndef needs_speedup(test_class):\n if bezier is None:\n has_speedup = False # pragma: NO COVER\n else:\n has_speedup = bezier._HAS_SPEEDUP\n decorator = unittest.skipUnless(has_speedup, \"No speedup available\")\n return decorator(test_class)\n\n\ndef almost(test_case, expected, actual, num_ulps):\n import numpy as np\n\n test_case.assertNotEqual(expected, 0.0)\n delta = num_ulps * np.spacing(expected)\n test_case.assertAlmostEqual(actual, expected, delta=delta)\n\n\nclass NumPyTestCase(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n import numpy as np\n\n super().__init__(*args, **kwargs)\n self.addTypeEqualityFunc(np.ndarray, self.assertArrayEqual)\n\n def assertArrayEqual(self, arr1, arr2, msg=None):\n import numpy as np\n\n if (\n not arr1.flags.f_contiguous or not arr2.flags.f_contiguous\n ): # pragma: NO COVER\n standard_msg = WRONG_FLAGS_TEMPLATE.format(arr1.flags, arr2.flags)\n self.fail(self._formatMessage(msg, standard_msg))\n if arr1.dtype is not arr2.dtype: # pragma: NO COVER\n standard_msg = WRONG_TYPE_TEMPLATE.format(\n arr1.dtype, arr1, arr2.dtype, arr2\n )\n self.fail(self._formatMessage(msg, standard_msg))\n if arr1.shape != arr2.shape: # pragma: NO COVER\n standard_msg = WRONG_SHAPE_TEMPLATE.format(\n arr1.shape, arr1, arr2.shape, arr2\n )\n self.fail(self._formatMessage(msg, standard_msg))\n if not np.all(arr1 == arr2): # pragma: NO COVER\n standard_msg = NOT_EQ_TEMPLATE.format(arr1, arr2)\n self.fail(self._formatMessage(msg, standard_msg))\n", "id": "11456931", "language": "Python", "matching_score": 2.1494686603546143, "max_stars_count": 165, "path": "tests/unit/utils.py" }, { "content": "def _pretty_after(a, k):\n positional = \", \".join(repr(arg) for arg in a)\n keyword = \", \".join(\n \"{}={!r}\".format(name, value) for name, value in k.items()\n )\n if positional:\n if keyword:\n return \", {}, {}\".format(positional, keyword)\n else:\n return \", {}\".format(positional)\n else:\n if keyword:\n return \", {}\".format(keyword)\n else:\n return \"\"\n\n\nclass A:\n def __init__(self, x, y, *, z=10):\n print(\"v3: __init__({}, {!r}, {!r}, z={!r})\".format(self, x, y, z))\n self.x = x\n self.y = y\n self.z = z\n\n def __new__(cls, *args, **kwargs):\n msg = _pretty_after(args, kwargs)\n print(\"v3: __new__({}{})\".format(cls.__name__, msg))\n result = super(A, cls).__new__(cls)\n print(\" -> {}\".format(result))\n return result\n\n def __getnewargs__(self, *args, 
**kwargs):\n template = \"v3: __getnewargs__({}{})\"\n print(template.format(self, _pretty_after(args, kwargs)))\n raise NotImplementedError\n\n def __getstate__(self, *args, **kwargs):\n template = \"v3: __getstate__({}{})\"\n print(template.format(self, _pretty_after(args, kwargs)))\n raise NotImplementedError\n\n def __setstate__(self, state):\n template = \"v3: __setstate__({}, state={!r})\"\n print(template.format(self, state))\n self.x = state[\"x\"]\n self.y = state[\"y\"]\n self.z = 10\n", "id": "262051", "language": "Python", "matching_score": 0.4326200485229492, "max_stars_count": 0, "path": "v3/p2pkg/tickle.py" }, { "content": "\"\"\"Simple module containing a shared base handler for Jinja2 rendering.\"\"\"\n\n\nimport webapp2\nfrom webapp2_extras import jinja2\n\n\nclass BaseHandler(webapp2.RequestHandler):\n \"\"\"Base handler for rendering Jinja2 templates.\"\"\"\n\n @webapp2.cached_property\n def jinja2(self):\n \"\"\"Cached property holding a Jinja2 instance.\n\n Returns:\n A Jinja2 object for the current app.\n \"\"\"\n return jinja2.get_jinja2(app=self.app)\n\n def render_response(self, template, **context):\n \"\"\"Use Jinja2 instance to render template and write to output.\n\n Args:\n template: filename (relative to $PROJECT/templates) that we are\n rendering.\n context: keyword arguments corresponding to variables in template.\n \"\"\"\n rendered_value = self.jinja2.render_template(template, **context)\n self.response.write(rendered_value)\n", "id": "3072875", "language": "Python", "matching_score": 2.1227638721466064, "max_stars_count": 1, "path": "application/base_handler.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Handler utility library for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport functools\nimport logging\nimport sys\nimport traceback\nimport types\n\n# App engine specific libraries\nfrom google.appengine.api import mail\nfrom google.appengine.api import urlfetch_errors\nfrom google.appengine.ext.deferred import defer\nfrom google.appengine.ext.deferred import PermanentTaskFailure\nfrom google.appengine.ext import ndb\nfrom google.appengine import runtime\nimport webapp2\nfrom webapp2_extras import jinja2\n\n\nADMINS = {}\nADMINS_KEY = 'admins'\n# Without using the kwarg 'app' in get_jinja2, webapp2.get_app() is\n# used, which returns the active app instance.\n# [Reference: http://webapp-improved.appspot.com/api/webapp2.html]\nJINJA2_RENDERER = jinja2.get_jinja2()\nRENDERED_500_PAGE = JINJA2_RENDERER.render_template('500.html')\n\n\nclass AdminsTo(ndb.Model):\n \"\"\"Model for representing a list of project admins.\n\n See http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\n \"\"\"\n admins = ndb.UserProperty(repeated=True)\n\n def AsString(self):\n \"\"\"Uses the user emails to create a comma separated list for an email.\"\"\"\n return ', '.join([admin.email() for admin in self.admins])\n\n\ndef 
DeferFunctionDecorator(method):\n \"\"\"Decorator that allows a function to accept a defer_now argument.\n\n Args:\n method: a callable object\n\n Returns:\n A new function which will do the same work as method, will also\n accept a defer_now keyword argument, and will log the arguments\n passed in. In the case that defer_now=True, the new function\n will spawn a task in the deferred queue at /workers.\n \"\"\"\n @functools.wraps(method)\n def DeferrableMethod(*args, **kwargs):\n \"\"\"Returned function that uses method from outside scope\n\n Adds behavior for logging and deferred queue.\n \"\"\"\n logging.info('{method.func_name} called with: {locals!r}'.format(\n method=method, locals=locals()))\n\n defer_now = kwargs.pop('defer_now', False)\n if defer_now:\n kwargs['defer_now'] = False\n kwargs['_url'] = '/workers'\n\n defer(DeferrableMethod, *args, **kwargs)\n else:\n return method(*args, **kwargs)\n\n return DeferrableMethod\n\n\n@DeferFunctionDecorator\ndef EmailAdmins(error_msg):\n \"\"\"Sends email to admins with the preferred message, with option to defer.\n\n Uses the template error_notify.templ to generate an email with the {error_msg}\n sent to the list of admins in ADMINS['TO'].\n\n Args:\n error_msg: A string containing an error to be sent to admins by email\n \"\"\"\n if 'TO' not in ADMINS:\n admins_to = ndb.Key(AdminsTo, ADMINS_KEY).get()\n ADMINS['TO'] = admins_to.AsString()\n\n sender = 'Persistent Cal Errors <<EMAIL>>'\n subject = 'Persistent Cal Error: Admin Notify'\n body = JINJA2_RENDERER.render_template('error_notify.templ', error=error_msg)\n mail.send_mail(sender=sender, to=ADMINS['TO'],\n subject=subject, body=body)\n\n\nclass DeadlineDecorator(object):\n \"\"\"Decorator for HTTP verbs to handle GAE timeout.\n\n Args:\n method: a callable object, expected to be a method of an object from\n a class that inherits from webapp.RequestHandler\n\n Returns:\n A new function which calls {method}, catches certain errors\n and responds to them gracefully\n \"\"\"\n\n def __init__(self, method):\n \"\"\"Constructor for DeadlineDecorator.\n\n Args:\n method: a callable object, expected to be a method of an object from\n a class that inherits from webapp.RequestHandler\n \"\"\"\n self.method = method\n\n def __get__(self, instance, cls):\n \"\"\"Descriptor to make this callable bind to a handler.\n\n Args:\n instance: A (likely Handler) object which owns this callable.\n cls: The (unused) class of `instance`.\n\n See:\n http://stackoverflow.com/a/10421444/1068170\n http://stackoverflow.com/a/12505646/1068170\n https://docs.python.org/2/howto/descriptor.html\n\n Returns:\n Bound `instancemethod` object which both calls this method and\n is bound to another instance.\n \"\"\"\n return types.MethodType(self, instance, cls)\n\n @property\n def __name__(self):\n \"\"\"Pretty name property for binding to objects as a methods.\"\"\"\n return self.__class__.__name__ + '_instance'\n\n def __call__(self, req_self, *args, **kwargs): # pylint:disable-msg=W0142\n \"\"\"Enhanced call to method stored on decorator class.\n\n Tries to execute the method with the arguments. If either a\n PermanentTaskFailure is thrown (from deferred library) or if one of the two\n DeadlineExceededError's is thrown (inherits directly from BaseException)\n administrators are emailed and then cleanup occurs.\n\n Args:\n req_self: The object to be passed to self.method. 
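The __get__ hook on DeadlineDecorator is what lets a class-based decorator behave like the plain method it wraps once it is looked up on a handler instance. Below is a stripped-down Python 3 rendering of that binding trick; the class name CatchAndLog, the print-based "handling", and the toy Handler are invented for illustration, whereas the real decorator emails the admins and writes out the rendered 500 page:

import functools
import types

class CatchAndLog:
    # Class-based decorator for methods; __get__ makes it bind like a function would.

    def __init__(self, method):
        self.method = method
        functools.update_wrapper(self, method)

    def __get__(self, instance, owner=None):
        # Without this, ``handler.get`` would return the bare CatchAndLog object
        # and ``self`` would never be passed through to the wrapped method.
        if instance is None:
            return self
        return types.MethodType(self, instance)

    def __call__(self, handler_self, *args, **kwargs):
        try:
            return self.method(handler_self, *args, **kwargs)
        except Exception as exc:  # broad on purpose: stand-in for the real error handling
            print(f"handled: {exc!r}")

class Handler:
    @CatchAndLog
    def get(self):
        raise RuntimeError("boom")

Handler().get()  # prints: handled: RuntimeError('boom')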
Expect it to be an\n instance of a descendant of webapp.RequestHandler.\n \"\"\"\n try:\n self.method(req_self, *args, **kwargs)\n except PermanentTaskFailure:\n # In this case, the function can't be run, so we alert but do not\n # raise the error, returning a 200 status code, hence killing the task.\n msg = 'Permanent failure attempting to execute task.'\n logging.exception(msg)\n EmailAdmins(msg, defer_now=True) # pylint:disable-msg=E1123\n except (runtime.DeadlineExceededError,\n urlfetch_errors.DeadlineExceededError):\n # pylint:disable-msg=W0142\n traceback_info = ''.join(traceback.format_exception(*sys.exc_info()))\n traceback.print_exc()\n EmailAdmins(traceback_info, defer_now=True) # pylint:disable-msg=E1123\n\n req_self.response.clear()\n req_self.response.set_status(500)\n req_self.response.out.write(RENDERED_500_PAGE)\n\n\nclass ExtendedHandler(webapp2.RequestHandler):\n \"\"\"A custom version of GAE webapp2.RequestHandler.\n\n This subclass of webapp2.RequestHandler defines a handle_exception\n function that will email administrators when an exception\n occurs. In addition, the __new__ method is overridden\n to allow custom wrappers to be placed around the HTTP verbs\n before an instance is created.\n \"\"\"\n\n def __new__(cls, *args, **kwargs): # pylint:disable-msg=W0142\n \"\"\"Constructs the object.\n\n This is explicitly intended for Google App Engine's webapp2.RequestHandler.\n Requests only suport 7 of the 9 HTTP verbs, 4 of which we will\n decorate: get, post, put and delete. The other three supported\n (head, options, trace) may be added at a later time.\n Args:\n cls: A reference to the class\n\n Reference: ('http://code.google.com/appengine/docs/python/tools/'\n 'webapp/requesthandlerclass.html')\n \"\"\"\n verbs = ('get', 'post', 'put', 'delete')\n\n for verb in verbs:\n method = getattr(cls, verb, None)\n # We only re-bind the methods that are `instancemethod`s and have\n # not already been wrapped.\n if (isinstance(method, types.MethodType) and\n not isinstance(method.im_func, DeadlineDecorator)):\n setattr(cls, verb, DeadlineDecorator(method))\n\n return super(ExtendedHandler, cls).__new__(cls, *args, **kwargs)\n\n @webapp2.cached_property\n def Jinja2(self):\n \"\"\"Cached property holding a Jinja2 instance.\"\"\"\n return jinja2.get_jinja2(app=self.app)\n\n def RenderResponse(self, template, **context): # pylint:disable-msg=W0142\n \"\"\"Use Jinja2 instance to render template and write to output.\n\n Args:\n template: filename (relative to $PROJECT/templates) that we are rendering\n context: keyword arguments corresponding to variables in template\n \"\"\"\n rendered_value = self.Jinja2.render_template(template, **context)\n self.response.write(rendered_value)\n\n # pylint:disable-msg=C0103,W0613\n def handle_exception(self, exception, debug_mode):\n \"\"\"Custom handler for all GAE errors that inherit from Exception.\n\n Args:\n exception: the exception that was thrown\n debug_mode: True if the web application is running in debug mode\n \"\"\"\n traceback_info = ''.join(traceback.format_exception(*sys.exc_info()))\n traceback.print_exc()\n EmailAdmins(traceback_info, defer_now=True) # pylint:disable-msg=E1123\n\n self.response.clear()\n self.response.set_status(500)\n self.response.out.write(RENDERED_500_PAGE)\n", "id": "5972847", "language": "Python", "matching_score": 4.465190410614014, "max_stars_count": 1, "path": "handler_utils.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Handlers for interactive admin console (web-based).\n\nThese templates are largly borrowed from:\n$PYTHON_LIB/google/appengine/ext/admin/templates/\nand these handlers from\n$PYTHON_LIB/google/appengine/ext/admin/__init__.py\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport cStringIO\nimport sys\nimport traceback\n\n# App engine specific libraries\nfrom google.appengine.ext.admin import get_xsrf_token\nfrom google.appengine.ext.admin import xsrf_required\nimport webapp2\n\n# App specific libraries\nfrom handler_utils import ExtendedHandler\n\n\nINTERACTIVE_PATH = '/admin/interactive'\nINTERACTIVE_EXECUTE_PATH = INTERACTIVE_PATH + '/execute'\n\n\nclass InteractivePageHandler(ExtendedHandler):\n \"\"\"Shows our interactive console HTML.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Serves interactive console.\"\"\"\n application_name = self.request.environ.get('APPLICATION_ID', '')\n xsrf_token = get_xsrf_token()\n self.RenderResponse('interactive.html',\n application_name=application_name,\n interactive_execute_path=INTERACTIVE_EXECUTE_PATH,\n xsrf_token=xsrf_token)\n\n\nclass InteractiveExecuteHandler(ExtendedHandler):\n \"\"\"Executes the Python code submitted in a POST within this context.\n\n For obvious reasons, this should only be available to administrators\n of the applications.\n \"\"\"\n\n @xsrf_required\n def post(self): # pylint:disable-msg=C0103\n \"\"\"Handles POSTed code from interactive console.\"\"\"\n save_stdout = sys.stdout\n results_io = cStringIO.StringIO()\n try:\n sys.stdout = results_io\n\n\n code = self.request.get('code')\n code = code.replace('\\r\\n', '\\n')\n\n try:\n compiled_code = compile(code, '<string>', 'exec')\n exec(compiled_code, globals()) # pylint:disable-msg=W0122\n except Exception: # pylint:disable-msg=W0703\n traceback.print_exc(file=results_io)\n finally:\n sys.stdout = save_stdout\n\n results = results_io.getvalue()\n self.RenderResponse('interactive-output.html', output=results)\n\n\nAPPLICATION = webapp2.WSGIApplication([\n (INTERACTIVE_PATH, InteractivePageHandler),\n (INTERACTIVE_EXECUTE_PATH, InteractiveExecuteHandler)],\n debug=True)\n", "id": "7195650", "language": "Python", "matching_score": 1.0098880529403687, "max_stars_count": 1, "path": "interactive.py" }, { "content": "from __future__ import print_function\n\nimport base64\nimport codecs\nimport os\nimport time\n\nfrom selenium import webdriver\n\nimport local_settings\nimport utils\n\n\nBASE_URI = ('http://games.espn.com/tournament-challenge-bracket/'\n '2017/en/group?groupID=')\nGROUP_URI = '{}{:d}'.format(BASE_URI, local_settings.GROUP_ID)\n\nLINKS_DIR = os.path.join(local_settings.YEAR, 'links_html')\nBASE_FILENAME = os.path.join(LINKS_DIR, base64.b64encode(GROUP_URI))\n\n\ndef _write_content(driver, page_number):\n filename = '{}-{:02d}.html'.format(BASE_FILENAME, page_number)\n with codecs.open(filename, 'w', 'utf-8') as fh:\n msg = 'Writing to {}'.format(filename)\n print(msg)\n fh.write(driver.page_source)\n\n\ndef 
_get_current_page(driver):\n try:\n pg_num_elts = driver.find_elements_by_class_name('pageNumber')\n curr_page, = [elt for elt in pg_num_elts\n if elt.tag_name == 'strong']\n return int(curr_page.text)\n except:\n # In the case that the driver becomes unattached partway through,\n # we catch any possible exception.\n return -1\n\n\ndef _click_next(driver, page_number):\n \"\"\"Clicks next page link (JS only).\n\n Returns boolean indicating if another page exists.\n \"\"\"\n curr_page = -1\n while curr_page == -1:\n curr_page = _get_current_page(driver)\n\n if curr_page != page_number:\n raise ValueError('Expected page number to match.')\n\n next_page_links = driver.find_elements_by_class_name('nextPage')\n if len(next_page_links) == 0:\n return False\n elif len(next_page_links) != 1:\n raise ValueError('Expected exactly one next page link.')\n\n # Increment before clicking.\n page_number += 1\n if page_number > local_settings.NUM_PAGES:\n return False\n\n next_page_links[0].click()\n while _get_current_page(driver) != page_number:\n print('New page has not loaded. Sleeping 0.5 seconds.')\n time.sleep(0.5)\n\n return True\n\n\ndef _get_all_pages(driver):\n driver.get(GROUP_URI)\n\n page_number = 1\n _write_content(driver, page_number)\n\n while _click_next(driver, page_number):\n page_number += 1\n _write_content(driver, page_number)\n\n\ndef get_all_pages():\n driver = webdriver.Firefox()\n try:\n _get_all_pages(driver)\n finally:\n driver.close()\n\n\nif __name__ == '__main__':\n utils.prepare_directory(LINKS_DIR)\n get_all_pages()\n", "id": "10494993", "language": "Python", "matching_score": 3.372985601425171, "max_stars_count": 1, "path": "get_bracket_pool.py" }, { "content": "from __future__ import print_function\n\nimport json\nimport os\nimport requests\n\nimport local_settings\nimport utils\n\n\nBASE_URI = ('http://games.espn.com/tournament-challenge-bracket/'\n '2017/en/entry?entryID=')\nBRACKETS_DIR = os.path.join(\n local_settings.YEAR, 'brackets_html')\n\n\ndef get_bracket_ids():\n with open(utils.BRACKET_LINKS_FILE, 'r') as fh:\n bracket_dict = json.load(fh)\n return bracket_dict.values()\n\n\ndef download_bracket(entry_id):\n filename = os.path.join(BRACKETS_DIR,\n str(entry_id) + '.html')\n if os.path.exists(filename):\n msg = 'Exists: {}'.format(filename)\n print(msg)\n return\n\n uri = '%s%d' % (BASE_URI, entry_id)\n response = requests.get(uri)\n if response.status_code != 200:\n raise ValueError('Failed', response, entry_id)\n with open(filename, 'w') as fh:\n msg = 'Writing {}'.format(filename)\n print(msg)\n fh.write(response.content)\n response.close()\n\n\nif __name__ == '__main__':\n utils.prepare_directory(BRACKETS_DIR)\n for entry_id in get_bracket_ids():\n download_bracket(entry_id)\n", "id": "4233066", "language": "Python", "matching_score": 1.2926650047302246, "max_stars_count": 1, "path": "get_brackets_html.py" }, { "content": "#!/usr/bin/env python\n\n# Libraries\nimport __builtin__\nimport base64\nimport datetime\nimport email\nimport email.header\nimport imapclient\nimport os\nimport re\nimport shutil\nimport subprocess\n\n# Local imports\nimport account_settings\n\n\nHOST = 'imap.googlemail.com'\nPORT = 993\nSSL = True\n\nFULL_MSG_FIELD = 'RFC822'\nDATE_FIELD = 'INTERNALDATE'\nSUBJECT_FIELD = 'BODY[HEADER.FIELDS (SUBJECT)]'\nFROM_FIELD = 'BODY[HEADER.FIELDS (FROM)]'\nFETCH_FIELDS = [FULL_MSG_FIELD, DATE_FIELD, SUBJECT_FIELD, FROM_FIELD]\n\nif hasattr(__builtin__, '__IPYTHON__'):\n CURRENT_DIR = os.getcwd()\nelse:\n CURRENT_DIR = 
os.path.dirname(os.path.abspath(__file__))\nSTUDENTS_DIR = os.path.join(CURRENT_DIR, 'students')\nCHECKPOINT_FILE = os.path.join(STUDENTS_DIR, 'CHECKPOINT')\nFILENAME_RE = re.compile('^(\\d{8})_?(hw|HW)(\\d{1}).(tar|tar.gz|zip)$')\nDATETIME_STRING_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'\nSEPARATOR = ('=' * 70 + '\\n') * 3\n\n\ndef login_to_server(username, password):\n print 'Logging in to server as:', username\n server = imapclient.IMAPClient(HOST, port=PORT, ssl=SSL, use_uid=True)\n server.login(username, password)\n return server\n\n\ndef get_attachment(imap_payload_dict):\n # Assumes a single attachment.\n msg_date = imap_payload_dict[DATE_FIELD]\n\n msg = email.message_from_string(imap_payload_dict[FULL_MSG_FIELD])\n attachment = None\n for part in msg.walk():\n part_filename = part.get_filename()\n if part_filename is not None:\n if attachment is not None:\n raise ValueError('Expected a single attachment.')\n attachment = (part_filename, part.get_payload())\n\n if attachment is None:\n return None\n\n payload = ''.join(attachment[1].split())\n payload_bytes = base64.urlsafe_b64decode(payload)\n return msg_date, attachment[0], payload_bytes\n\n\ndef parse_filename(filename):\n match = FILENAME_RE.match(filename)\n\n if match is None:\n print 'Could not parse attachment filename:', filename\n proceed = raw_input('Would you like to proceed? [y/N] ')\n if proceed.lower() != 'y':\n # Re-raise the error.\n raise\n else:\n return\n\n student_id, _, assignment, zip_type = match.groups()\n return student_id, assignment, zip_type\n\n\ndef create_folder(student_id, assignment, msg_date):\n student_directory = os.path.join(STUDENTS_DIR, student_id)\n if os.path.exists(student_directory):\n if not os.path.isdir(student_directory):\n raise OSError('File %s should be a student\\'s directory.' %\n (student_directory,))\n else:\n os.mkdir(student_directory)\n\n assignment_directory = os.path.join(student_directory, assignment)\n timestamp_fi = os.path.join(assignment_directory, 'TIMESTAMP')\n update_directory = False\n if os.path.exists(assignment_directory):\n if not os.path.isdir(assignment_directory):\n raise OSError('File %s should be an assignment directory.' 
%\n (assignment_directory,))\n\n with open(timestamp_fi, 'r') as fh:\n saved_msg_date = datetime.datetime.strptime(fh.read(),\n DATETIME_STRING_FORMAT)\n\n if saved_msg_date < msg_date:\n print 'Conflict in', assignment_directory\n print 'Assignment already saved at time',\n print saved_msg_date.strftime(DATETIME_STRING_FORMAT)\n print 'We will over-write, since received at',\n print msg_date.strftime(DATETIME_STRING_FORMAT)\n shutil.rmtree(assignment_directory)\n update_directory = True\n else:\n update_directory = True\n\n if update_directory:\n os.mkdir(assignment_directory)\n timestamp_fi = os.path.join(assignment_directory, 'TIMESTAMP')\n with open(timestamp_fi, 'w') as fh:\n fh.write(msg_date.strftime(DATETIME_STRING_FORMAT))\n\n return assignment_directory, update_directory\n\n\ndef save_email(imap_payload_dict):\n attachment = get_attachment(imap_payload_dict)\n if attachment is None:\n print 'Nothing to save:'\n print 'From:', imap_payload_dict[FROM_FIELD],\n print 'Subject:', imap_payload_dict[SUBJECT_FIELD]\n return\n\n msg_date, filename, payload_bytes = attachment\n [(filename, _)] = email.header.decode_header(filename)\n parsed = parse_filename(filename)\n if parsed is None:\n return\n\n student_id, assignment, zip_type = parsed\n directory, new_directory = create_folder(student_id, assignment, msg_date)\n if not new_directory:\n print 'Directory %r already has newer content.' % (directory,)\n return\n\n full_path = os.path.join(directory, filename)\n with open(full_path, 'w') as fh:\n fh.write(payload_bytes)\n\n try:\n if zip_type == 'zip':\n subprocess.check_call(['unzip', full_path,\n '-d', os.path.dirname(full_path)])\n elif zip_type == 'tar':\n subprocess.check_call(['tar', '-xvf', full_path,\n '--directory', os.path.dirname(full_path)])\n elif zip_type == 'tar.gz':\n subprocess.check_call(['tar', '-zxvf', full_path,\n '--directory', os.path.dirname(full_path)])\n else:\n raise ValueError('Unexpected zip type: %s' % (zip_type,))\n except subprocess.CalledProcessError as err:\n print 'An error has occurred:', err.returncode\n print 'From:', ' '.join(err.cmd)\n proceed = raw_input('Would you like to proceed? [y/N] ')\n if proceed.lower() != 'y':\n # Re-raise the error.\n raise\n\n\ndef make_data_dir():\n if os.path.exists(STUDENTS_DIR):\n if not os.path.isdir(STUDENTS_DIR):\n raise OSError('File %s should be a directory.' %\n (STUDENTS_DIR,))\n else:\n os.mkdir(STUDENTS_DIR)\n\n\ndef get_email_content(last_uid=None):\n server = login_to_server(account_settings.USERNAME,\n account_settings.PASSWORD)\n server.select_folder(account_settings.FOLDER_NAME, readonly=True)\n print 'Getting message IDs (IDs local to folder).'\n folder_msg_ids = server.search()\n if last_uid is not None:\n # In the case that `last_uid` is the max, this will return [last_uid],\n # when we actually want [].\n folder_msg_ids = [msg_id for msg_id in folder_msg_ids\n if msg_id > last_uid]\n print 'Retrieved %d message IDs.' 
% len(folder_msg_ids)\n\n if folder_msg_ids:\n # NOTE: This could be problematic if there are too many messages.\n folder_msg_contents = server.fetch(folder_msg_ids, FETCH_FIELDS)\n print 'Retrieved full emails from server.'\n else:\n folder_msg_contents = {}\n\n return server, folder_msg_contents\n\n\ndef determine_work_checkpoint():\n if os.path.exists(CHECKPOINT_FILE):\n with open(CHECKPOINT_FILE, 'r') as fh:\n return int(fh.read())\n\n\ndef set_work_checkpoint(last_uid):\n # NOTE: This assumes the label will be unchanged server side.\n # If the account owner deletes messages, then the labels\n # may change.\n with open(CHECKPOINT_FILE, 'w') as fh:\n fh.write('%d' % (last_uid,))\n\n\ndef main():\n make_data_dir()\n\n last_uid = determine_work_checkpoint()\n\n server, folder_msg_contents = get_email_content(last_uid=last_uid)\n\n for imap_payload_dict in folder_msg_contents.itervalues():\n save_email(imap_payload_dict)\n print SEPARATOR\n\n if folder_msg_contents:\n set_work_checkpoint(max(folder_msg_contents.keys()))\n\n server.logout()\n\n\nif __name__ == '__main__':\n # H/T: http://stackoverflow.com/a/9093598/1068170\n if hasattr(__builtin__, '__IPYTHON__'):\n print 'In IPYTHON, not running main().'\n else:\n main()\n", "id": "11912363", "language": "Python", "matching_score": 3.144251823425293, "max_stars_count": 0, "path": "get_assignments.py" }, { "content": "# Libraries\nimport imapclient\n\n# Local imports\nimport constants\n\n\ndef login_to_server(username, password):\n print 'Logging in to server as:', username\n server = imapclient.IMAPClient(constants.HOST, port=constants.PORT,\n ssl=constants.SSL, use_uid=True)\n server.login(username, password)\n return server\n", "id": "3326010", "language": "Python", "matching_score": 0.3460898697376251, "max_stars_count": 0, "path": "utils.py" }, { "content": "#!/usr/bin/env python\n\n# Libraries\nimport imapclient\nimport imaplib\nimport json\nimport os\n\n# Local imports\nimport account_settings\nimport constants\nimport utils\n\n\n# Ordered tuple of folders which we need to consider first since\n# we expect them to be the most common.\nSPECIAL_FOLDERS = ('[Gmail]/All Mail', 'INBOX')\nCURR_DIR = os.path.dirname(__file__)\nFETCH_BLOCK_SIZE = 4000\nSKIPPED_SENTINEL = object()\n\n\ndef save_old_folders(server):\n all_folders_fi = os.path.join(CURR_DIR, constants.OLD_DATA_DIR,\n constants.ALL_FOLDERS_FI)\n if os.path.exists(all_folders_fi):\n raise OSError('All folders file: %s already exists.' % all_folders_fi)\n\n print 'Getting all folders'\n all_folders = server.list_folders()\n all_folders = [triple[2] for triple in all_folders]\n\n # Remove the special folders from the list.\n for folder in SPECIAL_FOLDERS:\n all_folders.remove(folder)\n\n # Put the special folders in front (in reverse order so the first\n # is the last to be put in front).\n for folder in reversed(SPECIAL_FOLDERS):\n all_folders.insert(0, folder)\n\n print '=' * 70\n\n print 'Saving', all_folders_fi\n with open(all_folders_fi, 'w') as fh:\n json.dump(all_folders, fh)\n print 'Saved', all_folders_fi\n\n return all_folders\n\n\ndef get_folder_contents(server, folder_msg_ids):\n num_folder_msg_ids = len(folder_msg_ids)\n\n result = {}\n for index in xrange(0, num_folder_msg_ids, FETCH_BLOCK_SIZE):\n begin_item = index + 1\n end_item = min(index + FETCH_BLOCK_SIZE, num_folder_msg_ids)\n print 'Fetching %d through %d of %d.' 
% (begin_item, end_item,\n num_folder_msg_ids)\n\n msg_ids_slice = folder_msg_ids[index:index + FETCH_BLOCK_SIZE]\n folder_msg_contents = server.fetch(\n msg_ids_slice, [constants.GMAIL_ID_FIELD, constants.MSG_ID_FIELD])\n if sorted(folder_msg_contents.keys()) != sorted(msg_ids_slice):\n raise ValueError('Keys retrieved differ from those requested.')\n result.update(folder_msg_contents)\n\n return result\n\n\ndef get_current_folder_data(server, folder, msg_to_folder, folder_data):\n current_folder_data = {}\n\n # Have to use readonly=True since some folders (like Chats) are READONLY.\n try:\n server.select_folder(folder, readonly=True)\n except imaplib.IMAP4.error as err:\n print 'Folder', folder, 'does not exist'\n print 'Error:', err\n return SKIPPED_SENTINEL\n\n print 'Getting folder local message IDs'\n folder_msg_ids = server.search()\n print 'Retrieved %d message IDs.' % len(folder_msg_ids)\n print 'Getting all \"Message-ID\" headers and Gmail Message IDs.'\n folder_msg_contents = get_folder_contents(server, folder_msg_ids)\n\n print 'Adding message data to dictionary'\n for folder_msg_id, msg_dict in folder_msg_contents.iteritems():\n gmail_msg_id = msg_dict[constants.GMAIL_ID_FIELD]\n msg_id = msg_dict[constants.MSG_ID_KEY]\n\n if gmail_msg_id in msg_to_folder:\n # Update the message entry in the original folder.\n actual_folder = msg_to_folder[gmail_msg_id]\n actual_folder_data = folder_data[actual_folder]\n # Update the message entry in the original folder.\n actual_msg_entry = actual_folder_data[gmail_msg_id]\n actual_msg_entry['other_folders'].append((folder, folder_msg_id))\n # Make sure data agrees.\n if actual_msg_entry['msg_id'] != msg_id:\n raise ValueError('Mis-matching message IDs for same message.')\n else:\n msg_to_folder[gmail_msg_id] = folder\n current_folder_data[gmail_msg_id] = {'msg_id': msg_id,\n 'other_folders': [],\n 'folder_msg_id': folder_msg_id}\n\n return current_folder_data\n\n\ndef get_all_folder_data(server, all_folders):\n skipped_folders_fi = os.path.join(CURR_DIR, constants.OLD_DATA_DIR,\n constants.SKIPPED_FOLDERS_FI)\n if os.path.exists(skipped_folders_fi):\n raise OSError('Skipped foldersfile: %s already exists.' %\n skipped_folders_fi)\n\n folder_data_fi = os.path.join(CURR_DIR, constants.OLD_DATA_DIR,\n constants.FOLDER_DATA_FI)\n if os.path.exists(folder_data_fi):\n raise OSError('Folder data file: %s already exists.' 
% folder_data_fi)\n\n msg_to_folder = {}\n folder_data = {}\n skipped_folders = []\n for folder in all_folders:\n print '=' * 70\n print 'Beginning folder:', folder\n current_folder_data = get_current_folder_data(server, folder,\n msg_to_folder, folder_data)\n if current_folder_data is SKIPPED_SENTINEL:\n skipped_folders.append(folder)\n else:\n folder_data[folder] = current_folder_data\n\n print '=' * 70\n\n print 'Saving', skipped_folders_fi\n with open(skipped_folders_fi, 'w') as fh:\n json.dump(skipped_folders, fh)\n print 'Saved', skipped_folders_fi\n\n print '=' * 70\n\n print 'Saving', folder_data_fi\n with open(folder_data_fi, 'w') as fh:\n json.dump(folder_data, fh)\n print 'Saved', folder_data_fi\n\n return folder_data\n\n\ndef main():\n server = utils.login_to_server(account_settings.OLD_USERNAME,\n account_settings.OLD_PASSWORD)\n all_folders = save_old_folders(server)\n folder_data = get_all_folder_data(server, all_folders)\n server.logout()\n\n\nif __name__ == '__main__':\n main()\n", "id": "11287659", "language": "Python", "matching_score": 3.710721969604492, "max_stars_count": 0, "path": "step3_get_old_message_info.py" }, { "content": "#!/usr/bin/env python\n\n# Libraries\nimport imapclient\nimport json\nimport os\nimport re\n\n# Local imports\nimport account_settings\nimport constants\nimport utils\n\n\nRELABEL_MAP = {\n '[Gmail]/Chats': account_settings.CHATS_NEW_LABEL,\n}\nCURR_DIR = os.path.dirname(__file__)\nMIGRATION_PROGRESS_FI = os.path.join(CURR_DIR, constants.NEW_DATA_DIR,\n constants.MIGRATION_PROGRESS_FI)\nPRINT_INTERVAL = 1000\n\n\ndef store_migration_progress(migration_progress):\n with open(MIGRATION_PROGRESS_FI, 'w') as fh:\n json.dump(migration_progress, fh)\n\n\ndef read_migration_progress():\n if not os.path.exists(MIGRATION_PROGRESS_FI):\n migration_progress = {}\n store_migration_progress(migration_progress)\n else:\n with open(MIGRATION_PROGRESS_FI, 'r') as fh:\n migration_progress = json.load(fh)\n\n return migration_progress\n\n\ndef get_new_folder_msg_id(folder_uid, append_status):\n append_status_template = (r'^\\[APPENDUID %d (\\d+)\\] \\(Success\\)$' %\n folder_uid)\n match = re.match(append_status_template, append_status)\n return int(match.groups()[0])\n\n\ndef add_msg_to_new_account(msg_data_dict, folder_new, folder_uid,\n server_old, server_new):\n msg_id = msg_data_dict['msg_id']\n\n other_folders = msg_data_dict['other_folders']\n folder_msg_id = msg_data_dict['folder_msg_id']\n\n # Get the message from the old folder / account.\n msg_contents = server_old.fetch([folder_msg_id],\n constants.FULL_MSG_FIELDS)\n flags = msg_contents[folder_msg_id][constants.FLAGS_FIELD]\n msg = msg_contents[folder_msg_id][constants.FULL_MSG_FIELD]\n msg_time = msg_contents[folder_msg_id][constants.DATE_FIELD]\n\n # Add the message to the new account.\n append_status = server_new.append(folder_new, msg.encode('utf-8'),\n flags=flags, msg_time=msg_time)\n new_folder_msg_id = get_new_folder_msg_id(folder_uid, append_status)\n for other_folder, _ in other_folders:\n server_new.copy([new_folder_msg_id], other_folder)\n\n\ndef migrate(folder_data, skipped_folders, migration_progress,\n server_old, server_new):\n total_msgs = sum([len(val) for val in folder_data.values()\n if val is not None])\n count = 0\n for folder, gmail_msg_dict in folder_data.iteritems():\n if folder in skipped_folders:\n print 'Skipping', folder, 'since in skipped folders.'\n continue\n else:\n print 'Beginning', folder\n list_of_completed = migration_progress.setdefault(folder, [])\n\n # Open 
folders for files.\n server_old.select_folder(folder, readonly=True)\n folder_new = folder\n if folder in RELABEL_MAP:\n print 'Re-labeling new folder.'\n folder_new = RELABEL_MAP[folder]\n if not server_new.folder_exists(folder_new):\n server_new.create_folder(folder_new)\n print 'Created', folder_new\n\n new_folder_info = server_new.select_folder(folder_new)\n folder_uid = new_folder_info['UIDVALIDITY']\n\n for gmail_id, msg_data_dict in gmail_msg_dict.iteritems():\n count += 1\n if count % PRINT_INTERVAL == 0:\n print 'Folder: %s, %d / %d' % (folder, count, total_msgs)\n\n if gmail_id in list_of_completed:\n continue\n\n add_msg_to_new_account(msg_data_dict, folder_new, folder_uid,\n server_old, server_new)\n list_of_completed.append(gmail_id)\n store_migration_progress(migration_progress)\n\n\ndef main():\n folder_data_fi = os.path.join(CURR_DIR, constants.OLD_DATA_DIR,\n constants.FOLDER_DATA_FI)\n with open(folder_data_fi, 'r') as fh:\n folder_data = json.load(fh)\n\n skipped_folders_fi = os.path.join(CURR_DIR, constants.OLD_DATA_DIR,\n constants.SKIPPED_FOLDERS_FI)\n with open(skipped_folders_fi, 'r') as fh:\n skipped_folders = json.load(fh)\n\n migration_progress = read_migration_progress()\n\n # Log-in to both old and new servers.\n server_old = utils.login_to_server(account_settings.OLD_USERNAME,\n account_settings.OLD_PASSWORD)\n server_new = utils.login_to_server(account_settings.NEW_USERNAME,\n account_settings.NEW_PASSWORD)\n migrate(folder_data, skipped_folders, migration_progress,\n server_old, server_new)\n\n # Log-out of both servers.\n server_old.logout()\n server_new.logout()\n\n\nif __name__ == '__main__':\n main()\n", "id": "8869135", "language": "Python", "matching_score": 5.618786334991455, "max_stars_count": 0, "path": "step4_move_old_data.py" }, { "content": "HOST = 'imap.googlemail.com'\nPORT = 993\nSSL = True\n\nOLD_DATA_DIR = 'old_data'\nNEW_DATA_DIR = 'new_data'\nALL_FOLDERS_FI = 'all_folders.json'\nFOLDER_DATA_FI = 'folder_data.json'\nSKIPPED_FOLDERS_FI = 'skipped_folders.json'\nMIGRATION_PROGRESS_FI = 'migration_progress.json'\n\nGMAIL_ID_FIELD = 'X-GM-MSGID'\nFLAGS_FIELD = 'FLAGS'\nFULL_MSG_FIELD = 'RFC822'\nDATE_FIELD = 'INTERNALDATE'\nFULL_MSG_FIELDS = [FLAGS_FIELD, FULL_MSG_FIELD, DATE_FIELD]\nMSG_ID_FIELD = 'BODY.PEEK[HEADER.FIELDS (MESSAGE-ID)]'\nMSG_ID_KEY = 'BODY[HEADER.FIELDS (MESSAGE-ID)]'\nSUBJECT_FIELD = 'BODY.PEEK[HEADER.FIELDS (Subject)]'\nSUBJECT_KEY = 'BODY[HEADER.FIELDS (SUBJECT)]'\n", "id": "7346385", "language": "Python", "matching_score": 1.6342202425003052, "max_stars_count": 0, "path": "constants.py" }, { "content": "#!/usr/bin/env python\n\n# Libraries\nimport imapclient\n\n# Local imports\nimport account_settings\nimport utils\n\n\ndef get_all_folders(username, password):\n server = utils.login_to_server(username, password)\n\n all_folders = server.list_folders()\n all_folders = [triple[2] for triple in all_folders]\n\n return all_folders, server\n\n\ndef main():\n all_folders_old, server_old = get_all_folders(\n account_settings.OLD_USERNAME, account_settings.OLD_PASSWORD)\n # We don't need the old server connection any longer.\n server_old.logout()\n\n all_folders_new, server_new = get_all_folders(\n account_settings.NEW_USERNAME, account_settings.NEW_PASSWORD)\n\n uncreated_old_folders = set(all_folders_old).difference(all_folders_new)\n\n for folder in uncreated_old_folders:\n server_new.create_folder(folder)\n print 'Created', folder\n\n server_new.logout()\n\n\nif __name__ == '__main__':\n main()\n", "id": "8016078", "language": 
"Python", "matching_score": 2.2324910163879395, "max_stars_count": 0, "path": "step1_make_new_labels.py" }, { "content": "IMPORT_MESSAGE = \"\"\"\\\nTo use this tool, remove the line raising the import error and add\nyour GMail address and password to the variables listed below.\nIt assumes you are migrating from an \"OLD_\" account to a \"NEW_\"\naccount.\n\nWhen adding your password, make sure to use an application specific\npassword (ASP) if you have two-factor authentication enabled for\nyour account.\n\"\"\"\nraise ImportError(IMPORT_MESSAGE)\n\n# Be careful not to commit your password to GitHub. To ensure that\n# this won't occur, you can run\n# git update-index --assume-unchanged account_settings.py\n# and then your repository will ignore all your changes.\n# Reference: http://blog.pagebakers.nl/2009/01/29/\nOLD_USERNAME = '<EMAIL>'\nOLD_PASSWORD = ''\nNEW_USERNAME = '<EMAIL>'\nNEW_PASSWORD = ''\n\nCHATS_NEW_LABEL = 'Old Account Chats'\n", "id": "8127366", "language": "Python", "matching_score": 0.7625445127487183, "max_stars_count": 0, "path": "account_settings.py" }, { "content": "#!/usr/bin/env python\n\n# p_1^2 + p_2^3 + p_3^4 = n\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_factors\n\n\ndef nontrivial_factorizations(n):\n factor_hash = {1: [1]}\n factor_hash = all_factors(n, factor_hash)\n result = {1: [[]], 2: [[2]]}\n value_hash = {}\n for i in range(3, n + 1):\n to_add = [[i]]\n for factor in factor_hash[i]:\n if factor > 1 and factor ** 2 <= i:\n for subset1 in result[factor]:\n for subset2 in result[i / factor]:\n cand = sorted(subset1 + subset2)\n if cand not in to_add:\n to_add.append(cand)\n for match in to_add:\n new_k = i + len(match) - sum(match)\n if new_k > 1 and new_k not in value_hash:\n value_hash[new_k] = i\n result[i] = to_add\n return result, value_hash\n\n\ndef main(verbose=False):\n MAX_k = 12000\n MAX_n = MAX_k + 1000\n _, value_hash = nontrivial_factorizations(MAX_n)\n final_list = []\n for desired in range(2, MAX_k + 1):\n if desired not in value_hash:\n raise Exception(\"Subset not large enough, raise MAX_n.\")\n if value_hash[desired] not in final_list:\n final_list.append(value_hash[desired])\n return sum(final_list)\n\nif __name__ == '__main__':\n print euler_timer(88)(main)(verbose=True)\n", "id": "7581951", "language": "Python", "matching_score": 1.7351998090744019, "max_stars_count": 7, "path": "python/complete/no088.py" }, { "content": "#!/usr/bin/env python\n\n# It can be verified that there are 23 positive integers less\n# than 1000 that are divisible by at least four distinct primes\n# less than 100.\n\n# Find how many positive integers less than 10**16 are divisible\n# by at least four distinct primes less than 100.\n\n\n######################\n# 2*3*5*7*11*13*17*19*23*29*31*37*41 = 304250263527210 < 10**16 but\n# 304250263527210*43 = 13082761331670030 > 10**16\n\n# Therefore we have at most 13 primes in our products\n\n# Let N_{p_1,...,p_k} = {n | n < L, p_1, ... 
, p_k all divide n}\n# One can easily show N_{p_1,...,p_k} = floor((L - 1)/(p_1*...*p_k))\n\n# By PIE, our desired number is\n# sum_{s in S} N_{s}\n# where S is the set of all subsets of size 4 of the primes under 100\n\n# Since N_{p1,...,p5} can arise as an intersection of (5 C 4) subsets of\n# size 4, we need to subtract off (5 C 4) - 1 = (4 C 3)\n\n# Similarly N_{p1,...,p6} can arise as an intersection of (6 C 4) subsets of\n# size 4 of which we've already counted (5 C 4), so we need to add back\n# in (6 C 4) - (5 C 4) = (5 C 3), etc.\n\n# In general, we need to add/subtract back (index - 1 C 3)\n\nimport operator\n\nfrom math import log\n\nfrom python.decorators import euler_timer\nfrom python.functions import all_subsets\nfrom python.functions import choose\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n primes = sieve(100)\n\n MAX_n = 10 ** 16\n\n product_factor_pairs = [(1, 0)]\n product_hash = {0: [1]}\n for num_factors in range(1, 13 + 1):\n product_hash[num_factors] = []\n\n # (1,0) becomes\n # (1,0), (p, 1) becomes\n # (1,0), (p, 1), (q,1), (q*p, 2) etc.\n for prime in primes:\n to_add = []\n for product, num_factors in product_factor_pairs:\n if prime * product < MAX_n:\n to_add.append((prime * product, num_factors + 1))\n product_hash[num_factors + 1].append(prime * product)\n product_factor_pairs += to_add\n\n result = 0\n sign = -1\n for num_factors in range(4, 13 + 1):\n sign = -sign\n PIE_factor = sign * choose(num_factors - 1, 3)\n current_sum = 0\n for product in product_hash[num_factors]:\n current_sum += MAX_n / product # integer division\n result += PIE_factor * current_sum\n\n return result\n\nif __name__ == '__main__':\n print euler_timer(268)(main)(verbose=True)\n", "id": "10662226", "language": "Python", "matching_score": 2.0731873512268066, "max_stars_count": 7, "path": "python/complete/no268.py" }, { "content": "#!/usr/bin/env python\n\n# The problem is a generalization of one when n = 32\n# Let E(N)_n = the expected value when the y_i, x_i\n# have n bits. Then\n# E(N)_n = sum_{k=0 to n} (n choose k)/(2**n) [E(N)_{n - k} + 1]\n# Since k of the bits are set with probability (n choose k)/(2**n)\n# Once k bits are set, they remain set forever, so our\n# expected time going forward if E(N)_{n - k} + 1 since it takes\n# 1 step to first set k bits and E(N)_{n - k} steps to set the\n# remaining (n - k) bits\n\n# 2**n E(N)_n = 2**n + sum_{k=0 to n} (n choose k) E(N)_{n - k}\n# Reworking this, with E(N)_{n - n} = E(N)_0 = 0, we have\n# (2**n - 1) E(N)_n = 2**n + sum_{k=1 to n} (n choose k) E(N)_{n - k}\n\nfrom python.decorators import euler_timer\nfrom python.functions import choose\n\n\ndef main(verbose=False):\n expected_hash = {0: 0}\n for n in range(1, 32 + 1):\n to_add = 2 ** n\n for k in range(1, n + 1):\n to_add += choose(n, k) * expected_hash[n - k]\n expected_hash[n] = (to_add * 1.0) / (2 ** n - 1)\n\n return round(expected_hash[32], 10)\n\nif __name__ == '__main__':\n print euler_timer(323)(main)(verbose=True)\n", "id": "5371889", "language": "Python", "matching_score": 0.6573811173439026, "max_stars_count": 7, "path": "python/complete/no323.py" }, { "content": "#!/usr/bin/env python\n\n# If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.\n# Not all numbers produce palindromes so quickly. 
For example,\n\n# 349 + 943 = 1292,\n# 1292 + 2921 = 4213\n# 4213 + 3124 = 7337\n\n# That is, 349 took three iterations to arrive at a palindrome.\n\n# Although no one has proved it yet, it is thought that some numbers,\n# like 196, never produce a palindrome. A number that never forms a\n# palindrome through the reverse and add process is called a Lychrel\n# number. Due to the theoretical nature of these numbers, and for\n# the purpose of this problem, we shall assume that a number is Lychrel\n# until proven otherwise. In addition you are given that for every\n# number below ten-thousand, it will either (i) become a palindrome\n# in less than fifty iterations, or, (ii) no one, with all the computing\n# power that exists, has managed so far to map it to a palindrome. In fact,\n# 10677 is the first number to be shown to require over fifty iterations\n# before producing a palindrome:\n# 4668731596684224866951378664 (53 iterations, 28-digits).\n\n# Surprisingly, there are palindromic numbers that are themselves\n# Lychrel numbers; the first example is 4994.\n\n# How many Lychrel numbers are there below ten-thousand?\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_palindrome\n\n\ndef next_lychrel_value(n):\n return n + int(str(n)[::-1])\n\n\ndef update_hash(n, max_iterations, hash_={}):\n \"\"\"\n Uses the hash values and continually updates\n the sequence until a palindrome is found or until\n the number of iterations exceeds max_iterations\n \"\"\"\n curr = next_lychrel_value(n)\n to_add = {0: n, 1: curr}\n index = 1\n while not is_palindrome(curr) and index <= max_iterations:\n if curr in hash_:\n covered = hash_[curr].copy()\n for i in range(1, max(covered) + 1):\n to_add[index + i] = covered[i]\n index += max(covered)\n else:\n curr = next_lychrel_value(curr)\n index += 1\n to_add[index] = curr\n hash_[n] = to_add\n return to_add\n\n\ndef main(verbose=False):\n lychrel_sequences = {}\n for i in range(1, 10000):\n update_hash(i, 50, lychrel_sequences)\n\n # We begin with the inital assumption that every number\n # is a Lychrel number, and reduce the count every time\n # we encounter a number which is not\n count = 9999\n for key, value in lychrel_sequences.items():\n if 50 in value:\n iterations = 50\n else:\n iterations = max(value)\n\n if is_palindrome(value[iterations]):\n count -= 1\n\n return count\n\nif __name__ == '__main__':\n print euler_timer(55)(main)(verbose=True)\n", "id": "3359776", "language": "Python", "matching_score": 1.8132753372192383, "max_stars_count": 7, "path": "python/complete/no055.py" }, { "content": "#!/usr/bin/env python\n\n# The palindromic number 595 is interesting because it can be written\n# as the sum of consecutive squares:\n# 6**2 + 7**2 + 8**2 + 9**2 + 10**2 + 11**2 + 12**2\n\n# There are exactly eleven palindromes below one-thousand that can\n# be written as consecutive square sums, and the sum of these palindromes\n# is 4164. Note that 1 = 0**2 + 1**2 has been excluded.\n\n# Find the sum of all the numbers less than 10**8 that are both palindromic\n# and can be written as the sum of consecutive squares.\n\n# NOTE: I am assuming we need at least 2 squares, i.e. 
121 = 11**2 doesn't\n# count this assumption is because the use of plural in the question\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_palindrome\n\n\ndef palindromic_square_sums(n):\n # first populate all pairs that add to less than n\n # 2k**2 < k**2 + (k + 1)**2 < n\n MAX_k = int(round(sqrt(n / 2.0)))\n curr = [index ** 2 + (index + 1) ** 2 for index in range(1, MAX_k)]\n curr = [num for num in curr if num < n]\n\n result = [num for num in curr if is_palindrome(num)]\n num_squares = 2\n while curr:\n num_squares += 1\n curr = [curr[i] + (i + num_squares) ** 2 for i in range(len(curr))]\n curr = [num for num in curr if num < n]\n result.extend([num for num in curr if is_palindrome(num)])\n\n return set(result)\n\n\ndef main(verbose=False):\n PROBLEM_MAX = 10 ** 8\n return sum(palindromic_square_sums(PROBLEM_MAX))\n\nif __name__ == '__main__':\n print euler_timer(125)(main)(verbose=True)\n", "id": "10800406", "language": "Python", "matching_score": 1.6487880945205688, "max_stars_count": 7, "path": "python/complete/no125.py" }, { "content": "#!/usr/bin/env python\n\n# Find the sum of all numbers, less than one million, which\n# are palindromic in base 10 and base 2.\n\nfrom python.decorators import euler_timer\nfrom python.functions import is_palindrome\n\n\ndef binary_incrementer(str_):\n digs = [int(dig) for dig in str_]\n digs[-1] += 1\n for i in range(len(digs) - 1, -1, -1):\n if digs[i] > 1:\n temp = digs[i]\n digs[i] = temp % 2\n if i == 0:\n digs = [temp / 2] + digs\n else:\n digs[i - 1] += temp / 2 # int division intended\n else:\n break\n return ''.join(str(dig) for dig in digs)\n\n\ndef all_base10_base2_palindromes(n):\n result = []\n base_10 = 1\n base_2 = '1'\n\n while base_10 < n:\n if is_palindrome(base_10) and is_palindrome(base_2):\n result.append(base_10)\n base_10 += 1\n base_2 = binary_incrementer(base_2)\n return result\n\n\ndef main(verbose=False):\n ans = all_base10_base2_palindromes(10 ** 6)\n if verbose:\n return '%s.\\nThe full list of palindromes is: %s' % (\n sum(ans), ', '.join(str(number) for number in ans))\n else:\n return sum(ans)\n\nif __name__ == '__main__':\n print euler_timer(36)(main)(verbose=True)\n", "id": "6540161", "language": "Python", "matching_score": 1.444155216217041, "max_stars_count": 7, "path": "python/complete/no036.py" }, { "content": "#!/usr/bin/env python\n\n# Find the largest palindrome made from the\n# product of two 3-digit numbers\n\n# 100**2 = 10000 <= a*b <= 998001 = 999**2\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import apply_to_list\nfrom python.functions import is_palindrome\n\n\ndef main(verbose=False):\n products = apply_to_list(operator.mul, range(100, 1000))\n return max(elt for elt in products if is_palindrome(elt))\n\nif __name__ == '__main__':\n print euler_timer(4)(main)(verbose=True)\n", "id": "9739954", "language": "Python", "matching_score": 1.9421006441116333, "max_stars_count": 7, "path": "python/complete/no004.py" }, { "content": "#!/usr/bin/env python\n\n# Considering natural numbers of the form, a^(b), where a, b < 100, what\n# is the maximum digital sum?\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import apply_to_list\n\n\ndef digit_sum(n):\n return sum(int(dig) for dig in str(n))\n\n\ndef main(verbose=False):\n return max(digit_sum(val)\n for val in apply_to_list(operator.pow, range(1, 100)))\n\nif __name__ == '__main__':\n print 
euler_timer(56)(main)(verbose=True)\n", "id": "1429717", "language": "Python", "matching_score": 2.479093313217163, "max_stars_count": 7, "path": "python/complete/no056.py" }, { "content": "#!/usr/bin/env python\n\nimport operator\n\nfrom python.decorators import euler_timer\nfrom python.functions import apply_to_list\n\n\ndef main(verbose=False):\n n = 100\n powers = apply_to_list(operator.pow, range(2, n + 1))\n return len(set(powers))\n\nif __name__ == '__main__':\n print euler_timer(29)(main)(verbose=True)\n", "id": "9237317", "language": "Python", "matching_score": 0.2679876685142517, "max_stars_count": 7, "path": "python/complete/no029.py" }, { "content": "#!/usr/bin/env python\n\n# LAGRANGE POLYNOMIAL FOR F:\n# For data points (x_1, f(x_1))...(x_n, f(x_n))\n# We have Lagrange base polynomials\n# l_i(x) = prod_{j neq i} (x - x_j)/(x_i - x_j)\n# L(x) = sum_i f(x_i) l_i(x)\n\n# For our problem, we seek OP(n, n + 1), but this is\n# L_n(n + 1) for L_n generated by the first n points\n# x_i = i, with f given as 1 - n + n^2 ...\n\n# If OP(n, n + 1) != f(n + 1), then we count the value\n# We stop when n = 11\n\n# f(x)(1 + x) = 1 + x^11\n\n# We have L_n(n + 1) = sum_i f(i) l_i(n + 1)\n\nimport operator\n\nfrom python.decorators import euler_timer\n\n\ndef lagrange(input_val, index, points):\n numerator = reduce(operator.mul, [input_val - points[i] for i in\n range(len(points)) if i != index])\n denominator = reduce(operator.mul, [points[index] - points[i] for i in\n range(len(points)) if i != index])\n return numerator * 1.0 / denominator\n\n\ndef n_value_approximation(func, n, input_val):\n if n == 1:\n return func(1)\n points = range(1, n + 1)\n return sum(func(points[i]) * lagrange(input_val, i, points)\n for i in range(len(points)))\n\n\ndef main(verbose=False):\n def func(x):\n return ((1 + x ** 11) * (1.0)) / (1 + x)\n result = 0\n for k in range(1, 11):\n val = n_value_approximation(func, k, k + 1)\n if val != func(k + 1):\n result += val\n return int(result)\n\nif __name__ == '__main__':\n print euler_timer(101)(main)(verbose=True)\n", "id": "9871721", "language": "Python", "matching_score": 2.1415839195251465, "max_stars_count": 7, "path": "python/complete/no101.py" }, { "content": "#!/usr/bin/env python\n\n# Let S_m = (x_1, x_2, ... , x_m) be the m-tuple of positive real\n# numbers with x_1 + x_2 + ... + x_m = m for which\n# P_m = x_1 * x_2^2 * ... * x_m^m is maximised.\n\n# For example, it can be verified that [P_10] = 4112\n# ([] is the integer part function).\n\n# Find SUM[P_m] for 2 <= m <= 15.\n\n# -------- LAGRANGE --------\n# maximize f(x,...) given g(x,....) = c\n# set ratio of partials equal to lambda\n# Since g = x_1 + ... + x_m\n# We need d(P_m)/d(x_i) = i P_m/x_i = lambda\n# Hence i/x_i = 1/x_1, x_i = i*x_1\n# m = x_1(1 + ... 
+ m) = x_1(m)(m+1)/2\n# x_1 = 2/(m + 1)\n# P_m = (2/m+1)**(m*(m+1)/2)*(1*2**2*...*m**m)\n\n# P_10 = (2/11)**(55)*(1*4*...*(10**10)) = 4112.0850028536197\n\nimport operator\n\nfrom math import floor\n\nfrom python.decorators import euler_timer\n\n\ndef P(m):\n return reduce(operator.mul,\n [((2 * n) / (1.0 * (m + 1))) ** n for n in range(1, m + 1)])\n\n\ndef main(verbose=False):\n return int(sum(floor(P(n)) for n in range(2, 16)))\n\nif __name__ == '__main__':\n print euler_timer(190)(main)(verbose=True)\n", "id": "11538711", "language": "Python", "matching_score": 0.5508347749710083, "max_stars_count": 7, "path": "python/complete/no190.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import get_data\nfrom python.functions import prims_algo\n\n\ndef main(verbose=False):\n data = [row.split(',') for row in get_data(107).split('\\r\\n') if row]\n\n adjacency = {}\n size = len(data)\n network_sum = 0\n # UNDIRECTED\n for node in range(size - 1):\n for dest in range(node + 1, size):\n if data[node][dest] != '-':\n value = int(data[node][dest])\n network_sum += value\n # sets value to [] if not set, returns value at key\n adjacency.setdefault(node, []).append((dest, value))\n adjacency.setdefault(dest, []).append((node, value))\n\n _, min_sum = prims_algo(adjacency)\n\n return network_sum - min_sum\n\nif __name__ == '__main__':\n print euler_timer(107)(main)(verbose=True)\n", "id": "11649274", "language": "Python", "matching_score": 1.4839566946029663, "max_stars_count": 7, "path": "python/complete/no107.py" }, { "content": "#!/usr/bin/env python\n\nfrom python.decorators import euler_timer\nfrom python.functions import astar\nfrom python.functions import get_data\n\n\ndef main(verbose=False):\n data = [[int(entry) for entry in row.split(\",\")]\n for row in get_data(81).split(\"\\n\") if row]\n\n arranged_data = {}\n size = len(data)\n for i in range(size):\n for j in range(size):\n arranged_data[(i, j)] = data[i][j]\n\n MINIMUM = min(arranged_data.values())\n\n def heuristic(node):\n return (2 * size - 2 - sum(node)) * MINIMUM\n\n def adjacent(node):\n node_x, node_y = node\n return [(node_x + 1, node_y), (node_x, node_y + 1)]\n\n return astar(arranged_data,\n (0, 0),\n (size - 1, size - 1),\n heuristic,\n adjacent)\n\nif __name__ == '__main__':\n print euler_timer(81)(main)(verbose=True)\n", "id": "527358", "language": "Python", "matching_score": 0.9226318597793579, "max_stars_count": 7, "path": "python/complete/no081.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport math\nimport pathlib\n\nimport numpy as np\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nASTEROID = 1\nVACANT = 0\n\n\ndef to_int(value):\n if value == \"#\": # Asteroid\n return ASTEROID\n if value == \".\": # Empty\n return VACANT\n raise ValueError(\"Unexpected element\", value)\n\n\ndef parse_asteroids(content):\n lines = content.strip().split(\"\\n\")\n rows = len(lines)\n columns = len(lines[0])\n\n values = 
np.zeros((rows, columns), dtype=np.uint8)\n for row, line in enumerate(lines):\n assert len(line) == columns\n values[row, :] = [to_int(char) for char in line]\n\n return values\n\n\ndef normalize_direction(a, b):\n if a == 0 and b == 0:\n return 1, (0, 0)\n\n common_factor = abs(math.gcd(a, b))\n a_without, remainder = divmod(a, common_factor)\n assert remainder == 0\n b_without, remainder = divmod(b, common_factor)\n assert remainder == 0\n return common_factor, (a_without, b_without)\n\n\ndef line_of_sight(values, target_row, target_column):\n rows, columns = values.shape\n asteroids = collections.defaultdict(list)\n\n for row in range(rows):\n delta_row = target_row - row # points down\n for column in range(columns):\n if row == target_row and column == target_column:\n continue\n\n delta_column = column - target_column # points right\n if values[row, column] != ASTEROID:\n continue\n\n factor, direction = normalize_direction(delta_row, delta_column)\n asteroids[direction].append(factor)\n\n return asteroids\n\n\ndef all_line_of_sight(values):\n rows, columns = values.shape\n counts = np.zeros((rows, columns), dtype=np.uint64)\n\n for row in range(rows):\n for column in range(columns):\n if values[row, column] != ASTEROID:\n counts[row, column] = 0\n continue\n\n asteroids = line_of_sight(values, row, column)\n counts[row, column] = len(asteroids)\n\n return counts\n\n\ndef all_vaporized(values, target_row, target_column):\n rows, columns = values.shape\n asteroids = line_of_sight(values, target_row, target_column)\n directions_theta = {}\n\n for direction, multipliers in asteroids.items():\n # Row indices **increase** in the downward y direction.\n d_row, d_column = direction\n theta = np.arctan2(d_row, d_column) # Row indices are y, columns are x\n directions_theta[theta] = (direction, sorted(multipliers))\n\n removed = []\n sorted_theta = np.array(sorted(directions_theta.keys(), reverse=True))\n # This is in the range [-pi, pi] (but reverse, since we go counter clockwise)\n in_first_quadrant, = np.where(\n np.logical_and(0 < sorted_theta, sorted_theta <= np.pi / 2)\n )\n start_index = min(in_first_quadrant)\n laser_theta = np.array(\n list(sorted_theta[start_index:]) + list(sorted_theta[:start_index])\n )\n\n updated = True\n while updated:\n updated = False\n\n for theta in laser_theta:\n direction, multipliers = directions_theta[theta]\n if not multipliers:\n continue\n\n multiplier = multipliers[0]\n delta_row, delta_column = direction\n delta_row *= multiplier # Un-normalize\n delta_column *= multiplier # Un-normalize\n # Undo delta_row = target_row - row\n row = target_row - delta_row\n assert 0 <= row < rows\n # Undo delta_column = column - target_column\n column = target_column + delta_column\n assert 0 <= column < columns, (column, columns)\n removed.append((row, column))\n updated = True\n\n # Update for next iteration.\n multipliers[:] = multipliers[1:]\n\n return removed\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read().strip()\n\n values = parse_asteroids(content)\n all_counts = all_line_of_sight(values)\n best_count = max(all_counts.flatten())\n print(best_count)\n\n row_matches, col_matches = np.where(all_counts == best_count)\n row_match, = row_matches\n col_match, = col_matches\n removed = all_vaporized(values, row_match, col_match)\n print(removed[200 - 1][::-1])\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7785875", "language": "Python", "matching_score": 1.9376977682113647, 
"max_stars_count": 0, "path": "day10/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pathlib\n\nimport numpy as np\nimport PIL.Image\n\n\nHERE = pathlib.Path(__file__).resolve().parent\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read().strip()\n\n width = 25\n height = 6\n layer_size = width * height\n num_layers, remainder = divmod(len(content), layer_size)\n assert remainder == 0\n\n visible = 7 * np.ones((height, width), dtype=np.uint8)\n\n best_layer = -1\n fewest_zeros = layer_size + 1\n for i in range(num_layers):\n layer = content[layer_size * i : layer_size * (i + 1)]\n num_zeros = layer.count(\"0\")\n if num_zeros < fewest_zeros:\n fewest_zeros = num_zeros\n best_layer = i\n\n cell_index = 0\n for row in range(height):\n for col in range(width):\n if visible[row, col] == 7:\n pixel = int(layer[cell_index])\n if pixel in (0, 1):\n visible[row, col] = pixel\n elif pixel != 2:\n raise ValueError(\"Bad layer\", layer)\n # For next iteration\n cell_index += 1\n\n assert cell_index == len(layer), (cell_index, len(layer))\n\n layer = content[layer_size * best_layer : layer_size * (best_layer + 1)]\n num_ones = layer.count(\"1\")\n num_twos = layer.count(\"2\")\n print(num_ones * num_twos)\n\n assert np.all(visible != 7)\n image = PIL.Image.fromarray(255 - 255 * visible)\n image.save(HERE / \"image.png\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1122782", "language": "Python", "matching_score": 1.8822005987167358, "max_stars_count": 0, "path": "day08/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pathlib\n\nimport networkx as nx\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nCOM = \"COM\"\n\n\ndef get_parent(g, node):\n if node == COM:\n return None\n\n predecessors = list(g.predecessors(node))\n if len(predecessors) != 1:\n raise ValueError(\"Unexpected parents\", g, node, predecessors)\n return predecessors[0]\n\n\ndef all_predecessors(g, node):\n if node == COM:\n return []\n\n parents = []\n parent = get_parent(g, node)\n while parent is not None:\n parents.append(parent)\n parent = get_parent(g, parent)\n\n return parents\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n g = nx.DiGraph()\n for pair in content.strip().split(\"\\n\"):\n parent_name, name = pair.split(\")\")\n g.add_edge(parent_name, name)\n\n count = 0\n for node in g.nodes:\n count += 
len(all_predecessors(g, node))\n\n print(count)\n\n you_predecessors = all_predecessors(g, \"YOU\")[::-1]\n san_predecessors = all_predecessors(g, \"SAN\")[::-1]\n shared_you = [\n i\n for i, value in enumerate(you_predecessors)\n if value in san_predecessors\n ]\n assert shared_you == list(range(min(shared_you), max(shared_you) + 1))\n shared_san = [\n i\n for i, value in enumerate(san_predecessors)\n if value in you_predecessors\n ]\n assert shared_san == shared_you\n you_moves = len(you_predecessors[max(shared_you) :]) - 1\n san_moves = len(san_predecessors[max(shared_you) :]) - 1\n print(you_moves + san_moves)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10153675", "language": "Python", "matching_score": 3.9518957138061523, "max_stars_count": 0, "path": "day06/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pathlib\n\n\nHERE = pathlib.Path(__file__).resolve().parent\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n print(len(content))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "2833611", "language": "Python", "matching_score": 0.32700973749160767, "max_stars_count": 0, "path": "day25/main.py" }, { "content": "import pickle\nimport sys\n\n\nclass RenameUnpickler(pickle.Unpickler):\n def find_class(self, module, name):\n renamed_module = module\n if module == \"ppkg\":\n renamed_module = \"p2pkg\"\n elif module == \"ppkg.wickle\":\n renamed_module = \"p2pkg.tickle\"\n\n return super(RenameUnpickler, self).find_class(renamed_module, name)\n\n\ndef renamed_load(file_obj):\n return RenameUnpickler(file_obj).load()\n\n\ndef main():\n # Enable `import p2pkg.tickle`\n sys.path.append(\"v3\")\n\n with open(\"a.v1.pkl\", \"rb\") as file_obj:\n a = renamed_load(file_obj)\n\n print(\"=\" * 60)\n print(\"a = {}\".format(a))\n print(\"a.__dict__ = {}\".format(a.__dict__))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "8864038", "language": "Python", "matching_score": 2.9435226917266846, "max_stars_count": 0, "path": "load_v3.py" }, { "content": "import pickle\nimport sys\n\n\ndef main():\n # Enable `import ppkg.wickle`\n sys.path.append(\"v2\")\n\n with open(\"a.v1.pkl\", \"rb\") as file_obj:\n a = pickle.load(file_obj)\n\n print(\"=\" * 60)\n print(\"a = {}\".format(a))\n print(\"a.__dict__ = {}\".format(a.__dict__))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "5242134", "language": "Python", "matching_score": 2.5575859546661377, "max_stars_count": 0, "path": "load_v2.py" }, { "content": "import pickle\nimport sys\n\n\ndef main():\n sys.path.append(\"v1\")\n import ppkg.wickle\n\n a = ppkg.wickle.A(11.0, 12.5)\n with open(\"a.v1.pkl\", \"wb\") as file_obj:\n pickle.dump(a, file_obj)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "8975582", "language": "Python", "matching_score": 0.10861030966043472, "max_stars_count": 0, "path": "save_v1.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pathlib\nimport pickle\n\nimport bakeoff\nimport bakeoff_opt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nSTORED_RESULTS = HERE / \"timeit_results.pkl\"\nBAKEOFF_FUNCTIONS = (\n bakeoff.forall1,\n bakeoff.forall2,\n bakeoff.forall3,\n bakeoff.do1,\n bakeoff.do2,\n bakeoff.do3,\n bakeoff.spread1,\n bakeoff.spread2,\n bakeoff.spread3,\n bakeoff.serial,\n bakeoff.vs_algorithm32,\n bakeoff.vs_algorithm53,\n bakeoff.vs_algorithm64,\n)\nBAKEOFF_OPT_FUNCTIONS = (\n bakeoff_opt.forall1,\n bakeoff_opt.forall2,\n bakeoff_opt.forall3,\n bakeoff_opt.do1,\n bakeoff_opt.do2,\n bakeoff_opt.do3,\n bakeoff_opt.spread1,\n bakeoff_opt.spread2,\n bakeoff_opt.spread3,\n bakeoff_opt.serial,\n bakeoff_opt.vs_algorithm32,\n bakeoff_opt.vs_algorithm53,\n bakeoff_opt.vs_algorithm64,\n)\n\n\ndef fn_name(fn):\n return f\"{fn.__module__.replace('._binary', '')}.{fn.__qualname__}\"\n\n\ndef verify_implementations(nodes, s_vals, substring_match=None):\n points = bakeoff.serial(nodes, s_vals)\n functions = BAKEOFF_FUNCTIONS + BAKEOFF_OPT_FUNCTIONS\n\n equals = {}\n for fn in functions:\n key = fn_name(fn)\n points_also = fn(nodes, s_vals)\n if key in equals:\n raise KeyError(key)\n\n if np.all(points == points_also):\n equals[key] = \"EQUAL\"\n elif np.allclose(points, points_also):\n equals[key] = \"ALLCLOSE\"\n else:\n equals[key] = \"DIFFERENT\"\n\n if substring_match is None:\n return equals\n\n return {\n key: value for key, value in equals.items() if substring_match in key\n }\n\n\ndef generate_nodes(num_nodes, num_values, seed):\n # TODO: Cache outputs?\n random_state = np.random.RandomState(seed=seed)\n x = sorted(random_state.randint(1000, size=num_nodes))\n y = sorted(random_state.randint(1000, size=num_nodes))\n nodes = np.asfortranarray([x, y], dtype=np.float64)\n\n s_vals = np.linspace(0.0, 1.0, num_values)\n\n return nodes, s_vals\n\n\ndef _compare_pair(name_timeit_result):\n _, timeit_result = name_timeit_result\n # Sort by average, break (very unlikely) ties with stdev.\n return timeit_result.average, timeit_result.stdev\n\n\ndef sort_results(timeit_results):\n \"\"\"Sort results by average running time\n\n Assumes ``timeit_results`` contains pairs of the form.\n \"\"\"\n return sorted(timeit_results, key=_compare_pair)\n\n\ndef get_timeit_results():\n # Load previous results from disk.\n # NOTE: This is a bad idea if these benchmarks are being\n # run on multiple machines.\n if not STORED_RESULTS.is_file():\n return {}\n\n with open(STORED_RESULTS, \"rb\") as file_obj:\n return pickle.load(file_obj)\n\n\ndef store_timeit_results(results_cache):\n with open(STORED_RESULTS, \"wb\") as file_obj:\n pickle.dump(results_cache, file_obj)\n\n\ndef timeit(get_ipython, fn, *args, **kwargs):\n \"\"\"Invoke the IPython timeit line magic.\n\n Determined how this worked via:\n\n >>> import dis\n >>>\n >>> def capture_line_magic(fn):\n ... timeit_result = %timeit -o -q fn()\n ... 
return timeit_result\n ...\n >>> dis.dis(capture_line_magic)\n \"\"\"\n timeit_result = get_ipython().run_line_magic(\n \"timeit\", \"-o -q fn(*args, **kwargs)\"\n )\n return timeit_result\n\n\ndef time_function(get_ipython, results_cache, fn, num_nodes, num_values, seed):\n key = (fn_name(fn), num_nodes, num_values, seed)\n if key not in results_cache:\n nodes, s_vals = generate_nodes(num_nodes, num_values, seed)\n results_cache[key] = timeit(get_ipython, fn, nodes, s_vals)\n\n return results_cache[key]\n\n\ndef new_axis():\n figure = plt.figure(figsize=(18, 12), dpi=80)\n return figure.gca()\n\n\ndef plot_data_nodes(\n get_ipython, results_cache, functions, num_nodes_list, num_values, seed\n):\n ax = new_axis()\n\n for fn in functions:\n x_vals = []\n y_vals = []\n y_below = []\n y_above = []\n for num_nodes in num_nodes_list:\n timeit_result = time_function(\n get_ipython, results_cache, fn, num_nodes, num_values, seed\n )\n # 2 std deviations ~= 95%\n below = timeit_result.average - 2.0 * timeit_result.stdev\n above = timeit_result.average + 2.0 * timeit_result.stdev\n # If the running time goes non-positive, ignore the datapoint\n if below <= 0.0:\n continue\n\n x_vals.append(num_nodes)\n y_vals.append(timeit_result.average)\n y_below.append(below)\n y_above.append(above)\n\n (line,) = ax.loglog(x_vals, y_vals, marker=\"o\", label=fn.__name__)\n ax.fill_between(\n x_vals, y_below, y_above, alpha=0.5, color=line.get_color()\n )\n\n ax.set_xscale(\"log\", basex=2)\n ax.set_yscale(\"log\", basey=2)\n ax.set_title(f\"Number of Input Values: {num_values}\")\n ax.set_xlabel(\"Number of Nodes\")\n ax.set_ylabel(\"Average Evaluation Time (s)\")\n ax.axis(\"scaled\")\n ax.legend()\n\n\ndef _compare_times(\n get_ipython, results_cache, functions, num_nodes, num_values, seed\n):\n timeit_results = []\n for fn in functions:\n name = fn.__name__\n timeit_result = time_function(\n get_ipython, results_cache, fn, num_nodes, num_values, seed\n )\n timeit_results.append((name, timeit_result))\n\n timeit_results = sort_results(timeit_results)\n max_width = max(len(name) for name, _ in timeit_results)\n\n for name, timeit_result in timeit_results:\n print(f\"{name:{max_width}}: {timeit_result}\")\n\n\ndef compare_bakeoff_times(\n get_ipython, results_cache, num_nodes, num_values, seed\n):\n print(\"Non-Optimized Implementations\")\n print(\"-----------------------------\")\n _compare_times(\n get_ipython,\n results_cache,\n BAKEOFF_FUNCTIONS,\n num_nodes,\n num_values,\n seed,\n )\n\n print(\"\")\n\n print(\"Optimized Implementations\")\n print(\"-------------------------\")\n _compare_times(\n get_ipython,\n results_cache,\n BAKEOFF_OPT_FUNCTIONS,\n num_nodes,\n num_values,\n seed,\n )\n\n\ndef plot_data_values(\n get_ipython, results_cache, functions, num_nodes, num_values_list, seed\n):\n ax = new_axis()\n\n for fn in functions:\n x_vals = []\n y_vals = []\n y_below = []\n y_above = []\n for num_values in num_values_list:\n timeit_result = time_function(\n get_ipython, results_cache, fn, num_nodes, num_values, seed\n )\n # 2 std deviations ~= 95%\n below = timeit_result.average - 2.0 * timeit_result.stdev\n above = timeit_result.average + 2.0 * timeit_result.stdev\n # If the running time goes non-positive, ignore the datapoint\n if below <= 0.0:\n continue\n\n x_vals.append(num_values)\n y_vals.append(timeit_result.average)\n y_below.append(below)\n y_above.append(above)\n\n (line,) = ax.loglog(x_vals, y_vals, marker=\"o\", label=fn.__name__)\n ax.fill_between(\n x_vals, y_below, y_above, alpha=0.5, 
color=line.get_color()\n )\n\n ax.set_xscale(\"log\", basex=2)\n ax.set_yscale(\"log\", basey=2)\n ax.set_title(f\"Number of Nodes: {num_nodes}\")\n ax.set_xlabel(\"Number of Input Values\")\n ax.set_ylabel(\"Average Evaluation Time (s)\")\n ax.axis(\"scaled\")\n ax.legend()\n", "id": "3491126", "language": "Python", "matching_score": 4.286068439483643, "max_stars_count": 0, "path": "nb_helpers.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d\nimport numpy as np\n\nimport nb_helpers\n\n\ndef plot_3d(log_N_vals, log_k_vals, log_timeit_vals, fn_name):\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n\n ax.plot_trisurf(\n log_N_vals[fn_name],\n log_k_vals[fn_name],\n log_timeit_vals[fn_name],\n linewidth=0.2,\n antialiased=True,\n )\n\n ax.set_xlabel(r\"$\\log_2 N$\")\n ax.set_ylabel(r\"$\\log_2 k$\")\n ax.set_zlabel(r\"$\\log_2 T$\")\n ax.set_title(fn_name)\n\n plt.show()\n\n\ndef main():\n matplotlib.rc(\"mathtext\", fontset=\"cm\", rm=\"serif\")\n\n results_cache = nb_helpers.get_timeit_results()\n log_N_vals = collections.defaultdict(list)\n log_k_vals = collections.defaultdict(list)\n log_timeit_vals = collections.defaultdict(list)\n\n for key, timeit_result in results_cache.items():\n fn_name, num_nodes, num_values, _ = key\n log_N_vals[fn_name].append(np.log2(num_nodes))\n log_k_vals[fn_name].append(np.log2(num_values))\n log_timeit_vals[fn_name].append(np.log2(timeit_result.average))\n\n selected_functions = (\n \"bakeoff_opt.forall1\",\n \"bakeoff_opt.serial\",\n \"bakeoff_opt.spread1\",\n \"bakeoff_opt.vs_algorithm64\",\n )\n for fn_name in selected_functions:\n plot_3d(log_N_vals, log_k_vals, log_timeit_vals, fn_name)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10853118", "language": "Python", "matching_score": 0.5565770268440247, "max_stars_count": 0, "path": "plot_trisurf.py" }, { "content": "import pandas as pd\nfrom hvplot import hvPlot, patch\nfrom holoviews import Store, Scatter\nfrom holoviews.element.comparison import ComparisonTestCase\n\n\nclass TestOverrides(ComparisonTestCase):\n\n def setUp(self):\n patch('pandas')\n self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])\n\n def test_define_default_options(self):\n hvplot = hvPlot(self.df, width=42, height=42)\n curve = hvplot(y='y')\n opts = Store.lookup_options('bokeh', curve, 'plot')\n self.assertEqual(opts.options.get('width'), 42)\n self.assertEqual(opts.options.get('height'), 42)\n\n def test_define_custom_method(self):\n hvplot = hvPlot(self.df, {'custom_scatter': {'width': 42, 'height': 42}})\n custom_scatter = hvplot.custom_scatter(y='y')\n scatter = hvplot.scatter(y='y')\n custom_opts = Store.lookup_options('bokeh', custom_scatter, 'plot')\n opts = Store.lookup_options('bokeh', scatter, 'plot')\n self.assertEqual(custom_opts.options.get('width'), 42)\n self.assertEqual(custom_opts.options.get('height'), 42)\n self.assertNotEqual(opts.options.get('width'), 
42)\n self.assertNotEqual(opts.options.get('height'), 42)\n\n def test_define_customize_method(self):\n hvplot = hvPlot(self.df, {'scatter': {'width': 42, 'height': 42}})\n custom_scatter = hvplot.scatter(y='y')\n curve = hvplot.line(y='y')\n custom_opts = Store.lookup_options('bokeh', custom_scatter, 'plot')\n opts = Store.lookup_options('bokeh', curve, 'plot')\n self.assertEqual(custom_opts.options.get('width'), 42)\n self.assertEqual(custom_opts.options.get('height'), 42)\n self.assertNotEqual(opts.options.get('width'), 42)\n self.assertNotEqual(opts.options.get('height'), 42)\n\n def test_attempt_to_override_kind_on_method(self):\n hvplot = hvPlot(self.df, {'scatter': {'kind': 'line'}})\n self.assertIsInstance(hvplot.scatter(y='y'), Scatter)\n", "id": "4714560", "language": "Python", "matching_score": 3.02354097366333, "max_stars_count": 0, "path": "hvplot/tests/testoverrides.py" }, { "content": "from unittest import SkipTest\nfrom parameterized import parameterized\n\nfrom holoviews import NdOverlay\nfrom holoviews.element import Curve, Area, Scatter\nfrom holoviews.element.comparison import ComparisonTestCase\nfrom hvplot import patch\n\n\nclass TestChart1D(ComparisonTestCase):\n\n def setUp(self):\n try:\n import pandas as pd\n except:\n raise SkipTest('Pandas not available')\n patch('pandas')\n self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])\n\n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_wide_chart(self, kind, element):\n plot = self.df.hvplot(kind=kind)\n obj = NdOverlay({'x': element(self.df, 'index', 'x').redim(x='value'),\n 'y': element(self.df, 'index', 'y').redim(y='value')}, 'Variable')\n self.assertEqual(plot, obj)\n\n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_wide_chart_labels(self, kind, element):\n plot = self.df.hvplot(kind=kind, value_label='Test', group_label='Category')\n obj = NdOverlay({'x': element(self.df, 'index', 'x').redim(x='Test'),\n 'y': element(self.df, 'index', 'y').redim(y='Test')}, 'Category')\n self.assertEqual(plot, obj)\n\n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_tidy_chart(self, kind, element):\n plot = self.df.hvplot(x='x', y='y', kind=kind)\n self.assertEqual(plot, element(self.df, 'x', 'y'))\n \n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_tidy_chart_index(self, kind, element):\n plot = self.df.hvplot(x='index', y='y', kind=kind)\n self.assertEqual(plot, element(self.df, 'index', 'y'))\n\n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_tidy_chart_index_by(self, kind, element):\n plot = self.df.hvplot(x='index', y='y', by='x', kind=kind)\n obj = NdOverlay({1: element(self.df[self.df.x==1], 'index', 'y'),\n 3: element(self.df[self.df.x==3], 'index', 'y'),\n 5: element(self.df[self.df.x==5], 'index', 'y')}, 'x')\n self.assertEqual(plot, obj)\n \n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_use_index_disabled(self, kind, element):\n with self.assertRaises(ValueError):\n self.df.hvplot(use_index=False, kind=kind)\n\n @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])\n def test_tidy_chart_ranges(self, kind, element):\n plot = self.df.hvplot(x='x', y='y', kind=kind, xlim=(0, 3), ylim=(5, 10))\n self.assertEqual(plot.kdims[0].range, (0, 3))\n self.assertEqual(plot.vdims[0].range, (5, 10))\n\n @parameterized.expand([('line', 
Curve), ('area', Area), ('scatter', Scatter)])\n def test_wide_chart_ranges(self, kind, element):\n plot = self.df.hvplot(kind=kind, xlim=(0, 3), ylim=(5, 10))\n self.assertEqual(plot.last.kdims[0].range, (0, 3))\n self.assertEqual(plot.last.vdims[0].range, (5, 10))\n\n def test_area_stacked(self):\n plot = self.df.hvplot.area(stacked=True)\n obj = NdOverlay({'x': Area(self.df, 'index', 'x').redim(x='value'),\n 'y': Area(self.df, 'index', 'y').redim(y='value')}, 'Variable')\n self.assertEqual(plot, Area.stack(obj))\n", "id": "345967", "language": "Python", "matching_score": 2.8663294315338135, "max_stars_count": 0, "path": "hvplot/tests/testcharts.py" }, { "content": "from unittest import SkipTest\nfrom parameterized import parameterized\n\nfrom holoviews.element import Image, QuadMesh\nfrom holoviews.element.comparison import ComparisonTestCase\n\n\nclass TestChart2D(ComparisonTestCase):\n def setUp(self):\n try:\n import xarray as xr\n import numpy as np\n except:\n raise SkipTest('XArray not available')\n import hvplot.xarray # noqa\n data = np.arange(0, 60).reshape(6, 10)\n x = np.arange(10)\n y = np.arange(6)\n self.da = xr.DataArray(data,\n coords={'y': y, 'x': x},\n dims=('y', 'x'))\n\n @parameterized.expand([('image', Image), ('quadmesh', QuadMesh)])\n def test_plot_resolution(self, kind, element):\n plot = self.da.hvplot(kind=kind)\n assert all(plot.data.x.diff('x').round(0) == 1)\n assert all(plot.data.y.diff('y').round(0) == 1)\n\n @parameterized.expand([('image', Image), ('quadmesh', QuadMesh)])\n def test_plot_resolution_with_rasterize(self, kind, element):\n plot = self.da.hvplot(kind=kind, dynamic=False, rasterize=True,\n x_sampling=5, y_sampling=2)\n assert all(plot.data.x.diff('x').round(0) == 5)\n assert all(plot.data.y.diff('y').round(0) == 2)\n", "id": "3698545", "language": "Python", "matching_score": 0.6816021800041199, "max_stars_count": 0, "path": "hvplot/tests/testoperations.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test_check_output(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(*args, **kwargs):\n from ci_diff_helper._utils import check_output\n return check_output(*args, **kwargs)\n\n def _helper(self, ret_val, expected_result):\n import mock\n\n arg1 = 'foo'\n arg2 = 'bar'\n check_mock = mock.patch('subprocess.check_output',\n return_value=ret_val)\n with check_mock as mocked:\n result = self._call_function_under_test(arg1, arg2)\n mocked.assert_called_once_with((arg1, arg2))\n self.assertEqual(result, expected_result)\n\n def test_bytes(self):\n ret_val = b'abc\\n'\n expected_result = u'abc'\n self._helper(ret_val, expected_result)\n\n def test_unicode(self):\n ret_val = b'abc\\n\\tab'\n expected_result = u'abc\\n\\tab'\n self._helper(ret_val, expected_result)\n\n def _err_helper(self, ignore_err=False):\n import subprocess\n import mock\n\n kwargs = {}\n if ignore_err:\n kwargs['ignore_err'] = True\n check_mock = mock.patch(\n 'subprocess.check_output',\n 
side_effect=subprocess.CalledProcessError(1, ''))\n\n arg = 'hello-is-it-me'\n with check_mock as mocked:\n result = self._call_function_under_test(arg, **kwargs)\n # We can only get here in the ignore_err case.\n mocked.assert_called_once_with(\n (arg,), stderr=subprocess.PIPE)\n self.assertIsNone(result)\n\n def test_ignore_err(self):\n self._err_helper(ignore_err=True)\n\n def test_uncaught_err(self):\n import subprocess\n\n with self.assertRaises(subprocess.CalledProcessError):\n self._err_helper()\n\n def test_bad_keywords(self):\n with self.assertRaises(TypeError):\n self._call_function_under_test(huh='bad-kw')\n\n\nclass Test_pr_from_commit(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(merge_subject):\n from ci_diff_helper._utils import pr_from_commit\n return pr_from_commit(merge_subject)\n\n def test_no_id(self):\n subject = 'No pound sign.'\n result = self._call_function_under_test(subject)\n self.assertIsNone(result)\n\n def test_too_many_ids(self):\n subject = '#1234 then #5678 too many.'\n result = self._call_function_under_test(subject)\n self.assertIsNone(result)\n\n def test_non_int_id(self):\n subject = '#x will not match the regex.'\n result = self._call_function_under_test(subject)\n self.assertIsNone(result)\n\n def test_valid_id(self):\n expected = 88901\n subject = 'Merge pull request #{:d} from queso/cheese'.format(expected)\n result = self._call_function_under_test(subject)\n self.assertEqual(result, expected)\n", "id": "7477714", "language": "Python", "matching_score": 4.3923468589782715, "max_stars_count": 5, "path": "tests/test__utils.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared utilities for ci-diff-helper.\"\"\"\n\nimport re\nimport subprocess\n\n\n_PR_ID_REGEX = re.compile(r'#(\\d+)')\nUNSET = object() # Sentinel for unset config values.\n\n\ndef check_output(*args, **kwargs):\n \"\"\"Run a command on the operation system.\n\n If the command fails a :class:`~subprocess.CalledProcessError`\n will occur. However, if you would like to silently ignore this\n error, pass the ``ignore_err`` flag::\n\n >>> print(check_output('false', ignore_err=True))\n None\n\n Args:\n args (tuple): Arguments to pass to ``subprocess.check_output``.\n kwargs (dict): Keyword arguments for this helper. 
Currently the\n only accepted keyword argument is ``ignore_err`.\n\n Returns:\n str: The raw STDOUT from the command (converted from bytes\n if necessary).\n\n Raises:\n TypeError: If any unrecognized keyword arguments are used.\n CalledProcessError: If ``ignore_err`` is not :data:`True` and\n the system call fails.\n \"\"\"\n ignore_err = kwargs.pop('ignore_err', False)\n if kwargs:\n raise TypeError('Got unexpected keyword argument(s)',\n list(kwargs.keys()))\n\n try:\n kwargs = {}\n if ignore_err:\n kwargs['stderr'] = subprocess.PIPE # Swallow stderr.\n cmd_output = subprocess.check_output(args, **kwargs)\n # On Python 3, this returns bytes (from STDOUT), so we\n # convert to a string.\n cmd_output_str = cmd_output.decode('utf-8')\n # Also strip the output since it usually has a trailing newline.\n return cmd_output_str.strip()\n except subprocess.CalledProcessError:\n if ignore_err:\n return\n else:\n raise\n\n\ndef pr_from_commit(merge_subject):\n \"\"\"Get pull request ID from a commit message.\n\n .. note::\n\n This assumes we know the commit is a merge commit.\n\n Args:\n merge_subject (str): The subject of a merge commit.\n\n Returns:\n int: The PR ID extracted from the commit subject. If no integer\n can be uniquely extracted, returns :data:`None`.\n \"\"\"\n matches = _PR_ID_REGEX.findall(merge_subject)\n if len(matches) == 1:\n # NOTE: We don't need to catch a ValueError since the regex\n # guarantees the match will be all digits.\n return int(matches[0])\n", "id": "9767128", "language": "Python", "matching_score": 1.0798542499542236, "max_stars_count": 5, "path": "ci_diff_helper/_utils.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test__rate_limit_info(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(response):\n from ci_diff_helper import _github\n return _github._rate_limit_info(response)\n\n def test_it(self):\n import sys\n import mock\n import requests\n from ci_diff_helper import _github\n\n response = requests.Response()\n remaining = '17'\n response.headers[_github._RATE_REMAINING_HEADER] = remaining\n rate_limit = '60'\n response.headers[_github._RATE_LIMIT_HEADER] = rate_limit\n rate_reset = '1475953149'\n response.headers[_github._RATE_RESET_HEADER] = rate_reset\n with mock.patch('six.print_') as mocked:\n self._call_function_under_test(response)\n msg = _github._RATE_LIMIT_TEMPLATE.format(\n remaining, rate_limit, rate_reset)\n self.assertEqual(mocked.call_count, 2)\n mocked.assert_any_call(msg, file=sys.stderr)\n mocked.assert_any_call(_github._GH_ENV_VAR_MSG,\n file=sys.stderr)\n\n\nclass Test__get_headers(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper import _github\n return _github._get_headers()\n\n def test_without_auth(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n headers = self._call_function_under_test()\n\n self.assertEqual(headers, {})\n\n def test_with_auth(self):\n import mock\n from ci_diff_helper import 
environment_vars as env\n\n token = '<PASSWORD>'\n mock_env = {env.GH_TOKEN: token}\n with mock.patch('os.environ', new=mock_env):\n headers = self._call_function_under_test()\n\n expected = {'Authorization': 'token ' + token}\n self.assertEqual(headers, expected)\n\n\nclass Test__maybe_fail(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(response):\n from ci_diff_helper import _github\n return _github._maybe_fail(response)\n\n def test_success(self):\n import mock\n import requests\n from six.moves import http_client\n\n response = mock.Mock(spec=requests.Response,\n status_code=http_client.OK)\n self._call_function_under_test(response)\n\n response.raise_for_status.assert_not_called()\n\n def test_failure(self):\n import mock\n import requests\n from six.moves import http_client\n\n response = requests.Response()\n response.status_code = http_client.FORBIDDEN\n\n to_patch = 'ci_diff_helper._github._rate_limit_info'\n with mock.patch(to_patch) as patched:\n with self.assertRaises(requests.HTTPError):\n self._call_function_under_test(response)\n patched.assert_called_once_with(response)\n\n\nclass Test_commit_compare(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(slug, start, finish):\n from ci_diff_helper import _github\n return _github.commit_compare(slug, start, finish)\n\n @staticmethod\n def _make_response(payload):\n import json\n import requests\n from six.moves import http_client\n\n response = requests.Response()\n response.status_code = http_client.OK\n response._content = json.dumps(payload).encode('utf-8')\n return response\n\n def test_success(self):\n import mock\n\n from ci_diff_helper import _github\n\n payload = {'hi': 'bye'}\n response = self._make_response(payload)\n\n patch_get = mock.patch('requests.get', return_value=response)\n slug = 'a/b'\n start = '1234'\n finish = '6789'\n expected_url = _github._GH_COMPARE_TEMPLATE.format(\n slug, start, finish)\n\n headers_mock = mock.Mock(\n return_value=mock.sentinel.headers)\n fail_mock = mock.Mock()\n with mock.patch.multiple('ci_diff_helper._github',\n _get_headers=headers_mock,\n _maybe_fail=fail_mock):\n with patch_get as mocked_get:\n result = self._call_function_under_test(\n slug, start, finish)\n\n self.assertEqual(result, payload)\n\n # Verify mocks.\n headers_mock.assert_called_once_with()\n fail_mock.assert_called_once_with(response)\n mocked_get.assert_called_once_with(\n expected_url, headers=mock.sentinel.headers)\n\n\nclass Test_pr_info(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(slug, pr_id):\n from ci_diff_helper import _github\n return _github.pr_info(slug, pr_id)\n\n @staticmethod\n def _make_response(payload):\n import json\n import requests\n from six.moves import http_client\n\n response = requests.Response()\n response.status_code = http_client.OK\n response._content = json.dumps(payload).encode('utf-8')\n return response\n\n def test_success(self):\n import mock\n\n from ci_diff_helper import _github\n\n base_sha = '04facb05d80e871107892b3635e24fee60a4fc36'\n payload = {'base': {'sha': base_sha}}\n response = self._make_response(payload)\n\n patch_get = mock.patch('requests.get', return_value=response)\n slug = 'a/b'\n pr_id = 808\n expected_url = _github._GH_PR_TEMPLATE.format(slug, pr_id)\n\n headers_mock = mock.Mock(\n return_value=mock.sentinel.headers)\n fail_mock = mock.Mock()\n with mock.patch.multiple('ci_diff_helper._github',\n _get_headers=headers_mock,\n _maybe_fail=fail_mock):\n with patch_get as mocked_get:\n result = 
self._call_function_under_test(\n slug, pr_id)\n\n self.assertEqual(result, payload)\n\n # Verify mocks.\n headers_mock.assert_called_once_with()\n fail_mock.assert_called_once_with(response)\n mocked_get.assert_called_once_with(\n expected_url, headers=mock.sentinel.headers)\n", "id": "7309963", "language": "Python", "matching_score": 5.246423244476318, "max_stars_count": 5, "path": "tests/test__github.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper to make calls to the GitHub API.\"\"\"\n\nimport os\nimport sys\n\nimport requests\nimport six\nfrom six.moves import http_client\n\nfrom ci_diff_helper import environment_vars as env\n\n\n_GH_COMPARE_TEMPLATE = 'https://api.github.com/repos/{}/compare/{}...{}'\n_GH_PR_TEMPLATE = 'https://api.github.com/repos/{}/pulls/{:d}'\n_RATE_REMAINING_HEADER = 'X-RateLimit-Remaining'\n_RATE_LIMIT_HEADER = 'X-RateLimit-Limit'\n_RATE_RESET_HEADER = 'X-RateLimit-Reset'\n_RATE_LIMIT_TEMPLATE = '{:>25}: {{}}\\n{:>25}: {{}}\\n{:>25}: {{}}'.format(\n _RATE_REMAINING_HEADER, _RATE_LIMIT_HEADER, _RATE_RESET_HEADER)\n_GH_ENV_VAR_MSG = (\n 'You can avoid being rate limited by storing a GitHub OAuth '\n 'token in the {} environment variable').format(env.GH_TOKEN)\n\n\ndef _rate_limit_info(response):\n \"\"\"Print response rate limit information to stderr.\n\n Args:\n response (requests.Response): A GitHub API response.\n \"\"\"\n remaining = response.headers.get(_RATE_REMAINING_HEADER)\n rate_limit = response.headers.get(_RATE_LIMIT_HEADER)\n rate_reset = response.headers.get(_RATE_RESET_HEADER)\n msg = _RATE_LIMIT_TEMPLATE.format(remaining, rate_limit, rate_reset)\n six.print_(msg, file=sys.stderr)\n six.print_(_GH_ENV_VAR_MSG, file=sys.stderr)\n\n\ndef _get_headers():\n \"\"\"Get headers for GitHub API request.\n\n Attempts to add a GitHub token to headers if available.\n\n Returns:\n dict: The headers for a GitHub API request.\n \"\"\"\n headers = {}\n github_token = os.getenv(env.GH_TOKEN, None)\n if github_token is not None:\n headers['Authorization'] = 'token ' + github_token\n\n return headers\n\n\ndef _maybe_fail(response):\n \"\"\"Fail and print info if an API request was not successful.\n\n Args:\n response (requests.models.Response): A ``requests`` response\n from a GitHub API request.\n\n Raises:\n requests.exceptions.HTTPError: If the GitHub API request fails.\n \"\"\"\n if response.status_code != http_client.OK:\n _rate_limit_info(response)\n response.raise_for_status()\n\n\ndef commit_compare(slug, start, finish):\n \"\"\"Makes GitHub API request to compare two commits.\n\n Args:\n slug (str): The GitHub repo slug for the current build.\n Of the form ``{organization}/{repository}``.\n start (str): The start commit in a range.\n finish (str): The last commit in a range.\n\n Returns:\n dict: The parsed JSON payload of the request.\n\n Raises:\n requests.exceptions.HTTPError: If the GitHub API request fails.\n \"\"\"\n api_url = _GH_COMPARE_TEMPLATE.format(slug, start, finish)\n\n headers = _get_headers()\n response = 
requests.get(api_url, headers=headers)\n _maybe_fail(response)\n\n return response.json()\n\n\ndef pr_info(slug, pr_id):\n \"\"\"Makes GitHub API request to info about a pull request.\n\n Args:\n slug (str): The GitHub repo slug for the current build.\n Of the form ``{organization}/{repository}``.\n pr_id (int): The pull request ID.\n\n Returns:\n dict: The pull request information.\n\n Raises:\n requests.exceptions.HTTPError: If the GitHub API request fails.\n \"\"\"\n api_url = _GH_PR_TEMPLATE.format(slug, pr_id)\n\n headers = _get_headers()\n response = requests.get(api_url, headers=headers)\n _maybe_fail(response)\n\n return response.json()\n", "id": "1327455", "language": "Python", "matching_score": 1.8653913736343384, "max_stars_count": 5, "path": "ci_diff_helper/_github.py" }, { "content": "#!/usr/bin/env python\n\n# Copyright (C) 2010-2011 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Test persistent-cal CLI tool.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport argparse\nimport cookielib\nimport cStringIO\nimport getpass\nimport simplejson\nimport sys\nimport unittest\nimport urllib\nimport urllib2\n\n# App specific libraries\nimport persistent_cal\n\n\nACSID_VAL = 'A'\nADD = '/add'\nADD_ERROR_MAP = {'contained': 'contained:fail',\n 'limit': 'limit:fail',\n 'user': 'no_user:fail',\n 'whitelist': 'whitelist:fail'}\nADD_UNEXPECTED_MAP = {'not_list': None,\n 'too_long_list': range(5)}\nAPPLICATION_AUTH_CORRECT_URL = 'application_auth'\nAPPLICATION_AUTH_NO_COOKIE_URL = 'not_application_auth'\nAUTH_VAL = 'c'\nCALENDAR_LINK_VALID = 'calendar_link_valid'\nCLIENT_AUTH_BAD_CONTENT_URL = 'client_login_bad_content'\nCLIENT_AUTH_BAD_ROWS_URL = 'client_login_bad_rows'\nCLIENT_AUTH_CORRECT_URL = 'client_login'\nCLIENT_LOGIN_BAD_AUTH = 'Error=BadAuthentication\\n'\nCLIENT_LOGIN_BAD_SECOND_FACTOR = 'Info=InvalidSecondFactor\\n'\nCLIENT_LOGIN_VALID_AUTH = {'valid': 'SID=a\\nLSID=b\\nAuth=%s\\n' % AUTH_VAL,\n 'bad_rows': 'LSID=b\\nAuth=%s\\n' % AUTH_VAL,\n 'bad_content': 'row1\\nrow2\\nrow3\\n'}\nEMAIL = '<EMAIL>'\nEMAIL_ASP = 'specific'\nEMAIL_NOT_VALID_PASSWORD = '<PASSWORD>'\nEMAIL_PASSWORD = 'password'\nFREQUENCY = '/freq'\nFREQUENCY_ERROR_MAP = {'none': 'no_cal:fail',\n 'user': 'no_user:fail',\n 'wrong': 'wrong_freq:fail'}\nFREQUENCY_RESPONSES = {'week': 'once a week',\n 'two-day': 'every two days',\n 'day': 'once a day',\n 'half-day': 'twice a day',\n 'six-hrs': 'every six hours',\n 'three-hrs': 'every three hours'}\nFREQUENCY_UNEXPECTED_MAP = {'not_list': None,\n 'too_long_list': range(3),\n 'incongruous_list': ['a', 'b']}\nGETINFO = '/getinfo'\nGETINFO_ERROR_MAP = {'cal': 'no_cal:fail',\n 'user': 'no_user:fail'}\nGETINFO_SUCCESS = [[CALENDAR_LINK_VALID], 'once a week']\nGETINFO_UNEXPECTED_MAP = {'not_list': None,\n 'too_long_list': range(3),\n 'first_arg_not_list': ['', '']}\n\n\ndef RaiseHTTPError(url='', code=200, msg='', response_data=''):\n \"\"\"Raises a mock HTTPError with crucial named arguments.\n\n This error will be a minimal 
mock, with headers set to None and\n a cStringIO object used as the body (fp).\n\n Args:\n url: A url to cite in HTTP error\n code: HTTP response code\n msg: exception message passed to urllib2.HTTPError\n response_data: data to be placed in cStringIO object for body\n\n Raises:\n urllib2.HTTPError: uses args (or defaults values) to raise error\n \"\"\"\n hdrs = None\n fp = cStringIO.StringIO(response_data)\n raise urllib2.HTTPError(url, code, msg, hdrs, fp)\n\n\ndef CookieBuilder(name='ACSID', value=ACSID_VAL):\n \"\"\"Mock cookie builder.\n\n For the purposes of the library we are testing, only the\n name and value attributes are used from the cookie, so\n we only require them and provide False-y values for all the\n remaining 15 required attributes.\n\n Args:\n name: cookie name\n value: cookie value\n\n Returns:\n cookielib.Cookie object with mostly False-y attributes, name\n and value set\n \"\"\"\n return cookielib.Cookie(\n version=None, name=name, value=value,\n port=None, port_specified=False,\n domain='', domain_specified=False, domain_initial_dot=False,\n path=None, path_specified=False, secure=False, expires=None,\n discard=False, comment=None, comment_url=None, rest={})\n\n\ndef MockCookieProcessor(cookie_jar):\n \"\"\"Mock cookie processor to be used with MockBuildOpener.\n\n Uses external scope to add cookies to the cookie_jar in the processor.\n The mock CookieBuilder function is used to set only name and value in the\n cookie(s) added.\n\n Args:\n cookie_jar: a cookielib.CookieJar object\n\n Returns:\n function which uses external scope to set a cookie\n \"\"\"\n\n def Update(name, value):\n \"\"\"Function to be returned, changes cookie_jar from external scope.\"\"\"\n cookie = CookieBuilder(name=name, value=value)\n cookie_jar.set_cookie(cookie)\n\n return Update\n\n\ndef ClientLoginOpener(pw_dict, url, data, response_key='valid'):\n \"\"\"Mock urlopen to be used to test Client Login functions.\n\n Args:\n pw_dict: A dictionary of password data, with usernames as keys\n and values equal to passwords or (ASP, password) pairs\n url: string value of requested url\n data: POST data sent with urlopen\n response_key: a key used with the global CLIENT_LOGIN_VALID_AUTH\n to determine what the response body will be in the case of\n a successful request\n\n Returns:\n cStringIO object containing either a BadAuthentication message, one\n or the valid responses in CLIENT_LOGIN_VALID_AUTH\n\n Raises:\n urllib2.HTTPError: in the case that an unexpected error causes a response\n not to be set or a request is sent without a matching email/password\n combination.\n \"\"\"\n for email, password in pw_dict.items():\n email_enc = urllib.urlencode({'Email': email})\n asp_enc = None\n if isinstance(password, tuple):\n app_specific_pass, password = password\n asp_enc = urllib.urlencode({'Passwd': app_specific_pass})\n pw_enc = urllib.urlencode({'Passwd': password})\n\n if data is not None and email_enc in data:\n response = None\n if asp_enc is not None:\n if asp_enc in data:\n return cStringIO.StringIO(CLIENT_LOGIN_VALID_AUTH[response_key])\n else:\n response = CLIENT_LOGIN_BAD_AUTH\n if pw_enc in data:\n response += CLIENT_LOGIN_BAD_SECOND_FACTOR\n elif pw_enc in data:\n return cStringIO.StringIO(CLIENT_LOGIN_VALID_AUTH[response_key])\n else:\n response = CLIENT_LOGIN_BAD_AUTH\n\n if response is not None:\n RaiseHTTPError(url=url, code=403,\n msg='Forbidden', response_data=response)\n\n RaiseHTTPError(url=url, code=403,\n msg='Forbidden', response_data=CLIENT_LOGIN_BAD_AUTH)\n\n\ndef 
AddSubscriptionOpener(request, data):\n \"\"\"Mock urlopen to be used to test with AddSubscription endpoint.\n\n Args:\n request: a urllib2.Request object with header and url data\n data: post data sent with request\n\n Returns:\n cStringIO object containing either an API error message from the globals\n ADD_ERROR_MAP or ADD_UNEXPECTED_MAP or a valid response.\n\n Raises:\n urllib2.HTTPError: in the case that the correct cookie is not set or the\n necessary data is not sent to trigger a 200 response\n \"\"\"\n correct_cookie = 'ACSID=%s' % AUTH_VAL\n calendar_link_enc = urllib.urlencode({'calendar-link': CALENDAR_LINK_VALID})\n\n if request.get_header('Cookie', '') == correct_cookie:\n if calendar_link_enc in data:\n response = simplejson.dumps([CALENDAR_LINK_VALID])\n return cStringIO.StringIO(response)\n else:\n for add_error in ADD_ERROR_MAP:\n add_error_enc = urllib.urlencode({'calendar-link': add_error})\n if add_error_enc in data:\n response = simplejson.dumps(ADD_ERROR_MAP[add_error])\n return cStringIO.StringIO(response)\n\n for add_unexpected in ADD_UNEXPECTED_MAP:\n add_unexpected_enc = urllib.urlencode({'calendar-link': add_unexpected})\n if add_unexpected_enc in data:\n response = simplejson.dumps(ADD_UNEXPECTED_MAP[add_unexpected])\n return cStringIO.StringIO(response)\n\n RaiseHTTPError(url=request, code=404)\n\n\ndef ChangeFrequencyOpener(request, data):\n \"\"\"Mock urlopen to be used to test with ChangeFrequency endpoint.\n\n Args:\n request: a urllib2.Request object with header and url data\n data: post data sent with request\n\n Returns:\n cStringIO object containing either an API error message from the globals\n FREQUENCY_ERROR_MAP or FREQUENCY_UNEXPECTED_MAP or a valid response.\n\n Raises:\n urllib2.HTTPError: in the case that the correct cookie is not set, the\n necessary data is not sent to trigger a 200 response or the method\n of the request is not PUT.\n \"\"\"\n correct_cookie = 'ACSID=%s' % AUTH_VAL\n\n if (request.get_header('Cookie', '') == correct_cookie and\n request.get_method() == 'PUT'):\n for value in FREQUENCY_RESPONSES:\n value_enc = urllib.urlencode({'frequency': value})\n if value_enc in data:\n response = simplejson.dumps([FREQUENCY_RESPONSES[value], value])\n return cStringIO.StringIO(response)\n\n for freq_error in FREQUENCY_ERROR_MAP:\n freq_error_enc = urllib.urlencode({'frequency': freq_error})\n if freq_error_enc in data:\n response = simplejson.dumps(FREQUENCY_ERROR_MAP[freq_error])\n return cStringIO.StringIO(response)\n\n for freq_unexpected in FREQUENCY_UNEXPECTED_MAP:\n freq_unexpected_enc = urllib.urlencode({'frequency': freq_unexpected})\n if freq_unexpected_enc in data:\n response = simplejson.dumps(FREQUENCY_UNEXPECTED_MAP[freq_unexpected])\n return cStringIO.StringIO(response)\n\n RaiseHTTPError(url=request, code=404)\n\n\ndef GetInfoOpener(request, error=None):\n \"\"\"Mock urlopen to be used to test with GetInfo endpoint.\n\n Args:\n request: a urllib2.Request object with header and url data\n error: An error to trigger a mock API error response\n\n Returns:\n cStringIO object containing either an API error message from the globals\n GETINFO_ERROR_MAP or GETINFO_UNEXPECTED_MAP or a valid response.\n\n Raises:\n urllib2.HTTPError: in the case that the correct cookie is not set or\n error is set to an unexpected value\n \"\"\"\n correct_cookie = 'ACSID=%s' % AUTH_VAL\n\n if request.get_header('Cookie', '') == correct_cookie:\n if error is None:\n response = simplejson.dumps(GETINFO_SUCCESS)\n return cStringIO.StringIO(response)\n 
elif error in GETINFO_ERROR_MAP or error in GETINFO_UNEXPECTED_MAP:\n if error in GETINFO_ERROR_MAP:\n response = simplejson.dumps(GETINFO_ERROR_MAP[error])\n else:\n response = simplejson.dumps(GETINFO_UNEXPECTED_MAP[error])\n return cStringIO.StringIO(response)\n\n RaiseHTTPError(url=request, code=404)\n\n\ndef MockOpener(pw_dict):\n \"\"\"Returns mock urlopen function which accounts for user credentials.\"\"\"\n\n def URLOpen(url, data=None):\n \"\"\"Mock urlopen function to be returned.\"\"\"\n if url == CLIENT_AUTH_CORRECT_URL:\n return ClientLoginOpener(pw_dict, url, data)\n elif url == CLIENT_AUTH_BAD_ROWS_URL:\n return ClientLoginOpener(pw_dict, url, data, response_key='bad_rows')\n elif url == CLIENT_AUTH_BAD_CONTENT_URL:\n return ClientLoginOpener(pw_dict, url, data, response_key='bad_content')\n elif isinstance(url, urllib2.Request):\n full_url = url.get_full_url()\n if full_url == ADD:\n return AddSubscriptionOpener(url, data)\n elif full_url == FREQUENCY:\n return ChangeFrequencyOpener(url, data)\n elif full_url == GETINFO:\n return GetInfoOpener(url)\n elif full_url in GETINFO_ERROR_MAP or full_url in GETINFO_UNEXPECTED_MAP:\n return GetInfoOpener(url, error=full_url)\n\n RaiseHTTPError(url=url, code=404,\n msg='Resource not found.', response_data='')\n\n return URLOpen\n\n\nclass MockBuildOpener(object):\n \"\"\"Mock urllib2.build_opener class, interacts with MockCookieProcessor.\"\"\"\n\n def __init__(self, cookie_processor):\n self.cookie_processor = cookie_processor\n\n def open(self, request_url): # pylint: disable-msg=C6409\n if request_url == APPLICATION_AUTH_CORRECT_URL:\n self.cookie_processor(name='ACSID', value=ACSID_VAL)\n elif request_url == APPLICATION_AUTH_NO_COOKIE_URL:\n pass\n else:\n RaiseHTTPError(url=request_url, code=404)\n\n\nclass MockAPIAuthManager(object):\n \"\"\"Mock APIAuthManager class for testing MakeRequest.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def GetApplicationAuth(self):\n return self.value\n\n\nclass TestPrintMessageList(unittest.TestCase):\n \"\"\"Test PrintMessageList helper function.\"\"\"\n\n sys_out = None\n\n def setUp(self): # pylint: disable-msg=C6409\n \"\"\"Configure the test case so stdout can be read.\"\"\"\n self.sys_out = sys.stdout\n sys.stdout = cStringIO.StringIO()\n\n def testList(self): # pylint: disable-msg=C6409\n \"\"\"Tests list input.\"\"\"\n msg_list = ['line1', 'line2 extra length']\n actual_value = ('+--------------------+\\n'\n '| line1 |\\n'\n '| line2 extra length |\\n'\n '+--------------------+\\n')\n persistent_cal.PrintMessageList(msg_list)\n self.assertEqual(sys.stdout.getvalue(), # pylint: disable-msg=E1103\n actual_value)\n\n def testString(self):\n \"\"\"Tests string input.\"\"\"\n msg = 'txt'\n actual_value = ('+-----+\\n'\n '| txt |\\n'\n '+-----+\\n')\n persistent_cal.PrintMessageList(msg)\n self.assertEqual(sys.stdout.getvalue(), # pylint: disable-msg=E1103\n actual_value)\n\n def tearDown(self): # pylint: disable-msg=C6409\n sys.stdout = self.sys_out\n\n\nclass TestGetParser(unittest.TestCase):\n \"\"\"Test GetParser helper function.\"\"\"\n\n def setUp(self): # pylint: disable-msg=C6409\n self.parser = persistent_cal.GetParser('test')\n\n def assertAttrsNotSet(self, obj, attrs):\n \"\"\"Helper method to make sure each attr in attrs is not set on obj.\"\"\"\n for attr in attrs:\n self.assertFalse(hasattr(obj, attr))\n\n def testInvalidArgs(self): # pylint: disable-msg=C6409\n \"\"\"Tests invalid arguments passed to GetParser.\"\"\"\n self.assertRaises(SystemExit, 
self.parser.parse_args, [])\n self.assertRaises(SystemExit, self.parser.parse_args, ['a'])\n\n self.assertRaises(SystemExit, self.parser.parse_args, ['add'])\n self.assertRaises(SystemExit, self.parser.parse_args, ['add',\n 'link',\n 'extra'])\n\n self.assertRaises(SystemExit, self.parser.parse_args, ['chg'])\n self.assertRaises(SystemExit, self.parser.parse_args, ['chg', 'a'])\n\n self.assertRaises(SystemExit, self.parser.parse_args, ['getinfo', 'a'])\n\n # Only one subcommand should work at once\n self.assertRaises(SystemExit, self.parser.parse_args, ['add',\n 'link',\n 'chg',\n '1'])\n self.assertRaises(SystemExit, self.parser.parse_args, ['getinfo',\n 'add',\n 'link'])\n\n def testValidArgs(self): # pylint: disable-msg=C6409\n \"\"\"Tests valid arguments passed to GetParser.\"\"\"\n parsed = self.parser.parse_args(['add', 'link'])\n self.assertTrue(isinstance(parsed, argparse.Namespace))\n self.assertAttrsNotSet(parsed, ['chg', 'getinfo'])\n self.assertEqual(parsed.add, 'link')\n\n parsed = self.parser.parse_args(['chg', '1'])\n self.assertTrue(isinstance(parsed, argparse.Namespace))\n self.assertAttrsNotSet(parsed, ['add', 'getinfo'])\n self.assertEqual(parsed.chg, 1)\n\n parsed = self.parser.parse_args(['getinfo'])\n self.assertTrue(isinstance(parsed, argparse.Namespace))\n self.assertAttrsNotSet(parsed, ['add', 'chg'])\n self.assertEqual(parsed.getinfo, True)\n\n\nclass TestAPIAuthManagerBasic(unittest.TestCase):\n \"\"\"Test APIAuthManager instance init and state functions.\"\"\"\n\n def setUp(self): # pylint: disable-msg=C6409\n self.auth_manager = persistent_cal.APIAuthManager(EMAIL,\n application_id='test')\n\n def testInit(self): # pylint: disable-msg=C6409\n \"\"\"Tests init for APIAuthManager.\"\"\"\n self.assertEqual(self.auth_manager.state, 0)\n self.assertEqual(self.auth_manager.client_auth, None)\n self.assertEqual(self.auth_manager.application_auth, None)\n self.assertEqual(self.auth_manager.email, EMAIL)\n self.assertEqual(self.auth_manager.application_id, 'test')\n\n def testState(self): # pylint: disable-msg=C6409\n \"\"\"Tests derived property state.\"\"\"\n self.assertEqual(self.auth_manager.state, 0)\n\n self.auth_manager.client_auth = 'mock'\n self.assertEqual(self.auth_manager.state, 2)\n\n self.auth_manager.application_auth = 'mock'\n self.assertEqual(self.auth_manager.state, 3)\n\n self.auth_manager.client_auth = None\n self.assertEqual(self.auth_manager.state, 1)\n\n\nclass TestAPIAuthManagerClientAuthHelper(unittest.TestCase):\n \"\"\"Test APIAuthManager instance Client Auth Helper function.\"\"\"\n\n urlopen = None\n\n def setUp(self): # pylint: disable-msg=C6409\n \"\"\"Configure the test case.\n\n We replace urlopen with a MockOpener that authenticates with the global\n constant EMAIL and has an ASP and regular password to simulate the possible\n auth errors that can occur.\n \"\"\"\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: (EMAIL_ASP, EMAIL_PASSWORD)})\n\n self.auth_manager = persistent_cal.APIAuthManager(EMAIL,\n application_id='test')\n\n def testGetClientAuthResponseValid(self): # pylint: disable-msg=C6409\n \"\"\"Test valid request to _GetClientAuthResponse.\"\"\"\n auth_response = self.auth_manager._GetClientAuthResponse(\n EMAIL_ASP, client_login=CLIENT_AUTH_CORRECT_URL)\n self.assertEqual(CLIENT_LOGIN_VALID_AUTH['valid'], auth_response)\n\n def testGetClientAuthResponseASPNeeded(self): # pylint: disable-msg=C6409\n \"\"\"Test request with password when ASP is needed.\"\"\"\n auth_response = 
self.auth_manager._GetClientAuthResponse(\n EMAIL_PASSWORD, client_login=CLIENT_AUTH_CORRECT_URL)\n self.assertEqual(CLIENT_LOGIN_BAD_AUTH + CLIENT_LOGIN_BAD_SECOND_FACTOR,\n auth_response)\n\n def testGetClientAuthResponseWrongPassword(self): # pylint: disable-msg=C6409\n \"\"\"Test request with wrong password sent.\"\"\"\n auth_response = self.auth_manager._GetClientAuthResponse(\n EMAIL_NOT_VALID_PASSWORD, client_login=CLIENT_AUTH_CORRECT_URL)\n self.assertEqual(CLIENT_LOGIN_BAD_AUTH, auth_response)\n\n def testGetClientAuthResponseNoCnxn(self): # pylint: disable-msg=C6409\n \"\"\"Test request when no connection can be made.\"\"\"\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager._GetClientAuthResponse,\n EMAIL_ASP, client_login=None)\n try:\n self.auth_manager._GetClientAuthResponse(EMAIL_ASP, client_login=None)\n except persistent_cal.AuthException as exc:\n self.assertEqual(exc.message, 'Could not connect to Google.')\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n\n\nclass TestAPIAuthManagerClientAuth(unittest.TestCase):\n \"\"\"Test APIAuthManager instance Client Auth function.\"\"\"\n\n urlopen = None\n getpass_fn = None\n\n def setUp(self): # pylint: disable-msg=C6409\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: (EMAIL_ASP, EMAIL_PASSWORD)})\n\n self.getpass_fn = getpass.getpass\n getpass.getpass = lambda prompt: EMAIL_PASSWORD\n\n self.auth_manager = persistent_cal.APIAuthManager(EMAIL,\n application_id='test')\n\n def testGetClientAuthValid(self): # pylint: disable-msg=C6409\n \"\"\"Test valid request to GetClientAuth.\"\"\"\n getpass.getpass = lambda prompt: EMAIL_ASP # correct password from setUp\n\n client_auth = self.auth_manager.GetClientAuth(\n client_login=CLIENT_AUTH_CORRECT_URL)\n self.assertEqual(AUTH_VAL, client_auth)\n self.assertEqual(self.auth_manager.client_auth, client_auth)\n\n def testGetClientAuthASPNeeded(self): # pylint: disable-msg=C6409\n \"\"\"Request made to correct URL with password when ASP is needed.\n\n Since the actual password is given and an ASP is needed, the error\n message is longer and more descriptive.\n \"\"\"\n getpass.getpass = lambda prompt: EMAIL_PASSWORD\n\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetClientAuth,\n client_login=CLIENT_AUTH_CORRECT_URL)\n try:\n self.auth_manager.GetClientAuth(client_login=CLIENT_AUTH_CORRECT_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(len(exc.message), 4)\n self.assertEqual(exc.message[:2], ['Authentication failed.', ''])\n\n self.assertEqual(self.auth_manager.client_auth, None)\n\n def testGetClientAuthWrongPassword(self): # pylint: disable-msg=C6409\n \"\"\"Request made to correct URL with invalid password.\"\"\"\n getpass.getpass = lambda prompt: EMAIL_NOT_VALID_PASSWORD\n\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetClientAuth,\n client_login=CLIENT_AUTH_CORRECT_URL)\n try:\n self.auth_manager.GetClientAuth(client_login=CLIENT_AUTH_CORRECT_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(exc.message, ['Authentication failed.'])\n\n self.assertEqual(self.auth_manager.client_auth, None)\n\n def testGetClientAuthInvalidContent(self): # pylint: disable-msg=C6409\n \"\"\"Request with correct password, bad content returned.\n\n In the MockOpener, we set up two URLs that will execute the same\n authentication steps, but will return content that will not\n be successfully parsed. 
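For reference, a well-formed body, as modelled by the CLIENT_LOGIN_VALID_AUTH\n    constant above, contains exactly three rows::\n\n        SID=a\n        LSID=b\n        Auth=c\n\n    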
The first will be a response with the\n correct value Auth= contained, but which does not have 3 rows;\n this will be at CLIENT_AUTH_BAD_ROWS_URL. The second will be a response\n with 3 rows, but no correct {val}= at the beginning of each row; this\n will be at CLIENT_AUTH_BAD_CONTENT_URL.\n \"\"\"\n getpass.getpass = lambda prompt: EMAIL_ASP\n\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetClientAuth,\n client_login=CLIENT_AUTH_BAD_ROWS_URL)\n try:\n self.auth_manager.GetClientAuth(client_login=CLIENT_AUTH_BAD_ROWS_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(exc.message, 'Client login failed.')\n\n self.assertEqual(self.auth_manager.client_auth, None)\n\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetClientAuth,\n client_login=CLIENT_AUTH_BAD_CONTENT_URL)\n try:\n self.auth_manager.GetClientAuth(client_login=CLIENT_AUTH_BAD_CONTENT_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(exc.message, 'Client login failed.')\n\n self.assertEqual(self.auth_manager.client_auth, None)\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n getpass.getpass = self.getpass_fn\n\n\nclass TestAPIAuthManagerApplicationAuth(unittest.TestCase):\n \"\"\"Test APIAuthManager instance Application Auth functions.\"\"\"\n\n urlopen = None\n getpass_fn = None\n build_opener = None\n cookie_processor = None\n\n def setUp(self): # pylint: disable-msg=C6409\n self.auth_manager = persistent_cal.APIAuthManager(EMAIL,\n application_id='test')\n\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: EMAIL_PASSWORD})\n\n self.getpass_fn = getpass.getpass\n getpass.getpass = lambda prompt: EMAIL_PASSWORD # Will be valid throughout\n\n self.build_opener = urllib2.build_opener\n urllib2.build_opener = MockBuildOpener\n self.cookie_processor = urllib2.HTTPCookieProcessor\n urllib2.HTTPCookieProcessor = MockCookieProcessor\n\n def testGetApplicationAuthValid(self): # pylint: disable-msg=C6409\n \"\"\"Test request to valid request url.\"\"\"\n request_url = APPLICATION_AUTH_CORRECT_URL\n application_auth = self.auth_manager.GetApplicationAuth(\n request_url=request_url, client_login=CLIENT_AUTH_CORRECT_URL)\n self.assertEqual(ACSID_VAL, application_auth)\n self.assertEqual(self.auth_manager.application_auth, application_auth)\n\n def testGetApplicationAuthNoCookie(self): # pylint: disable-msg=C6409\n \"\"\"Test request to valid request url that doesn't set cookie.\"\"\"\n request_url = APPLICATION_AUTH_NO_COOKIE_URL\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetApplicationAuth,\n request_url=request_url,\n client_login=CLIENT_AUTH_CORRECT_URL)\n try:\n self.auth_manager.GetApplicationAuth(\n request_url=request_url, client_login=CLIENT_AUTH_CORRECT_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Authentication error.', ''])\n\n def testGetApplicationAuthInvalid(self): # pylint: disable-msg=C6409\n \"\"\"Test request to invalid request url with valid credentials.\"\"\"\n request_url = ''\n self.assertRaises(persistent_cal.AuthException,\n self.auth_manager.GetApplicationAuth,\n request_url=request_url,\n client_login=CLIENT_AUTH_CORRECT_URL)\n try:\n self.auth_manager.GetApplicationAuth(\n request_url=request_url, client_login=CLIENT_AUTH_CORRECT_URL)\n except persistent_cal.AuthException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], 
['Connection error.', ''])\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n getpass.getpass = self.getpass_fn\n\n urllib2.build_opener = self.build_opener\n urllib2.HTTPCookieProcessor = self.cookie_processor\n\n\nclass TestAddSubscription(unittest.TestCase):\n \"\"\"Test AddSubscription function for authenticated API calls.\"\"\"\n\n urlopen = None\n\n def setUp(self): # pylint: disable-msg=C6409\n self.application_auth = AUTH_VAL\n\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: EMAIL_PASSWORD})\n\n def testIncorrectPayload(self): # pylint: disable-msg=C6409\n \"\"\"Test correct add endpoint with bad payload data.\"\"\"\n payload = {}\n\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.AddSubscription,\n self.application_auth, payload, add_endpoint=ADD)\n\n try:\n persistent_cal.AddSubscription(self.application_auth,\n payload, add_endpoint=ADD)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message,\n ['Unexpected behavior: library error.', '',\n 'No calendar link was specified in the payload.'])\n\n def testValidRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to valid URL.\n\n By default, the mock opener will only accept add requests at the global ADD\n and will behave correctly given the calendar-link value CALENDAR_LINK_VALID.\n \"\"\"\n payload = {'calendar-link': CALENDAR_LINK_VALID}\n\n api_response = persistent_cal.AddSubscription(self.application_auth,\n payload,\n add_endpoint=ADD)\n self.assertEqual(api_response, ['Success!', '',\n 'Your current subscriptions are:',\n CALENDAR_LINK_VALID])\n\n def testBadRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to invalid URL.\n\n We send two requests: one to the URL '' which is Invalid and another with\n the cookie value set to ''.\n \"\"\"\n bad_add_endpoint = ''\n payload = {'calendar-link': CALENDAR_LINK_VALID}\n\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.AddSubscription,\n self.application_auth, payload,\n add_endpoint=bad_add_endpoint)\n\n try:\n persistent_cal.AddSubscription(self.application_auth,\n payload, add_endpoint=bad_add_endpoint)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n bad_application_auth = ''\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.AddSubscription,\n bad_application_auth, payload,\n add_endpoint=ADD)\n\n try:\n persistent_cal.AddSubscription(bad_application_auth,\n payload, add_endpoint=ADD)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n def testBadAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request sent with data that the API rejects.\n\n The AddSubscriptionOpener has predefined values that it will explicitly\n reject, defined in ADD_ERROR_MAP. 
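For example, a payload of {'calendar-link': 'limit'} makes the mock return\n    simplejson.dumps('limit:fail'), which AddSubscription should surface as\n    ERROR_RESPONSES['add']['limit:fail'].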
By sending a valid request with the keys\n for ADD_ERROR_MAP in the payload, we can stub out the validation\n done server side by the API.\n \"\"\"\n actual_error_response_map = persistent_cal.ERROR_RESPONSES['add']\n\n for add_error in ADD_ERROR_MAP:\n payload = {'calendar-link': add_error}\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.AddSubscription,\n self.application_auth, payload,\n add_endpoint=ADD)\n\n try:\n persistent_cal.AddSubscription(self.application_auth,\n payload, add_endpoint=ADD)\n except persistent_cal.APIUseException as exc:\n api_add_error = ADD_ERROR_MAP[add_error]\n self.assertEqual(exc.message, actual_error_response_map[api_add_error])\n\n def testUnexpectedAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request with unexpected response.\n\n We use the AddSubscriptionOpener to return JSON objects which are not\n expected as responses by the API. Explicitly, objects which are either\n not a list or are a list of length greater than 4. The object and keys\n are in the global ADD_UNEXPECTED_MAP.\n \"\"\"\n for add_unexpected in ADD_UNEXPECTED_MAP:\n payload = {'calendar-link': add_unexpected}\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.AddSubscription,\n self.application_auth, payload,\n add_endpoint=ADD)\n\n try:\n persistent_cal.AddSubscription(self.application_auth,\n payload, add_endpoint=ADD)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message, 'An unexpected error occurred.')\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n\n\nclass TestChangeFrequency(unittest.TestCase):\n \"\"\"Test ChangeFrequency function for authenticated API calls.\"\"\"\n\n urlopen = None\n\n def setUp(self): # pylint: disable-msg=C6409\n self.application_auth = AUTH_VAL\n\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: EMAIL_PASSWORD})\n\n def testIncorrectPayload(self): # pylint: disable-msg=C6409\n \"\"\"Test correct frequency endpoint with bad payload data.\"\"\"\n payload = {}\n\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.ChangeFrequency,\n self.application_auth, payload, freq_endpoint=FREQUENCY)\n\n try:\n persistent_cal.ChangeFrequency(self.application_auth,\n payload, freq_endpoint=FREQUENCY)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message,\n ['Unexpected behavior: library error.', '',\n 'No frequency was specified in the HTTP payload.'])\n\n def testValidRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to valid URL.\n\n By default, the mock opener will only accept add requests at the global\n FREQUENCY.\n \"\"\"\n for frequency, desc in persistent_cal.FREQUENCY_MAP.items():\n update_line = ('Your subscriptions will be updated %s.' 
%\n FREQUENCY_RESPONSES[desc])\n\n payload = {'frequency': frequency}\n api_response = persistent_cal.ChangeFrequency(self.application_auth,\n payload,\n freq_endpoint=FREQUENCY)\n self.assertEqual(api_response, ['Success!', '', update_line])\n\n payload = {'frequency': desc}\n api_response = persistent_cal.ChangeFrequency(self.application_auth,\n payload,\n freq_endpoint=FREQUENCY)\n self.assertEqual(api_response, ['Success!', '', update_line])\n\n def testBadRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to invalid URL or with wrong method.\n\n We send two requests: one to the URL '' which is Invalid and another\n with the cookie value set to ''.\n \"\"\"\n payload = {'frequency': 1}\n\n bad_freq_endpoint = ''\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.ChangeFrequency,\n self.application_auth, payload,\n freq_endpoint=bad_freq_endpoint)\n\n try:\n persistent_cal.ChangeFrequency(self.application_auth,\n payload, freq_endpoint=bad_freq_endpoint)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n bad_application_auth = ''\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.ChangeFrequency,\n bad_application_auth, payload,\n freq_endpoint=FREQUENCY)\n\n try:\n persistent_cal.ChangeFrequency(bad_application_auth,\n payload, freq_endpoint=FREQUENCY)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n def testBadAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request sent with data that the API rejects.\n\n The ChangeFrequencyOpener has predefined values that it will explicitly\n reject, defined in FREQUENCY_ERROR_MAP. By sending a valid request with the\n keys for FREQUENCY_ERROR_MAP in the payload, we can stub out the validation\n done server side by the API.\n \"\"\"\n actual_error_response_map = persistent_cal.ERROR_RESPONSES['chg']\n\n for freq_error in FREQUENCY_ERROR_MAP:\n payload = {'frequency': freq_error}\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.ChangeFrequency,\n self.application_auth, payload,\n freq_endpoint=FREQUENCY)\n\n try:\n persistent_cal.ChangeFrequency(self.application_auth,\n payload, freq_endpoint=FREQUENCY)\n except persistent_cal.APIUseException as exc:\n api_freq_error = FREQUENCY_ERROR_MAP[freq_error]\n self.assertEqual(exc.message, actual_error_response_map[api_freq_error])\n\n def testUnexpectedAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request with unexpected response.\n\n We use the ChangeFrequencyOpener to return JSON objects which are not\n expected as responses by the API. Explicitly, objects which are either\n not a list, are a list of length not equal to 2 or a list of length 2\n with second element not equal to the frequency. 
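Concretely, the shapes exercised are::\n\n        None        # not a list\n        range(3)    # wrong length\n        ['a', 'b']  # length two, but the second element is not the frequency\n\n    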
The object and keys\n are in the global FREQUENCY_UNEXPECTED_MAP.\n \"\"\"\n for freq_unexpected in FREQUENCY_UNEXPECTED_MAP:\n payload = {'frequency': freq_unexpected}\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.ChangeFrequency,\n self.application_auth, payload,\n freq_endpoint=FREQUENCY)\n\n try:\n persistent_cal.ChangeFrequency(self.application_auth,\n payload, freq_endpoint=FREQUENCY)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message, 'An unexpected error occurred.')\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n\n\nclass TestGetInfo(unittest.TestCase):\n \"\"\"Test GetInfo function for authenticated API calls.\"\"\"\n\n urlopen = None\n\n def setUp(self): # pylint: disable-msg=C6409\n self.application_auth = AUTH_VAL\n\n self.urlopen = urllib2.urlopen\n urllib2.urlopen = MockOpener({EMAIL: EMAIL_PASSWORD})\n\n def testValidRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to valid URL.\n\n By default, the mock opener will only accept add requests at the global\n GETINFO or to the keys in GETINFO_ERROR_MAP and GETINFO_UNEXPECTED_MAP.\n (It turns the keys into the error keyword and will not give a valid\n response, so only GETINFO will be valid.)\n \"\"\"\n api_response = persistent_cal.GetInfo(self.application_auth,\n getinfo_endpoint=GETINFO)\n calendars, verbose_freq = GETINFO_SUCCESS\n result = ['Your subscriptions will be updated %s.' % verbose_freq,\n '', 'Your current subscriptions are:'] + calendars\n self.assertEqual(api_response, result)\n\n def testBadRequest(self): # pylint: disable-msg=C6409\n \"\"\"Valid request sent to invalid URL or with bad cookie.\n\n We send two requests: one to the URL '' which is Invalid and another to\n with the cookie value set to ''.\n \"\"\"\n bad_getinfo_endpoint = ''\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.GetInfo,\n self.application_auth,\n getinfo_endpoint=bad_getinfo_endpoint)\n\n try:\n persistent_cal.GetInfo(self.application_auth,\n getinfo_endpoint=bad_getinfo_endpoint)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n bad_application_auth = ''\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.GetInfo,\n bad_application_auth,\n getinfo_endpoint=GETINFO)\n\n try:\n persistent_cal.GetInfo(bad_application_auth,\n getinfo_endpoint=GETINFO)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(len(exc.message), 3)\n self.assertEqual(exc.message[:2], ['Connection error.', ''])\n\n def testBadAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request sent with data that the API rejects.\n\n The MockOpener will direct requests to special error links which are\n defined in GETINFO_ERROR_MAP. This are passed to GetInfoOpener as a keyword\n argument. 
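For example, requesting the 'cal' endpoint makes the mock return\n    simplejson.dumps('no_cal:fail'), which GetInfo should surface as\n    ERROR_RESPONSES['getinfo']['no_cal:fail'].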
By sending a valid request with the, we can stub out the\n validation done server side by the API.\n \"\"\"\n actual_error_response_map = persistent_cal.ERROR_RESPONSES['getinfo']\n\n for getinfo_error_endpoint in GETINFO_ERROR_MAP:\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.GetInfo,\n self.application_auth,\n getinfo_endpoint=getinfo_error_endpoint)\n\n try:\n persistent_cal.GetInfo(self.application_auth,\n getinfo_endpoint=getinfo_error_endpoint)\n except persistent_cal.APIUseException as exc:\n api_getinfo_error = GETINFO_ERROR_MAP[getinfo_error_endpoint]\n self.assertEqual(exc.message,\n actual_error_response_map[api_getinfo_error])\n\n def testUnexpectedAPIResponses(self): # pylint: disable-msg=C6409\n \"\"\"Valid API request with unexpected response.\n\n The MockOpener will direct requests to special error links which are\n defined in GETINFO_UNEXPECTED_MAP. From there, we use the GetInfoOpener to\n return JSON objects which are not expected as responses by the API.\n Explicitly, objects which are either not a list, are a list of length not\n equal to 2 or a list of length 2 with first element not equal to the a list.\n The object and keys are in the global GETINFO_UNEXPECTED_MAP.\n \"\"\"\n for getinfo_unexpected_endpoint in GETINFO_UNEXPECTED_MAP:\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.GetInfo,\n self.application_auth,\n getinfo_endpoint=getinfo_unexpected_endpoint)\n\n try:\n persistent_cal.GetInfo(self.application_auth,\n getinfo_endpoint=getinfo_unexpected_endpoint)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message, 'An unexpected error occurred.')\n\n def tearDown(self): # pylint: disable-msg=C6409\n urllib2.urlopen = self.urlopen\n\n\nclass TestMakeRequest(unittest.TestCase):\n \"\"\"Test MakeRequest function for authenticated API calls.\"\"\"\n\n def setUp(self): # pylint: disable-msg=C6409\n \"\"\"Configure the test case.\n\n We replace the API specific functions with a function which returns the\n arguments. In addition, we simulate raw_input in each test with a constant\n function. 
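For example, persistent_cal.AddSubscription is swapped for::\n\n        lambda *args: ('add',) + args\n\n    so each test can assert on the exact arguments MakeRequest forwards.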
Finally, the APIAuthManager object is mocked to as well, since\n we aren't relying on authentication to test the API specific functions.\n \"\"\"\n self.add_subscription = persistent_cal.AddSubscription\n persistent_cal.AddSubscription = lambda *args: ('add',) + args\n self.change_frequency = persistent_cal.ChangeFrequency\n persistent_cal.ChangeFrequency = lambda *args: ('chg',) + args\n self.get_info = persistent_cal.GetInfo\n persistent_cal.GetInfo = lambda *args: ('getinfo',) + args\n\n self.raw_input = __builtins__.raw_input\n\n self.api_auth_manager = persistent_cal.APIAuthManager\n persistent_cal.APIAuthManager = MockAPIAuthManager\n\n def testAddSubscription(self): # pylint: disable-msg=C6409\n \"\"\"Test valid arguments passed to AddSubscription.\"\"\"\n first_arg = 'a'\n __builtins__.raw_input = lambda prompt: first_arg\n\n add_value = 'link'\n parsed_args = argparse.Namespace(add=add_value)\n\n request_result = persistent_cal.MakeRequest(parsed_args)\n self.assertEqual(request_result, ('add',\n first_arg,\n {'calendar-link': add_value}))\n\n def testChangeFrequency(self): # pylint: disable-msg=C6409\n \"\"\"Test valid arguments passed to ChangeFrequency.\"\"\"\n first_arg = 'b'\n __builtins__.raw_input = lambda prompt: first_arg\n\n freq_value = 1\n parsed_args = argparse.Namespace(chg=freq_value)\n\n request_result = persistent_cal.MakeRequest(parsed_args)\n self.assertEqual(request_result, ('chg',\n first_arg,\n {'frequency': freq_value}))\n\n def testGetInfo(self): # pylint: disable-msg=C6409\n \"\"\"Test valid arguments passed to GetInfo.\"\"\"\n first_arg = 'c'\n __builtins__.raw_input = lambda prompt: first_arg\n\n parsed_args = argparse.Namespace(getinfo=True)\n\n request_result = persistent_cal.MakeRequest(parsed_args)\n self.assertEqual(request_result, ('getinfo', first_arg))\n\n def testInvalidArguments(self): # pylint: disable-msg=C6409\n \"\"\"Test invalid arguments.\"\"\"\n first_arg = 'd'\n __builtins__.raw_input = lambda prompt: first_arg\n\n parsed_args = argparse.Namespace()\n\n self.assertRaises(persistent_cal.APIUseException,\n persistent_cal.MakeRequest, parsed_args)\n try:\n persistent_cal.MakeRequest(parsed_args)\n except persistent_cal.APIUseException as exc:\n self.assertEqual(exc.message,\n 'Request attempted without valid arguments.')\n\n def tearDown(self): # pylint: disable-msg=C6409\n persistent_cal.AddSubscription = self.add_subscription\n persistent_cal.ChangeFrequency = self.change_frequency\n persistent_cal.GetInfo = self.get_info\n\n __builtins__.raw_input = self.raw_input\n\n persistent_cal.APIAuthManager = self.api_auth_manager\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "469694", "language": "Python", "matching_score": 9.429931640625, "max_stars_count": 1, "path": "scripts/persistent_cal_test.py" }, { "content": "#!/usr/bin/env python\n\n# Copyright (C) 2010-2011 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Command Line Tool for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\nimport 
sys\n\n\n# Constants\nAPP_ID = 'persistent-cal'\nADD_ENDPOINT = 'http://%s.appspot.com/add' % APP_ID\nFREQ_ENDPOINT = 'http://%s.appspot.com/freq' % APP_ID\nGETINFO_ENDPOINT = 'http://%s.appspot.com/getinfo' % APP_ID\nLOGIN_ENDPOINT = 'http://%s.appspot.com/_ah/login?auth=' % APP_ID\nCLIENT_LOGIN = 'https://www.google.com/accounts/ClientLogin'\nAPPLICATION_AUTH_URL_TEMPLATE = LOGIN_ENDPOINT + '%s'\nREQUIRED_LIBRARIES = ['argparse',\n 'cookielib',\n 'getpass',\n 'simplejson',\n 'urllib',\n 'urllib2']\nFREQUENCY_MAP = {56: 'three-hrs',\n 28: 'six-hrs',\n 14: 'half-day',\n 7: 'day',\n 4: 'two-day',\n 1: 'week'}\nFREQUENCY_CHOICES = [str(key) for key in sorted(FREQUENCY_MAP.keys())]\nERROR_RESPONSES = {\n 'add': {'whitelist:fail': ['Feed is not on whitelist.', '',\n 'See http://%s.appspot.com/about.' % APP_ID],\n 'limit:fail': ['You have reached the maximum number of feeds.', '',\n 'See http://%s.appspot.com/about.' % APP_ID],\n 'contained:fail': ('You are already subscribed to this '\n 'calendar feed.'),\n 'no_user:fail': 'No user was provided.'},\n 'chg': {'no_cal:fail': 'You have no calendar to update.',\n 'wrong_freq:fail':\n ['The value given is not a valid frequency.', '',\n 'This value represents the number of times per week that your',\n 'personal calendar will be synced with your subscribed calendars.',\n 'Choices are: %s.' % (', '.join(FREQUENCY_CHOICES))],\n 'no_user:fail': 'No user was provided.'},\n 'getinfo': {'no_user:fail': 'No user was provided.',\n 'no_cal:fail': 'You have never used %s.' % APP_ID}}\n\n\nclass MessageException(Exception):\n \"\"\"Base exception for holding a printable message.\"\"\"\n message = None\n\n def __init__(self, message, *args, **kwargs):\n super(MessageException, self).__init__(*args, **kwargs)\n self.message = message\n\n\nclass AuthException(MessageException):\n \"\"\"Thrown when an authentication error occurs.\"\"\"\n\n\nclass APIUseException(MessageException):\n \"\"\"Thrown when the API is used incorrectly or returns an error.\"\"\"\n\n\nclass APIAuthManager(object):\n \"\"\"Class for handling AppEngine application auth via Client Login.\"\"\"\n\n client_auth = None\n application_auth = None\n\n def __init__(self, email, application_id=APP_ID):\n self.email = email\n self.application_id = application_id\n\n def __repr__(self):\n return 'APIAuthManager(%s, app=%s, state=%s)' % (self.email,\n self.application_id,\n self.status)\n\n @property\n def state(self):\n \"\"\"State value using client auth as 1st binary bit and app auth as 2nd.\"\"\"\n if self.client_auth is None:\n return 0 if self.application_auth is None else 1\n else:\n return 2 if self.application_auth is None else 3\n\n @property\n def status(self):\n \"\"\"Verbose status message for each auth state.\"\"\"\n state_map = {0: 'No Auth Completed',\n 1: 'Login Corrupted',\n 2: 'Client Login Completed',\n 3: 'Auth Complete'}\n return state_map[self.state]\n\n def _GetClientAuthResponse(self, password, client_login=CLIENT_LOGIN):\n \"\"\"Submit client login request and return response body.\n\n Args:\n password: <PASSWORD> <PASSWORD> <PASSWORD>\n client_login: the url used to make client login requests, defaults to the\n global value CLIENT_LOGIN\n\n Returns:\n auth_response: The body of the client login response if successful,\n else None\n\n Raises:\n AuthException: in the case that the auth_response is not set\n \"\"\"\n params = urllib.urlencode( # pylint:disable-msg=E0602\n {'accountType': 'GOOGLE',\n 'service': 'ah',\n 'source': self.application_id,\n 'Email': self.email,\n 
'Passwd': password})\n\n auth_response = None\n try:\n auth_cnxn = urllib2.urlopen( # pylint:disable-msg=E0602\n client_login, params)\n auth_response = auth_cnxn.read()\n auth_cnxn.close()\n except urllib2.HTTPError as exc: # pylint:disable-msg=E0602\n if exc.code in (401, 403):\n auth_response = exc.read()\n\n if auth_response is None:\n raise AuthException('Could not connect to Google.')\n\n return auth_response\n\n def GetClientAuth(self, client_login=CLIENT_LOGIN):\n \"\"\"Get Auth Token for user from client login.\n\n The body of the response is expected to be three lines, beginning\n with SID=, LSID= and Auth=, and in that order. If the authentication\n was not successful, we expect 'Error=BadAuthentication' to be in the\n response. If the failure was caused by a user having two factor\n authentication activated, we expect 'Info=InvalidSecondFactor' to\n be in the response as well\n\n In the case that the method is called after login has completed, we\n reset all auth values and rely on the method to reset the client auth.\n\n Args:\n client_login: the url used to make client login requests, defaults to the\n global value CLIENT_LOGIN\n\n Returns:\n self.client_auth: the final value of client auth. If by the end of the\n method, it has not been set, AuthException will be raised.\n\n Raises:\n AuthException: in the case that _GetClientAuthResponse returns None, the\n response contains BadAuthentication or is an unexpected format.\n \"\"\"\n self.client_auth = None\n self.application_auth = None\n\n password = <PASSWORD>pass('Password: ') # pylint:disable-msg=E0602\n auth_response = self._GetClientAuthResponse(password, client_login)\n\n if 'Error=BadAuthentication' in auth_response:\n to_raise = ['Authentication failed.']\n if 'Info=InvalidSecondFactor' in auth_response:\n to_raise.extend(['',\n 'Two factor authorization is not supported.',\n 'Please use an application specific password.'])\n raise AuthException(to_raise)\n\n auth_rows = [row for row in auth_response.split('\\n') if row]\n if len(auth_rows) == 3:\n sid_row, lsid_row, auth_row = auth_rows\n if (sid_row.startswith('SID=') and lsid_row.startswith('LSID=') and\n auth_row.startswith('Auth=')):\n self.client_auth = auth_row.lstrip('Auth=')\n\n if self.client_auth is None:\n raise AuthException('Client login failed.')\n\n return self.client_auth\n\n def GetApplicationAuth(self, request_url=None, client_login=CLIENT_LOGIN):\n \"\"\"Obtain application specific cookie by using self.client_auth.\n\n In order to make API requests, we need to use the Google account cookie to\n authenticate with the application and then keep the application specific\n cookie for later use.\n\n In the case that the method is called after login has completed, we\n reset the application auth value and rely on the method to reset it.\n\n Args:\n request_url: application specific url to request a cookie given a\n client auth token has been obtained. In default case, is set to None,\n and the script will set it with the client auth token and a url\n template.\n client_login: the url used to make client login requests, defaults to the\n global value CLIENT_LOGIN\n\n Returns:\n self.application_auth: the final value of the application cookie. 
If by\n the end of the method, it has not been set, AuthException\n will be raised.\n\n Raises:\n AuthException: in the case that GetClientAuth raises it, the inital\n request fails, or no ACSID= cookie is returned\n \"\"\"\n self.application_auth = None\n if self.state < 2:\n self.GetClientAuth(client_login)\n\n if request_url is None:\n request_url = APPLICATION_AUTH_URL_TEMPLATE % self.client_auth\n\n cookie_jar = cookielib.CookieJar() # pylint:disable-msg=E0602\n opener = urllib2.build_opener( # pylint:disable-msg=E0602\n urllib2.HTTPCookieProcessor(cookie_jar)) # pylint:disable-msg=E0602\n\n try:\n opener.open(request_url)\n except urllib2.HTTPError: # pylint:disable-msg=E0602\n raise AuthException(\n ['Connection error.', '',\n 'Could not reach %s to obtain a cookie.' % self.application_id])\n\n for cookie in cookie_jar:\n if cookie.name == 'ACSID':\n self.application_auth = cookie.value\n break\n\n if self.application_auth is None:\n raise AuthException(\n ['Authentication error.', '',\n 'Could not retrieve cookie from %s.' % self.application_id])\n\n return self.application_auth\n\n\ndef PrintMessageList(msg_list):\n \"\"\"Print a list with a nice box format.\n\n The input is surrounded by a box with | on edges, - across the top and\n + in each corner.\n\n Args:\n msg_list: A string or a list of strings.\n \"\"\"\n if isinstance(msg_list, str) or isinstance(msg_list, unicode):\n msg_list = [msg_list]\n\n length = max(len(line) for line in msg_list)\n result = ['| %s |' % line.ljust(length) for line in msg_list]\n header = '+%s+' % ('-' * (length + 2))\n result = [header] + result + [header]\n\n print('\\n'.join(result))\n\n\ndef AddSubscription(application_auth, payload, add_endpoint=ADD_ENDPOINT):\n \"\"\"Attempts to add a calendar link for the authenticated user via the API.\n\n If the payload dictionary does not contain the key 'calendar-link', we return\n an error message. If it does, the calendar link is sent to the API and either\n the new subscription list is returned by the API or an JSON error message\n is returned explaining why the link was rejected.\n\n Args:\n application_auth: the ACSID cookie specific to the application and the user\n payload: a dictionary corresponding to the necessary data to make an API\n request. For this function, we expect the key 'calendar-link' and the\n value we expect to be any string.\n add_endpoint: The API endpoint for making add requests. 
By default this is\n set to the global ADD_ENDPOINT.\n\n Returns:\n A list of lines or a string instance to be passed to PrintMessageList to\n alert the user of failure or success\n\n Raises:\n APIUseException: in the case that no link is in the http payload, the\n request times out, the API returns an error in ERROR_RESPONSES\n or the response is not a list of length 4 or less\n \"\"\"\n calendar_link = payload.get('calendar-link', None)\n if calendar_link is None:\n raise APIUseException(['Unexpected behavior: library error.', '',\n 'No calendar link was specified in the payload.'])\n\n request = urllib2.Request(add_endpoint) # pylint:disable-msg=E0602\n request.add_header('Cookie', 'ACSID=%s' % application_auth)\n params = urllib.urlencode( # pylint:disable-msg=E0602\n {'calendar-link': calendar_link})\n\n try:\n add_subs_cnxn = urllib2.urlopen(request, params) # pylint:disable-msg=E0602\n response_val = simplejson.loads( # pylint:disable-msg=E0602\n add_subs_cnxn.read())\n add_subs_cnxn.close()\n except urllib2.HTTPError: # pylint:disable-msg=E0602\n raise APIUseException([\n 'Connection error.', '',\n 'Could not reach %s to add %s.' % (APP_ID, calendar_link)])\n\n # Output may be a list, which is unhashable\n if response_val in ERROR_RESPONSES['add'].keys():\n raise APIUseException(ERROR_RESPONSES['add'][response_val])\n\n if type(response_val) != list or len(response_val) > 4:\n raise APIUseException('An unexpected error occurred.')\n\n return ['Success!', '', 'Your current subscriptions are:'] + response_val\n\n\ndef ChangeFrequency(application_auth, payload, freq_endpoint=FREQ_ENDPOINT):\n \"\"\"Attempts to change the frequency for the authenticated user via the API.\n\n If the payload dictionary does not contain the key 'frequency', we return an\n error message. If the frequency provided is contained in FREQUENCY_MAP, we\n transform the value and let the API determine if the transformed value is\n valid. If the cookie value is not valid or the server can't be reached, an\n error message is returned to be printed by the caller.\n\n In the case of success, the API returns a JSON tuple (verbose, short) where\n verbose if the human readable version of the frequency and short is the\n version used as a shorthand.\n\n Args:\n application_auth: the ACSID cookie specific to the application and the user\n payload: a dictionary corresponding to the necessary data to make an API\n request. For this function, we expect the key 'frequency' and the value\n we expect to be a key in the constant FREQUENCY_MAP.\n freq_endpoint: The API endpoint for making frequency change requests. 
By\n default this is set to the global FREQ_ENDPOINT.\n\n Returns:\n A list of lines or a string instance to be passed to PrintMessageList to\n alert the user of failure or success\n\n Raises:\n APIUseException: in the case that frequency is not in the http payload, the\n request times out, the API returns an error in ERROR_RESPONSES\n or the response is not a list of length 2\n \"\"\"\n frequency = payload.get('frequency', None)\n if frequency is None:\n raise APIUseException(['Unexpected behavior: library error.', '',\n 'No frequency was specified in the HTTP payload.'])\n\n if frequency in FREQUENCY_MAP:\n frequency = FREQUENCY_MAP[frequency]\n\n request = urllib2.Request(freq_endpoint) # pylint:disable-msg=E0602\n request.add_header('Cookie', 'ACSID=%s' % application_auth)\n request.get_method = lambda: 'PUT'\n params = urllib.urlencode( # pylint:disable-msg=E0602\n {'frequency': frequency})\n\n try:\n add_subs_cnxn = urllib2.urlopen(request, params) # pylint:disable-msg=E0602\n response_val = simplejson.loads( # pylint:disable-msg=E0602\n add_subs_cnxn.read())\n add_subs_cnxn.close()\n except urllib2.HTTPError: # pylint:disable-msg=E0602\n raise APIUseException(\n ['Connection error.', '',\n 'Could not reach %s to change freq to %s.' % (APP_ID, frequency)])\n\n # Output may be a list, which is unhashable\n if response_val in ERROR_RESPONSES['chg'].keys():\n raise APIUseException(ERROR_RESPONSES['chg'][response_val])\n\n if (type(response_val) != list or len(response_val) != 2 or\n response_val[1] != frequency):\n raise APIUseException('An unexpected error occurred.')\n\n return ['Success!', '',\n 'Your subscriptions will be updated %s.' % response_val[0]]\n\n\ndef GetInfo(application_auth, getinfo_endpoint=GETINFO_ENDPOINT):\n \"\"\"Attempts to get subscription info for the authenticated user via the API.\n\n If the cookie value is not valid or the server can't be reached, an\n error message is returned to be printed by the caller.\n\n In the case of success, the API returns a JSON tuple (calendars, frequency)\n where calendars is a list of calendar subscriptions and frequency is the\n the human readable version of the frequency.\n\n Args:\n application_auth: the ACSID cookie specific to the application and the user\n getinfo_endpoint: The API endpoint for making information requests. By\n default this is set to the global FREQ_ENDPOINT.\n\n Returns:\n A list of lines or a string instance to be passed to PrintMessageList to\n alert the user of failure or success\n\n Raises:\n APIUseException: in the case that the request times out, the API returns an\n error in ERROR_RESPONSES or the response is not a list of length 2\n with first value a list as well\n \"\"\"\n request = urllib2.Request(getinfo_endpoint) # pylint:disable-msg=E0602\n request.add_header('Cookie', 'ACSID=%s' % application_auth)\n\n try:\n add_subs_cnxn = urllib2.urlopen(request) # pylint:disable-msg=E0602\n response_val = simplejson.loads( # pylint:disable-msg=E0602\n add_subs_cnxn.read())\n add_subs_cnxn.close()\n except urllib2.HTTPError: # pylint:disable-msg=E0602\n raise APIUseException(['Connection error.', '',\n 'Could not reach %s to get info.' 
% APP_ID])\n\n # Output may be a list, which is unhashable\n if response_val in ERROR_RESPONSES['getinfo'].keys():\n raise APIUseException(ERROR_RESPONSES['getinfo'][response_val])\n\n if (type(response_val) != list or len(response_val) != 2 or\n type(response_val[0]) != list):\n raise APIUseException('An unexpected error occurred.')\n\n calendars, verbose_freq = response_val\n return ['Your subscriptions will be updated %s.' % verbose_freq,\n '', 'Your current subscriptions are:'] + calendars\n\n\ndef MakeRequest(parsed_args):\n \"\"\"Attempts to perform a requested action via the API.\n\n This is intended to be used with the results of parsed arguments (via\n argparse). This will handle all authentication steps and will prompt\n the user for email and password. Only the actions 'add', 'frequency' and\n 'getinfo' are accepted and mapped to the relevant subfunctions with the\n relevant authentication cookie and payload data. If the action succeeds, a\n response message from the relevant subfunction is returned.\n\n Args:\n parsed_args: an ArgumentParser object.\n\n Returns:\n A list of lines or a string instance to be passed to PrintMessageList to\n alert the user of success\n\n Raises:\n APIUseException: when none of the predefined API actions have been\n sent with the parsed args\n \"\"\"\n auth_manager = APIAuthManager(raw_input('Email address: '))\n application_auth = auth_manager.GetApplicationAuth()\n\n api_actions = [('add', AddSubscription, 'calendar-link'),\n ('chg', ChangeFrequency, 'frequency'),\n ('getinfo', GetInfo, None)]\n\n for action, method, attr in api_actions:\n value = getattr(parsed_args, action, None)\n if value is None:\n continue\n\n method_args = (application_auth,)\n if attr is not None:\n method_args += ({attr: value},)\n\n return method(*method_args)\n\n raise APIUseException('Request attempted without valid arguments.')\n\n\ndef ImportOrFail(scope=locals()):\n \"\"\"Attempts to import the needed Python packages or fail with message.\n\n Since a command line tool, this is included to give the users an informative\n message that will allow them to get their environment configured correctly.\n This will attempt to import each library in REQUIRED_LIBRARIES. 
If any fail,\n a message describing how to install is returned.\n\n Args:\n scope: A scope dictionary (intended to be locals()) to add the imports to\n\n Returns:\n A tuple (success, msg_list) where\n success: Boolean indicating whether all imports succeeded\n msg_list: a list of strings to be printed by PrintMessageList in the case\n of import failure\n \"\"\"\n imports_needed = []\n for library in REQUIRED_LIBRARIES:\n try:\n scope[library] = __import__(library)\n except ImportError:\n imports_needed.append(library)\n\n if imports_needed:\n msg_list = ['Failed to import necessary libraries.', '',\n 'To successfully use the %s command line tool,' % APP_ID,\n 'consider installing the missing libraries via:']\n\n for library in imports_needed:\n msg_list.append('sudo pip install %s' % library)\n\n msg_list.extend(['', 'If you do not have pip installed, easy_install is a',\n 'worthy replacement, but you should get pip though.',\n '', 'If you have neither, visit;',\n 'http://www.pip-installer.org/en/latest/installing.html'])\n return (False, msg_list)\n\n return (True, None)\n\n\ndef GetParser(app_id=APP_ID):\n \"\"\"Create arg parser specific to the API to allow script to be used in CLI.\n\n Args:\n app_id: application ID, with default as to the global value\n\n Returns:\n An argparse.ArgumentParser object with mappings to the subfunctions relevant\n to the API as well as help text\n \"\"\"\n parser = argparse.ArgumentParser( # pylint:disable-msg=E0602\n prog=app_id, description='Command Line Tool for persistent-cal')\n subparsers = parser.add_subparsers(help='persistent-cal subcommands')\n\n parser_add = subparsers.add_parser('add', help='Add subscription to calendar')\n parser_add.add_argument(\n 'add', metavar='link', type=unicode,\n help='external calendar link to add as a subscription')\n\n parser_chg = subparsers.add_parser(\n 'chg', help='Change frequency of calendar updates')\n\n parser_chg.add_argument(\n 'chg', metavar='freq', type=int,\n help=('number of times per week that your personal '\n 'calendar will be synced with your subscribed calendars'))\n\n parser_getinfo = subparsers.add_parser(\n 'getinfo', help='Get existing calendar info')\n parser_getinfo.add_argument('getinfo', action='store_true')\n\n return parser\n\n\ndef main():\n args = GetParser().parse_args()\n\n try:\n result = MakeRequest(args)\n PrintMessageList(result)\n except (AuthException, APIUseException) as exc:\n PrintMessageList(exc.message)\n except KeyboardInterrupt:\n print('\\n')\n PrintMessageList(['Sorry I couldn\\'t be more helpful.',\n 'That hurts when you cancel me!'])\n\n\nparent_scope = locals()\nsuccess, msg_list = ImportOrFail(parent_scope)\nif not success:\n PrintMessageList(msg_list)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "id": "2546425", "language": "Python", "matching_score": 3.504387378692627, "max_stars_count": 1, "path": "scripts/persistent_cal.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\n\"\"\"Handler classes for all requests to persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\nimport json\nimport logging\n\n# App engine specific libraries\nfrom google.appengine.api import users\nfrom google.appengine.ext import deferred\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.webapp.util import login_required\nimport webapp2\n\n# App specific libraries\nfrom google_api_utils import InitCredentials\nfrom handler_utils import ExtendedHandler\nfrom library import UpdateString\nfrom library import UpdateUserSubscriptions\nfrom library import WhiteList\nfrom models import UserCal\nfrom time_utils import ConvertToInterval\n\n\nCREDENTIALS = None\n# split week in 56 3 hour windows, and assign the entire list based on these\n# windows (two day is really 42 hours, 14 units)\nFREQUENCIES = {'three-hrs': [val for val in range(56)],\n               'six-hrs': [2*val for val in range(56/2)],\n               'half-day': [4*val for val in range(56/4)],\n               'day': [8*val for val in range(56/8)],\n               'two-day': [14*val for val in range(56/14)],\n               'week': [56*val for val in range(56/56)]}\nMAX_RETRIES = 3\n\n\nclass MainHandler(ExtendedHandler):\n  \"\"\"Handles get requests to /; provides a UI for managing subscribed feeds.\"\"\"\n\n  @login_required\n  def get(self): # pylint:disable-msg=C0103\n    \"\"\"Main UI for persistent-cal.\n\n    If a user is not logged in, login_required will force them to log in before\n    reaching this page. Once they arrive, if they do not have a user calendar\n    in the datastore, one will be created for them and they will be set to\n    update once a week in the current interval.\n\n    The user's email, calendar subscriptions and frequency of updates are then\n    surfaced through the UI via a template.\n    \"\"\"\n\n    # guaranteed to be a user since login_required\n    current_user = users.get_current_user()\n    user_cal = ndb.Key(UserCal, current_user.user_id()).get()\n    if user_cal is None:\n      base_interval = ConvertToInterval(datetime.datetime.utcnow())\n      user_cal = UserCal(key=ndb.Key(UserCal, current_user.user_id()),\n                         owner=current_user,\n                         calendars=[],\n                         update_intervals=[base_interval])\n      user_cal.put()\n\n    # pylint:disable-msg=E1103\n    self.RenderResponse('index.html',\n                        id=current_user.email(),\n                        calendars=json.dumps(user_cal.calendars),\n                        frequency=UpdateString(user_cal.update_intervals))\n\n\nclass AddSubscription(ExtendedHandler):\n  \"\"\"Handles post requests to /add and will add a user calendar feed.\"\"\"\n\n  def post(self): # pylint:disable-msg=C0103\n    \"\"\"Handles post requests to /add.\n\n    First validates the calendar-link from the post request against a whitelist\n    of accepted calendar feed links and then validates the user. If either of\n    these fails, nothing in the datastore is updated and an appropriate error\n    message is returned to the caller. (The AJAX call will handle each of these\n    errors.)\n\n    Once validated, queries the datastore for the user calendar. If it does not\n    exist, one is created in the datastore. If it exists and the item can be\n    added, the user calendar is updated in the datastore. If it exists and the\n    feed is already subscribed to or the user has already reached four feeds,\n    no update will occur and an appropriate error message is returned to the\n    caller. 
(The AJAX call will handle each of these errors.)\n\n In the valid case, the main Google calendar is updated with the events from\n the new feed, the user calendar entry is updated in the datastore and the\n caller will receive the calendar subscription list. (The AJAX call will\n handle this JSON and update the list for the user.)\n \"\"\"\n link = self.request.get('calendar-link', '').strip()\n valid, _ = WhiteList(link)\n if not valid:\n self.response.out.write(json.dumps('whitelist:fail'))\n logging.info('whitelist:fail')\n return\n\n current_user = users.get_current_user()\n if current_user is None:\n self.response.out.write(json.dumps('no_user:fail'))\n logging.info('no_user:fail')\n return\n\n user_cal = ndb.Key(UserCal, current_user.user_id()).get()\n if user_cal is None:\n user_cal = UserCal(key=ndb.Key(UserCal, current_user.user_id()),\n owner=current_user,\n calendars=[link])\n elif link not in user_cal.calendars and len(user_cal.calendars) < 4:\n user_cal.calendars.append(link) # pylint:disable-msg=E1103\n else:\n if len(user_cal.calendars) >= 4: # pylint:disable-msg=E1103\n msg = 'limit:fail'\n else:\n # link must be in user_cal.calendars already\n msg = 'contained:fail'\n self.response.out.write(json.dumps(msg))\n logging.info(msg)\n return\n\n user_cal.put() # pylint:disable-msg=E1103\n\n global CREDENTIALS # pylint:disable-msg=W0603\n if CREDENTIALS is None:\n logging.info('Credentials initialized')\n CREDENTIALS = InitCredentials()\n\n # pylint:disable-msg=E1123\n UpdateUserSubscriptions(user_cal, credentials=CREDENTIALS, defer_now=True)\n # pylint:disable-msg=E1103\n self.response.out.write(json.dumps(user_cal.calendars))\n\n\nclass ChangeFrequency(ExtendedHandler):\n \"\"\"Handles put requests to /freq and will change frequency for a user.\"\"\"\n\n def put(self): # pylint:disable-msg=C0103\n \"\"\"Handles put requests to /freq.\n\n Validates the user, the user calendar, and the frequency value from the\n post request. If any of those three are not valid, nothing in the datastore\n is updated and an appropriate error message is returned to the caller. 
(The\n AJAX call will handle each of these errors.)\n\n If they are correct, the UserCal entry in the datastore will have the\n update_intervals column updated and the caller will receive the verbose\n description of the update as well as the frequency value for the\n <select> element.\n \"\"\"\n # Make sure change has been requested by a user before doing any work\n current_user = users.get_current_user()\n if current_user is None:\n self.response.out.write(json.dumps('no_user:fail'))\n logging.info('no_user:fail')\n return\n\n frequency = self.request.get('frequency', None)\n\n user_cal = ndb.Key(UserCal, current_user.user_id()).get()\n if frequency in FREQUENCIES and user_cal is not None:\n if user_cal.update_intervals: # pylint:disable-msg=E1103\n base_interval = user_cal.update_intervals[0] # pylint:disable-msg=E1103\n else:\n base_interval = ConvertToInterval(datetime.datetime.utcnow())\n\n update_intervals = [(base_interval + delta_val) % 56\n for delta_val in FREQUENCIES[frequency]]\n\n user_cal.update_intervals = update_intervals\n user_cal.put() # pylint:disable-msg=E1103\n self.response.out.write(UpdateString(update_intervals))\n else:\n if user_cal is None:\n msg = 'no_cal:fail'\n else:\n msg = 'wrong_freq:fail'\n self.response.out.write(json.dumps(msg))\n logging.info(msg)\n return\n\n\nclass GetInfoHandler(ExtendedHandler):\n \"\"\"Handles get requests to /getinfo and returns calendar & frequency info.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Handles get requests to /getinfo.\"\"\"\n current_user = users.get_current_user()\n if current_user is None:\n self.response.out.write(json.dumps('no_user:fail'))\n logging.info('no_user:fail')\n return\n\n user_cal = ndb.Key(UserCal, current_user.user_id()).get()\n if user_cal is None:\n self.response.out.write(json.dumps('no_cal:fail'))\n logging.info('no_cal:fail')\n return\n\n # pylint:disable-msg=E1103\n freq_data = json.loads(UpdateString(user_cal.update_intervals))\n user_info = json.dumps((user_cal.calendars, freq_data[0]))\n self.response.out.write(user_info)\n\n\nclass DeferredHandler(deferred.TaskHandler, ExtendedHandler):\n \"\"\"A webapp handler class that processes deferred invocations.\"\"\"\n\n def post(self): # pylint:disable-msg=C0103\n \"\"\"Custom post handler for deferred queue.\n\n Uses the run_from_request method from deferred.TaskHandler to attempt to run\n a deferred job. 
Uses the post wrapper defined in ExtendedHandler to handle\n any errors that may occur in run_from_request.\n \"\"\"\n try:\n retry_count = int(\n self.request.headers.get('X-AppEngine-TaskRetryCount'))\n logging.debug('Retry count: %d', retry_count)\n except (ValueError, TypeError):\n logging.debug('Getting retry count failed.')\n retry_count = 0\n\n if retry_count < MAX_RETRIES:\n self.run_from_request()\n else:\n raise deferred.PermanentTaskFailure('Exceeded number of retries.')\n\n\nclass OwnershipVerifyHandler(ExtendedHandler):\n \"\"\"Handles / as well as redirects for login required.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Serves a static HTML file with verification data.\"\"\"\n self.RenderResponse('googlef7560eebc24762bb.html')\n\n\nclass AboutHandler(ExtendedHandler):\n \"\"\"Serves the static about page.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Serves a static HTML file with an about page.\"\"\"\n self.RenderResponse('about.html')\n\n\nclass AboutRedirect(ExtendedHandler):\n \"\"\"Redirects to the correct about page.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Redirects to /about.\"\"\"\n self.redirect('/about')\n\n\nclass Throw404(ExtendedHandler):\n \"\"\"Catches all non-specified (404) requests.\"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Serves a static HTML file with a 404 page.\"\"\"\n self.error(404)\n self.RenderResponse('404.html')\n\n\nAPPLICATION = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/workers', DeferredHandler),\n ('/add', AddSubscription),\n ('/freq', ChangeFrequency),\n ('/getinfo', GetInfoHandler),\n ('/googlef7560eebc24762bb.html', OwnershipVerifyHandler),\n ('/about', AboutHandler),\n ('/about.html', AboutRedirect),\n ('/.*', Throw404),\n ], debug=True)\n", "id": "10999692", "language": "Python", "matching_score": 6.230465888977051, "max_stars_count": 1, "path": "main.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Handler for cron update requests made from persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\n\n# App engine specific libraries\nimport webapp2\n\n# App specific libraries\nfrom google_api_utils import InitCredentials\nfrom handler_utils import ExtendedHandler\nfrom library import MonthlyCleanup\nfrom library import UpdateUserSubscriptions\nfrom models import UserCal\nfrom time_utils import ConvertToInterval\n\n\nclass MainHandler(ExtendedHandler):\n \"\"\"Handles cron requests to /cron.\n\n This handler carries out updates for any user scheduled to get an update\n during that update interval.\n \"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Updates every three hours.\"\"\"\n # ('http://code.google.com/appengine/docs/python/tools/webapp/'\n # 'requestclass.html#Request_headers')\n # http://docs.webob.org/en/latest/reference.html#headers\n # \"Keys are case-insensitive.\"\n if 
self.request.headers.get('X-AppEngine-Cron', '') != 'true':\n # Check header for X-AppEngine-Cron: true\n # Don't run if not\n return\n\n now = datetime.datetime.utcnow()\n now_interval = ConvertToInterval(now)\n credentials = None\n\n current_users = UserCal.query(UserCal.update_intervals == now_interval)\n for user_cal in current_users:\n if user_cal.calendars:\n if credentials is None:\n credentials = InitCredentials()\n # pylint:disable-msg=E1123\n UpdateUserSubscriptions(user_cal, credentials=credentials,\n defer_now=True)\n\n\nclass CleanupHandler(ExtendedHandler):\n \"\"\"Handles cron requests to /cron-monthly.\n\n Cleans up any events older than three months by using MonthlyCleanup.\n \"\"\"\n\n def get(self): # pylint:disable-msg=C0103\n \"\"\"Updates once a month.\"\"\"\n if self.request.headers.get('X-AppEngine-Cron', '') != 'true':\n return\n\n now = datetime.datetime.utcnow()\n MonthlyCleanup(now.date(), defer_now=True) # pylint:disable-msg=E1123\n\n\nAPPLICATION = webapp2.WSGIApplication([\n ('/cron', MainHandler),\n ('/cron-monthly', CleanupHandler),\n ], debug=True)\n", "id": "8238198", "language": "Python", "matching_score": 3.2770159244537354, "max_stars_count": 1, "path": "cron.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Extended function library for request handlers for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\nimport json\nimport logging\nimport re\n\n# Third-party libraries\nfrom icalendar import Calendar\n\n# App engine specific libraries\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import urlfetch_errors\nfrom google.appengine.ext import ndb\nfrom google.appengine import runtime\n\n# App specific libraries\nfrom custom_exceptions import BadInterval\nfrom handler_utils import DeferFunctionDecorator\nfrom handler_utils import EmailAdmins\nfrom models import Event\nimport time_utils\n\n\nCALENDAR_ID = '<EMAIL>'\nRESPONSES = {1: ['once a week', 'week'],\n 4: ['every two days', 'two-day'],\n 7: ['once a day', 'day'],\n 14: ['twice a day', 'half-day'],\n 28: ['every six hours', 'six-hrs'],\n 56: ['every three hours', 'three-hrs']}\n\n\ndef UpdateString(update_intervals):\n \"\"\"Calculates a short and long message to represent frequency of updates.\n\n Args:\n update_intervals: A list of interval numbers (between 0 and 55) that\n represent the times an update will occur\n\n Returns:\n A two-tuple of the long and short message (respectively) corresponding to\n the frequency. 
This is intended to be sent via AJAX and hence the\n tuple is turned into json before being returned.\n\n Raises:\n BadInterval in the case that the length of update_intervals is not\n a key in the constant RESPONSES\n \"\"\"\n length = len(update_intervals)\n if length not in RESPONSES:\n raise BadInterval(length)\n else:\n return json.dumps(RESPONSES[length])\n\n\ndef WhiteList(link):\n \"\"\"Determines if a link is on the whitelist and transforms it if needed.\n\n Args:\n link: A url corresponding to a calendar feed\n\n Returns:\n A tuple (valid, transformed) where valid is a boolean which indicates\n whether the link is on the whitelist and transformed is an\n (possibly different) equivalent value of link which is used\n internally.\n \"\"\"\n # If WhiteList is updated, event parsing must be as well\n valid = False\n transformed = link\n # In rare cases of `ndb` failure, the `link` retrieved from the\n # datastore will not be the property type (string) and will instead\n # be _BaseValue.\n if isinstance(transformed, ndb.model._BaseValue):\n transformed = transformed.b_val\n\n pattern_tripit = ('^(?P<protocol>(http|https|webcal)://|)www.tripit.com/feed/'\n 'ical/private/[A-Za-z0-9-]+/tripit.ics$')\n tripit_match = re.match(pattern_tripit, link)\n if tripit_match is not None:\n valid = True\n\n protocol = tripit_match.group('protocol')\n transformed = 'https://{}'.format(link[len(protocol):])\n\n return valid, transformed\n\n\n@DeferFunctionDecorator\ndef MonthlyCleanup(relative_date):\n \"\"\"Deletes events older than three months.\n\n Will delete events from the datastore that are older than three months. First\n checks that the date provided is at most two days prior to the current one.\n\n NOTE: This would seem to argue that relative_date should not be provided, but\n we want to use the relative_date from the server that is executing the cron\n job, not the one executing the cleanup (as there may be some small\n differences). In the case that relative_date does not pass this check, we log\n and send and email to the admins, but do not raise an error. This is done so\n this can be removed from the task queue in the case of the invalid input.\n\n Args:\n relative_date: date provided by calling script. Expected to be current date.\n \"\"\"\n prior_date_day = relative_date.day\n\n prior_date_month = relative_date.month - 3\n if prior_date_month < 1:\n prior_date_year = relative_date.year - 1\n prior_date_month += 12\n else:\n prior_date_year = relative_date.year\n\n prior_date = datetime.date(year=prior_date_year,\n month=prior_date_month,\n day=prior_date_day)\n\n today = datetime.date.today()\n if today - relative_date > datetime.timedelta(days=2):\n msg = ('MonthlyCleanup called with bad date {relative_date} '\n 'on {today}.'.format(relative_date=relative_date, today=today))\n logging.info(msg)\n EmailAdmins(msg, defer_now=True) # pylint:disable-msg=E1123\n return\n\n prior_date_as_str = time_utils.FormatTime(prior_date)\n old_events = Event.query(Event.end_date <= prior_date_as_str)\n for event in old_events:\n event.delete()\n\n\n@DeferFunctionDecorator\ndef UpdateUpcoming(user_cal, upcoming, credentials=None):\n \"\"\"Updates the GCal inst. by deleting events removed from extern. 
calendar.\n\n If the new upcoming events list is different from that on the user_cal, it\n will iterate through the difference and address events that no longer belong.\n Such events would have been previously marked as upcoming (and stored in\n UserCal.upcoming) and would not have occurred by the time UpdateUpcoming was\n called. For such events, the user will be removed from the list of attendees.\n If there are other remaining users, the event will be updated, else it will be\n deleted from both the datastore and GCal.\n\n Args:\n user_cal: a UserCal object that will have upcoming events updated\n upcoming: a list of UID strings representing events in the subscribed feeds\n of the user that have not occurred yet (i.e. they are upcoming)\n credentials: An OAuth2Credentials object used to build a service object.\n In the case the credentials is the default value of None, future\n methods will attempt to get credentials from the default credentials.\n \"\"\"\n upcoming.sort()\n if user_cal.upcoming != upcoming:\n now = datetime.datetime.utcnow()\n for uid in user_cal.upcoming:\n if uid not in upcoming:\n event = ndb.Key(Event, uid).get()\n if event.end.to_datetime() > now: # pylint:disable-msg=E1103\n # If federated identity not set, User.__cmp__ only uses email\n event.attendees.remove(user_cal.owner) # pylint:disable-msg=E1103\n if not event.attendees: # pylint:disable-msg=E1103\n event.delete(credentials=credentials) # pylint:disable-msg=E1103\n else:\n event.update(credentials=credentials) # pylint:disable-msg=E1103\n\n user_cal.upcoming = upcoming\n user_cal.put()\n\n\n# pylint:disable-msg=R0913\n@DeferFunctionDecorator\ndef UpdateUserSubscriptions(user_cal, credentials=None, links=None,\n link_index=0, upcoming=None, last_used_uid=None):\n \"\"\"Updates a list of calendar subscriptions for a user.\n\n Loops through each subscription URL in links (or user_cal.calendars) and calls\n UpdateSubscription for each URL. Keeps a list of upcoming events which will\n be updated by UpdateUpcoming upon completion. If the application encounters\n one of the two DeadlineExceededError's while the events are being processed,\n the function calls itself, but uses the upcoming, link_index and\n last_used_uid keyword arguments to save the current processing state.\n\n Args:\n user_cal: a UserCal object that will have upcoming subscriptions updated\n credentials: An OAuth2Credentials object used to build a service object.\n In the case the credentials is the default value of None, future\n methods will attempt to get credentials from the default credentials.\n links: a list of URLs to the .ics subscription feeds. This is None by\n default, in which case user_cal.calendars is used.\n link_index: a placeholder index within the list of links which is 0 by\n default. This is intended to be passed in only by calls from\n UpdateUserSubscriptions.\n upcoming: a list of UID strings representing events in the subscribed feeds\n of the user that have not occurred yet (i.e. they are upcoming). By\n default this value is None and transformed to [] within the function.\n last_used_uid: a placeholder UID which is None by default. This is intended\n to be passed in only by calls from UpdateUserSubscriptions. 
In the case\n it is not None, it will serve as a starting index within the set of UIDs\n from the first subscription (first element of links) that is updated.\n \"\"\"\n if links is None:\n links = user_cal.calendars\n\n if link_index > 0:\n links = links[link_index:]\n upcoming = upcoming or []\n\n # Set default values for link index and last used uid variables. These\n # are used to to pick up where the loop left off in case the task encounters\n # one of the DeadlineExceededError's.\n index = 0\n uid = None\n\n try:\n for index, link in enumerate(links):\n # In the case last_used_uid is not None, we may be picking up in the\n # middle of the feed for the first link in {links}\n if index == 0 and last_used_uid is not None:\n uid_generator = UpdateSubscription(link, user_cal.owner,\n credentials=credentials,\n start_uid=last_used_uid)\n else:\n uid_generator = UpdateSubscription(link, user_cal.owner,\n credentials=credentials)\n\n for uid, is_upcoming, failed in uid_generator:\n if is_upcoming:\n upcoming.append(uid)\n elif failed:\n msg = 'silently failed operation on {uid} from {link}'.format(\n uid=uid, link=link)\n logging.info(msg)\n EmailAdmins(msg, defer_now=True) # pylint:disable-msg=E1123\n except (runtime.DeadlineExceededError, urlfetch_errors.DeadlineExceededError):\n # NOTE: upcoming has possibly been updated inside the try statement\n # pylint:disable-msg=E1123\n UpdateUserSubscriptions(user_cal, credentials=credentials, links=links,\n link_index=index, upcoming=upcoming,\n last_used_uid=uid, defer_now=True)\n return\n\n # If the loop completes without timing out\n # pylint:disable-msg=E1123\n UpdateUpcoming(user_cal, upcoming, credentials=credentials, defer_now=True)\n\n\ndef UpdateSubscription(link, current_user, credentials=None, start_uid=None):\n \"\"\"Updates the GCal instance with the events in link for the current_user.\n\n Args:\n link: Link to calendar feed being subscribed to\n current_user: a User instance corresponding to the user that is updating\n credentials: An OAuth2Credentials object used to build a service object.\n In the case the credentials is the default value of None, future\n methods will attempt to get credentials from the default credentials.\n start_uid: a placeholder UID which is None by default. This is intended\n to be passed in only by calls from UpdateUserSubscriptions. In the case\n it is not None, it will serve as a starting index within the set of\n event UIDs from {link}.\n\n Returns:\n A generator instance which yields tuples (uid, is_upcoming, failed) tuples\n where uid is the id of an event, is_upcoming is a boolean that is True\n if and only if the event has not occurred yet (i.e. 
is upcoming) and\n failed is a boolean that is True if and only if the three attempts to\n add or update the event fail.\n \"\"\"\n logging.info('UpdateSubscription called with: {!r}'.format(locals()))\n\n valid, link = WhiteList(link)\n if not valid:\n # Do nothing if not on the whitelist\n # http://www.python.org/dev/peps/pep-0255/ (Specification: Return)\n return\n\n now = datetime.datetime.utcnow()\n\n import_feed = urlfetch.fetch(link, deadline=60)\n\n # In the case of failure, do nothing and notify Admin.\n if import_feed.status_code != 200:\n error_msg = '{} resulted in non-200 status code: {:d}'.format(\n link, import_feed.status_code)\n logging.debug(error_msg)\n EmailAdmins(error_msg, defer_now=True) # pylint:disable-msg=E1123\n return\n\n ical = Calendar.from_ical(import_feed.content)\n\n start_index = 0\n if start_uid is not None:\n # pylint:disable-msg=E1103\n uid_list = [component.get('uid', '') for component in ical.walk()]\n if start_uid in uid_list:\n start_index = uid_list.index(start_uid)\n\n for component in ical.walk()[start_index:]: # pylint:disable-msg=E1103\n if component.name != 'VEVENT':\n msg = ('iCal at {link} has unexpected event type '\n '{component.name}'.format(link=link, component=component))\n logging.info(msg)\n if component.name != 'VCALENDAR':\n EmailAdmins(msg, defer_now=True) # pylint:disable-msg=E1123\n else:\n event, failed = Event.from_ical_event(component, current_user,\n credentials=credentials)\n\n uid = event.key.id()\n if failed:\n yield (uid, False, True)\n else:\n is_upcoming = event.end.to_datetime() > now\n yield (uid, is_upcoming, False)\n", "id": "6561128", "language": "Python", "matching_score": 4.770837783813477, "max_stars_count": 1, "path": "library.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Model classes for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\nimport logging\n\n# App engine specific libraries\nfrom google.appengine.ext import ndb\n\n# App specific libraries\nfrom custom_exceptions import InappropriateAPIAction\nfrom custom_exceptions import MissingUID\nfrom custom_exceptions import UnexpectedDescription\nfrom google_api_utils import AttemptAPIAction\nimport time_utils\n\n\nCALENDAR_ID = '<EMAIL>'\n\n\nclass TimeKeyword(ndb.Model): # pylint:disable-msg=R0904\n \"\"\"Model for representing a time with an associated keyword as well.\n\n This is in place because the API specification calls for times to be\n represented as {'dateTime': '2012-01-01T12:00:00.000Z'} or\n {'date': '2012-01-01'}, so both the string value and the keyword are\n useful to keep around.\n \"\"\"\n # pylint:disable-msg=E1101\n keyword = ndb.StringProperty(required=True)\n value = ndb.StringProperty(required=True)\n\n @classmethod\n # pylint:disable-msg=C0103\n def from_ical_event(cls, ical_event, ical_attr):\n \"\"\"Class method to parse a TimeKeyword from an ical_event and keyword.\n\n It creates a new 
instance, parsing the value from the ical_event using the\n ical_attr provided.\n\n Args:\n ical_event: an icalendar.cal.Event object to be parsed\n ical_attr: The attribute to be parsed from the iCal instance\n\n Returns:\n An instance of TimeKeyword from the parsing\n \"\"\"\n value = time_utils.FormatTime(ical_event.get(ical_attr).dt)\n keyword = 'dateTime' if value.endswith('Z') else 'date'\n return cls(keyword=keyword, value=value)\n\n def as_dict(self): # pylint:disable-msg=C0103\n \"\"\"Returns the TimeKeyword as a dictionary with keyword as key for value.\"\"\"\n return {self.keyword: self.value}\n\n def to_datetime(self): # pylint:disable-msg=C0103\n \"\"\"Returns the TimeKeyword as a datetime.datetime.\n\n This will likely throw an error if keyword is not one of date or dateTime.\n\n Returns:\n A datetime.datetime instance parsed from the values\n \"\"\"\n time_parse = None\n if self.keyword == 'date':\n time_parse = '%Y-%m-%d'\n elif self.keyword == 'dateTime':\n time_parse = '%Y-%m-%dT%H:%M:%S.000Z'\n\n return datetime.datetime.strptime(self.value, time_parse)\n\n def __eq__(self, other):\n \"\"\"Custom equality function using only the attributes.\n\n Args:\n other: The other value to be compared against\n \"\"\"\n if not isinstance(other, TimeKeyword):\n return False\n return self.keyword == other.keyword and self.value == other.value\n\n def __ne__(self, other):\n \"\"\"Custom negation of equality function using only the attributes.\n\n Args:\n other: The other value to be compared against\n \"\"\"\n return not self.__eq__(other)\n\n def __repr__(self):\n return 'TimeKeyword({!r})'.format(self.as_dict())\n\n\ndef ConvertedDescription(ical_event):\n \"\"\"Parses and converts a description from an iCal event.\n\n Args:\n ical_event: an icalendar.cal.Event object to be parsed\n\n Returns:\n Two strings description and location parsed from {ical_event}\n \"\"\"\n uid = unicode(ical_event.get('uid', ''))\n description = unicode(ical_event.get('description', ''))\n location = unicode(ical_event.get('location', ''))\n\n # The phrase 'No destination specified' does not match its\n # counterpart in the description, so we transform {location}.\n if location == 'No destination specified':\n location = 'an unspecified location'\n\n # Check description is formed as we expect\n if not uid.startswith('item-'):\n target = ' is in {} '.format(location)\n if description.count(target) != 1:\n raise UnexpectedDescription(description)\n\n # remove name from the description\n description = 'In {location} {description}'.format(\n location=location, description=description.split(target)[1])\n\n return description, location\n\n\nclass Event(ndb.Model): # pylint:disable-msg=R0904\n \"\"\"Holds data for a calendar event (including shared attendees).\"\"\"\n # pylint:disable-msg=E1101\n description = ndb.TextProperty(default='')\n start = ndb.StructuredProperty(TimeKeyword, required=True)\n end = ndb.StructuredProperty(TimeKeyword, required=True)\n location = ndb.StringProperty(default='')\n summary = ndb.StringProperty(required=True)\n attendees = ndb.UserProperty(repeated=True)\n gcal_edit = ndb.StringProperty()\n sequence = ndb.IntegerProperty(default=0)\n\n def insert(self, credentials=None): # pylint:disable-msg=C0103\n \"\"\"Will insert the event into GCal and then put the values into datastore.\n\n Args:\n credentials: An OAuth2Credentials object used to build a service object.\n In the case the credentials is the default value of None, future\n methods will attempt to get credentials from the 
default credentials.\n\n    Returns:\n      A boolean value indicating whether the operation was successful.\n\n    Raises:\n      InappropriateAPIAction in the case that a corresponding GCal event has\n        already been inserted\n    \"\"\"\n    if self.gcal_edit is not None:\n      raise InappropriateAPIAction('Insert attempted when id already set.')\n\n    event_data = self.as_dict()\n    event_data.pop('id')\n\n    inserted_event = AttemptAPIAction('insert', credentials=credentials,\n                                      calendarId=CALENDAR_ID, body=event_data)\n    if inserted_event is None:\n      return False # failed\n\n    self.gcal_edit = inserted_event['id']\n    self.sequence = inserted_event.get('sequence', 0)\n    self.put()\n\n    return True\n\n  def update(self, credentials=None): # pylint:disable-msg=C0103\n    \"\"\"Will update the event in GCal and then put updated values to datastore.\n\n    Args:\n      credentials: An OAuth2Credentials object used to build a service object.\n        In the case the credentials is the default value of None, future\n        methods will attempt to get credentials from the default credentials.\n\n    Returns:\n      A boolean value indicating whether the operation was successful.\n\n    Raises:\n      InappropriateAPIAction in the case that there is no GCal event to update\n    \"\"\"\n    if self.gcal_edit is None:\n      raise InappropriateAPIAction('Update attempted when id not set.')\n\n    log_msg = '{} updated'.format(self.gcal_edit)\n    updated_event = AttemptAPIAction('update', log_msg=log_msg,\n                                     credentials=credentials,\n                                     calendarId=CALENDAR_ID,\n                                     eventId=self.gcal_edit,\n                                     body=self.as_dict())\n\n    if updated_event is None:\n      return False # failed\n\n    sequence = updated_event.get('sequence', None)\n    if sequence is not None:\n      self.sequence = sequence\n    self.put()\n\n    return True\n\n  # pylint:disable-msg=C0103,W0221\n  def delete(self, credentials=None):\n    \"\"\"Will delete the event in GCal and then delete from the datastore.\n\n    Args:\n      credentials: An OAuth2Credentials object used to build a service object.\n        In the case the credentials is the default value of None, future\n        methods will attempt to get credentials from the default credentials.\n\n    Raises:\n      InappropriateAPIAction in the case that there is no GCal event to delete\n    \"\"\"\n    if self.gcal_edit is None:\n      raise InappropriateAPIAction('Delete attempted when id not set.')\n\n    log_msg = '{} deleted'.format(self.gcal_edit)\n    delete_response = AttemptAPIAction('delete', log_msg=log_msg,\n                                       credentials=credentials,\n                                       calendarId=CALENDAR_ID,\n                                       eventId=self.gcal_edit)\n    if delete_response is None:\n      return\n\n    self.key.delete()\n\n  @classmethod\n  # pylint:disable-msg=C0103\n  def from_ical_event(cls, ical_event, current_user, credentials=None):\n    \"\"\"Class method to update/add an event from an ical_event.\n\n    It either retrieves an existing instance and updates it, or if no such\n    object exists, creates a new one with the attributes from the ical_event.\n\n    Args:\n      ical_event: an icalendar.cal.Event object to be parsed\n      current_user: a User instance corresponding to the user that is updating\n      credentials: An OAuth2Credentials object used to build a service object.\n        In the case the credentials is the default value of None, future\n        methods will attempt to get credentials from the default credentials.\n\n    Returns:\n      A pair event, failed where event is an Event object that has been inserted\n      or updated and failed is a boolean indicating failure (or lack of).\n\n    Raises:\n      MissingUID in the case that there is no UID in the iCal event\n    \"\"\"\n    uid = ical_event.get('uid', None)\n    if uid is None:\n      raise 
MissingUID(ical_event)\n # convert from type icalendar.prop.vText to unicode\n uid = unicode(uid)\n\n event_data = {}\n summary = ical_event.get('summary', None)\n if not summary:\n summary = '(No Title)'\n # convert from type icalendar.prop.vText to unicode\n event_data['summary'] = unicode(summary)\n\n description, location = ConvertedDescription(ical_event)\n event_data['description'] = description\n event_data['location'] = location\n\n event_data['start'] = TimeKeyword.from_ical_event(ical_event, 'dtstart')\n event_data['end'] = TimeKeyword.from_ical_event(ical_event, 'dtend')\n\n event = ndb.Key(cls, uid).get()\n if event is not None:\n changed = False\n for attr, value in event_data.iteritems():\n if getattr(event, attr) != value:\n setattr(event, attr, value)\n logging.info('{attr} changed for {uid}'.format(attr=attr, uid=uid))\n changed = True\n\n if current_user not in event.attendees: # pylint:disable-msg=E1103\n event.attendees.append(current_user) # pylint:disable-msg=E1103\n logging.info('attendees changed for {uid}'.format(uid=uid))\n changed = True\n\n success = True\n if changed:\n # pylint:disable-msg=E1103\n success = event.update(credentials=credentials)\n return event, not success\n else:\n # pylint:disable-msg=W0142\n event = cls(key=ndb.Key(cls, uid), attendees=[current_user], **event_data)\n success = event.insert(credentials=credentials)\n return event, not success\n\n @ndb.ComputedProperty\n def end_date(self): # pylint:disable-msg=C0103\n \"\"\"Derived property that turns end into a date string.\"\"\"\n end_datetime = self.end.to_datetime()\n end_date = end_datetime.date()\n return end_date.strftime('%Y-%m-%d')\n\n def attendee_emails(self): # pylint:disable-msg=C0103\n \"\"\"Returns a list of dictionaries corresponding to attendee emails.\"\"\"\n return [{'email': attendee.email()} for attendee in self.attendees]\n\n def as_dict(self): # pylint:disable-msg=C0103\n \"\"\"Returns the Event as a dictionary corresponding to the API spec.\n\n Returns:\n A dictionary to be used with the API client library representing all\n the data in the model object.\n \"\"\"\n return {'start': self.start.as_dict(),\n 'end': self.end.as_dict(),\n 'summary': self.summary,\n 'location': self.location,\n 'description': self.description,\n 'id': self.gcal_edit,\n 'sequence': self.sequence,\n 'attendees': self.attendee_emails()}\n\n def __repr__(self):\n return 'Event(name={})'.format(self.key.id())\n\n\nclass UserCal(ndb.Model): # pylint:disable-msg=R0903\n \"\"\"Holds data for a calendar event (including shared owners).\"\"\"\n # pylint:disable-msg=E1101\n owner = ndb.UserProperty(required=True)\n calendars = ndb.StringProperty(repeated=True)\n update_intervals = ndb.IntegerProperty(repeated=True)\n upcoming = ndb.StringProperty(repeated=True)\n\n def put(self): # pylint:disable-msg=C0103\n \"\"\"Customized put function that first sorts the list in upcoming.\"\"\"\n self.upcoming = sorted(set(self.upcoming))\n super(UserCal, self).put()\n\n def __repr__(self):\n return 'UserCal(owner={owner},name={name})'.format(owner=self.owner.email(),\n name=self.key.id())\n", "id": "1915143", "language": "Python", "matching_score": 5.378830909729004, "max_stars_count": 1, "path": "models.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for to remove who and event_data from Event.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import os\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> # or sys.path.append(os.getcwd())\ns~persistent-cal> os.environ['HTTP_HOST'] = 'persistent-cal.appspot.com'\ns~persistent-cal> from db_migration_2012_07_02 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport json\n\n# App engine specific libraries\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nfrom google.appengine.ext import ndb\n\n# App specific libraries\nimport models\n\n\nclass TimeKeyword(db.Model): # pylint:disable-msg=R0904\n \"\"\"Dummy TimeKeyword model for migration.\"\"\"\n keyword = db.StringProperty(required=True)\n value = db.StringProperty(required=True)\n\n\nclass TimeKeywordProperty(db.Property):\n \"\"\"Property for representing Dummy TimeKeyword object.\"\"\"\n\n data_type = TimeKeyword\n\n # pylint:disable-msg=C0103\n def get_value_for_datastore(self, model_instance):\n time_val = super(TimeKeywordProperty, self).get_value_for_datastore(\n model_instance)\n return json.dumps(time_val.as_dict())\n\n # pylint:disable-msg=C0103\n def make_value_from_datastore(self, value):\n try:\n value_dict = json.loads(value)\n if isinstance(value_dict, dict) and len(value_dict) == 1:\n key = value_dict.keys()[0]\n return TimeKeyword(keyword=key,\n value=value_dict[key])\n except ValueError:\n pass\n return None\n\n # pylint:disable-msg=C0103\n def validate(self, value):\n if value is not None and not isinstance(value, TimeKeyword):\n raise db.BadValueError(\n 'Property {name} must be convertible to a '\n 'TimeKeyword instance ({value}).'.format(name=self.name, value=value))\n return super(TimeKeywordProperty, self).validate(value)\n\n # pylint:disable-msg=C0103\n def empty(self, value):\n return not value\n\n\nclass Event(db.Model): # pylint:disable-msg=R0904\n \"\"\"Dummy Event model for migration.\"\"\"\n description = db.TextProperty(default='')\n start = TimeKeywordProperty(required=True)\n end = TimeKeywordProperty(required=True)\n location = db.StringProperty(default='')\n summary = db.StringProperty(required=True)\n attendees = db.ListProperty(users.User, required=True)\n gcal_edit = db.StringProperty()\n sequence = db.IntegerProperty(default=0)\n\n\ndef TransformEvent(event):\n \"\"\"Takes Event object to new specification.\"\"\"\n uid = event.key().name()\n end = models.TimeKeyword(keyword=event.end.keyword,\n value=event.end.value)\n start = models.TimeKeyword(keyword=event.start.keyword,\n value=event.start.value)\n\n new_event = models.Event(key=ndb.Key(models.Event, uid),\n description=event.description,\n start=start,\n end=end,\n location=event.location,\n summary=event.summary,\n attendees=event.attendees,\n gcal_edit=event.gcal_edit,\n sequence=event.sequence)\n\n return new_event\n\n\ndef UpdateEvents():\n \"\"\"Updates events.\"\"\"\n events = Event.all()\n for event in events:\n new_event = 
TransformEvent(event)\n new_event.put()\n", "id": "3520242", "language": "Python", "matching_score": 4.25601863861084, "max_stars_count": 1, "path": "migrations/db_migration_2012_07_02.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Model Update on 2012-04-26.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_04_26 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport json\n\n# App specific libraries\nfrom models import Event\nfrom models import TimeKeyword\nfrom models import UserCal\n\n\ndef GetUserDict():\n \"\"\"Gets a user dictionary based on current state of UserCal DB.\"\"\"\n user_dict = {}\n\n user_cals = UserCal.all()\n for user_cal in user_cals:\n user = user_cal.owner\n\n uid = user.user_id()\n if uid in user_dict:\n raise Exception('Key collision: %s' % uid)\n\n user_dict[uid] = user\n\n return user_dict\n\n\ndef TransformEventData(event, user_dict):\n \"\"\"Takes Event object to new specification.\"\"\"\n # First make a copy\n new_event = Event(key_name=event.key().name(),\n who=event.who,\n event_data=event.event_data,\n end_date=event.end_date,\n gcal_edit=event.gcal_edit)\n\n\n event_data = json.loads(event.event_data)\n\n # Add in new (also non-required) attributes\n new_event.description = event_data['description']\n new_event.location = event_data['location']\n new_event.summary = event_data['summary']\n\n start = event_data['start']\n if not isinstance(start, dict) or len(start) != 1:\n raise Exception('Start not singleton dictionary')\n key = start.keys()[0]\n start_tkw = TimeKeyword(keyword=key, value=start[key])\n new_event.start = start_tkw\n\n end = event_data['end']\n if not isinstance(end, dict) or len(end) != 1:\n raise Exception('End not singleton dictionary')\n key = end.keys()[0]\n end_tkw = TimeKeyword(keyword=key, value=end[key])\n new_event.end = end_tkw\n\n new_event.attendees = [user_dict[uid] for uid in event.who]\n\n return new_event\n\n\ndef UpdateEvents():\n user_dict = GetUserDict()\n\n events = Event.all()\n for event in events:\n new_event = TransformEventData(event, user_dict)\n new_event.put()\n", "id": "9440977", "language": "Python", "matching_score": 4.867071151733398, "max_stars_count": 1, "path": "migrations/db_migration_2012_04_26.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for to remove who and event_data from Event.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_04_30 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\nfrom models import Event\n\n\ndef TransformEvent(event):\n \"\"\"Takes Event object to new specification.\"\"\"\n new_location = event.location\n if new_location == 'None':\n new_location = ''\n new_event = Event(key_name=event.key().name(),\n description=event.description,\n start=event.start,\n end=event.end,\n location=new_location,\n summary=event.summary,\n attendees=event.attendees,\n gcal_edit=event.gcal_edit)\n\n return new_event\n\n\ndef UpdateEvents():\n events = Event.all()\n for event in events:\n new_event = TransformEvent(event)\n new_event.put()\n", "id": "11797955", "language": "Python", "matching_score": 2.292163372039795, "max_stars_count": 1, "path": "migrations/db_migration_2012_04_30.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Model Update on 2012-04-25.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_04_25 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\nNote:\n We may move gcal_edit into event_data as the 'id' key, but not here.\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport json\n\n# App engine specific libraries\nfrom google.appengine.ext import db\n\n# App specific libraries\nfrom library import JsonAscii\nfrom models import Event\n\n\ndef TransformEventData(event_data):\n \"\"\"Takes Event object to new specification.\"\"\"\n new_event_data = {}\n\n new_event_data['summary'] = event_data['summary']\n new_event_data['description'] = event_data['description']\n\n # Where\n new_event_data['location'] = event_data['location']\n\n # When\n start = event_data['when:from']\n if start.endswith('Z'):\n new_event_data['start'] = {'dateTime': start}\n else:\n new_event_data['start'] = {'date': start}\n\n end = event_data['when:to']\n if end.endswith('Z'):\n new_event_data['end'] = {'dateTime': end}\n else:\n new_event_data['end'] = {'date': end}\n\n return new_event_data\n\n\ndef UpdateEvents():\n events = Event.all()\n for event in events:\n event_data = json.loads(event.event_data)\n new_event_data = TransformEventData(event_data)\n\n event.event_data_old = db.Text(JsonAscii(event_data))\n event.event_data = db.Text(JsonAscii(new_event_data))\n\n 
event.put()\n", "id": "6406193", "language": "Python", "matching_score": 3.5863564014434814, "max_stars_count": 1, "path": "migrations/db_migration_2012_04_25.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2011 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Model Update on 2011-12-22.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2011_12_22 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\nNote:\n Since pre-migration, each event in the DB will not have an end_date. In\n order to account for this, we temporarily change the model from\n\n end_date = db.StringProperty(required=True)\n\n to\n\n end_date = db.StringProperty(default='01-01-1970', required=True)\n\n for the purposes of this migration.\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\nimport json\n\n# App engine specific libraries\nfrom google.appengine.ext import db\n\n# App specific libraries\nfrom library import JsonAscii\nfrom models import Event\n\n\ndef StringToDayString(time_as_str):\n \"\"\"Takes time as string (date or datetime) and returns date as string.\"\"\"\n time_parse = '%Y-%m-%d'\n try:\n converted_val = datetime.datetime.strptime(time_as_str, time_parse)\n return time_as_str\n except ValueError:\n pass\n\n time_parse += 'T%H:%M:%S.000Z'\n try:\n converted_val = datetime.datetime.strptime(time_as_str, time_parse)\n converted_val = converted_val.date()\n return converted_val.strftime('%Y-%m-%d')\n except ValueError:\n pass\n\n raise Exception('StringToDayString failed with %s' % time_as_str)\n\n\ndef UpdateEvents():\n events = Event.all()\n for event in events:\n event_data = json.loads(event.event_data)\n\n # removing email, irrelevant\n event_data.pop('email', None)\n event.event_data = db.Text(JsonAscii(event_data))\n\n # adding new column 'end_date'\n end_date = StringToDayString(event_data['when:to'])\n event.end_date = end_date\n\n event.put()\n", "id": "7568418", "language": "Python", "matching_score": 2.1319804191589355, "max_stars_count": 1, "path": "migrations/db_migration_2011_12_22.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Time utility library for persistent-cal with no App Engine depencies.\n\nProvides time related parsing and 
conversion functions.\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\n\n\ndef ConvertToInterval(timestamp):\n \"\"\"Converts a datetime timestamp to a time interval for a cron job.\n\n Args:\n timestamp: a datetime.datetime object\n\n Returns:\n A value between 0 and 55 corresponding to the interval the timestamp\n falls in. In this calculation, 12am on Monday is interval 0 and each\n interval lasts 3 hours.\n \"\"\"\n # In datetime, Monday is day 0, sunday is day 6\n # Since 8 intervals in a day, multiply by 8. Round up hours.\n interval = 8*timestamp.weekday() + timestamp.hour/3 + 1\n\n # If we are exactly on an hour that is a multiple of three\n # we do not wish to round up since floor(x) == ceil(x), contrary\n # to all other cases where ceil(x) == floor(x) + 1\n relative_seconds = sum([3600*(timestamp.hour % 3 == 0),\n 60*timestamp.minute,\n timestamp.second,\n timestamp.microsecond/1000.0])\n if relative_seconds < 300: # under 5 minutes past schedule\n interval -= 1\n\n return interval % 56\n\n\ndef FormatTime(time_value):\n \"\"\"Takes a datetime object and returns a formatted time stamp.\n\n Args:\n time_value: a datetime.datetime or datetime.date object\n\n Returns:\n A string value of the datetime object formatted according to the values\n set in time_parse below\n \"\"\"\n # Fails if not datetime.datetime or datetime.date\n\n # strftime('%Y-%m-%dT%H:%M:%S.000Z') for datetime\n # strftime('%Y-%m-%d') for date\n\n # Default TZ is UTC/GMT (as is TZ in GCal)\n time_parse = '%Y-%m-%d'\n if isinstance(time_value, datetime.datetime):\n time_parse += 'T%H:%M:%S.000Z'\n return time_value.strftime(time_parse)\n elif isinstance(time_value, datetime.date):\n return time_value.strftime(time_parse)\n", "id": "3641981", "language": "Python", "matching_score": 1.6910194158554077, "max_stars_count": 1, "path": "time_utils.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Exceptions module for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\nclass Error(Exception):\n \"\"\"Base error class for library functions.\"\"\"\n\n\nclass BadInterval(Error):\n \"\"\"Error corresponding to an unanticipated number of update intervals.\"\"\"\n\n\nclass InappropriateAPIAction(Error):\n \"\"\"Error corresponding to an insert that should not be occurring.\"\"\"\n\n\nclass CredentialsLoadError(Error):\n \"\"\"Error when credentials are not loaded correctly from a specified file.\"\"\"\n\n\nclass MissingUID(Error):\n \"\"\"Error corresponding to missing UID in an event.\"\"\"\n\n\nclass UnexpectedDescription(Error):\n \"\"\"Error corresponding to an unexpected event description.\"\"\"\n", "id": "11380556", "language": "Python", "matching_score": 1.7870570421218872, "max_stars_count": 1, "path": "custom_exceptions.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may 
not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Tool for kicking off an update at a specific interval.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from force_update_intervals import ForceUpdate\ns~persistent-cal> now_intervals = [3, 4] # integers between 0 and 55 inclusive\ns~persistent-cal> ForceUpdate(now_intervals)\n\nNote:\n This is intended to be used when an update or set of updates fail and a bug\n is fixed that allows those updates to work.\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport os\n\n# App specific libraries\nfrom google_api_utils import InitCredentials\nfrom library import UpdateUserSubscriptions\nfrom models import UserCal\n\n\nCREDENTIALS = InitCredentials()\n\n\nos.environ['HTTP_HOST'] = 'persistent-cal.appspot.com'\n\n\ndef ForceUpdate(now_intervals):\n \"\"\"Forces an update outside of a cron job for a list of update intervals.\"\"\"\n legitimate_intervals = list(set(range(56)).intersection(now_intervals))\n matching_users = UserCal.query(\n UserCal.update_intervals.IN(legitimate_intervals))\n for user_cal in matching_users:\n # pylint:disable-msg=E1123\n UpdateUserSubscriptions(user_cal, credentials=CREDENTIALS, defer_now=True)\n print(user_cal)\n", "id": "3686795", "language": "Python", "matching_score": 3.364341974258423, "max_stars_count": 1, "path": "scripts/force_update_intervals.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Model Update on 2012-04-26.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_04_26_part3 import UpdateUserCals\ns~persistent-cal> UpdateUserCals()\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# App specific libraries\nfrom models import UserCal\n\n\ndef UpdateUserCals():\n user_cals = UserCal.all()\n for user_cal in user_cals:\n # Since we over-wrote put, it will sort itself\n user_cal.put()\n", "id": "8842097", "language": "Python", "matching_score": 2.194147825241089, "max_stars_count": 1, "path": "migrations/db_migration_2012_04_26_part3.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Calendar v2 to v3 upgrade on 2012-03-03.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_03_03 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\nNote:\n We are adding a gcal_edit_old entry, because the gcal_edit entry will\n be removed eventually\n\n gcal_edit_old = db.StringProperty()\n\n for the purposes of this migration.\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# App specific libraries\nfrom models import Event\n\n\ndef UpdateEvents():\n events = Event.all()\n for event in events:\n event.gcal_edit_old = event.gcal_edit\n event.put()\n", "id": "6556670", "language": "Python", "matching_score": 4.1889119148254395, "max_stars_count": 1, "path": "migrations/db_migration_2012_03_03.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for eliminating Event.gcal_edit_old value\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_04_12 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\nfrom models import Event\n\n\ndef UpdateEvents():\n events = Event.all()\n for event in events:\n # Edit to the model will simply remove the field\n new_event = Event(key_name=event.key().name(),\n who=event.who,\n event_data=event.event_data,\n end_date=event.end_date,\n gcal_edit=event.gcal_edit)\n new_event.put()\n", "id": "8588889", "language": "Python", "matching_score": 1.5044423341751099, "max_stars_count": 1, "path": "migrations/db_migration_2012_04_12.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Debugging 
tool to get current state of main calendar.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport json\n\n# Third-party libraries\nimport gdata.calendar.client\nimport gdata.gauth\n\n# App specific libraries\n# from library import InitGCAL\nfrom secret_key import CONSUMER_KEY\nfrom secret_key import CONSUMER_SECRET\nfrom secret_key import TOKEN\nfrom secret_key import TOKEN_SECRET\n\n\ndef main():\n \"\"\"Processes main calendar event feed and writes some event data to a file.\n\n get_calendar_event_feed uses a default desired_class of\n `gdata.calendar.data.CalendarEventFeed`\n hence the get_feed request uses this class to convert the response\n\n In order to retrieve the full feed, the total_results field is analyzed\n from the first request and a new request is sent with max-results set in\n the query to the total number of events.\n\n The CalendarEventFeed class has an event field which holds a list of\n CalendarEventEntry classes. Each CalendarEventEntry class has a when field\n and a who field (which we use) and a get_edit_link member function which we\n also use.\n\n In the result, we use the href from the edit link as a key for each event and\n write the start and end times from the when field as well as all the email\n addresses of the attendees from the who field to a dictionary. This dictionary\n is then written to a file as serialized JSON.\n\n raises Exception if an edit link is encountered more than once\n \"\"\"\n gcal = gdata.calendar.client.CalendarClient(source='persistent-cal')\n auth_token = gdata.gauth.OAuthHmacToken(consumer_key=CONSUMER_KEY,\n consumer_secret=CONSUMER_SECRET,\n token=TOKEN,\n token_secret=TOKEN_SECRET,\n auth_state=3)\n gcal.auth_token = auth_token\n uri = ('https://www.google.com/calendar/feeds/'\n 'vhoam1gb7uqqoqevu91liidi80%40group.calendar.google.com/private/full')\n\n feed = gcal.get_calendar_event_feed(uri=uri)\n total_results = int(feed.total_results.text)\n if total_results > 25:\n uri = '%s?max-results=%s' % (uri, total_results)\n feed = gcal.get_calendar_event_feed(uri=uri)\n\n result = {}\n for event in feed.entry:\n # each event is [CalendarEventEntry]\n when = event.when # when is [When]\n curr_starts = [t.start for t in when] # [string]\n curr_ends = [t.end for t in when] # [string]\n # who is [gdata.data.Who]\n who = [v.email for v in event.who]\n # each v.email is string\n gcal_edit = event.get_edit_link().href # string\n if gcal_edit in result:\n raise Exception('Hmmmmmmm, duplicate')\n else:\n result[gcal_edit] = {'starts': curr_starts,\n 'ends': curr_ends,\n 'who': who}\n\n with open('curr_state_cal.json', 'wb') as fh:\n json.dump(result, fh)\n\n\nif __name__ == '__main__':\n main()\n", "id": "12197331", "language": "Python", "matching_score": 2.7215890884399414, "max_stars_count": 1, "path": "scripts/curr_state_cal.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Gets all events from GCal.\"\"\"\n\n\n# General 
libraries\nimport json\nimport os\n\n# Third-party libraries\n\n# App specific libraries\nfrom google_api_utils import InitService\nfrom library import CALENDAR_ID\nfrom models import Event\n\n\nos.environ['HTTP_HOST'] = 'persistent-cal.appspot.com'\n\n\ndef main():\n \"\"\"Main function. Retrieves all events from the datastore and GCal.\n\n Must be run from within remote_api.\n \"\"\"\n gcal_edits = []\n for event in Event.all():\n gcal_edits.append(event.gcal_edit)\n\n service = InitService()\n events = {}\n for gcal_edit in gcal_edits:\n event = service.events().get(calendarId=CALENDAR_ID,\n eventId=gcal_edit).execute()\n events[gcal_edit] = event\n\n with open('gcal_events.json', 'w') as fh:\n json.dump(events, fh)\n\n return events\n\n\nif __name__ == '__main__':\n main()\n", "id": "12386703", "language": "Python", "matching_score": 2.7567903995513916, "max_stars_count": 1, "path": "scripts/get_event_data.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"DB migration for Calendar v2 to v3 upgrade on 2012-03-03.\n\nIntended to be run through the remote API:\n\nremote_api_shell.py -s persistent-cal.appspot.com\n\ns~persistent-cal> import sys\ns~persistent-cal> sys.path.append('/path/to/persistent-cal')\ns~persistent-cal> from db_migration_2012_03_03 import UpdateEvents\ns~persistent-cal> UpdateEvents()\n\nNote:\n We are transforming the gcal_edit value from the full link to the link\n id at the end of url:\n\n https://www.google.com/calendar/feeds/{CAL_ID}/private/full/{EVENT_ID}\n\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General imports\nimport httplib2\n\n# Third Party Imports\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\nfrom oauth2client.file import Storage\n\n# App specific libraries\nfrom models import Event\nfrom secret_key import DEVELOPER_KEY\n\n\ndef UpdateEvents():\n # OAuth2 credentials already stored\n storage = Storage('calendar.dat')\n credentials = storage.get()\n\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = build(serviceName='calendar', version='v3', http=http,\n developerKey=DEVELOPER_KEY)\n\n cal_id = 'vhoam1<EMAIL>'\n events = Event.all()\n for event in events:\n event.gcal_edit = event.gcal_edit.split('/')[-1]\n\n event_id = event.gcal_edit\n try:\n service.events().get(calendarId=cal_id, eventId=event_id).execute()\n event.put()\n except HttpError as e:\n print('%s failed with: %s %s' % (event_id, type(e), e))\n", "id": "7258014", "language": "Python", "matching_score": 3.4236133098602295, "max_stars_count": 1, "path": "migrations/db_migration_2012_03_03_part2.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law 
or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Google API utility library for persistent-cal.\"\"\"\n\n\n__author__ = '<EMAIL> (<NAME>)'\n\n\n# General libraries\nimport datetime\nimport json\nimport logging\nimport os\nimport time\n\n# Third-party libraries\nfrom apiclient.discovery import DISCOVERY_URI\nfrom apiclient.discovery import _add_query_parameter\nfrom apiclient.discovery import build_from_document\nfrom apiclient.errors import HttpError\nfrom apiclient.errors import InvalidJsonError\nimport httplib2\nfrom oauth2client.appengine import CredentialsModel\nfrom oauth2client.appengine import StorageByKeyName\nimport uritemplate\n\n# App engine specific libraries\nfrom google.appengine.ext import ndb\n\n# App specific libraries\nfrom custom_exceptions import CredentialsLoadError\n\n\nCALENDAR_API_NAME = 'calendar'\nCALENDAR_API_VERSION = 'v3'\nCREDENTIALS_KEYNAME = 'calendar.dat'\nDISCOVERY_DOC_MAX_AGE = datetime.timedelta(days=7)\nSECRET_KEY = {}\nSECRET_KEY_DB_KEY = 'secret_key'\n\n\nclass SecretKey(ndb.Model):\n \"\"\"Model for representing a project secret keys.\"\"\"\n client_id = ndb.StringProperty(required=True)\n client_secret = ndb.StringProperty(required=True)\n developer_key = ndb.StringProperty(required=True)\n\n\nclass DiscoveryDocument(ndb.Model):\n \"\"\"Model for representing a discovery document.\"\"\"\n document = ndb.StringProperty(required=True, indexed=False)\n updated = ndb.DateTimeProperty(auto_now=True, indexed=False)\n\n @property\n def expired(self):\n now = datetime.datetime.utcnow()\n return now - self.updated > DISCOVERY_DOC_MAX_AGE\n\n @classmethod\n def build(cls, serviceName, version, credentials, **kwargs):\n discoveryServiceUrl = kwargs.pop('discoveryServiceUrl', DISCOVERY_URI)\n key = ndb.Key(cls, serviceName, cls, version, cls, discoveryServiceUrl)\n discovery_doc = key.get()\n\n if discovery_doc is None or discovery_doc.expired:\n # If None, RetrieveDiscoveryDoc() will use Defaults\n document = RetrieveDiscoveryDoc(\n serviceName, version, credentials=credentials,\n discoveryServiceUrl=discoveryServiceUrl)\n discovery_doc = cls(key=key, document=document)\n discovery_doc.put()\n\n http = kwargs.get('http', None)\n if http is None:\n http = httplib2.Http()\n kwargs['http'] = credentials.authorize(http)\n return build_from_document(\n discovery_doc.document, discoveryServiceUrl, **kwargs)\n\n\ndef InitCredentials(keyname=CREDENTIALS_KEYNAME):\n \"\"\"Initializes an OAuth2Credentials object from a file.\n\n Args:\n keyname: The key name of the credentials object in the data store. Defaults\n to CREDENTIALS_KEYNAME.\n\n Returns:\n An OAuth2Credentials object.\n \"\"\"\n storage = StorageByKeyName(CredentialsModel, keyname, 'credentials')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid == True:\n raise CredentialsLoadError('No credentials retrieved.')\n\n return credentials\n\n\ndef InitService(credentials=None, keyname=CREDENTIALS_KEYNAME):\n \"\"\"Initializes a service object to make calendar requests.\n\n Args:\n credentials: An OAuth2Credentials object used to build a service object.\n In the case the credentials is None, attempt to get credentials using\n the credentials found at key {keyname}.\n keyname: The key name of the credentials object in the data store. 
Defaults\n to CREDENTIALS_KEYNAME.\n\n Returns:\n A Resource object intended for making calls to an Apiary API.\n\n Raises:\n CredentialsLoadError in the case that no credentials are passed in and they\n can't be loaded from the specified file\n \"\"\"\n if credentials is None:\n credentials = InitCredentials(keyname=keyname)\n\n if 'DEVELOPER_KEY' not in SECRET_KEY:\n secret_key = ndb.Key(SecretKey, SECRET_KEY_DB_KEY).get()\n SECRET_KEY['DEVELOPER_KEY'] = secret_key.developer_key\n\n return DiscoveryDocument.build(CALENDAR_API_NAME,\n CALENDAR_API_VERSION,\n credentials,\n developerKey=SECRET_KEY['DEVELOPER_KEY'])\n\n\ndef RetrieveDiscoveryDoc(serviceName, version, credentials=None,\n discoveryServiceUrl=DISCOVERY_URI):\n params = {'api': serviceName, 'apiVersion': version}\n requested_url = uritemplate.expand(discoveryServiceUrl, params)\n\n # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment\n # variable that contains the network address of the client sending the\n # request. If it exists then add that to the request for the discovery\n # document to avoid exceeding the quota on discovery requests.\n if 'REMOTE_ADDR' in os.environ:\n requested_url = _add_query_parameter(requested_url, 'userIp',\n os.environ['REMOTE_ADDR'])\n\n if credentials is None:\n credentials = InitCredentials()\n http = httplib2.Http()\n http = credentials.authorize(http)\n\n resp, content = http.request(requested_url)\n\n if resp.status >= 400:\n raise HttpError(resp, content, uri=requested_url)\n\n try:\n service = json.loads(content)\n except ValueError:\n raise InvalidJsonError(\n 'Bad JSON: {} from {}'.format(content, requested_url))\n\n return content\n\n\ndef AttemptAPIAction(http_verb, num_attempts=3, log_msg=None,\n credentials=None, **kwargs):\n \"\"\"Attempt an API action a predetermined number of times before failing.\n\n Args:\n http_verb: The HTTP verb of the intended request. Examle: get, update.\n num_attempts: The number of attempts to make before failing the request.\n Defaults to 3.\n log_msg: The log message to report upon success. 
Defaults to None.\n credentials: An OAuth2Credentials object used to build a service object.\n kwargs: The keyword arguments to be passed to the API request.\n\n Returns:\n The result of the API request\n \"\"\"\n service = InitService(credentials=credentials)\n\n # pylint:disable-msg=E1101\n api_action = getattr(service.events(), http_verb, None)\n if api_action is None:\n return None\n\n attempts = int(num_attempts) if int(num_attempts) > 0 else 0\n while attempts:\n try:\n result = api_action(**kwargs).execute()\n\n if log_msg is None:\n log_msg = '{id_} changed via {verb}'.format(id_=result['id'],\n verb=http_verb)\n logging.info(log_msg)\n\n return result\n except (httplib2.HttpLib2Error, HttpError) as exc:\n logging.info(exc)\n attempts -= 1\n time.sleep(3)\n\n return None\n", "id": "859349", "language": "Python", "matching_score": 5.280161380767822, "max_stars_count": 1, "path": "google_api_utils.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (C) 2010-2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Gets new credentials using the keys in the datastore.\n\nFrom Sample in project source wiki:\nhttp://code.google.com/p/google-api-python-client/wiki/OAuth2#Command-Line\n\"\"\"\n\n\n# General libraries\nimport os\n\n# Third-party libraries\nfrom oauth2client.appengine import CredentialsModel\nfrom oauth2client.appengine import StorageByKeyName\nfrom oauth2client.client import OAuth2WebServerFlow\nfrom oauth2client.tools import run\n\n# App specific libraries\nfrom google_api_utils import SecretKey\nfrom google_api_utils import SECRET_KEY_DB_KEY\n\n\nos.environ['HTTP_HOST'] = 'persistent-cal.appspot.com'\n\n\ndef main():\n \"\"\"Main function. Attempts to get credentials and runs OAuth2 if invalid.\n\n Must be run from within remote_api.\n \"\"\"\n storage = StorageByKeyName(CredentialsModel, 'calendar.dat', 'credentials')\n credentials = storage.get()\n\n if credentials is None or credentials.invalid == True:\n secret_key = ndb.Key(SecretKey, SECRET_KEY_DB_KEY).get()\n flow = OAuth2WebServerFlow(\n client_id=secret_key.client_id,\n client_secret=secret_key.client_secret,\n scope='https://www.googleapis.com/auth/calendar',\n user_agent='persistent-cal-auth')\n\n credentials = run(flow, storage)\n\n return credentials\n\n\nif __name__ == '__main__':\n main()\n", "id": "10842524", "language": "Python", "matching_score": 0.3986304998397827, "max_stars_count": 1, "path": "scripts/get_new_credentials.py" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\nimport grpc\n\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\nimport users_pb2 as users__pb2\n\n\nclass UsersStub(object):\n # missing associated documentation comment in .proto file\n pass\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.AddUser = channel.unary_unary(\n '/users.v1.Users/AddUser',\n request_serializer=users__pb2.User.SerializeToString,\n response_deserializer=users__pb2.AddUserResponse.FromString,\n )\n self.GetUsers = channel.unary_stream(\n '/users.v1.Users/GetUsers',\n request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n response_deserializer=users__pb2.User.FromString,\n )\n\n\nclass UsersServicer(object):\n # missing associated documentation comment in .proto file\n pass\n\n def AddUser(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetUsers(self, request, context):\n # missing associated documentation comment in .proto file\n pass\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_UsersServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'AddUser': grpc.unary_unary_rpc_method_handler(\n servicer.AddUser,\n request_deserializer=users__pb2.User.FromString,\n response_serializer=users__pb2.AddUserResponse.SerializeToString,\n ),\n 'GetUsers': grpc.unary_stream_rpc_method_handler(\n servicer.GetUsers,\n request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n response_serializer=users__pb2.User.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'users.v1.Users', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n", "id": "3342154", "language": "Python", "matching_score": 2.907686710357666, "max_stars_count": 0, "path": "_grpc/users_pb2_grpc.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport concurrent.futures\nimport os\nimport random\nimport textwrap\nimport threading\nimport time\n\nimport grpc\nimport grpc_reflection.v1alpha.reflection\n\nimport users_pb2\nimport users_pb2_grpc\n\n\n_MAX_ID = 2 ** 64 - 1\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\ndef _new_id(locked_container, max_attempts):\n \"\"\"Compute a new (random) ID.\n\n The container is named ``locked_container``; the assumption is that the\n caller is using a mutex to prevent other code from modifying the container\n while this function executes.\n\n Args:\n locked_container (Container): A container that has IDs as indices,\n e.g. 
``10 in locked_container``.\n max_attempts (int): The maximum number of random numbers to be\n generated.\n\n Returns:\n int: The randomly generated ID.\n\n Raises:\n RuntimeError: If no random number can be generated.\n \"\"\"\n for _ in range(max_attempts):\n # NOTE: Don't use 0, because the 0-value corresponds to unset.\n new_id = random.randint(1, _MAX_ID)\n if new_id not in locked_container:\n return new_id\n\n raise RuntimeError(\n f\"Failed to generate a new ID in {max_attempts} attempts\"\n )\n\n\nclass Users:\n def __init__(self, *args, **kwargs):\n self._database = {}\n # NOTE: We hold this lock for **all** operations on ``_database``.\n self._lock = threading.Lock()\n super().__init__(*args, **kwargs)\n\n def AddUser(self, request, context):\n \"\"\"Add a user to the database.\n\n Args:\n request (users_pb2.User): The request from the API.\n context (grpc._server._Context): A request context.\n\n Returns:\n users_pb2.AddUserResponse: The response containing the ID in the\n DB for the inserted user.\n \"\"\"\n print(f\"AddUser:\\n{textwrap.indent(str(request), ' ')}\")\n\n if request.id != 0:\n message = \"`id` cannot be set on user creation\"\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(message)\n return users_pb2.AddUserResponse()\n\n with self._lock:\n inserted_id = _new_id(self._database, 5)\n self._database[inserted_id] = (\n request.first_name,\n request.last_name,\n )\n return users_pb2.AddUserResponse(user_id=inserted_id)\n\n def GetUsers(self, request, unused_context):\n \"\"\"Get all users from the database.\n\n Args:\n request (google.protobuf.empty_pb2.Empty): The request from\n the API.\n unused_context (grpc._server._Context): A request context.\n\n Returns:\n Generator[users_pb2.Users]: The response stream containing all\n users in the DB.\n \"\"\"\n print(\"GetUsers: (empty)\")\n\n with self._lock:\n user_ids = sorted(self._database.keys())\n for user_id in user_ids:\n # NOTE: This **does not** try to catch a ``KeyError`` because\n # it assumes the ID **must** be in there.\n first_name, last_name = self._database[user_id]\n yield users_pb2.User(\n id=user_id, first_name=first_name, last_name=last_name\n )\n\n\nclass UsersServicer(Users, users_pb2_grpc.UsersServicer):\n pass\n\n\ndef wait_for_termination(server):\n # See:\n # https://github.com/grpc/grpc/pull/19299\n # https://github.com/grpc/grpc/pull/19852\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\ndef enable_reflection(server):\n service_names = (\n users_pb2.DESCRIPTOR.services_by_name[\"Users\"].full_name,\n grpc_reflection.v1alpha.reflection.SERVICE_NAME,\n )\n grpc_reflection.v1alpha.reflection.enable_server_reflection(\n service_names, server\n )\n\n\ndef serve(grpc_port):\n server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))\n users_pb2_grpc.add_UsersServicer_to_server(UsersServicer(), server)\n enable_reflection(server)\n\n server.add_insecure_port(f\"[::]:{grpc_port}\")\n print(f\"Running Users service on port {grpc_port}\")\n server.start()\n wait_for_termination(server)\n\n\ndef main():\n grpc_port = os.environ.get(\"GRPC_PORT\", 50051)\n serve(grpc_port)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "42328", "language": "Python", "matching_score": 3.452669858932495, "max_stars_count": 0, "path": "_bin/grpc_server.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may 
obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport textwrap\n\nimport google.protobuf.empty_pb2\nimport grpc\n\nimport users_pb2\nimport users_pb2_grpc\n\n\n_EMPTY = google.protobuf.empty_pb2.Empty()\n\n\ndef insert_user(stub, first_name, last_name):\n user = users_pb2.User(first_name=first_name, last_name=last_name)\n response = stub.AddUser(user)\n print(f\"Inserted user:\\n{textwrap.indent(str(response), ' ')}\")\n\n\ndef main():\n grpc_port = os.environ.get(\"GRPC_PORT\", 50051)\n\n address = f\"localhost:{grpc_port}\"\n with grpc.insecure_channel(address) as channel:\n stub = users_pb2_grpc.UsersStub(channel)\n # Insert users.\n insert_user(stub, \"Bob\", \"Green\")\n insert_user(stub, \"Alice\", \"Redmond\")\n # Get users.\n print(\"Retrieving users:\")\n for user in stub.GetUsers(_EMPTY):\n print(f\" User:\\n{textwrap.indent(str(user), ' ')}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "9408629", "language": "Python", "matching_score": 0.2516326904296875, "max_stars_count": 0, "path": "_bin/call_grpc.py" }, { "content": "import base64\nimport string\n\n# NOTE: I tried https://pypi.org/project/statsdmetrics/ and it did\n# not work\n\n# See:\n# - https://docs.datadoghq.com/developers/dogstatsd/datagram_shell/\n# - https://docs.datadoghq.com/developers/metrics/#naming-custom-metrics\n#\n# The following custom metric naming convention must be followed:\n#\n# - Metric names must start with a letter.\n# - Metric names must only contain ASCII alphanumerics, underscores, and\n# periods.\n# - Other characters, including spaces, are converted to underscores.\n# - Unicode is not supported.\n# - Metric names must not exceed 200 characters. 
Fewer than 100 is preferred\n# from a UI perspective.\n#\n# **Note**: Metric names are case sensitive in Datadog.\n#\n#\n# Metric\n# ======\n# <METRIC_NAME>:<VALUE>|<TYPE>|@<SAMPLE_RATE>|#<TAG_KEY_1>:<TAG_VALUE_1>,<TAG_2>\n# TYPE: c ==COUNT, g==GUAGE, ms==TIMER, h==HISTOGRAM, s==SET, d==DISTRIBUTION\n# OPTIONAL (<SAMPLE_RATE>)\n# OPTIONAL (<TAG_*>)\n# - \"dd-sanity-check.request :1|c| #path :/ \"\n# - \"api.requests.response_code.all:1|c| #route:*, method:GET, response_code:200, service:dd-sanity-check\"\n# - \"api.requests.response_code.200:1|c| #route:*, method:GET, response_code:200, service:dd-sanity-check\"\n# - \"api.requests.response_time :2|h| #route:*, method:GET, response_code:200, service:dd-sanity-check\"\n# - \"page.views:1|c\"\n# - \"fuel.level:0.5|g\"\n# - \"song.length:240|h|@0.5\"\n# - \"users.uniques:1234|s\"\n# - \"users.online:1|c|#country:china\"\n# - \"users.online:1|c|@0.5|#country:china\"\n\n\ndef _metric_type_pretty(value):\n if value == \"c\":\n return \"COUNT\"\n\n if value == \"g\":\n return \"\"\n\n if value == \"ms\":\n return \"TIMER\"\n\n if value == \"h\":\n return \"HISTOGRAM\"\n\n if value == \"s\":\n return \"SET\"\n\n if value == \"d\":\n return \"DISTRIBUTION\"\n\n return value\n\n\ndef parse_metric(value_bytes):\n result = {\"raw_binary\": base64.b64encode(value_bytes).decode(\"ascii\")}\n try:\n value = value_bytes.decode(\"ascii\")\n except UnicodeDecodeError:\n return result\n\n result[\"raw\"] = value\n\n parts = value.split(\"|\")\n metric_name_value = parts[0]\n if not metric_name_value[:1] in string.ascii_letters:\n # This means it must be something else, e.g. `_e` or `_sc`.\n # TODO: Return something else here, e.g. `None`\n return value_bytes\n\n if not 2 <= len(parts) <= 4:\n # This means the metric is malformed\n return result\n\n metric_name_value_parts = metric_name_value.split(\":\", 1)\n if len(metric_name_value_parts) != 2:\n # This means the pair is malformed\n return result\n\n metric_name, metric_value = metric_name_value_parts\n result[\"metric\"] = {\"name\": metric_name, \"value\": metric_value}\n result[\"type\"] = _metric_type_pretty(parts[1])\n\n if len(parts) == 3:\n rate_or_tags = parts[2]\n if rate_or_tags.startswith(\"@\"):\n result[\"sample_rate\"] = rate_or_tags[1:]\n elif rate_or_tags.startswith(\"#\"):\n result[\"tags\"] = rate_or_tags[1:].split(\",\")\n else:\n # This means the last segment is malformed\n return result\n elif len(parts) == 4:\n sample_rate = parts[2]\n if sample_rate.startswith(\"@\"):\n result[\"sample_rate\"] = sample_rate[1:]\n else:\n # This means the sample rate is malformed\n return result\n\n tag_pairs = parts[3]\n if tag_pairs.startswith(\"#\"):\n result[\"tags\"] = tag_pairs[1:].split(\",\")\n else:\n # This means the tag pairs segment is malformed\n return result\n\n return result\n\n\n# Event\n# =====\n# _e{<TITLE>.length,<TEXT>.length}:<TITLE>|<TEXT>|d:<TIMESTAMP>|h:<HOSTNAME>|p:<PRIORITY>|t:<ALERT_TYPE>|#<TAG_KEY_1>:<TAG_VALUE_1>,<TAG_2>\n# OPTIONAL (d:<TIMESTAMP>)\n# OPTIONAL (h:<HOSTNAME>)\n# OPTIONAL (p:<PRIORITY>)\n# OPTIONAL (t:<ALERT_TYPE>)\n# OPTIONAL (#<TAG_*>)\n# - \"_e{21,36}:An exception occurred|Cannot parse CSV file from 10.0.0.17|t:warning|#err_type:bad_file\"\n# - \"_e{21,42}:An exception occurred|Cannot parse JSON request:\\\\n{\"foo: \"bar\"}|p:low|#err_type:bad_request\"\n#\n# Service Check\n# =============\n# _sc|<NAME>|<STATUS>|d:<TIMESTAMP>|h:<HOSTNAME>|#<TAG_KEY_1>:<TAG_VALUE_1>,<TAG_2>|m:<SERVICE_CHECK_MESSAGE>\n# OPTIONAL (d:<TIMESTAMP>)\n# OPTIONAL 
(h:<HOSTNAME>)\n# OPTIONAL (#<TAG_*>)\n# OPTIONAL (m:<SERVICE_CHECK_MESSAGE>)\n# - \"_sc|Redis connection|2|#env:dev|m:Redis connection timed out after 10s\"\n", "id": "3396716", "language": "Python", "matching_score": 0.8805646300315857, "max_stars_count": 1, "path": "src/python/parse_datagram.py" }, { "content": "import example\n\nimport wrapper\n\n\ndef main():\n print(\">>> wrapper.morp()\")\n return_value = wrapper.morp()\n assert return_value is None\n\n print(\">>> example.foo(1.5, 2.5)\")\n result = example.foo(1.5, 2.5)\n print(repr(result))\n\n print(\">>> wrapper.triple_foo(1.5, 2.5)\")\n result = wrapper.triple_foo(1.5, 2.5)\n print(repr(result))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11801558", "language": "Python", "matching_score": 0.3238576650619507, "max_stars_count": 14, "path": "cython/use_cimport/check_wrapper.py" }, { "content": "from __future__ import print_function\n\nimport ctypes\nimport os\n\nimport cffi # 1.11.5\nimport numpy as np # 1.15.0\n\n\nHERE = os.path.dirname(__file__)\nSO_FILE = os.path.abspath(os.path.join(HERE, \"..\", \"fortran\", \"example.so\"))\nSEPARATOR = \"-\" * 60\nMAKE_UDF_TEMPLATE = \"\"\"\\\nquuz = make_udf({}, {}, {})\n = {}\n\"\"\"\nUDF_PTR_TEMPLATE = \"\"\"\\\nptr_as_int = address(made_it) # intptr_t / ssize_t / long\nptr_as_int = {} # 0x{:x}\nudf_ptr(ptr_as_int) # Set memory in ``made_it``\nmade_it = {}\n\"\"\"\nFOO_ARRAY_TEMPLATE = \"\"\"\\\nval =\n{}\ntwo_val = foo_array({}, val)\ntwo_val =\n{}\n\"\"\"\n\n\nclass UserDefined(ctypes.Structure):\n _fields_ = [\n (\"buzz\", ctypes.c_double),\n (\"broken\", ctypes.c_double),\n (\"how_many\", ctypes.c_int),\n ]\n\n def __repr__(self):\n template = (\n \"UserDefined(buzz={self.buzz}, \"\n \"broken={self.broken}, \"\n \"how_many={self.how_many})\"\n )\n return template.format(self=self)\n\n\nclass DataContainer(ctypes.Structure):\n _fields_ = [(\"data_\", ctypes.c_double * 8)]\n\n @property\n def data(self):\n result = np.ctypeslib.as_array(self.data_)\n return result.reshape((4, 2), order=\"F\")\n\n\ndef numpy_pointer(array):\n return array.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n\ndef get_intptr_t(verify=True):\n if verify:\n ffi = cffi.FFI()\n if ffi.sizeof(\"intptr_t\") != ffi.sizeof(\"ssize_t\"):\n raise ValueError(\"Unexpected size of ``intptr_t``.\")\n\n # NOTE: On many platforms, ``ctypes.c_ssize_t is ctypes.c_long``.\n return ctypes.c_ssize_t\n\n\ndef prepare_udf():\n made_it = UserDefined()\n raw_pointer = ctypes.cast(ctypes.pointer(made_it), ctypes.c_void_p)\n intptr_t = get_intptr_t()\n ptr_as_int = intptr_t(raw_pointer.value)\n return made_it, ptr_as_int\n\n\ndef view_knob(lib_example):\n # This is a stupid hack. 
(We don't bind(c, name='view_knob')\n # because the ``f2py`` parser fails on that input.)\n return lib_example.__example_MOD_view_knob()\n\n\ndef main():\n lib_example = ctypes.cdll.LoadLibrary(SO_FILE)\n\n print(SEPARATOR)\n # foo()\n bar = ctypes.c_double(1.0)\n baz = ctypes.c_double(16.0)\n quux = ctypes.c_double()\n lib_example.foo(bar, baz, ctypes.byref(quux))\n print(\"quux = foo({}, {}) = {}\".format(bar, baz, quux))\n\n print(SEPARATOR)\n # make_udf()\n buzz = ctypes.c_double(1.25)\n broken = ctypes.c_double(5.0)\n how_many = ctypes.c_int(1337)\n quuz = UserDefined()\n lib_example.make_udf(\n ctypes.byref(buzz),\n ctypes.byref(broken),\n ctypes.byref(how_many),\n ctypes.byref(quuz),\n )\n msg = MAKE_UDF_TEMPLATE.format(buzz, broken, how_many, quuz)\n print(msg, end=\"\")\n print(\"needsfree(quuz) = {}\".format(bool(quuz._b_needsfree_)))\n quuz_address = ctypes.addressof(quuz)\n print(\"address(quuz) = {0} # 0x{0:x}\".format(quuz_address))\n alt_quuz = UserDefined.from_address(quuz_address)\n print(\"*address(quuz) =\\n {}\".format(alt_quuz))\n\n print(SEPARATOR)\n # foo_array()\n val = np.asfortranarray([[3.0, 4.5], [1.0, 1.25], [9.0, 0.0], [-1.0, 4.0]])\n shape = val.shape\n two_val = np.empty(shape, order=\"F\")\n size, _ = shape\n\n size = ctypes.c_int(size)\n lib_example.foo_array(\n ctypes.byref(size), numpy_pointer(val), numpy_pointer(two_val)\n )\n msg = FOO_ARRAY_TEMPLATE.format(val, size, two_val)\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # udf_ptr()\n made_it, ptr_as_int = prepare_udf()\n lib_example.udf_ptr(ctypes.byref(ptr_as_int))\n msg = UDF_PTR_TEMPLATE.format(ptr_as_int, ptr_as_int.value, made_it)\n print(msg, end=\"\")\n print(\"needsfree(made_it) = {}\".format(bool(made_it._b_needsfree_)))\n alt_made_it = UserDefined.from_address(ptr_as_int.value)\n print(\"*ptr_as_int =\\n {}\".format(alt_made_it))\n\n print(SEPARATOR)\n # make_container()\n contained = np.asfortranarray(\n [[0.0, 4.0], [1.0, 9.0], [1.0, 2.0], [3.0, 1.0]]\n )\n container = DataContainer()\n lib_example.make_container(\n numpy_pointer(contained), ctypes.byref(container)\n )\n print(\"contained =\\n{}\".format(contained))\n print(\"container = make_container(contained)\")\n print(\"container.data =\\n{}\".format(container.data))\n print(\n \"address(contained) = {0} # 0x{0:x}\".format(\n contained.ctypes.data\n )\n )\n addr1 = ctypes.addressof(container)\n print(\"address(container) = {0} # 0x{0:x}\".format(addr1))\n addr2 = ctypes.addressof(container.data_)\n print(\"address(container.data) = {0} # 0x{0:x}\".format(addr2))\n\n print(SEPARATOR)\n # just_print()\n print(\"just_print()\")\n lib_example.just_print()\n\n print(SEPARATOR)\n # \"Turn the knob\" module constant\n knob = view_knob(lib_example)\n print(\"view_knob() = {}\".format(knob))\n new_value = ctypes.c_int(42)\n print(\"turn_knob({})\".format(new_value))\n lib_example.turn_knob(ctypes.byref(new_value))\n knob = view_knob(lib_example)\n print(\"view_knob() = {}\".format(knob))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1252987", "language": "Python", "matching_score": 5.946895122528076, "max_stars_count": 14, "path": "python/check_ctypes.py" }, { "content": "from __future__ import print_function\n\nimport os\n\nimport cffi # 1.11.5\nimport numpy as np # 1.15.0\n\nfrom check_ctypes import FOO_ARRAY_TEMPLATE\nfrom check_ctypes import SEPARATOR\nfrom check_ctypes import UDF_PTR_TEMPLATE\nfrom check_ctypes import view_knob\n\n\nHERE = os.path.dirname(__file__)\nSO_FILE = os.path.abspath(os.path.join(HERE, \"..\", 
\"fortran\", \"example.so\"))\n\n\ndef numpy_pointer(array, ffi):\n if array.dtype != np.float64:\n raise TypeError(\"Unexpected data type\", array.dtype)\n return ffi.cast(\"double *\", array.ctypes.data)\n\n\ndef udf_str(udf):\n return \"UserDefined({0.buzz}, {0.broken}, {0.how_many})\".format(udf)\n\n\ndef main():\n ffi = cffi.FFI()\n ffi.cdef(\"void foo(double bar, double baz, double *quux);\")\n ffi.cdef(\n \"typedef struct UserDefined {\\n\"\n \" double buzz;\\n\"\n \" double broken;\\n\"\n \" int how_many;\\n\"\n \"} UserDefined;\"\n )\n ffi.cdef(\n \"void make_udf(double *buzz, double *broken,\\n\"\n \" int *how_many, UserDefined *quux);\"\n )\n ffi.cdef(\"void foo_array(int *size, double *val, double *two_val);\")\n ffi.cdef(\"void udf_ptr(intptr_t *ptr_as_int);\")\n ffi.cdef(\"void just_print();\")\n ffi.cdef(\"int __example_MOD_view_knob(void);\")\n ffi.cdef(\"void turn_knob(int *new_value);\")\n lib_example = ffi.dlopen(SO_FILE)\n\n print(SEPARATOR)\n # foo()\n bar = 1.0\n baz = 16.0\n quux_ptr = ffi.new(\"double *\")\n lib_example.foo(bar, baz, quux_ptr)\n quux = quux_ptr[0]\n print(\"quux = foo({}, {}) = {}\".format(bar, baz, quux))\n\n print(SEPARATOR)\n # make_udf()\n buzz = 1.25\n buzz_ptr = ffi.new(\"double *\")\n buzz_ptr[0] = buzz\n broken = 5.0\n broken_ptr = ffi.new(\"double *\")\n broken_ptr[0] = broken\n how_many = 1337\n how_many_ptr = ffi.new(\"int *\")\n how_many_ptr[0] = how_many\n quuz_ptr = ffi.new(\"UserDefined *\")\n lib_example.make_udf(buzz_ptr, broken_ptr, how_many_ptr, quuz_ptr)\n quuz = quuz_ptr[0]\n msg = \"quuz = make_udf({}, {}, {})\\n = {}\".format(\n buzz, broken, how_many, udf_str(quuz)\n )\n print(msg)\n\n print(SEPARATOR)\n # foo_array()\n val = np.asfortranarray([[3.0, 4.5], [1.0, 1.25], [9.0, 0.0], [-1.0, 4.0]])\n shape = val.shape\n two_val = np.empty(shape, order=\"F\")\n size_ptr = ffi.new(\"int *\")\n size_ptr[0], _ = shape\n\n lib_example.foo_array(\n size_ptr, numpy_pointer(val, ffi), numpy_pointer(two_val, ffi)\n )\n msg = FOO_ARRAY_TEMPLATE.format(val, size_ptr[0], two_val)\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # udf_ptr()\n made_it_ptr = ffi.new(\"UserDefined *\")\n ptr_as_int_ptr = ffi.new(\"intptr_t *\")\n ptr_as_int_ptr[0] = ffi.cast(\"intptr_t\", made_it_ptr)\n lib_example.udf_ptr(ptr_as_int_ptr)\n made_it = made_it_ptr[0]\n ptr_as_int = ptr_as_int_ptr[0]\n msg = UDF_PTR_TEMPLATE.format(ptr_as_int, ptr_as_int, udf_str(made_it))\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # just_print()\n print(\"just_print()\")\n lib_example.just_print()\n\n print(SEPARATOR)\n # \"Turn the knob\" module constant\n knob = view_knob(lib_example)\n print(\"view_knob() = {}\".format(knob))\n new_value_ptr = ffi.new(\"int *\")\n new_value = 42\n new_value_ptr[0] = new_value\n print(\"turn_knob({})\".format(new_value))\n lib_example.turn_knob(new_value_ptr)\n knob = view_knob(lib_example)\n print(\"view_knob() = {}\".format(knob))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7807513", "language": "Python", "matching_score": 5.388415336608887, "max_stars_count": 14, "path": "python/check_cffi.py" }, { "content": "from __future__ import print_function\n\nimport numpy as np\n\nfrom check_ctypes import MAKE_UDF_TEMPLATE\nfrom check_ctypes import prepare_udf\nfrom check_ctypes import SEPARATOR\nfrom check_ctypes import UDF_PTR_TEMPLATE\nfrom check_ctypes import UserDefined\nimport example\n\n\nMSG_FOO_ARRAY = \"\"\"\\\nval =\n{}\ntwo_val = foo_array(val)\ntwo_val =\n{}\"\"\"\n\n\ndef np_to_udf(arr):\n assert arr.dtype == np.dtype(\"S1\")\n 
address = arr.ctypes.data\n return UserDefined.from_address(address)\n\n\ndef main():\n print(SEPARATOR)\n print(\"example: {}\".format(example))\n example_ns = example.example\n exported_names = [\n name\n for name in dir(example_ns)\n if not (name.startswith(\"__\") and name.endswith(\"__\"))\n ]\n print(\"dir(example.example): {}\".format(\", \".join(exported_names)))\n\n print(SEPARATOR)\n # foo()\n bar = 1.0\n baz = 16.0\n msg_foo = \"foo ({}, {}) = {}\".format(\n bar, baz, example_ns.foo(bar, baz)\n )\n print(msg_foo)\n msg_foo_by_ref = \"foo_by_ref({}, {}) = {}\".format(\n bar, baz, example_ns.foo_by_ref(bar, baz)\n )\n print(msg_foo_by_ref)\n\n print(SEPARATOR)\n # make_udf()\n buzz = 1.25\n broken = 5.0\n how_many = 1337\n quuz_as_bytes = example_ns.make_udf(buzz, broken, how_many)\n quuz = np_to_udf(quuz_as_bytes)\n msg = MAKE_UDF_TEMPLATE.format(buzz, broken, how_many, quuz)\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # foo_array()\n val = np.asfortranarray([[3.0, 4.5], [1.0, 1.25], [9.0, 0.0], [-1.0, 4.0]])\n two_val = example_ns.foo_array(val)\n print(MSG_FOO_ARRAY.format(val, two_val))\n\n print(SEPARATOR)\n # udf_ptr()\n made_it, ptr_as_int = prepare_udf()\n ptr_as_int = ptr_as_int.value\n example_ns.udf_ptr(ptr_as_int)\n msg = UDF_PTR_TEMPLATE.format(ptr_as_int, ptr_as_int, made_it)\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # just_print()\n print(\"just_print()\")\n example_ns.just_print()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3118783", "language": "Python", "matching_score": 4.383584976196289, "max_stars_count": 14, "path": "f2py/check_f2py.py" }, { "content": "from __future__ import print_function\n\nimport numpy as np\n\nfrom check_ctypes import MAKE_UDF_TEMPLATE\nfrom check_ctypes import SEPARATOR\nimport example\n\n\ndef main():\n print(SEPARATOR)\n # foo()\n bar = 1.0\n baz = 16.0\n quux = example.foo(bar, baz)\n print(\"quux = foo({}, {}) = {}\".format(bar, baz, quux))\n\n print(SEPARATOR)\n # make_udf()\n buzz = 1.25\n broken = 5.0\n how_many = 1337\n quuz = example.make_udf(buzz, broken, how_many)\n msg = MAKE_UDF_TEMPLATE.format(buzz, broken, how_many, quuz)\n print(msg, end=\"\")\n\n print(SEPARATOR)\n # foo_array()\n val = np.asfortranarray([[3.0, 4.5], [1.0, 1.25], [9.0, 0.0], [-1.0, 4.0]])\n two_val = example.foo_array(val)\n print(\"val =\\n{}\".format(val))\n print(\"two_val = foo_array(val)\")\n print(\"two_val =\\n{}\".format(two_val))\n\n print(SEPARATOR)\n # udf_ptr()\n made_it = example.udf_ptr()\n print(\"made_it = udf_ptr()\\n = {}\".format(made_it))\n\n print(SEPARATOR)\n # just_print()\n print(\"just_print()\")\n example.just_print()\n\n print(SEPARATOR)\n # get_include()\n include_dir = example.get_include()\n msg = \"example.get_include() =\\n{}\".format(include_dir)\n print(msg)\n\n print(SEPARATOR)\n # \"Turn the knob\" module constant\n knob = example.view_knob()\n print(\"view_knob() = {}\".format(knob))\n new_value = 42\n print(\"turn_knob({})\".format(new_value))\n example.turn_knob(new_value)\n knob = example.view_knob()\n print(\"view_knob() = {}\".format(knob))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7509814", "language": "Python", "matching_score": 0.18895208835601807, "max_stars_count": 14, "path": "cython/check_cython.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test__appveyor_provider(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.appveyor import _appveyor_provider\n return _appveyor_provider()\n\n def _helper(self, repo_provider):\n import mock\n from ci_diff_helper import environment_vars as env\n\n mock_env = {env.APPVEYOR_REPO: repo_provider}\n with mock.patch('os.environ', new=mock_env):\n return self._call_function_under_test()\n\n def test_success(self):\n from ci_diff_helper import appveyor\n\n result = self._helper('bitbucket')\n self.assertIs(result, appveyor.AppVeyorRepoProvider.bitbucket)\n\n def test_success_different_case(self):\n from ci_diff_helper import appveyor\n\n result = self._helper('gitHub')\n self.assertIs(result, appveyor.AppVeyorRepoProvider.github)\n\n def test_failure(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(ValueError):\n self._call_function_under_test()\n\n\nclass TestAppVeyorRepoProvider(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import appveyor\n return appveyor.AppVeyorRepoProvider\n\n def _make_one(self, enum_val):\n klass = self._get_target_class()\n return klass(enum_val.lower())\n\n def test_members(self):\n klass = self._get_target_class()\n self.assertEqual(\n set([enum_val.name for enum_val in klass]),\n set(['bitbucket', 'github', 'gitlab', 'kiln', 'vso']))\n\n def test_bitbucket(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('bitbucket')\n self.assertIs(provider_obj, klass.bitbucket)\n\n def test_github(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('gitHub')\n self.assertIs(provider_obj, klass.github)\n\n def test_gitlab(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('gitlab')\n self.assertIs(provider_obj, klass.gitlab)\n\n def test_kiln(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('kiln')\n self.assertIs(provider_obj, klass.kiln)\n\n def test_vso(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('vso')\n self.assertIs(provider_obj, klass.vso)\n\n def test_invalid(self):\n with self.assertRaises(ValueError):\n self._make_one('ketchup')\n\n\nclass TestAppVeyor(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import appveyor\n return appveyor.AppVeyor\n\n def _make_one(self):\n klass = self._get_target_class()\n return klass()\n\n def test_constructor(self):\n from ci_diff_helper import _utils\n\n klass = self._get_target_class()\n config = self._make_one()\n self.assertIsInstance(config, klass)\n self.assertIs(config._provider, _utils.UNSET)\n\n def _provider_helper(self, provider_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _provider value set.\n self.assertIs(config._provider, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n provider_patch = mock.patch(\n 'ci_diff_helper.appveyor._appveyor_provider',\n return_value=provider_val)\n with provider_patch as mocked:\n result = config.provider\n self.assertIs(result, provider_val)\n mocked.assert_called_once_with()\n\n return config\n\n def 
test_provider_property(self):\n from ci_diff_helper.appveyor import AppVeyorRepoProvider\n\n provider_val = AppVeyorRepoProvider.github\n self._provider_helper(provider_val)\n\n def test_provider_property_cache(self):\n from ci_diff_helper.appveyor import AppVeyorRepoProvider\n\n provider_val = AppVeyorRepoProvider.gitlab\n config = self._provider_helper(provider_val)\n # Test that the value is cached.\n self.assertIs(config._provider, provider_val)\n # Test that cached value is re-used.\n self.assertIs(config.provider, provider_val)\n\n def test_tag_property(self):\n # NOTE: This method is only needed for test coverage. The defined\n # do-nothing tag property is there to modify the docstring\n # of the original.\n config = self._make_one()\n tag = '0.x.y'\n config._tag = tag\n self.assertEqual(config.tag, tag)\n\n def test___repr__(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n config = self._make_one()\n\n mock_env = {env.IN_APPVEYOR: 'false'}\n with mock.patch('os.environ', new=mock_env):\n self.assertEqual(repr(config), '<AppVeyor (active=False)>')\n", "id": "3834881", "language": "Python", "matching_score": 4.754006862640381, "max_stars_count": 5, "path": "tests/test_appveyor.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test_get_config(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper import get_config\n return get_config()\n\n def test_none(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n self._call_function_under_test()\n\n def test_multiple(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n mock_env = {\n env.IN_CIRCLE_CI: 'true',\n env.IN_APPVEYOR: 'True',\n }\n with mock.patch('os.environ', new=mock_env):\n with self.assertRaises(OSError):\n self._call_function_under_test()\n\n def test_match(self):\n import mock\n from ci_diff_helper import environment_vars as env\n from ci_diff_helper import travis\n\n mock_env = {env.IN_TRAVIS: 'true'}\n with mock.patch('os.environ', new=mock_env):\n config = self._call_function_under_test()\n\n self.assertIsInstance(config, travis.Travis)\n", "id": "2561726", "language": "Python", "matching_score": 2.3614003658294678, "max_stars_count": 5, "path": "tests/test__init__.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test__circle_ci_pr(unittest.TestCase):\n\n @staticmethod\n def 
_call_function_under_test():\n from ci_diff_helper import circle_ci\n return circle_ci._circle_ci_pr()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n valid_int = '331'\n actual_val = 331\n self.assertEqual(int(valid_int), actual_val)\n mock_env = {env.CIRCLE_CI_PR_NUM: valid_int}\n with mock.patch('os.environ', new=mock_env):\n self.assertEqual(self._call_function_under_test(), actual_val)\n\n def test_failure_unset(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n self.assertIsNone(self._call_function_under_test())\n\n def test_failure_bad_value(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n not_int = 'not-int'\n self.assertRaises(ValueError, int, not_int)\n mock_env = {env.CIRCLE_CI_PR_NUM: not_int}\n with mock.patch('os.environ', new=mock_env):\n self.assertIsNone(self._call_function_under_test())\n\n\nclass Test__repo_url(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper import circle_ci\n return circle_ci._repo_url()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n repo_url = 'https://github.com/foo/bar'\n mock_env = {env.CIRCLE_CI_REPO_URL: repo_url}\n with mock.patch('os.environ', new=mock_env):\n result = self._call_function_under_test()\n self.assertEqual(result, repo_url)\n\n def test_failure(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n self._call_function_under_test()\n\n\nclass Test__provider_slug(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(repo_url):\n from ci_diff_helper import circle_ci\n return circle_ci._provider_slug(repo_url)\n\n def test_github(self):\n from ci_diff_helper import circle_ci\n\n repo_url = 'https://github.com/hi/goodbye'\n provider, slug = self._call_function_under_test(repo_url)\n self.assertIs(provider, circle_ci.CircleCIRepoProvider.github)\n self.assertEqual(slug, 'hi/goodbye')\n\n def test_github_bad_prefix(self):\n with self.assertRaises(ValueError):\n self._call_function_under_test('http://github.com/org/repo')\n\n def test_bitbucket(self):\n from ci_diff_helper import circle_ci\n\n repo_url = 'https://bitbucket.org/fly/on-the-wall'\n provider, slug = self._call_function_under_test(repo_url)\n self.assertIs(provider, circle_ci.CircleCIRepoProvider.bitbucket)\n self.assertEqual(slug, 'fly/on-the-wall')\n\n def test_bitbucket_bad_prefix(self):\n with self.assertRaises(ValueError):\n self._call_function_under_test('http://bitbucket.org/user/proj')\n\n def test_bad_url(self):\n with self.assertRaises(ValueError):\n self._call_function_under_test('nope')\n\n\nclass TestCircleCIRepoProvider(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import circle_ci\n return circle_ci.CircleCIRepoProvider\n\n def _make_one(self, enum_val):\n klass = self._get_target_class()\n return klass(enum_val)\n\n def test_members(self):\n klass = self._get_target_class()\n self.assertEqual(\n set([enum_val.name for enum_val in klass]),\n set(['bitbucket', 'github']))\n\n def test_bitbucket(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('bitbucket')\n self.assertIs(provider_obj, klass.bitbucket)\n\n def test_github(self):\n klass = self._get_target_class()\n provider_obj = self._make_one('github')\n self.assertIs(provider_obj, klass.github)\n\n def test_invalid(self):\n with self.assertRaises(ValueError):\n 
self._make_one('mustard')\n\n\nclass TestCircleCI(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import circle_ci\n return circle_ci.CircleCI\n\n def _make_one(self):\n klass = self._get_target_class()\n return klass()\n\n def test_constructor(self):\n from ci_diff_helper import _utils\n\n klass = self._get_target_class()\n config = self._make_one()\n self.assertIsInstance(config, klass)\n self.assertIs(config._active, _utils.UNSET)\n self.assertIs(config._base, _utils.UNSET)\n self.assertIs(config._branch, _utils.UNSET)\n self.assertIs(config._is_merge, _utils.UNSET)\n self.assertIs(config._pr, _utils.UNSET)\n self.assertIs(config._pr_info_cached, _utils.UNSET)\n self.assertIs(config._provider, _utils.UNSET)\n self.assertIs(config._repo_url, _utils.UNSET)\n self.assertIs(config._slug, _utils.UNSET)\n self.assertIs(config._tag, _utils.UNSET)\n\n def test___repr__(self):\n import mock\n\n config = self._make_one()\n with mock.patch('os.environ', new={}):\n self.assertEqual(repr(config), '<CircleCI (active=False)>')\n\n def _pr_helper(self, pr_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _pr value set.\n self.assertIs(config._pr, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n travis_pr_patch = mock.patch(\n 'ci_diff_helper.circle_ci._circle_ci_pr', return_value=pr_val)\n with travis_pr_patch as mocked:\n result = config.pr\n self.assertIs(result, pr_val)\n mocked.assert_called_once_with()\n\n return config\n\n def test_pr_property(self):\n pr_val = 1337\n self._pr_helper(pr_val)\n\n def test_pr_property_cache(self):\n pr_val = 42043\n config = self._pr_helper(pr_val)\n # Test that the value is cached.\n self.assertIs(config._pr, pr_val)\n # Test that cached value is re-used.\n self.assertIs(config.pr, pr_val)\n\n def test_in_pr_property(self):\n config = self._make_one()\n # Patch with an actual PR.\n config._pr = 1337\n self.assertTrue(config.in_pr)\n\n def test_in_pr_property_fails(self):\n config = self._make_one()\n # Patch a missing PR.\n config._pr = None\n self.assertFalse(config.in_pr)\n\n def _repo_url_helper(self, repo_url_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _repo_url value set.\n self.assertIs(config._repo_url, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n repo_url_patch = mock.patch(\n 'ci_diff_helper.circle_ci._repo_url',\n return_value=repo_url_val)\n with repo_url_patch as mocked:\n result = config.repo_url\n self.assertIs(result, repo_url_val)\n mocked.assert_called_once_with()\n\n return config\n\n def test_repo_url_property(self):\n repo_url_val = 'reap-oh-no-you-are-elle'\n self._repo_url_helper(repo_url_val)\n\n def test_repo_url_property_cache(self):\n repo_url_val = 'read-poem-earl'\n config = self._repo_url_helper(repo_url_val)\n # Test that the value is cached.\n self.assertIs(config._repo_url, repo_url_val)\n # Test that cached value is re-used.\n self.assertIs(config.repo_url, repo_url_val)\n\n def _slug_provider_helper(self, provider_val, slug_val, slug_first=False):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n config._repo_url = mock.sentinel.repo_url\n # Make sure there is no _provider value set.\n self.assertIs(config._provider, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n provider_patch = mock.patch(\n 'ci_diff_helper.circle_ci._provider_slug',\n return_value=(provider_val, 
slug_val))\n with provider_patch as mocked:\n if slug_first:\n self.assertIs(config.slug, slug_val)\n self.assertIs(config.provider, provider_val)\n else:\n self.assertIs(config.provider, provider_val)\n self.assertIs(config.slug, slug_val)\n mocked.assert_called_once_with(mock.sentinel.repo_url)\n\n return config\n\n def test_provider_property(self):\n provider_val = 'pro-divide-uhr'\n self._slug_provider_helper(provider_val, None)\n\n def test_provider_property_cache(self):\n provider_val = 'pro-bono-vide'\n config = self._slug_provider_helper(provider_val, None)\n # Test that the value is cached.\n self.assertIs(config._provider, provider_val)\n # Test that cached value is re-used.\n self.assertIs(config.provider, provider_val)\n\n def test_slug_property(self):\n slug_val = 'slug-slugger-sluggest'\n self._slug_provider_helper(None, slug_val, slug_first=True)\n\n def test_slug_property_cache(self):\n slug_val = 'soup'\n config = self._slug_provider_helper(\n None, slug_val, slug_first=True)\n # Test that the value is cached.\n self.assertIs(config._slug, slug_val)\n # Test that cached value is re-used.\n self.assertIs(config.slug, slug_val)\n\n def test__pr_info_property_cache(self):\n import mock\n\n config = self._make_one()\n config._pr_info_cached = mock.sentinel.info\n\n self.assertIs(config._pr_info, mock.sentinel.info)\n\n def test__pr_info_property_non_pr(self):\n from ci_diff_helper import _utils\n\n config = self._make_one()\n\n # Fake that there is no PR.\n config._pr = None\n self.assertIsNone(config.pr)\n\n # Make sure the cached value isn't set.\n self.assertIs(config._pr_info_cached, _utils.UNSET)\n\n # Now compute the property value.\n self.assertEqual(config._pr_info, {})\n\n def test__pr_info_property_github_pr(self):\n import mock\n from ci_diff_helper import circle_ci\n from ci_diff_helper import environment_vars as env\n\n config = self._make_one()\n\n slug = 'arf/garf'\n repo_url = circle_ci._GITHUB_PREFIX + slug\n pr_id = 223311\n mock_env = {\n env.CIRCLE_CI_REPO_URL: repo_url,\n env.CIRCLE_CI_PR_NUM: str(pr_id),\n }\n with mock.patch('os.environ', new=mock_env):\n with mock.patch('ci_diff_helper._github.pr_info',\n return_value=mock.sentinel.info) as get_info:\n pr_info = config._pr_info\n self.assertIs(pr_info, mock.sentinel.info)\n get_info.assert_called_once_with(slug, pr_id)\n\n self.assertEqual(get_info.call_count, 1)\n # Make sure value is cached and doesn't call the helper again.\n self.assertIs(pr_info, mock.sentinel.info)\n self.assertEqual(get_info.call_count, 1)\n\n def test__pr_info_property_pr_not_github(self):\n import mock\n from ci_diff_helper import circle_ci\n from ci_diff_helper import environment_vars as env\n\n config = self._make_one()\n\n slug = 'bucket/chuck-it'\n repo_url = circle_ci._BITBUCKET_PREFIX + slug\n mock_env = {\n env.CIRCLE_CI_REPO_URL: repo_url,\n env.CIRCLE_CI_PR_NUM: '817',\n }\n with mock.patch('os.environ', new=mock_env):\n with mock.patch('ci_diff_helper._github.pr_info') as get_info:\n with self.assertRaises(NotImplementedError):\n getattr(config, '_pr_info')\n get_info.assert_not_called()\n\n def test_base_property_cache(self):\n import mock\n\n config = self._make_one()\n config._base = mock.sentinel.base\n\n self.assertIs(config.base, mock.sentinel.base)\n\n def test_base_property_non_pr(self):\n config = self._make_one()\n # Fake that we are outside a PR.\n config._pr = None\n\n with self.assertRaises(NotImplementedError):\n getattr(config, 'base')\n\n def test_base_property_success(self):\n config = 
self._make_one()\n # Fake that we are inside a PR.\n config._pr = 123\n base_sha = '23ff39e7f437d888cb1aa07b4646fc6376f4af35'\n payload = {'base': {'sha': base_sha}}\n config._pr_info_cached = payload\n\n self.assertEqual(config.base, base_sha)\n\n def test_base_property_pr_bad_payload(self):\n config = self._make_one()\n # Fake that we are inside a PR.\n config._pr = 678\n config._pr_info_cached = {}\n # Also fake the info that shows up in the exception.\n config._slug = 'foo/food'\n\n with self.assertRaises(KeyError):\n getattr(config, 'base')\n", "id": "12766839", "language": "Python", "matching_score": 5.919840335845947, "max_stars_count": 5, "path": "tests/test_circle_ci.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for dealing with Circle CI.\n\nThis module provides a custom configuration type\n:class:`CircleCI` for the `CircleCI`_ CI system.\n\n.. _CircleCI: https://circleci.com/\n\nThis module uses a selection of environment variables to detect\nthe state of Circle CI configuration. See\n:mod:`~ci_diff_helper.environment_vars` for more details.\n\n:class:`CircleCI` Configuration Type\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWhen running in CircleCI, you can automatically detect your\ncurrent environment and get the configuration object:\n\n.. testsetup:: auto-detect\n\n import os\n os.environ = {\n 'CIRCLECI': 'true',\n }\n\n.. doctest:: auto-detect\n\n >>> import ci_diff_helper\n >>> config = ci_diff_helper.get_config()\n >>> config\n <CircleCI (active=True)>\n\nTo use the :class:`CircleCI` configuration type directly:\n\n.. testsetup:: circle-ci-push\n\n import os\n os.environ = {\n 'CIRCLECI': 'true',\n 'CIRCLE_BRANCH': 'master',\n 'CIRCLE_TAG': '0.4.2',\n 'CIRCLE_REPOSITORY_URL': (\n 'https://github.com/organization/repository'),\n }\n import ci_diff_helper\n\n.. doctest:: circle-ci-push\n\n >>> config = ci_diff_helper.CircleCI()\n >>> config\n <CircleCI (active=True)>\n >>> config.branch\n 'master'\n >>> config.tag\n '0.4.2'\n >>> config.repo_url\n 'https://github.com/organization/repository'\n >>> config.provider\n <CircleCIRepoProvider.github: 'github'>\n >>> config.slug\n 'organization/repository'\n\nDuring a pull request build, we can determine information about\nthe current PR being built:\n\n.. testsetup:: circle-ci-pr\n\n import os\n os.environ = {\n 'CIRCLECI': 'true',\n 'CIRCLE_PR_NUMBER': '23',\n 'CIRCLE_BRANCH': 'pull/23',\n 'CIRCLE_REPOSITORY_URL': (\n 'https://github.com/organization/repository'),\n }\n import ci_diff_helper\n from ci_diff_helper import _github\n\n def mock_pr_info(slug, pr_id):\n assert slug == 'organization/repository'\n assert pr_id == 23\n payload = {\n 'base': {\n 'sha': '7450ebe1a2133442098faa07f3c2c08b612d75f5',\n },\n }\n return payload\n\n _github.pr_info = mock_pr_info\n\n.. 
doctest:: circle-ci-pr\n\n >>> config = ci_diff_helper.CircleCI()\n >>> config\n <CircleCI (active=True)>\n >>> config.in_pr\n True\n >>> config.pr\n 23\n >>> config.branch\n 'pull/23'\n >>> config.base\n '7450ebe1a2133442098faa07f3c2c08b612d75f5'\n\"\"\"\n\nimport os\n\nimport enum\n\nfrom ci_diff_helper import _config_base\nfrom ci_diff_helper import _github\nfrom ci_diff_helper import _utils\nfrom ci_diff_helper import environment_vars as env\n\n\n_REPO_URL_TEMPLATE = (\n 'CircleCI build does not have a repo URL set (via {})')\n_GITHUB_HOST = 'github.com'\n_GITHUB_PREFIX = 'https://{}/'.format(_GITHUB_HOST)\n_BITBUCKET_HOST = 'bitbucket.org'\n_BITBUCKET_PREFIX = 'https://{}/'.format(_BITBUCKET_HOST)\n\n\ndef _circle_ci_pr():\n \"\"\"Get the current CircleCI pull request (if any).\n\n Returns:\n Optional[int]: The current pull request ID.\n \"\"\"\n try:\n return int(os.getenv(env.CIRCLE_CI_PR_NUM, ''))\n except ValueError:\n return None\n\n\ndef _repo_url():\n \"\"\"Get the repository URL for the current build.\n\n Returns:\n str: The repository URL for the current build.\n\n Raises:\n OSError: If the ``CIRCLE_REPOSITORY_URL`` environment variable\n isn't set during a CircleCI build.\n \"\"\"\n try:\n return os.environ[env.CIRCLE_CI_REPO_URL]\n except KeyError as exc:\n msg = _REPO_URL_TEMPLATE.format(env.CIRCLE_CI_REPO_URL)\n raise OSError(exc, msg)\n\n\ndef _provider_slug(repo_url):\n \"\"\"Get the code hosting provider for the current CircleCI build.\n\n Args:\n repo_url (str): The URL of a code hosting repository.\n\n Returns:\n Tuple[CircleCIRepoProvider, str]: Pair of the code hosting provider\n for the current CircleCI build and the repository slug.\n\n Raises:\n ValueError: If ``repo_url`` contains the GitHub host but\n does not start with the corresponding expected prefix.\n ValueError: If ``repo_url`` contains the Bitbucket host but\n does not start with the corresponding expected prefix.\n ValueError: If ``repo_url`` doesn't match either the GitHub\n or Bitbucket hosts.\n \"\"\"\n if _GITHUB_HOST in repo_url:\n if repo_url.startswith(_GITHUB_PREFIX):\n _, slug = repo_url.split(_GITHUB_PREFIX, 1)\n return CircleCIRepoProvider.github, slug\n else:\n raise ValueError('Repository URL contained host',\n _GITHUB_HOST,\n 'but did not begin as expected',\n 'expected prefix', _GITHUB_PREFIX)\n elif _BITBUCKET_HOST in repo_url:\n if repo_url.startswith(_BITBUCKET_PREFIX):\n _, slug = repo_url.split(_BITBUCKET_PREFIX, 1)\n return CircleCIRepoProvider.bitbucket, slug\n else:\n raise ValueError('Repository URL contained host',\n _BITBUCKET_HOST,\n 'but did not begin as expected',\n 'expected prefix', _BITBUCKET_PREFIX)\n else:\n raise ValueError('Invalid repo URL', repo_url,\n 'Expected a URL for one of',\n [enum_val.name for enum_val in CircleCIRepoProvider])\n\n\n# pylint: disable=too-few-public-methods\nclass CircleCIRepoProvider(enum.Enum):\n \"\"\"Enum representing all possible CircleCI repo providers.\"\"\"\n github = 'github'\n bitbucket = 'bitbucket'\n# pylint: enable=too-few-public-methods\n\n\nclass CircleCI(_config_base.Config):\n \"\"\"Represent CircleCI state and cache return values.\"\"\"\n\n # Default instance attributes.\n _base = _utils.UNSET\n _pr = _utils.UNSET\n _pr_info_cached = _utils.UNSET\n _provider = _utils.UNSET\n _repo_url = _utils.UNSET\n _slug = _utils.UNSET\n # Class attributes.\n _active_env_var = env.IN_CIRCLE_CI\n _branch_env_var = env.CIRCLE_CI_BRANCH\n _tag_env_var = env.CIRCLE_CI_TAG\n\n @property\n def pr(self):\n \"\"\"int: The current CircleCI pull 
request (if any).\n\n If there is no active pull request, returns :data:`None`.\n \"\"\"\n if self._pr is _utils.UNSET:\n self._pr = _circle_ci_pr()\n return self._pr\n\n @property\n def in_pr(self):\n \"\"\"bool: Indicates if currently running in CircleCI pull request.\n\n This uses the ``CIRCLE_PR_NUMBER`` environment variable to check\n if currently in a pull request.\n \"\"\"\n return self.pr is not None\n\n @property\n def _pr_info(self):\n \"\"\"dict: The information for the current pull request.\n\n This information is retrieved from the GitHub API and cached.\n It is non-public, but a ``@property`` is used for the caching.\n\n .. warning::\n\n This property is only meant to be used in a pull request\n from a GitHub repository.\n \"\"\"\n if self._pr_info_cached is not _utils.UNSET:\n return self._pr_info_cached\n\n current_pr = self.pr\n if current_pr is None:\n self._pr_info_cached = {}\n elif self.provider is CircleCIRepoProvider.github:\n self._pr_info_cached = _github.pr_info(self.slug, current_pr)\n else:\n raise NotImplementedError(\n 'GitHub is only supported way to retrieve PR info')\n\n return self._pr_info_cached\n\n @property\n def repo_url(self):\n \"\"\"str: The URL of the current repository being built.\n\n For example: ``https://github.com/{organization}/{repository}`` or\n ``https://bitbucket.org/{user}/{repository}``.\n \"\"\"\n if self._repo_url is _utils.UNSET:\n self._repo_url = _repo_url()\n return self._repo_url\n\n @property\n def provider(self):\n \"\"\"str: The code hosting provider for the current CircleCI build.\"\"\"\n if self._provider is _utils.UNSET:\n # NOTE: One **could** check here that _slug isn't already set,\n # but that would be over-protective, since the only\n # way it could be set also sets _provider.\n self._provider, self._slug = _provider_slug(self.repo_url)\n return self._provider\n\n @property\n def slug(self):\n \"\"\"str: The current slug in the CircleCI build.\n\n Of the form ``{organization}/{repository}``.\n \"\"\"\n if self._slug is _utils.UNSET:\n # NOTE: One **could** check here that _provider isn't already set,\n # but that would be over-protective, since the only\n # way it could be set also sets _slug.\n self._provider, self._slug = _provider_slug(self.repo_url)\n return self._slug\n\n @property\n def base(self):\n \"\"\"str: The ``git`` object that current build is changed against.\n\n The ``git`` object can be any of a branch name, tag, a commit SHA\n or a special reference.\n\n .. 
warning::\n\n This property will currently only work in a build for a\n pull request from a GitHub repository.\n \"\"\"\n if self._base is not _utils.UNSET:\n return self._base\n\n if self.in_pr:\n pr_info = self._pr_info\n try:\n self._base = pr_info['base']['sha']\n except KeyError:\n raise KeyError(\n 'Missing key in the GitHub API payload',\n 'expected base->sha',\n pr_info, self.slug, self.pr)\n else:\n raise NotImplementedError(\n 'Diff base currently only supported in a PR from GitHub')\n\n return self._base\n", "id": "11363040", "language": "Python", "matching_score": 2.949265718460083, "max_stars_count": 5, "path": "ci_diff_helper/circle_ci.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test__in_ci(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(env_var):\n from ci_diff_helper._config_base import _in_ci\n return _in_ci(env_var)\n\n def _helper(self, env_var, value):\n import mock\n\n mock_env = {env_var: value}\n with mock.patch('os.environ', new=mock_env):\n return self._call_function_under_test(env_var)\n\n def test_success(self):\n env_var = 'MY_CI'\n self.assertTrue(self._helper(env_var, 'true'))\n\n def test_success_uppercase(self):\n env_var = 'MOI_SEE_OY'\n self.assertTrue(self._helper(env_var, 'True'))\n\n def test_failure_missing(self):\n import mock\n\n env_var = 'MY_CI'\n with mock.patch('os.environ', new={}):\n self.assertFalse(self._call_function_under_test(env_var))\n\n def test_failure_invalid(self):\n env_var = 'HI_BYE_CI'\n self.assertFalse(self._helper(env_var, 'Treeoooh'))\n\n\nclass Test__ci_branch(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(env_var):\n from ci_diff_helper._config_base import _ci_branch\n return _ci_branch(env_var)\n\n def test_success(self):\n import mock\n\n branch = 'this-very-branch'\n env_var = 'MY_CI'\n mock_env = {env_var: branch}\n with mock.patch('os.environ', new=mock_env):\n result = self._call_function_under_test(env_var)\n self.assertEqual(result, branch)\n\n def test_failure(self):\n import mock\n\n env_var = 'MY_CI'\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n self._call_function_under_test(env_var)\n\n\nclass TestConfig(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import _config_base\n return _config_base.Config\n\n def _make_one(self):\n klass = self._get_target_class()\n return klass()\n\n def test_constructor(self):\n from ci_diff_helper import _utils\n\n klass = self._get_target_class()\n config = self._make_one()\n self.assertIsInstance(config, klass)\n self.assertIs(config._active, _utils.UNSET)\n self.assertIs(config._branch, _utils.UNSET)\n self.assertIs(config._is_merge, _utils.UNSET)\n\n def _active_helper(self, env_var, active_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Fake the environment variable on the instance.\n config._active_env_var = env_var\n # Make sure there is no _active value set.\n 
self.assertIs(config._active, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n in_ci_patch = mock.patch(\n 'ci_diff_helper._config_base._in_ci', return_value=active_val)\n with in_ci_patch as mocked:\n result = config.active\n self.assertIs(result, active_val)\n mocked.assert_called_once_with(env_var)\n\n return mocked, config\n\n def test_active_property(self):\n active_val = object()\n env_var = 'MY_CI'\n self._active_helper(env_var, active_val)\n\n def test_active_property_cache(self):\n active_val = object()\n env_var = 'MY_CI'\n mocked, config = self._active_helper(env_var, active_val)\n # Make sure the mock was only used once on first access.\n self.assertEqual(mocked.call_count, 1)\n # Test that the value is cached.\n self.assertIs(config._active, active_val)\n # Test that cached value is re-used.\n self.assertIs(config.active, active_val)\n # Make sure the mock did not get called again on future access.\n self.assertEqual(mocked.call_count, 1)\n\n def _branch_helper(self, env_var, branch_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Fake the environment variable on the instance.\n config._branch_env_var = env_var\n # Make sure there is no _branch value set.\n self.assertIs(config._branch, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n ci_branch_patch = mock.patch(\n 'ci_diff_helper._config_base._ci_branch',\n return_value=branch_val)\n with ci_branch_patch as mocked:\n result = config.branch\n self.assertIs(result, branch_val)\n mocked.assert_called_once_with(env_var)\n\n return mocked, config\n\n def test_branch_property(self):\n branch_val = 'branch-on-a-tree-in-a-forest'\n env_var = 'MY_CI'\n self._branch_helper(env_var, branch_val)\n\n def test_branch_property_cache(self):\n branch_val = 'make-tomorrow-a-tree'\n env_var = 'MY_CI'\n mocked, config = self._branch_helper(env_var, branch_val)\n # Make sure the mock was only used once on first access.\n self.assertEqual(mocked.call_count, 1)\n # Test that the value is cached.\n self.assertIs(config._branch, branch_val)\n # Test that cached value is re-used.\n self.assertIs(config.branch, branch_val)\n # Make sure the mock did not get called again on future access.\n self.assertEqual(mocked.call_count, 1)\n\n def test_branch_property_error(self):\n import mock\n\n config = self._make_one()\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n getattr(config, 'branch')\n\n def _is_merge_helper(self, is_merge_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _is_merge value set.\n self.assertIs(config._is_merge, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n merge_commit_patch = mock.patch(\n 'ci_diff_helper.git_tools.merge_commit',\n return_value=is_merge_val)\n with merge_commit_patch as mocked:\n result = config.is_merge\n if is_merge_val:\n self.assertTrue(result)\n else:\n self.assertFalse(result)\n mocked.assert_called_once_with()\n\n return mocked, config\n\n def test_is_merge_property(self):\n self._is_merge_helper(True)\n\n def test_is_merge_property_cache(self):\n mocked, config = self._is_merge_helper(False)\n # Make sure the mock was only used once on first access.\n self.assertEqual(mocked.call_count, 1)\n # Test that the value is cached.\n self.assertFalse(config._is_merge)\n # Test that cached value is re-used.\n self.assertFalse(config.is_merge)\n # Make sure the mock did not get called again on future access.\n 
self.assertEqual(mocked.call_count, 1)\n\n def _tag_helper(self, env_var, tag_val='', expected=None):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Fake the environment variable on the instance.\n config._tag_env_var = env_var\n # Make sure there is no _tag value set.\n self.assertIs(config._tag, _utils.UNSET)\n\n # Patch the environment so we can control the value.\n environ_patch = mock.patch(\n 'os.environ', new={env_var: tag_val})\n with environ_patch:\n result = config.tag\n if expected is None:\n self.assertIsNone(result, expected)\n else:\n self.assertEqual(result, expected)\n\n return config\n\n def test_tag_property_unset(self):\n env_var = 'MY_CI'\n self._tag_helper(env_var)\n\n def test_tag_property_set(self):\n env_var = 'MY_CI'\n tag = '0.1.0'\n self._tag_helper(env_var, tag, tag)\n\n def test_tag_property_cache(self):\n env_var = 'MY_CI'\n tag = '0.0.144'\n config = self._tag_helper(env_var, tag, tag)\n # Test that the value is cached.\n self.assertEqual(config._tag, tag)\n # Test that cached value is re-used.\n self.assertEqual(config.tag, tag)\n\n def test___repr__(self):\n import mock\n\n config = self._make_one()\n with mock.patch('os.environ', new={}):\n self.assertEqual(repr(config), '<Config (active=False)>')\n", "id": "11549770", "language": "Python", "matching_score": 5.865265846252441, "max_stars_count": 5, "path": "tests/test__config_base.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\n\nclass Test__travis_pr(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.travis import _travis_pr\n return _travis_pr()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n valid_int = '1234'\n actual_val = 1234\n self.assertEqual(int(valid_int), actual_val)\n mock_env = {env.TRAVIS_PR: valid_int}\n with mock.patch('os.environ', new=mock_env):\n self.assertEqual(self._call_function_under_test(), actual_val)\n\n def test_failure_unset(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n self.assertIsNone(self._call_function_under_test())\n\n def test_failure_bad_value(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n not_int = 'not-int'\n self.assertRaises(ValueError, int, not_int)\n mock_env = {env.TRAVIS_PR: not_int}\n with mock.patch('os.environ', new=mock_env):\n self.assertIsNone(self._call_function_under_test())\n\n\nclass Test__travis_event_type(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.travis import _travis_event_type\n return _travis_event_type()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n from ci_diff_helper import travis\n\n event_env = 'push'\n mock_env = {env.TRAVIS_EVENT_TYPE: event_env}\n with mock.patch('os.environ', new=mock_env):\n result = self._call_function_under_test()\n self.assertIs(result, travis.TravisEventType.push)\n\n def test_failure(self):\n 
import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(ValueError):\n self._call_function_under_test()\n\n\nclass Test__get_commit_range(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.travis import _get_commit_range\n return _get_commit_range()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n from ci_diff_helper import travis\n\n start = 'abcd'\n finish = 'wxyz'\n commit_range = start + travis._RANGE_DELIMITER + finish\n mock_env = {env.TRAVIS_RANGE: commit_range}\n with mock.patch('os.environ', new=mock_env):\n result = self._call_function_under_test()\n self.assertEqual(result, (start, finish))\n\n def test_failure(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n self._call_function_under_test()\n\n\nclass Test__verify_merge_base(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(start, finish):\n from ci_diff_helper.travis import _verify_merge_base\n return _verify_merge_base(start, finish)\n\n def test_success(self):\n import mock\n\n start = 'abcd'\n finish = 'wxyz'\n output_mock = mock.patch('ci_diff_helper._utils.check_output',\n return_value=start)\n with output_mock as mocked:\n result = self._call_function_under_test(start, finish)\n self.assertIsNone(result)\n mocked.assert_called_once_with(\n 'git', 'merge-base', start, finish, ignore_err=True)\n\n def _failure_helper(self, start, merge_base):\n import mock\n\n finish = 'wxyz'\n output_mock = mock.patch('ci_diff_helper._utils.check_output',\n return_value=merge_base)\n with output_mock as mocked:\n with self.assertRaises(ValueError):\n self._call_function_under_test(start, finish)\n mocked.assert_called_once_with(\n 'git', 'merge-base', start, finish, ignore_err=True)\n\n def test_failure_sys_call_bad_base(self):\n start = 'abcd'\n merge_base = 'not-start'\n self.assertNotEqual(start, merge_base)\n self._failure_helper(start, merge_base)\n\n def test_failure_sys_call_error(self):\n start = 'abcd'\n # A \"merge_base=None\" indicates the system call failed.\n self._failure_helper(start, None)\n\n\nclass Test__get_merge_base_from_github(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(slug, start, finish):\n from ci_diff_helper.travis import _get_merge_base_from_github\n return _get_merge_base_from_github(slug, start, finish)\n\n def test_success(self):\n import mock\n\n sha = 'f8c2476b625f6a6f35a9e7f4d566c9b036722f11'\n payload = {\n 'merge_base_commit': {\n 'sha': sha,\n },\n }\n slug = 'a/b'\n start = '1234'\n finish = '6789'\n\n compare_patch = mock.patch(\n 'ci_diff_helper._github.commit_compare',\n return_value=payload)\n with compare_patch as mocked:\n result = self._call_function_under_test(slug, start, finish)\n self.assertEqual(result, sha)\n mocked.assert_called_once_with(slug, start, finish)\n\n def test_failure(self):\n import mock\n\n slug = 'a/b'\n start = '1234'\n finish = '6789'\n\n compare_patch = mock.patch(\n 'ci_diff_helper._github.commit_compare',\n return_value={})\n with compare_patch as mocked:\n with self.assertRaises(KeyError):\n self._call_function_under_test(slug, start, finish)\n mocked.assert_called_once_with(slug, start, finish)\n\n\nclass Test__push_build_base(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(slug):\n from ci_diff_helper.travis import _push_build_base\n return _push_build_base(slug)\n\n def test_unresolved_start_commit(self):\n import mock\n\n start = 
'abcd'\n finish = 'wxyz'\n slug = 'raindrops/roses'\n patch_range = mock.patch(\n 'ci_diff_helper.travis._get_commit_range',\n return_value=(start, finish))\n # Make sure ``start_full`` is empty, indicating that the\n # local ``git`` checkout doesn't have the commit.\n patch_output = mock.patch(\n 'ci_diff_helper._utils.check_output',\n return_value=None)\n # Make sure ``start_full`` is empty, indicating that the\n # local ``git`` checkout doesn't have the commit.\n sha = '058b526c33dea1e8fc7013b498593cd106300411'\n patch_from_github = mock.patch(\n 'ci_diff_helper.travis._get_merge_base_from_github',\n return_value=sha)\n\n with patch_range as mocked_range:\n with patch_output as mocked_output:\n with patch_from_github as mocked:\n result = self._call_function_under_test(slug)\n self.assertEqual(result, sha)\n mocked.called_once_with(slug, start, finish)\n mocked_output.assert_called_once_with(\n 'git', 'rev-parse', start, ignore_err=True)\n mocked_range.assert_called_once_with()\n\n def test_success(self):\n import mock\n\n start = 'abcd'\n start_full = 'abcd-zomg-more'\n finish = 'wxyz'\n patch_range = mock.patch(\n 'ci_diff_helper.travis._get_commit_range',\n return_value=(start, finish))\n # Just hide the verification / make it do nothing.\n patch_verify = mock.patch(\n 'ci_diff_helper.travis._verify_merge_base')\n # Make sure ``start_full`` is empty, indicating that the\n # local ``git`` checkout doesn't have the commit.\n patch_output = mock.patch(\n 'ci_diff_helper._utils.check_output',\n return_value=start_full)\n\n with patch_range as mocked_range:\n with patch_verify as mocked_verify:\n with patch_output as mocked:\n result = self._call_function_under_test(None)\n self.assertEqual(result, start_full)\n mocked.assert_called_once_with(\n 'git', 'rev-parse', start, ignore_err=True)\n mocked_verify.assert_called_once_with(start_full, finish)\n mocked_range.assert_called_once_with()\n\n\nclass Test__travis_slug(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.travis import _travis_slug\n return _travis_slug()\n\n def test_success(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n slug = 'foo/bar'\n mock_env = {env.TRAVIS_SLUG: slug}\n with mock.patch('os.environ', new=mock_env):\n result = self._call_function_under_test()\n self.assertEqual(result, slug)\n\n def test_failure(self):\n import mock\n\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n self._call_function_under_test()\n\n\nclass TestTravisEventType(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import travis\n return travis.TravisEventType\n\n def _make_one(self, enum_val):\n klass = self._get_target_class()\n return klass(enum_val)\n\n def test_members(self):\n klass = self._get_target_class()\n self.assertEqual(\n set([enum_val.name for enum_val in klass]),\n set(['api', 'cron', 'pull_request', 'push']))\n\n def test_api(self):\n klass = self._get_target_class()\n enum_obj = self._make_one('api')\n self.assertIs(enum_obj, klass.api)\n\n def test_cron(self):\n klass = self._get_target_class()\n enum_obj = self._make_one('cron')\n self.assertIs(enum_obj, klass.cron)\n\n def test_pull_request(self):\n klass = self._get_target_class()\n enum_obj = self._make_one('pull_request')\n self.assertIs(enum_obj, klass.pull_request)\n\n def test_push(self):\n klass = self._get_target_class()\n enum_obj = self._make_one('push')\n self.assertIs(enum_obj, klass.push)\n\n def test_invalid(self):\n 
with self.assertRaises(ValueError):\n self._make_one('ice-cubes')\n\n\nclass TestTravis(unittest.TestCase):\n\n @staticmethod\n def _get_target_class():\n from ci_diff_helper import travis\n return travis.Travis\n\n def _make_one(self):\n klass = self._get_target_class()\n return klass()\n\n def test_constructor(self):\n from ci_diff_helper import _utils\n\n klass = self._get_target_class()\n config = self._make_one()\n self.assertIsInstance(config, klass)\n self.assertIs(config._base, _utils.UNSET)\n self.assertIs(config._event_type, _utils.UNSET)\n self.assertIs(config._merged_pr, _utils.UNSET)\n self.assertIs(config._pr, _utils.UNSET)\n self.assertIs(config._slug, _utils.UNSET)\n\n def _pr_helper(self, pr_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _pr value set.\n self.assertIs(config._pr, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n travis_pr_patch = mock.patch(\n 'ci_diff_helper.travis._travis_pr', return_value=pr_val)\n with travis_pr_patch as mocked:\n result = config.pr\n self.assertIs(result, pr_val)\n mocked.assert_called_once_with()\n\n return config\n\n def test_pr_property(self):\n pr_val = 1337\n self._pr_helper(pr_val)\n\n def test_pr_property_cache(self):\n pr_val = 42\n config = self._pr_helper(pr_val)\n # Test that the value is cached.\n self.assertIs(config._pr, pr_val)\n # Test that cached value is re-used.\n self.assertIs(config.pr, pr_val)\n\n def test_base_property_in_pr(self):\n from ci_diff_helper import _utils\n from ci_diff_helper import travis\n\n config = self._make_one()\n # Make sure the Travis config thinks we are in a PR.\n config._event_type = travis.TravisEventType.pull_request\n self.assertTrue(config.in_pr)\n # Make sure the Travis config knows the current branch.\n branch = 'scary-tree-branch'\n config._branch = branch\n self.assertEqual(config.branch, branch)\n # Check that in the PR case, the base is a branch.\n self.assertIs(config._base, _utils.UNSET)\n self.assertEqual(config.base, branch)\n # Verify that caching works.\n self.assertEqual(config._base, branch)\n self.assertEqual(config.base, branch)\n\n def test_base_property_push(self):\n import mock\n from ci_diff_helper import _utils\n from ci_diff_helper import travis\n\n config = self._make_one()\n # Make sure the Travis config thinks we are in a push build.\n config._event_type = travis.TravisEventType.push\n self.assertFalse(config.in_pr)\n self.assertIs(config.event_type, travis.TravisEventType.push)\n # Make sure the Travis slug is set.\n slug = 'rainbows/puppies'\n config._slug = slug\n self.assertEqual(config.slug, slug)\n # Check that in the \"push\" case, the base gets set\n # from _push_build_base().\n base_val = '076879d777af62e621c9f72d2b5f6863e88689e9'\n push_base_patch = mock.patch(\n 'ci_diff_helper.travis._push_build_base',\n return_value=base_val)\n self.assertIs(config._base, _utils.UNSET)\n with push_base_patch as mocked:\n self.assertEqual(config.base, base_val)\n mocked.assert_called_once_with(slug)\n # Verify that caching works.\n self.assertEqual(config._base, base_val)\n self.assertEqual(config.base, base_val)\n\n def test_base_property_unsupported(self):\n from ci_diff_helper import travis\n\n config = self._make_one()\n # Make sure the Travis config thinks we are not in a PR.\n config._event_type = travis.TravisEventType.cron\n self.assertFalse(config.in_pr)\n # Verify the failure.\n with self.assertRaises(NotImplementedError):\n getattr(config, 'base')\n\n def 
_event_type_helper(self, event_type_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _event_type value set.\n self.assertIs(config._event_type, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n event_type_patch = mock.patch(\n 'ci_diff_helper.travis._travis_event_type',\n return_value=event_type_val)\n with event_type_patch as mocked:\n result = config.event_type\n self.assertIs(result, event_type_val)\n mocked.assert_called_once_with()\n\n return config\n\n def test_event_type_property(self):\n event_type_val = 'push'\n self._event_type_helper(event_type_val)\n\n def test_event_type_property_cache(self):\n event_type_val = 'cron'\n config = self._event_type_helper(event_type_val)\n # Test that the value is cached.\n self.assertIs(config._event_type, event_type_val)\n # Test that cached value is re-used.\n self.assertIs(config.event_type, event_type_val)\n\n def _slug_helper(self, slug_val):\n import mock\n from ci_diff_helper import _utils\n\n config = self._make_one()\n # Make sure there is no _slug value set.\n self.assertIs(config._slug, _utils.UNSET)\n\n # Patch the helper so we can control the value.\n slug_patch = mock.patch(\n 'ci_diff_helper.travis._travis_slug',\n return_value=slug_val)\n with slug_patch as mocked:\n result = config.slug\n self.assertIs(result, slug_val)\n mocked.assert_called_once_with()\n\n return config\n\n def test_slug_property(self):\n slug_val = 'slug-on-a-tree-in-a-forest'\n self._slug_helper(slug_val)\n\n def test_slug_property_cache(self):\n slug_val = 'slugging-along'\n config = self._slug_helper(slug_val)\n # Test that the value is cached.\n self.assertIs(config._slug, slug_val)\n # Test that cached value is re-used.\n self.assertIs(config.slug, slug_val)\n\n def test_slug_property_error(self):\n import mock\n\n config = self._make_one()\n with mock.patch('os.environ', new={}):\n with self.assertRaises(OSError):\n getattr(config, 'slug')\n\n def _merged_pr_helper(self, event_type, is_merge=False, pr_id=None):\n import mock\n\n config = self._make_one()\n # Stub out the event type.\n config._event_type = event_type\n\n patch_merge = mock.patch(\n 'ci_diff_helper.git_tools.merge_commit',\n return_value=is_merge)\n patch_subject = mock.patch(\n 'ci_diff_helper.git_tools.commit_subject',\n return_value='#{}'.format(pr_id))\n with patch_merge as mocked_merge:\n with patch_subject as mocked_subject:\n result = config.merged_pr\n\n return mocked_merge, mocked_subject, result\n\n def test_merged_pr_in_pr(self):\n from ci_diff_helper import travis\n\n event_type = travis.TravisEventType.pull_request\n mocked_merge, mocked_subject, result = self._merged_pr_helper(\n event_type)\n mocked_merge.assert_not_called()\n mocked_subject.assert_not_called()\n self.assertIsNone(result)\n\n def test_merged_pr_non_merge(self):\n from ci_diff_helper import travis\n\n event_type = travis.TravisEventType.push\n mocked_merge, mocked_subject, result = self._merged_pr_helper(\n event_type, is_merge=False)\n mocked_merge.assert_called_once_with()\n mocked_subject.assert_not_called()\n self.assertIsNone(result)\n\n def test_merged_pr_in_merge(self):\n from ci_diff_helper import travis\n\n event_type = travis.TravisEventType.push\n pr_id = 1355\n mocked_merge, mocked_subject, result = self._merged_pr_helper(\n event_type, is_merge=True, pr_id=pr_id)\n mocked_merge.assert_called_once_with()\n mocked_subject.assert_called_once_with()\n self.assertEqual(result, pr_id)\n\n def 
test_merged_pr_unsupported(self):\n from ci_diff_helper import travis\n\n event_type = travis.TravisEventType.cron\n # Verify the failure.\n with self.assertRaises(NotImplementedError):\n self._merged_pr_helper(event_type)\n\n def test_merged_pr_cache(self):\n import mock\n\n config = self._make_one()\n config._merged_pr = 4567\n\n patch_merge = mock.patch('ci_diff_helper.git_tools.merge_commit')\n patch_subject = mock.patch('ci_diff_helper.git_tools.commit_subject')\n with patch_merge as mocked_merge:\n with patch_subject as mocked_subject:\n result = config.merged_pr\n\n self.assertEqual(result, config._merged_pr)\n mocked_merge.assert_not_called()\n mocked_subject.assert_not_called()\n\n def test_tag_property(self):\n # NOTE: This method is only needed for test coverage. The defined\n # do-nothing tag property is there to modify the docstring\n # of the original.\n config = self._make_one()\n tag = '0.x.y'\n config._tag = tag\n self.assertEqual(config.tag, tag)\n\n def test___repr__(self):\n import mock\n from ci_diff_helper import environment_vars as env\n\n config = self._make_one()\n\n mock_env = {env.IN_TRAVIS: 'true'}\n with mock.patch('os.environ', new=mock_env):\n self.assertEqual(repr(config), '<Travis (active=True)>')\n\n def _repo_url_helper(self, slug_val):\n from ci_diff_helper import _utils\n from ci_diff_helper import travis\n\n config = self._make_one()\n # Make sure there is no _repo_url value set.\n self.assertIs(config._repo_url, _utils.UNSET)\n\n # Patch the slug on config so we can control the value.\n config._slug = slug_val\n result = config.repo_url\n self.assertEqual(result, travis._URL_TEMPLATE.format(slug_val))\n return config\n\n def test_repo_url_property(self):\n slug_val = 'slurg-slog'\n self._repo_url_helper(slug_val)\n\n def test_repo_url_property_cache(self):\n from ci_diff_helper import travis\n\n slug_val = 'slentriloquist'\n repo_url_val = travis._URL_TEMPLATE.format(slug_val)\n config = self._repo_url_helper(slug_val)\n # Test that the value is cached.\n cached_val = config._repo_url\n self.assertEqual(cached_val, repo_url_val)\n # Test that cached value is re-used.\n self.assertIs(config.repo_url, cached_val)\n", "id": "9101430", "language": "Python", "matching_score": 7.466874599456787, "max_stars_count": 5, "path": "tests/test_travis.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for dealing with Travis CI.\n\nThis module provides a custom configuration type\n:class:`Travis` for the `Travis`_ CI system.\n\n.. _Travis: https://travis-ci.com/\n\nSince Travis only works with GitHub, the commands in this module\nare GitHub and ``git`` centric.\n\nThis module uses a selection of environment variables to detect\nthe state of Travis configuration. See\n:mod:`~ci_diff_helper.environment_vars` for more details.\n\n:class:`Travis` Configuration Type\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWhen running in Travis, you can automatically detect your\ncurrent environment and get the configuration object:\n\n.. 
testsetup:: auto-detect\n\n import os\n os.environ = {\n 'TRAVIS': 'true',\n }\n\n.. doctest:: auto-detect\n\n >>> import ci_diff_helper\n >>> config = ci_diff_helper.get_config()\n >>> config\n <Travis (active=True)>\n\nTo use the :class:`Travis` configuration type directly:\n\n.. testsetup:: travis-pr\n\n import os\n os.environ = {\n 'TRAVIS': 'true',\n 'TRAVIS_EVENT_TYPE': 'pull_request',\n 'TRAVIS_BRANCH': 'master',\n 'TRAVIS_REPO_SLUG': 'organization/repository',\n 'TRAVIS_PULL_REQUEST': '1234',\n }\n import ci_diff_helper\n\n.. doctest:: travis-pr\n\n >>> config = ci_diff_helper.Travis()\n >>> config\n <Travis (active=True)>\n >>> config.active\n True\n >>> config.in_pr\n True\n >>> config.branch\n 'master'\n\nIn addition this configuration provides extra features for\ndetermining a diffbase.\n\n.. doctest:: travis-pr\n\n >>> config = ci_diff_helper.Travis()\n >>> config.event_type\n <TravisEventType.pull_request: 'pull_request'>\n >>> config.slug\n 'organization/repository'\n >>> config.repo_url\n 'https://github.com/organization/repository'\n >>> config.pr\n 1234\n >>> config.tag is None\n True\n >>> config.base\n 'master'\n\nNot only is this object valuable during a pull request build,\nit can also be used to find relevant information in a\n\"push\" build:\n\n.. testsetup:: travis-push\n\n import os\n os.environ = {\n 'TRAVIS_EVENT_TYPE': 'push',\n 'TRAVIS_REPO_SLUG': 'organization/repository',\n 'TRAVIS_TAG': '0.13.37',\n }\n import ci_diff_helper\n from ci_diff_helper import travis\n\n def mock_push_base(slug):\n assert slug == 'organization/repository'\n return '4ad7349dc7223ebc02175a16dc577a013044a538'\n\n travis._push_build_base = mock_push_base\n\n.. doctest:: travis-push\n\n >>> config = ci_diff_helper.Travis()\n >>> config.event_type\n <TravisEventType.push: 'push'>\n >>> config.pr is None\n True\n >>> config.tag\n '0.13.37'\n >>> config.base\n '4ad7349dc7223ebc02175a16dc577a013044a538'\n\nThough the :attr:`~Travis.base` property can be useful as a diffbase\nof a given commit, it may be inappropriate. In a \"push\" build,\n:attr:`~Travis.base` will be computed from the ``TRAVIS_COMMIT_RANGE``\nenvironment variable, and this value is not particularly reliable.\nInstead, :attr:`~Travis.merged_pr` provides a way to determine the\nPR that was merged:\n\n.. testsetup:: travis-push-merged-pr\n\n import ci_diff_helper\n config = ci_diff_helper.Travis()\n config._merged_pr = 1355\n config._is_merge = True\n\n.. 
doctest:: travis-push-merged-pr\n\n >>> config.is_merge\n True\n >>> config.merged_pr\n 1355\n\"\"\"\n\nimport os\n\nimport enum\n\nfrom ci_diff_helper import _github\nfrom ci_diff_helper import _config_base\nfrom ci_diff_helper import _utils\nfrom ci_diff_helper import environment_vars as env\nfrom ci_diff_helper import git_tools\n\n\n_RANGE_DELIMITER = '...'\n_SLUG_TEMPLATE = (\n 'Travis build does not have a repo slug set (via {})')\n_URL_TEMPLATE = 'https://github.com/{}'\n\n\ndef _travis_pr():\n \"\"\"Get the current Travis pull request (if any).\n\n Returns:\n Optional[int]: The current pull request ID.\n \"\"\"\n try:\n return int(os.getenv(env.TRAVIS_PR, ''))\n except ValueError:\n return None\n\n\ndef _travis_event_type():\n \"\"\"Get the event type of the current Travis build\n\n Returns:\n TravisEventType: The type of the current Travis build.\n\n Raises:\n ValueError: If the ``TRAVIS_EVENT_TYPE`` environment\n variable is not one of the expected values.\n \"\"\"\n event_env = os.getenv(env.TRAVIS_EVENT_TYPE, '')\n try:\n return TravisEventType(event_env)\n except ValueError:\n raise ValueError('Invalid event type', event_env,\n 'Expected one of',\n [enum_val.name for enum_val in TravisEventType])\n\n\ndef _get_commit_range():\n \"\"\"Get the Travis commit range from the environment.\n\n Uses the ``TRAVIS_COMMIT_RANGE`` environment variable and then\n makes sure it can be split into a start and finish commit.\n\n .. note::\n\n This will throw an :exc:`OSError` on the very first \"push\" build\n for a branch. This is because Travis leaves the value empty in\n builds triggered by the initial commit of a new branch.\n\n Returns:\n Tuple[str, str]: The ``start``, ``finish`` pair from the commit range.\n\n Raises:\n OSError: If the ``TRAVIS_COMMIT_RANGE`` does not contain\n '...' 
(which indicates a start and end commit).\n \"\"\"\n commit_range = os.getenv(env.TRAVIS_RANGE, '')\n try:\n start, finish = commit_range.split(_RANGE_DELIMITER)\n return start, finish\n except ValueError as exc:\n raise OSError(\n exc, 'Commit range in unexpected format', commit_range)\n\n\ndef _verify_merge_base(start, finish):\n \"\"\"Verifies that the merge base of a commit range **is** the start.\n\n Args:\n start (str): The start commit in a range.\n finish (str): The last commit in a range.\n\n Raises:\n ValueError: If the merge base is not the start commit.\n \"\"\"\n merge_base = _utils.check_output(\n 'git', 'merge-base', start, finish, ignore_err=True)\n if merge_base != start:\n raise ValueError(\n 'git merge base is not the start commit in range',\n merge_base, start, finish)\n\n\ndef _get_merge_base_from_github(slug, start, finish):\n \"\"\"Retrieves the merge base of two commits from the GitHub API.\n\n This is intended to be used in cases where one of the commits\n is no longer in the local checkout, but is still around on GitHub.\n\n Args:\n slug (str): The GitHub repo slug for the current build.\n Of the form ``{organization}/{repository}``.\n start (str): The start commit in a range.\n finish (str): The last commit in a range.\n\n Returns:\n str: The commit SHA of the merge base.\n\n Raises:\n KeyError: If the payload doesn't contain the nested key\n merge_base_commit->sha.\n \"\"\"\n payload = _github.commit_compare(slug, start, finish)\n try:\n return payload['merge_base_commit']['sha']\n except KeyError:\n raise KeyError(\n 'Missing key in the GitHub API payload',\n 'expected merge_base_commit->sha',\n payload, slug, start, finish)\n\n\ndef _push_build_base(slug):\n \"\"\"Get the diffbase for a Travis \"push\" build.\n\n Args:\n slug (str): The GitHub repo slug for the current build.\n Of the form ``{organization}/{repository}``.\n\n Returns:\n str: The commit SHA of the diff base.\n \"\"\"\n start, finish = _get_commit_range()\n # Resolve the start object name into a 40-char SHA1 hash.\n start_full = _utils.check_output('git', 'rev-parse', start,\n ignore_err=True)\n\n if start_full is None:\n # In this case, the start commit isn't in history so we\n # need to use the GitHub API.\n return _get_merge_base_from_github(slug, start, finish)\n else:\n # In this case, the start commit is in history so we\n # expect it to also be the merge base of the start and finish\n # commits.\n _verify_merge_base(start_full, finish)\n return start_full\n\n\ndef _travis_slug():\n \"\"\"Get the GitHub repo slug for the current build.\n\n Of the form ``{organization}/{repository}``.\n\n Returns:\n str: The slug for the current build.\n\n Raises:\n OSError: If the ``TRAVIS_REPO_SLUG`` environment variable\n isn't set during a Travis build.\n \"\"\"\n try:\n return os.environ[env.TRAVIS_SLUG]\n except KeyError as exc:\n msg = _SLUG_TEMPLATE.format(env.TRAVIS_SLUG)\n raise OSError(exc, msg)\n\n\n# pylint: disable=too-few-public-methods\nclass TravisEventType(enum.Enum):\n \"\"\"Enum representing all possible Travis event types.\"\"\"\n push = 'push'\n pull_request = 'pull_request'\n api = 'api'\n cron = 'cron'\n# pylint: enable=too-few-public-methods\n\n\nclass Travis(_config_base.Config):\n \"\"\"Represent Travis state and cache return values.\"\"\"\n\n # Default instance attributes.\n _base = _utils.UNSET\n _event_type = _utils.UNSET\n _merged_pr = _utils.UNSET\n _pr = _utils.UNSET\n _repo_url = _utils.UNSET\n _slug = _utils.UNSET\n # Class attributes.\n _active_env_var = env.IN_TRAVIS\n 
_branch_env_var = env.TRAVIS_BRANCH\n _tag_env_var = env.TRAVIS_TAG\n\n @property\n def base(self):\n \"\"\"str: The ``git`` object that the current build is changed against.\n\n The ``git`` object can be any of a branch name, tag, a commit SHA\n or a special reference.\n\n This can be used in combination with :func:`.get_changed_files` to\n determine files that need to be linted, tested or inspected in\n some other way:\n\n .. testsetup:: travis-base-with-changed\n\n import os\n import ci_diff_helper\n from ci_diff_helper import _utils\n\n os.environ = {\n 'TRAVIS': 'true',\n 'TRAVIS_EVENT_TYPE': 'pull_request',\n 'TRAVIS_BRANCH': 'master',\n }\n config = ci_diff_helper.Travis()\n\n blob_name1 = 'HEAD'\n blob_name2 = 'master'\n calls = [\n ('git', 'diff', '--name-only', blob_name1, blob_name2),\n ]\n files = (\n '/path/to/your/git_checkout/project/_supporting.py')\n results = [\n files,\n ]\n\n def mock_check(*args):\n assert args == calls.pop(0)\n return results.pop(0)\n\n _utils.check_output = mock_check\n\n .. doctest:: travis-base-with-changed\n :options: +NORMALIZE_WHITESPACE\n\n >>> config\n <Travis (active=True)>\n >>> config.base\n 'master'\n >>> ci_diff_helper.get_changed_files('HEAD', config.base)\n ['/path/to/your/git_checkout/project/_supporting.py']\n\n .. note::\n\n This will throw an :exc:`OSError` on the very first \"push\" build\n for a branch. This is because Travis leaves the value empty in\n builds triggered by the initial commit of a new branch.\n\n .. warning::\n\n This property is only meant to be used in a \"pull request\" or\n \"push\" build.\n \"\"\"\n if self._base is _utils.UNSET:\n if self.in_pr:\n self._base = self.branch\n elif self.event_type is TravisEventType.push:\n self._base = _push_build_base(self.slug)\n else:\n raise NotImplementedError\n return self._base\n\n @property\n def event_type(self):\n \"\"\"TravisEventType: The type of the current Travis build.\"\"\"\n if self._event_type is _utils.UNSET:\n self._event_type = _travis_event_type()\n return self._event_type\n\n @property\n def in_pr(self):\n \"\"\"bool: Indicates if currently running in a Travis pull request.\n\n This uses the ``TRAVIS_EVENT_TYPE`` environment variable to check\n if currently in a pull request. Though it doesn't use the\n ``TRAVIS_PULL_REQUEST`` environment variable, checking that the\n value is set to an integer would be a perfectly valid approach.\n \"\"\"\n return self.event_type is TravisEventType.pull_request\n\n @property\n def merged_pr(self):\n \"\"\"int: The pull request corresponding to a merge commit at HEAD.\n\n If not currently in a push build, returns :data:`None`. If\n the HEAD commit is not a merge commit, returns :data:`None`.\n\n .. note::\n\n This only uses the ``git`` checkout to determine the pull\n request ID. A more comprehensive check would involve\n verifying the ID by using the GitHub API.\n\n .. 
warning::\n\n This property is only meant to be used in a \"pull request\" or\n \"push\" build.\n \"\"\"\n if self._merged_pr is not _utils.UNSET:\n return self._merged_pr\n\n if self.in_pr:\n self._merged_pr = None\n elif self.event_type is TravisEventType.push:\n if git_tools.merge_commit():\n merge_subject = git_tools.commit_subject()\n self._merged_pr = _utils.pr_from_commit(merge_subject)\n else:\n self._merged_pr = None\n else:\n raise NotImplementedError\n return self._merged_pr\n\n @property\n def pr(self):\n \"\"\"int: The current Travis pull request (if any).\n\n If there is no active pull request, returns :data:`None`.\n \"\"\"\n if self._pr is _utils.UNSET:\n self._pr = _travis_pr()\n return self._pr\n\n @property\n def slug(self):\n \"\"\"str: The current slug in the Travis build.\n\n Of the form ``{organization}/{repository}``.\n \"\"\"\n if self._slug is _utils.UNSET:\n self._slug = _travis_slug()\n return self._slug\n\n @property\n def repo_url(self):\n \"\"\"str: The URL of the current repository being built.\n\n Of the form ``https://github.com/{organization}/{repository}``.\n \"\"\"\n if self._repo_url is _utils.UNSET:\n self._repo_url = _URL_TEMPLATE.format(self.slug)\n return self._repo_url\n\n @property\n def tag(self):\n \"\"\"str: The ``git`` tag of the current Travis build.\n\n .. note::\n\n We only expect the ``TRAVIS_TAG`` environment variable\n to be set during a tag \"push\" build, but we don't verify\n that we are in a push build before checking for the tag.\n \"\"\"\n return super(Travis, self).tag\n", "id": "4795395", "language": "Python", "matching_score": 5.741640090942383, "max_stars_count": 5, "path": "ci_diff_helper/travis.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Diff Helper for Continuous Integration (CI) Services.\n\nFor an open source project, running unit tests, system tests, torture tests,\nfuzz tests, integration tests, code quality checks, etc. can quickly become\na large task.\n\nIn order to limit the amount of time and resources that these jobs require,\nthis tool provides a way to determine which files have changed and provides\na Python API for these changes. In addition, this library provides the\ncorresponding commit SHA (or other artifact) that is used as the diffbase.\n\nThe library supports (planned)\n\n* Continuous Integration Services\n\n * `Travis CI`_\n * `AppVeyor`_\n * `CircleCI`_\n\n* Version Control Systems\n\n * `git`_\n\n* Project Hosting Sites\n\n * `GitHub`_\n\n.. _Travis CI: https://travis-ci.com/\n.. _AppVeyor: https://www.appveyor.com/\n.. _CircleCI: https://circleci.com/\n.. _git: https://git-scm.com/\n.. _GitHub: https://github.com/\n\n.. note::\n\n When configuring your CI environment, it may be useful to set\n the ``GITHUB_OAUTH_TOKEN`` environment variable\n (:data:`~.environment_vars.GH_TOKEN`). By authenticating in\n GitHub API requests, `rate limiting`_ can be avoided. Unauthenticated\n requests will be subject to rate limiting across the entire\n CI system.\n\n.. 
_rate limiting: https://developer.github.com/v3/#rate-limiting\n\nTo use this in your project, first install:\n\n.. code-block:: bash\n\n $ pip install --upgrade ci-diff-helper\n\nOnce you've done that, you can automatically detect your\ncurrent environment and get a configuration object with\ninformation about your environment:\n\n.. testsetup:: auto-detect\n\n import os\n os.environ = {\n 'CIRCLECI': 'true',\n }\n\n.. doctest:: auto-detect\n\n >>> import ci_diff_helper\n >>> config = ci_diff_helper.get_config()\n >>> config\n <CircleCI (active=True)>\n\nCommon Configuration Properties\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nLong-lived configuration objects are provided as an interface for\nCI system information. These config objects cache the returned values\nfor each property and use them to compute other useful values.\n\nEach such configuration type (e.g. :class:`~.appveyor.AppVeyor`,\n:class:`~.circle_ci.CircleCI`, :class:`~.travis.Travis`) has a\ncommon set of properties.\n\n.. testsetup:: shared\n\n import os\n import ci_diff_helper\n os.environ = {\n 'CIRCLECI': 'true',\n 'CIRCLE_BRANCH': 'pull/808',\n }\n config = ci_diff_helper.CircleCI()\n config._is_merge = False\n\n.. doctest:: shared\n\n >>> config\n <CircleCI (active=True)>\n >>> config.active\n True\n >>> config.branch\n 'pull/808'\n >>> config.tag is None\n True\n\nAll configuration types can also be used to detect if a merge\ncommit is currently being built:\n\n.. doctest:: shared\n\n >>> config.is_merge\n False\n\n``git`` tools\n~~~~~~~~~~~~~\n\nThe helpers :func:`~git_tools.git_root`,\n:func:`~git_tools.get_checked_in_files` and\n:func:`~git_tools.get_changed_files` are provided as\ntools for a ``git``-based project.\n\nThe most relevant of these for finding diffs is\n:func:`~git_tools.get_changed_files`. For example, to find\nchanged files between a current checkout and an upstream\nbranch:\n\n.. testsetup:: git-changed\n\n import ci_diff_helper\n from ci_diff_helper import _utils\n\n blob_name1 = 'HEAD'\n blob_name2 = 'upstream/master'\n calls = [\n ('git', 'diff', '--name-only', blob_name1, blob_name2),\n ]\n files = (\n '/path/to/your/git_checkout/project/_supporting.py\\\\n'\n '/path/to/your/git_checkout/README.md')\n results = [\n files,\n ]\n\n def mock_check(*args):\n assert args == calls.pop(0)\n return results.pop(0)\n\n _utils.check_output = mock_check\n\n.. doctest:: git-changed\n :options: +NORMALIZE_WHITESPACE\n\n >>> ci_diff_helper.get_changed_files('HEAD', 'upstream/master')\n ['/path/to/your/git_checkout/project/_supporting.py',\n '/path/to/your/git_checkout/README.md']\n\nIn addition, being able to get the\nroot of the current ``git`` checkout may be needed to collect\nfiles, execute scripts, etc. Getting all checked in files can\nbe useful for things like test collection, file linting, etc.\n\n.. testsetup:: git\n\n import ci_diff_helper\n from ci_diff_helper import _utils\n\n root_dir = '/path/to/your/git_checkout'\n calls = [\n ('git', 'rev-parse', '--show-toplevel'),\n ('git', 'rev-parse', '--show-toplevel'),\n ('git', 'ls-files', root_dir),\n ]\n files = (\n '/path/to/your/git_checkout/setup.py\\\\n'\n '/path/to/your/git_checkout/project/__init__.py\\\\n'\n '/path/to/your/git_checkout/project/feature.py')\n results = [\n root_dir,\n root_dir,\n files,\n ]\n\n def mock_check(*args):\n assert args == calls.pop(0)\n return results.pop(0)\n\n _utils.check_output = mock_check\n\n.. 
doctest:: git\n :options: +NORMALIZE_WHITESPACE\n\n >>> ci_diff_helper.git_root()\n '/path/to/your/git_checkout'\n >>> ci_diff_helper.get_checked_in_files()\n ['/path/to/your/git_checkout/setup.py',\n '/path/to/your/git_checkout/project/__init__.py',\n '/path/to/your/git_checkout/project/feature.py']\n\"\"\"\n\nfrom ci_diff_helper.appveyor import AppVeyor\nfrom ci_diff_helper.circle_ci import CircleCI\nfrom ci_diff_helper.git_tools import get_changed_files\nfrom ci_diff_helper.git_tools import get_checked_in_files\nfrom ci_diff_helper.git_tools import git_root\nfrom ci_diff_helper.travis import Travis\n\n\n__all__ = [\n 'AppVeyor',\n 'CircleCI',\n 'get_changed_files',\n 'get_checked_in_files',\n 'get_config',\n 'git_root',\n 'Travis',\n]\n\n\ndef get_config():\n \"\"\"Get configuration for the current environment.\n\n Returns:\n Union[~appveyor.AppVeyor, ~circle_ci.CircleCI, ~travis.Travis]: A\n configuration class for the current environment.\n\n Raises:\n OSError: If no (unique) environment is active.\n \"\"\"\n choices = [AppVeyor(), CircleCI(), Travis()]\n current = []\n for choice in choices:\n if choice.active:\n current.append(choice)\n\n if len(current) != 1:\n raise OSError(\n None, 'Could not find unique environment. Found:',\n current)\n return current[0]\n", "id": "8610864", "language": "Python", "matching_score": 3.9626855850219727, "max_stars_count": 5, "path": "ci_diff_helper/__init__.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Comprehensive list of environment variables used in ci-diff-helper.\n\nThese environment variables are core to this library. They are used\nto detect the current environment.\n\nFor more details, see the `Travis env docs`_, `AppVeyor env docs`_\nand `_CircleCI env docs`.\n\n.. _Travis env docs: https://docs.travis-ci.com/user/\\\n environment-variables#Default-Environment-Variables\n.. _AppVeyor env docs: https://www.appveyor.com/docs/environment-variables/\n.. _CircleCI env docs: https://circleci.com/docs/environment-variables/\n\"\"\"\n\nIN_TRAVIS = 'TRAVIS'\n\"\"\"Indicates if running in Travis.\"\"\"\n\nTRAVIS_PR = 'TRAVIS_PULL_REQUEST'\n\"\"\"Indicates which Travis pull request we are in.\n\nIs an integer when in a pull request or \"false\" when not.\n\"\"\"\n\nTRAVIS_BRANCH = 'TRAVIS_BRANCH'\n\"\"\"Indicates the active Travis branch.\n\nIn a \"push\" build, this is the branch that was pushed while\nin a \"pull request\" build it is the branch that a pull\nrequest is against.\n\"\"\"\n\nTRAVIS_EVENT_TYPE = 'TRAVIS_EVENT_TYPE'\n\"\"\"Indicates the type of build that is occurring.\"\"\"\n\nTRAVIS_RANGE = 'TRAVIS_COMMIT_RANGE'\n\"\"\"The range of commits changed in the current build.\n\nThis is not particularly useful in a PR build.\n\n.. 
note::\n\n This is empty for builds triggered by the initial commit of\n a new branch.\n\"\"\"\n\nTRAVIS_SLUG = 'TRAVIS_REPO_SLUG'\n\"\"\"The GitHub repository slug for the current Travis build.\n\nA slug is of the form ``{organization}/{repository}``.\n\"\"\"\n\nTRAVIS_TAG = 'TRAVIS_TAG'\n\"\"\"The tag of the current Travis build.\n\nWe only expect the ``TRAVIS_TAG`` environment variable to be set\nduring a tag \"push\" build, but it can be set as the empty string\nin non-\"push\" builds.\n\"\"\"\n\nGH_TOKEN = 'GITHUB_OAUTH_TOKEN'\n\"\"\"GitHub OAuth 2.0 token.\n\nThis environment variable must be used to authenticate to\nthe GitHub API. Making unauthenticated requests on a Continuous\nIntegration server will typically be `rate limited`_.\n\n.. _rate limited: https://developer.github.com/v3/#rate-limiting\n\"\"\"\n\nIN_APPVEYOR = 'APPVEYOR'\n\"\"\"Indicates if running in AppVeyor.\"\"\"\n\nAPPVEYOR_REPO = 'APPVEYOR_REPO_PROVIDER'\n\"\"\"The code hosting provider for the repository being tested in AppVeyor.\"\"\"\n\nAPPVEYOR_BRANCH = 'APPVEYOR_REPO_BRANCH'\n\"\"\"Indicates the active AppVeyor branch.\n\nIn a \"pull request\" build it is the **base** branch the PR is\nmerging into, otherwise it is the branch being built.\n\"\"\"\n\nAPPVEYOR_TAG = 'APPVEYOR_REPO_TAG_NAME'\n\"\"\"The tag of the current AppVeyor build.\n\nThis will only be valid when ``APPVEYOR_REPO_TAG`` is ``true``, i.e. when the\nbuild was started by a pushed tag.\n\"\"\"\n\nIN_CIRCLE_CI = 'CIRCLECI'\n\"\"\"Indicates if running in CircleCI.\"\"\"\n\nCIRCLE_CI_BRANCH = 'CIRCLE_BRANCH'\n\"\"\"Indicates the active ``git`` branch being tested on CircleCI.\"\"\"\n\nCIRCLE_CI_TAG = 'CIRCLE_TAG'\n\"\"\"The name of the ``git`` tag being tested.\n\nOnly set if the build is running for a tag.\n\"\"\"\n\nCIRCLE_CI_PR = 'CI_PULL_REQUEST'\n\"\"\"Pull request containing the current change set.\n\nIf the current build is part of only one pull request, the URL of that\nPR will be populated here. 
If there was more than one pull request, this\nfield will contain one of the pull request URLs (picked randomly).\n\"\"\"\n\nCIRCLE_CI_PRS = 'CI_PULL_REQUESTS'\n\"\"\"Comma-separated list of pull requests the current build is a part of.\"\"\"\n\nCIRCLE_CI_REPO_URL = 'CIRCLE_REPOSITORY_URL'\n\"\"\"A link to the homepage for the current repository.\"\"\"\n\nCIRCLE_CI_PR_NUM = 'CIRCLE_PR_NUMBER'\n\"\"\"The ID of the PR that started the current build.\n\nWe only expect this environment variable to be set during a\nbuild that is a part of a pull request from a fork.\n\"\"\"\n\nCIRCLE_CI_PR_REPO = 'CIRCLE_PR_REPONAME'\n\"\"\"The name of the forked repository that started the current PR build.\n\nWe only expect this environment variable to be set during a\nbuild that is a part of a pull request from a fork.\n\"\"\"\n\nCIRCLE_CI_PR_OWNER = 'CIRCLE_PR_USERNAME'\n\"\"\"The owner of the forked repository that started the current PR build.\n\nWe only expect this environment variable to be set during a\nbuild that is a part of a pull request from a fork.\n\"\"\"\n", "id": "2559288", "language": "Python", "matching_score": 3.661564826965332, "max_stars_count": 5, "path": "ci_diff_helper/environment_vars.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for dealing with AppVeyor CI.\n\nThis module provides a custom configuration type\n:class:`AppVeyor` for the `AppVeyor`_ CI system.\n\n.. _AppVeyor: https://www.appveyor.com/\n\nThis module uses a selection of environment variables to detect\nthe state of AppVeyor configuration. See\n:mod:`~ci_diff_helper.environment_vars` for more details.\n\n:class:`AppVeyor` Configuration Type\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWhen running in AppVeyor, you can automatically detect your\ncurrent environment and get the configuration object:\n\n.. testsetup:: auto-detect\n\n import os\n os.environ = {\n 'APPVEYOR': 'True',\n }\n\n.. doctest:: auto-detect\n\n >>> import ci_diff_helper\n >>> config = ci_diff_helper.get_config()\n >>> config\n <AppVeyor (active=True)>\n\nTo use the :class:`AppVeyor` configuration type directly:\n\n.. testsetup:: appveyor-pr\n\n import os\n os.environ = {\n 'APPVEYOR': 'True',\n 'APPVEYOR_REPO_BRANCH': 'master',\n 'APPVEYOR_REPO_PROVIDER': 'gitHub',\n }\n import ci_diff_helper\n\n.. 
doctest:: appveyor-pr\n\n >>> config = ci_diff_helper.AppVeyor()\n >>> config\n <AppVeyor (active=True)>\n >>> config.branch\n 'master'\n >>> config.provider\n <AppVeyorRepoProvider.github: 'github'>\n\"\"\"\n\nimport os\n\nimport enum\n\nfrom ci_diff_helper import _config_base\nfrom ci_diff_helper import _utils\nfrom ci_diff_helper import environment_vars as env\n\n\ndef _appveyor_provider():\n \"\"\"Get the code hosting provider for the current AppVeyor build.\n\n Returns:\n AppVeyorRepoProvider: The code hosting provider for the\n current AppVeyor build.\n\n Raises:\n ValueError: If the ``APPVEYOR_REPO_PROVIDER`` environment\n variable is not one of the (case-insensitive)\n expected values.\n \"\"\"\n repo_provider = os.getenv(env.APPVEYOR_REPO, '')\n try:\n return AppVeyorRepoProvider(repo_provider.lower())\n except ValueError:\n raise ValueError('Invalid repo provider', repo_provider,\n 'Expected one of (case-insensitive)',\n [enum_val.name for enum_val in AppVeyorRepoProvider])\n\n\n# pylint: disable=too-few-public-methods\nclass AppVeyorRepoProvider(enum.Enum):\n \"\"\"Enum representing all possible AppVeyor repo providers.\"\"\"\n github = 'github'\n bitbucket = 'bitbucket'\n kiln = 'kiln'\n vso = 'vso'\n gitlab = 'gitlab'\n# pylint: enable=too-few-public-methods\n\n\nclass AppVeyor(_config_base.Config):\n \"\"\"Represent AppVeyor state and cache return values.\"\"\"\n\n # Default instance attributes.\n _provider = _utils.UNSET\n # Class attributes.\n _active_env_var = env.IN_APPVEYOR\n _branch_env_var = env.APPVEYOR_BRANCH\n _tag_env_var = env.APPVEYOR_TAG\n\n @property\n def provider(self):\n \"\"\"str: The code hosting provider for the current AppVeyor build.\"\"\"\n if self._provider is _utils.UNSET:\n self._provider = _appveyor_provider()\n return self._provider\n\n @property\n def tag(self):\n \"\"\"str: The ``git`` tag of the current AppVeyor build.\n\n .. note::\n\n We only expect the ``APPVEYOR_REPO_TAG_NAME`` environment variable\n to be set when ``APPVEYOR_REPO_TAG=true`` indicates the build was\n started by a pushed tag. 
However, we don't verify that we are in\n a build started by a tag before checking for the tag.\n \"\"\"\n return super(AppVeyor, self).tag\n", "id": "12152026", "language": "Python", "matching_score": 3.5290915966033936, "max_stars_count": 5, "path": "ci_diff_helper/appveyor.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base for configuration classes and associated helpers.\"\"\"\n\nimport os\n\nfrom ci_diff_helper import _utils\nfrom ci_diff_helper import git_tools\n\n\n_BRANCH_ERR_TEMPLATE = (\n 'Build does not have an associated branch set (via {}).')\n\n\ndef _in_ci(env_var):\n \"\"\"Detect if we are running in the target CI system.\n\n Assumes the only valid environment variable value is ``true`` (case\n insensitive).\n\n Args:\n env_var (str): The environment variable which holds the status.\n\n Returns:\n bool: Flag indicating if we are running in the target CI system.\n \"\"\"\n return os.getenv(env_var, '').lower() == 'true'\n\n\ndef _ci_branch(env_var):\n \"\"\"Get the current branch of CI build.\n\n Args:\n env_var (str): The environment variable which holds the branch.\n\n Returns:\n str: The name of the branch the current build is for / associated\n with. (May indicate the active branch or the base branch of\n a pull request.)\n\n Raises:\n OSError: If the environment variable isn't set during the build.\n \"\"\"\n try:\n return os.environ[env_var]\n except KeyError as exc:\n msg = _BRANCH_ERR_TEMPLATE.format(env_var)\n raise OSError(exc, msg)\n\n\nclass Config(object):\n \"\"\"Base class for caching CI configuration objects.\"\"\"\n\n # Default instance attributes.\n _active = _utils.UNSET\n _branch = _utils.UNSET\n _is_merge = _utils.UNSET\n _tag = _utils.UNSET\n # Class attributes.\n _active_env_var = None\n _branch_env_var = None\n _tag_env_var = None\n\n @property\n def active(self):\n \"\"\"bool: Indicates if currently running in the target CI system.\"\"\"\n if self._active is _utils.UNSET:\n self._active = _in_ci(self._active_env_var)\n return self._active\n\n @property\n def branch(self):\n \"\"\"bool: Indicates the current branch in the target CI system.\n\n This may indicate the active branch or the base branch of a\n pull request.\n \"\"\"\n if self._branch is _utils.UNSET:\n self._branch = _ci_branch(self._branch_env_var)\n return self._branch\n\n @property\n def is_merge(self):\n \"\"\"bool: Indicates if the HEAD commit is a merge commit.\"\"\"\n if self._is_merge is _utils.UNSET:\n self._is_merge = git_tools.merge_commit()\n return self._is_merge\n\n @property\n def tag(self):\n \"\"\"str: The ``git`` tag of the current CI build.\"\"\"\n if self._tag is _utils.UNSET:\n tag_val = os.getenv(self._tag_env_var, '')\n # NOTE: On non-tag builds in some environments (e.g. 
Travis)\n # the tag environment variable is still populated, but empty.\n if tag_val == '':\n self._tag = None\n else:\n self._tag = tag_val\n return self._tag\n\n def __repr__(self):\n \"\"\"Representation of current configuration.\n\n Returns:\n str: Object representation.\n \"\"\"\n return '<{} (active={})>'.format(\n self.__class__.__name__, self.active)\n", "id": "8089025", "language": "Python", "matching_score": 1.8072267770767212, "max_stars_count": 5, "path": "ci_diff_helper/_config_base.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\n\nfrom tests import utils\n\n\nclass Test_git_root(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.git_tools import git_root\n return git_root()\n\n def test_sys_call(self):\n import mock\n\n with mock.patch('ci_diff_helper._utils.check_output') as mocked:\n result = self._call_function_under_test()\n self.assertIs(result, mocked.return_value)\n mocked.assert_called_once_with(\n 'git', 'rev-parse', '--show-toplevel')\n\n @unittest.skipUnless(utils.HAS_GIT, 'git not installed')\n def test_actual_call(self):\n result = self._call_function_under_test()\n result = os.path.abspath(result) # Normalize path for Windows.\n tests_dir = os.path.dirname(__file__)\n root_dir = os.path.abspath(os.path.join(tests_dir, '..'))\n self.assertEqual(result, root_dir)\n\n\nclass Test_get_checked_in_files(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test():\n from ci_diff_helper.git_tools import get_checked_in_files\n return get_checked_in_files()\n\n @staticmethod\n def _do_nothing(value):\n return value\n\n def test_it(self):\n import mock\n\n filenames = [\n 'a.py',\n 'shell-not-py.sh',\n os.path.join('b', 'c.py'),\n 'Makefile',\n os.path.join('d', 'e', 'f.py'),\n ]\n cmd_output = '\\n'.join(filenames)\n mock_output = mock.patch('ci_diff_helper._utils.check_output',\n return_value=cmd_output)\n\n git_root = os.path.join('totally', 'on', 'your', 'filesystem')\n mock_root = mock.patch('ci_diff_helper.git_tools.git_root',\n return_value=git_root)\n\n mock_abspath = mock.patch('os.path.abspath', new=self._do_nothing)\n\n with mock_abspath:\n with mock_root:\n with mock_output as mocked:\n result = self._call_function_under_test()\n mocked.assert_called_once_with(\n 'git', 'ls-files', git_root)\n self.assertEqual(result, filenames)\n\n @staticmethod\n def _all_files(root_dir):\n result = set()\n for dirname, _, filenames in os.walk(root_dir):\n for filename in filenames:\n result.add(os.path.join(dirname, filename))\n return result\n\n @unittest.skipUnless(utils.HAS_GIT, 'git not installed')\n def test_actual_call(self):\n result = self._call_function_under_test()\n tests_dir = os.path.dirname(__file__)\n root_dir = os.path.abspath(os.path.join(tests_dir, '..'))\n self.assertLessEqual(set(result), self._all_files(root_dir))\n\n\nclass Test_get_changed_files(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(blob_name1, blob_name2):\n from 
ci_diff_helper import git_tools\n\n return git_tools.get_changed_files(blob_name1, blob_name2)\n\n def _helper(self, changed, expected):\n import mock\n\n blob_name1 = 'HEAD'\n blob_name2 = '031cf739bc419eb2c320f8c897b03c04796943a9'\n output_patch = mock.patch('ci_diff_helper._utils.check_output',\n return_value=changed)\n with output_patch as mocked:\n result = self._call_function_under_test(blob_name1, blob_name2)\n self.assertEqual(result, expected)\n mocked.assert_called_once_with(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n\n def test_empty(self):\n self._helper('', [])\n\n def test_with_changes(self):\n expected = ['foo.py', os.path.join('bar', 'baz.txt')]\n self._helper('\\n'.join(expected), expected)\n\n @unittest.skipUnless(utils.HAS_GIT, 'git not installed')\n def test_actual_call_same(self):\n blob_name1 = '7575455ec442498f3d1c5b2a8d3bc7861918d987'\n blob_name2 = blob_name1\n result = self._call_function_under_test(blob_name1, blob_name2)\n expected = []\n self.assertEqual(result, expected)\n\n def _maybe_skip_if_no_commit(self, blob_name):\n from ci_diff_helper import _utils\n\n commit_file = _utils.check_output(\n 'git', 'cat-file', '-t', blob_name, ignore_err=True)\n if commit_file != 'commit': # pragma: NO COVER\n self.skipTest(\n 'Commit {!r} does not exist'.format(blob_name))\n\n @unittest.skipUnless(utils.HAS_GIT, 'git not installed')\n def test_actual_call_parent(self): # pragma: NO COVER\n blob_name1 = 'bdb1ee24f05abe80f099bc5fd612fd46b36f3b28'\n self._maybe_skip_if_no_commit(blob_name1)\n blob_name2 = blob_name1 + '^'\n self._maybe_skip_if_no_commit(blob_name2)\n\n result = self._call_function_under_test(blob_name1, blob_name2)\n expected = [\n 'ci_diff_helper/appveyor.py',\n 'ci_diff_helper/environment_vars.py',\n 'docs/ci_diff_helper.appveyor.rst',\n 'docs/index.rst',\n 'tests/test_appveyor.py',\n 'tests/test_travis.py',\n ]\n self.assertEqual(result, expected)\n\n\nclass Test_merge_commit(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(revision):\n from ci_diff_helper.git_tools import merge_commit\n return merge_commit(revision)\n\n def _helper(self, parents, revision='HEAD'):\n import mock\n\n output_patch = mock.patch('ci_diff_helper._utils.check_output',\n return_value=parents)\n with output_patch as mocked:\n result = self._call_function_under_test(revision)\n mocked.assert_called_once_with(\n 'git', 'log', '--pretty=%P', '-1', revision)\n return result\n\n def test_non_merge_default(self):\n parents = 'fd5cffa5d437607159ceeda68895b9b53f23a531'\n result = self._helper(parents)\n self.assertFalse(result)\n\n def test_non_merge_explicit(self):\n parents = 'fd5cffa5d437607159ceeda68895b9b53f23a531'\n result = self._helper(parents, revision='master')\n self.assertFalse(result)\n\n def test_merge(self):\n parents = ('47ebd0bb461180dcab674b3beca5ec9c11a1b976 '\n 'e8fd7135497b1027cba26ffab7851f1533ff08e3')\n result = self._helper(parents)\n self.assertTrue(result)\n\n def test_three_parents(self):\n parents = ('8103a3b85aa5f3e2b14200bfef815539c1be109a '\n 'e9b5c87f8153fd177a0e10f7abda0b4bb4730626 '\n 'ce60976326725217c16fe84b5120c6a8661177a8')\n with self.assertRaises(NotImplementedError):\n self._helper(parents)\n\n\nclass Test_commit_subject(unittest.TestCase):\n\n @staticmethod\n def _call_function_under_test(*args):\n from ci_diff_helper.git_tools import commit_subject\n return commit_subject(*args)\n\n def test_non_merge_default(self):\n import mock\n\n output_patch = mock.patch('ci_diff_helper._utils.check_output')\n with 
output_patch as mocked:\n result = self._call_function_under_test()\n self.assertIs(result, mocked.return_value)\n mocked.assert_called_once_with(\n 'git', 'log', '--pretty=%s', '-1', 'HEAD')\n\n def test_non_merge_explicit(self):\n import mock\n revision = 'ffe035e3c4b4d11053b6162fce96474bb15c6869'\n\n output_patch = mock.patch('ci_diff_helper._utils.check_output')\n with output_patch as mocked:\n result = self._call_function_under_test(revision)\n self.assertIs(result, mocked.return_value)\n mocked.assert_called_once_with(\n 'git', 'log', '--pretty=%s', '-1', revision)\n", "id": "10186964", "language": "Python", "matching_score": 4.827515125274658, "max_stars_count": 5, "path": "tests/test_git_tools.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for interacting with ``git``.\"\"\"\n\nimport os\n\nfrom ci_diff_helper import _utils\n\n\ndef git_root():\n \"\"\"Return the root directory of the current ``git`` checkout.\n\n Returns:\n str: Filesystem path to ``git`` checkout root.\n \"\"\"\n return _utils.check_output('git', 'rev-parse', '--show-toplevel')\n\n\ndef get_checked_in_files():\n \"\"\"Gets a list of files in the current ``git`` repository.\n\n Effectively runs:\n\n .. code-block:: bash\n\n $ git ls-files ${GIT_ROOT}\n\n and then finds the absolute path for each file returned.\n\n Returns:\n list: List of all filenames checked into the repository.\n \"\"\"\n root_dir = git_root()\n cmd_output = _utils.check_output('git', 'ls-files', root_dir)\n\n result = []\n for filename in cmd_output.split('\\n'):\n result.append(os.path.abspath(filename))\n\n return result\n\n\ndef get_changed_files(blob_name1, blob_name2):\n \"\"\"Gets a list of changed files between two ``git`` revisions.\n\n A ``git`` object reference can be any of a branch name, tag,\n a commit SHA or a special reference.\n\n Args:\n blob_name1 (str): A ``git`` object reference.\n blob_name2 (str): A ``git`` object reference.\n\n Returns:\n list: List of all filenames changed.\n \"\"\"\n cmd_output = _utils.check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n\n if cmd_output:\n return cmd_output.split('\\n')\n else:\n return []\n\n\ndef merge_commit(revision='HEAD'):\n \"\"\"Checks if a ``git`` revision is a merge commit.\n\n Args:\n revision (Optional[str]): A ``git`` revision, any of a branch\n name, tag, a commit SHA or a special reference.\n\n Returns:\n bool: Flag indicating if the given revision is a merge commit.\n\n Raises:\n NotImplementedError: if the number of parents is not 1 or 2.\n \"\"\"\n parents = _utils.check_output(\n 'git', 'log', '--pretty=%P', '-1', revision)\n num_parents = len(parents.split())\n if num_parents == 1:\n return False\n elif num_parents == 2:\n return True\n else:\n raise NotImplementedError(\n 'Unexpected number of parent commits', parents)\n\n\ndef commit_subject(revision='HEAD'):\n \"\"\"Gets the subject of a ``git`` commit.\n\n Args:\n revision (Optional[str]): A ``git`` revision, any of a branch\n name, tag, a commit SHA or a special 
reference.\n\n Returns:\n str: The commit subject.\n \"\"\"\n return _utils.check_output(\n 'git', 'log', '--pretty=%s', '-1', revision)\n", "id": "8894282", "language": "Python", "matching_score": 1.0536413192749023, "max_stars_count": 5, "path": "ci_diff_helper/git_tools.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run Pylint on ci-diff-helper.\n\nThis runs pylint as a script via subprocess in two different\nsubprocesses. The first lints the production/library code\nusing the default rc file (PRODUCTION_RC). The second lints the\ntest code using an rc file (TEST_RC) which allows more style\nviolations (hence it has a reduced number of style checks).\n\"\"\"\n\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport io\nimport os\nimport subprocess\nimport sys\n\nimport six\n\nimport ci_diff_helper\n\n\n_SCRIPTS_DIR = os.path.abspath(os.path.dirname(__file__))\nPRODUCTION_RC = os.path.join(_SCRIPTS_DIR, 'pylintrc_production')\nTEST_RC = os.path.join(_SCRIPTS_DIR, 'pylintrc_test')\n\n_ERROR_TEMPLATE = 'Pylint failed on {} with status {:d}.'\n_SKIP_TEMPLATE = 'Skipping {}, no files to lint.'\n\n_PRODUCTION_RC_ADDITIONS = {\n 'MESSAGES CONTROL': {\n 'disable': ['I'],\n },\n 'BASIC': {\n 'good-names': ['pr'],\n },\n}\n_PRODUCTION_RC_REPLACEMENTS = {\n 'MASTER': {\n 'load-plugins': 'pylint.extensions.check_docs',\n 'jobs': '4',\n },\n 'DESIGN': {\n 'max-attributes': '10',\n },\n 'REPORTS': {\n 'reports': 'no',\n },\n 'FORMAT': {\n 'max-line-length': '80',\n 'no-space-check': '',\n 'expected-line-ending-format': 'LF',\n },\n}\n_TEST_RC_ADDITIONS = copy.deepcopy(_PRODUCTION_RC_ADDITIONS)\n_TEST_RC_ADDITIONS['MESSAGES CONTROL']['disable'].extend([\n 'missing-docstring',\n 'protected-access',\n 'similarities',\n 'too-many-public-methods',\n])\n_TEST_RC_REPLACEMENTS = copy.deepcopy(_PRODUCTION_RC_REPLACEMENTS)\n_TEST_RC_REPLACEMENTS.setdefault('BASIC', {})\n_TEST_RC_REPLACEMENTS['BASIC']['class-rgx'] = (\n '([A-Z_][a-zA-Z0-9]+|Test_.*)$')\n_TEST_RC_REPLACEMENTS['BASIC']['method-rgx'] = '[a-z_][a-z0-9_]{2,30}$|^test_'\n\n_ROOT_DIR = os.path.abspath(os.path.join(_SCRIPTS_DIR, '..'))\nIGNORED_FILES = (\n os.path.join(_ROOT_DIR, 'docs', 'conf.py'),\n)\n\n\ndef get_default_config():\n \"\"\"Get the default Pylint configuration.\n\n Returns:\n str: The default Pylint configuration.\n \"\"\"\n # Swallow STDERR if it says\n # \"No config file found, using default configuration\"\n result = subprocess.check_output(['pylint', '--generate-rcfile'],\n stderr=subprocess.PIPE)\n # On Python 3, this returns bytes (from STDOUT), so we\n # convert to a string.\n return result.decode('utf-8')\n\n\ndef read_config(contents):\n \"\"\"Reads pylintrc config into native ConfigParser object.\n\n Args:\n contents (str): The contents of the file containing the INI config.\n\n Returns:\n ConfigParser.ConfigParser: The parsed configuration.\n \"\"\"\n file_obj = io.StringIO(contents)\n config = six.moves.configparser.ConfigParser()\n 
config.readfp(file_obj)\n return config\n\n\ndef _transform_opt(opt_val):\n \"\"\"Transform a config option value to a string.\n\n If already a string, do nothing. If an iterable, then\n combine into a string by joining on \",\".\n\n Args:\n opt_val (Union[str, list]): A config option's value.\n\n Returns:\n str: The option value converted to a string.\n \"\"\"\n if isinstance(opt_val, (list, tuple)):\n return ','.join(opt_val)\n else:\n return opt_val\n\n\ndef make_rc(base_cfg, target_filename,\n additions=None, replacements=None):\n \"\"\"Combines a base rc and additions into single file.\n\n Args:\n base_cfg (ConfigParser.ConfigParser): The configuration we\n are merging into.\n target_filename (str): The filename where the new configuration\n will be saved.\n additions (Optional[dict]): The values added to the configuration.\n replacements (Optional[dict]): The wholesale replacements for the\n new configuration.\n\n Raises:\n KeyError: If one of the additions or replacements does not\n already exist in the current config.\n \"\"\"\n # Set-up the mutable default values.\n if additions is None:\n additions = {}\n if replacements is None:\n replacements = {}\n\n # Create fresh config, which must extend the base one.\n new_cfg = six.moves.configparser.ConfigParser()\n # pylint: disable=protected-access\n new_cfg._sections = copy.deepcopy(base_cfg._sections)\n new_sections = new_cfg._sections\n # pylint: enable=protected-access\n\n for section, opts in additions.items():\n curr_section = new_sections.setdefault(\n section, collections.OrderedDict())\n for opt, opt_val in opts.items():\n curr_val = curr_section.get(opt)\n if curr_val is None:\n raise KeyError('Expected to be adding to existing option.')\n curr_val = curr_val.rstrip(',')\n opt_val = _transform_opt(opt_val)\n curr_section[opt] = '{}, {}'.format(curr_val, opt_val)\n\n for section, opts in replacements.items():\n curr_section = new_sections.setdefault(\n section, collections.OrderedDict())\n for opt, opt_val in opts.items():\n curr_val = curr_section.get(opt)\n if curr_val is None:\n raise KeyError('Expected to be replacing existing option.')\n opt_val = _transform_opt(opt_val)\n curr_section[opt] = '{}'.format(opt_val)\n\n with open(target_filename, 'w') as file_obj:\n new_cfg.write(file_obj)\n\n\ndef valid_filename(filename):\n \"\"\"Checks if a file is a valid Python file.\n\n Args:\n filename (str): The name of a source file.\n\n Returns:\n bool: Flag indicating if the file is valid.\n \"\"\"\n if filename in IGNORED_FILES:\n return False\n if not os.path.exists(filename):\n return False\n _, ext = os.path.splitext(filename)\n return ext == '.py'\n\n\ndef is_test_filename(filename):\n \"\"\"Checks if the file is a test file.\n\n Args:\n filename (str): The name of a source file.\n\n Returns:\n bool: Boolean indicating if ``filename`` is a test file.\n \"\"\"\n return 'test' in filename\n\n\ndef get_python_files(all_files=None):\n \"\"\"Gets a list of all Python files in the repository.\n\n Separates the files based on test or production code according\n to :func:`is_test_filename`.\n\n Args:\n all_files (Optional[list]): A list of all files to consider.\n\n Returns:\n Tuple[list, list]: A tuple containing two lists. 
The first list\n contains all production files, the next all test files.\n \"\"\"\n if all_files is None:\n all_files = ci_diff_helper.get_checked_in_files()\n\n production_files = []\n test_files = []\n for filename in all_files:\n if not valid_filename(filename):\n continue\n if is_test_filename(filename):\n test_files.append(filename)\n else:\n production_files.append(filename)\n\n return production_files, test_files\n\n\ndef lint_fileset(filenames, rc_filename, description):\n \"\"\"Lints a group of files using a given rcfile.\n\n Args:\n filenames (list): A list of files to be linted.\n rc_filename (str): The name of the Pylint config RC file.\n description (str): A description of the files and configuration\n currently being run.\n \"\"\"\n if filenames:\n pylint_shell_command = ['pylint', '--rcfile', rc_filename]\n pylint_shell_command.extend(filenames)\n status_code = subprocess.call(pylint_shell_command)\n if status_code != 0:\n error_message = _ERROR_TEMPLATE.format(\n description, status_code)\n print(error_message, file=sys.stderr)\n sys.exit(status_code)\n else:\n print(_SKIP_TEMPLATE.format(description))\n\n\ndef main(all_files=None):\n \"\"\"Script entry point. Lints both sets of files.\n\n Args:\n all_files (Optional[list]): A list of all files to consider.\n \"\"\"\n default_config = read_config(get_default_config())\n make_rc(default_config, PRODUCTION_RC,\n additions=_PRODUCTION_RC_ADDITIONS,\n replacements=_PRODUCTION_RC_REPLACEMENTS)\n make_rc(default_config, TEST_RC,\n additions=_TEST_RC_ADDITIONS,\n replacements=_TEST_RC_REPLACEMENTS)\n production_files, test_files = get_python_files(all_files=all_files)\n lint_fileset(production_files, PRODUCTION_RC, 'Library')\n lint_fileset(test_files, TEST_RC, 'Test')\n\n\nif __name__ == '__main__':\n main()\n", "id": "9412430", "language": "Python", "matching_score": 3.0997254848480225, "max_stars_count": 5, "path": "scripts/run_pylint.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run pycodestyle on ci-diff-helper codebase.\n\nThis runs pycodestyle as a script via subprocess but only runs it on the\n.py files that are checked in to the repository.\n\"\"\"\n\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\n\nimport ci_diff_helper\n\n\ndef main(all_files=None):\n \"\"\"Run pycodestyle on all Python files in the repository.\n\n Args:\n all_files (Optional[list]): A list of all files to consider.\n \"\"\"\n if all_files is None:\n all_files = ci_diff_helper.get_checked_in_files()\n\n python_files = []\n for filename in all_files:\n _, ext = os.path.splitext(filename)\n if ext == '.py':\n python_files.append(filename)\n\n if not python_files:\n print('No Python files to lint, exiting.')\n else:\n pycodestyle_command = ['pycodestyle'] + python_files\n status_code = subprocess.call(pycodestyle_command)\n if status_code != 0:\n sys.exit(status_code)\n\n\nif __name__ == '__main__':\n main()\n", "id": "4715454", "language": "Python", "matching_score": 
2.9709720611572266, "max_stars_count": 5, "path": "scripts/pycodestyle_on_repo.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run lint rules on ci-diff-helper.\"\"\"\n\nimport ci_diff_helper\nimport pycodestyle_on_repo\nimport run_pylint\n\n\ndef main():\n \"\"\"Script entry point.\n\n Runs Pylint and pycodestyle.\n \"\"\"\n all_files = ci_diff_helper.get_checked_in_files()\n pycodestyle_on_repo.main(all_files=all_files)\n run_pylint.main(all_files=all_files)\n\n\nif __name__ == '__main__':\n main()\n", "id": "329879", "language": "Python", "matching_score": 0.121393583714962, "max_stars_count": 5, "path": "scripts/run_all_lint.py" }, { "content": "import base64\nimport json\nimport os\n\nfrom BeautifulSoup import BeautifulSoup\nimport requests\n\nimport local_settings\nimport utils\n\n\nNATIONAL_BRACKET = ('http://games.espn.com/tournament-challenge-bracket/'\n '2017/en/entry?entryID=115703')\nHTML_FILENAME = os.path.join(\n local_settings.YEAR,\n base64.b64encode(NATIONAL_BRACKET) + '.html')\nSLOT_KEY = 'data-slotindex'\n\n\ndef get_national_bracket():\n if not os.path.exists(HTML_FILENAME):\n response = requests.get(NATIONAL_BRACKET)\n with open(HTML_FILENAME, 'w') as fh:\n fh.write(response.content)\n response.close()\n with open(HTML_FILENAME, 'r') as fh:\n return fh.read()\n\n\ndef get_team_info(data_tag):\n \"\"\"Returns ({teamID}, {team name}) from a node.\"\"\"\n name_span, = data_tag.findAll('span', {'class': 'name'})\n team_name = name_span.text\n slot_id = int(data_tag[SLOT_KEY])\n # NOTE: Assumes the team ID is 1 more than the slot ID.\n team_id = slot_id + 1\n return team_id, team_name\n\n\ndef get_data_slot_tags(tag):\n if tag.name != 'div':\n return False\n return tag.has_key(SLOT_KEY)\n\n\ndef parse_teams():\n bracket_html = get_national_bracket()\n soup = BeautifulSoup(bracket_html)\n\n data_tags = soup.findAll(get_data_slot_tags)\n assert len(data_tags) == 127\n opening_round_tags = [tag for tag in data_tags\n if int(tag[SLOT_KEY]) < 64]\n assert len(opening_round_tags) == 64\n team_info = [get_team_info(data_tag) for data_tag in\n opening_round_tags]\n team_info = dict(set(team_info))\n\n with open(utils.TEAM_MAP_FILENAME, 'w') as fh:\n json.dump(team_info, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n\n\nif __name__ == '__main__':\n utils.prepare_directory(local_settings.YEAR)\n parse_teams()\n", "id": "55011", "language": "Python", "matching_score": 3.2945914268493652, "max_stars_count": 1, "path": "get_team_mapping.py" }, { "content": "from __future__ import print_function\n\nfrom BeautifulSoup import BeautifulSoup\nimport json\nimport os\n\nimport get_brackets_html\nimport get_team_mapping\n\n\ndef get_team_info(data_tag):\n # Modeled after get_team_mapping.get_data_slot_tags\n slot_id = int(data_tag['data-slotindex'])\n team_id = int(data_tag['data-teamid'])\n return slot_id, team_id\n\n\ndef get_slots(filename):\n with open(filename, 'r') as fh:\n soup = BeautifulSoup(fh.read())\n\n data_tags = 
soup.findAll(get_team_mapping.get_data_slot_tags)\n if len(data_tags) != 127:\n raise ValueError('Expected 127 slots in the bracket.')\n\n tag_info = [get_team_info(tag) for tag in data_tags]\n slot_winners = {}\n for slot_id, team_id in tag_info:\n if slot_id < 64:\n # Check assumption from get_team_mapping.py that\n # the team ID is 1 more than the slot ID.\n if slot_id + 1 != team_id:\n raise ValueError('Expected team ID to be 1 more than slot ID')\n else:\n slot_winners[slot_id] = team_id\n\n if sorted(slot_winners.keys()) != range(64, 127):\n raise ValueError('Expected winner slots to be [64 .. 126]')\n\n return slot_winners\n\n\ndef main():\n for entry_id in get_brackets_html.get_bracket_ids():\n json_filename = os.path.join(get_brackets_html.BRACKETS_DIR,\n str(entry_id) + '.json')\n if os.path.exists(json_filename):\n msg = 'Exists: {}'.format(json_filename)\n print(msg)\n continue\n\n html_filename = os.path.join(get_brackets_html.BRACKETS_DIR,\n str(entry_id) + '.html')\n slot_winners = get_slots(html_filename)\n with open(json_filename, 'w') as fh:\n msg = 'Creating {}'.format(json_filename)\n print(msg)\n json.dump(slot_winners, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n\n\nif __name__ == '__main__':\n main()\n", "id": "3462385", "language": "Python", "matching_score": 2.792975425720215, "max_stars_count": 1, "path": "parse_brackets_html.py" }, { "content": "from __future__ import print_function\n\nimport json\nimport os\nimport pickle\n\nfrom game_tree_classes import Team\nimport get_brackets_html\nimport utils\n\n\nwith open(utils.BASE_BRACKET_PICKLE, 'r') as fh:\n BASE_BRACKET = pickle.load(fh)\n\n\ndef get_complete_bracket(slot_winners, game_slots):\n result = game_slots.copy()\n for slot_id in xrange(64, 127):\n winner_of = result.get_slot(slot_id)\n winning_team = slot_winners[str(slot_id)]\n\n team_id1 = result.get_slot(winner_of.game_slot1).team_id\n team_id2 = result.get_slot(winner_of.game_slot2).team_id\n\n if str(winning_team) not in (team_id1, team_id2):\n raise ValueError('Winner is not possible.')\n result.reset_slot(slot_id, Team(winning_team))\n\n if not result.complete:\n raise ValueError('Expected bracket to be complete.')\n return result.reduced\n\n\ndef main():\n to_store = {}\n for entry_id in get_brackets_html.get_bracket_ids():\n json_filename = os.path.join(get_brackets_html.BRACKETS_DIR,\n str(entry_id) + '.json')\n with open(json_filename, 'r') as fh:\n slot_winners = json.load(fh)\n\n bracket_reduced = get_complete_bracket(slot_winners, BASE_BRACKET)\n to_store[entry_id] = bracket_reduced\n\n filename = utils.REDUCED_FILLED_OUT\n with open(filename, 'w') as fh:\n json.dump(to_store, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n msg = 'Created {}'.format(filename)\n print(msg)\n\n\nif __name__ == '__main__':\n main()\n", "id": "5549708", "language": "Python", "matching_score": 3.498979091644287, "max_stars_count": 1, "path": "entries_as_reduced.py" }, { "content": "from __future__ import print_function\n\nimport itertools\nimport json\nimport pickle\n\nfrom game_tree_classes import WinnerOf\nimport utils\n\n\nwith open(utils.SWEET16_PICKLE, 'r') as fh:\n SLOTS_BEFORE = pickle.load(fh)\n\n\ndef complete_bracket(game_slots, choice_slots, choices):\n result = game_slots.copy()\n for slot_id, choice_val in zip(choice_slots, choices):\n winner_of = result.get_slot(slot_id)\n if choice_val not in (winner_of.game_slot1, winner_of.game_slot2):\n raise ValueError('Choice does not match available.')\n winning_team = 
result.get_slot(choice_val)\n result.reset_slot(slot_id, winning_team)\n\n if not result.complete:\n raise ValueError('Expected bracket to be complete.')\n return result.reduced\n\n\ndef main():\n choice_slots = []\n choice_vals = []\n for slot_id in xrange(127):\n value = SLOTS_BEFORE.get_slot(slot_id)\n if isinstance(value, WinnerOf):\n choice_slots.append(slot_id)\n choice_vals.append((value.game_slot1, value.game_slot2))\n\n msg = '{:d} choices left'.format(len(choice_slots))\n print(msg)\n reduced_vals = []\n for choice_tuple in itertools.product(*choice_vals):\n reduced_vals.append(\n complete_bracket(SLOTS_BEFORE, choice_slots, choice_tuple))\n\n filename = utils.REDUCED_SCENARIOS\n with open(filename, 'w') as fh:\n json.dump(reduced_vals, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n msg = 'Created {}'.format(filename)\n print(msg)\n\n\nif __name__ == '__main__':\n main()\n", "id": "8864562", "language": "Python", "matching_score": 2.334019899368286, "max_stars_count": 1, "path": "run_all_scenarios.py" }, { "content": "import json\nimport pickle\n\nimport local_settings\nimport utils\n\n\nwith open(utils.TEAM_MAP_FILENAME, 'r') as fh:\n TEAM_MAP = json.load(fh)\n\n\ndef choose_winner(winner_of, game_slots):\n prev_team1 = game_slots.get_slot(winner_of.game_slot1)\n prev_team2 = game_slots.get_slot(winner_of.game_slot2)\n\n team_name1 = TEAM_MAP[prev_team1.team_id]\n team_name2 = TEAM_MAP[prev_team2.team_id]\n\n message = '%s [y] or %s [n]? ' % (team_name1, team_name2)\n choice = raw_input(message)\n if choice.strip().lower() == 'y':\n return prev_team1\n else:\n return prev_team2\n\n\n\ndef main():\n with open(utils.BASE_BRACKET_PICKLE, 'r') as fh:\n slots_before = pickle.load(fh)\n\n game_slots = slots_before.copy()\n for slot_id in xrange(64, 64 + 32 + 16):\n winner_of = game_slots.get_slot(slot_id)\n winning_team = choose_winner(winner_of, game_slots)\n game_slots.reset_slot(slot_id, winning_team)\n game_slots.save(utils.SWEET16_PICKLE)\n\n\nif __name__ == '__main__':\n main()\n", "id": "10209408", "language": "Python", "matching_score": 3.828752040863037, "max_stars_count": 1, "path": "update_national_bracket.py" }, { "content": "import json\nimport pickle\n\nimport utils\n\n\nwith open(utils.TEAM_MAP_FILENAME, 'r') as fh:\n TEAM_MAP = json.load(fh)\n\n\ndef choose_winner(winner_of, game_slots):\n prev_team1 = game_slots.get_slot(winner_of.game_slot1)\n prev_team2 = game_slots.get_slot(winner_of.game_slot2)\n\n team_name1 = TEAM_MAP[prev_team1.team_id]\n team_name2 = TEAM_MAP[prev_team2.team_id]\n\n message = '%s [y] or %s [n]? 
' % (team_name1, team_name2)\n choice = raw_input(message)\n response = choice.strip().lower()\n if response == '':\n return\n elif response == 'y':\n return prev_team1\n else:\n return prev_team2\n\n\n\ndef main():\n with open(utils.SWEET16_PICKLE, 'r') as fh:\n slots_before = pickle.load(fh)\n\n game_slots = slots_before.copy()\n winners_added = 0\n for slot_id in xrange(64 + 32 + 16, 64 + 32 + 16 + 8):\n winner_of = game_slots.get_slot(slot_id)\n winning_team = choose_winner(winner_of, game_slots)\n if winning_team is not None:\n winners_added += 1\n game_slots.reset_slot(slot_id, winning_team)\n if winners_added != 4:\n raise ValueError('Expected to add 4 winners after the first'\n 'half of the Elite 8.')\n game_slots.save('complete_bracket_first_half_of_elite_8.pkl')\n\n\nif __name__ == '__main__':\n main()\n", "id": "2986309", "language": "Python", "matching_score": 2.1467676162719727, "max_stars_count": 1, "path": "first_half_of_elite_8.py" }, { "content": "import os\nimport pickle\n\nfrom game_tree_classes import GameSlots\nfrom game_tree_classes import Team\nfrom game_tree_classes import WinnerOf\nimport utils\n\n\ndef main():\n game_slots = GameSlots()\n for slot_id in xrange(64):\n # NOTE: This relies on the assumption from get_team_mapping.py\n # that the team ID is 1 more than the slot ID.\n team = Team(slot_id + 1)\n game_slots.add_slot(slot_id, team)\n\n prev_first, first_index = 0, 64\n for round_size in (32, 16, 8, 4, 2, 1):\n for slot_offset in xrange(round_size):\n slot_id = slot_offset + first_index\n prev_slot1 = prev_first + 2 * slot_offset\n prev_slot2 = prev_first + 2 * slot_offset + 1\n winner_of = WinnerOf(prev_slot1, prev_slot2)\n game_slots.add_slot(slot_id, winner_of)\n prev_first, first_index = first_index, first_index + round_size\n game_slots.save(utils.BASE_BRACKET_PICKLE)\n\n\nif __name__ == '__main__':\n main()\n", "id": "2788598", "language": "Python", "matching_score": 2.367056131362915, "max_stars_count": 1, "path": "create_pickled_national_bracket.py" }, { "content": "import pickle\nimport string\n\n\n_BASE64_ALPHABET = (string.ascii_uppercase + string.ascii_lowercase +\n string.digits + '+/')\nBASE64_ALPHABET_DICT = {str(i + 1): letter\n for i, letter in enumerate(_BASE64_ALPHABET)}\nBASE64_ALPHABET_REVERSE = {letter: i\n for i, letter in BASE64_ALPHABET_DICT.iteritems()}\nKEY_SET = frozenset(map(str, range(127)))\n\n\nclass GameSlots(object):\n\n def __init__(self, mapping=None):\n self.data = {} if mapping is None else mapping\n\n def save(self, filename):\n with open(filename, 'w') as fh:\n pickle.dump(self, fh)\n\n def copy(game_slots):\n return GameSlots(game_slots.data.copy())\n\n def add_slot(self, slot_id, obj):\n slot_id = str(int(slot_id))\n if slot_id in self.data:\n raise Exception('%s already claimed' % slot_id)\n\n self.data[slot_id] = obj\n\n def get_slot(self, slot_id):\n slot_id = str(int(slot_id))\n return self.data[slot_id]\n\n def reset_slot(self, slot_id, obj):\n slot_id = str(int(slot_id))\n if slot_id not in self.data:\n raise Exception('%s hasn\\'t been claimed yet' % slot_id)\n\n self.data[slot_id] = obj\n\n @property\n def complete(self):\n assert set(self.data.keys()) == KEY_SET\n for value in self.data.itervalues():\n if not isinstance(value, Team):\n return False\n return True\n\n @property\n def reduced(self):\n if not self.complete:\n return super(Team, self).__hash__()\n else:\n # Since complete, we know keys == KEY_SET\n result_list = []\n for i in range(127):\n team = self.data[str(i)]\n 
result_list.append(BASE64_ALPHABET_DICT[team.team_id])\n return ''.join(result_list)\n\n @classmethod\n def from_reduced(cls, reduced):\n mapping = {}\n assert len(reduced) == 127\n for i in range(127):\n mapping[str(i)] = Team(BASE64_ALPHABET_REVERSE[reduced[i]])\n return cls(mapping=mapping)\n\n def __eq__(self, other):\n if not isinstance(other, GameSlots):\n return False\n if not (self.complete and other.complete):\n return False\n return self.reduced == other.reduced\n\n def __repr__(self):\n return 'GameSlots(slots=%s)' % self.data.keys()\n\n\nclass Team(object):\n\n def __init__(self, team_id):\n # Make sure it is an integer\n self.team_id = str(int(team_id))\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.team_id == other.team_id\n\n def __repr__(self):\n return 'Team(%s)' % self.team_id\n\n\nclass WinnerOf(object):\n\n def __init__(self, game_slot1, game_slot2):\n self.game_slot1 = game_slot1\n self.game_slot2 = game_slot2\n\n def __repr__(self):\n return 'WinnerOf(slot=%s, slot=%s)' % (self.game_slot1, self.game_slot2)\n", "id": "9718135", "language": "Python", "matching_score": 2.011218547821045, "max_stars_count": 1, "path": "game_tree_classes.py" }, { "content": "import itertools\nimport json\nimport logging\n\nfrom google.appengine.ext import ndb\n\nfrom game_tree_classes import GameSlots\nfrom game_tree_classes import Team\nfrom game_tree_classes import WinnerOf\n\n\nwith open('team_map.json.data', 'r') as fh:\n TEAM_MAP = json.load(fh)\nBOTTOM_BAR = ('=' * 30) + '\\n\\n'\n\n\nclass PotentialBracket(ndb.Model):\n year = ndb.IntegerProperty(default=2017, indexed=False)\n highest_names = ndb.StringProperty(repeated=True, indexed=False)\n highest_scores = ndb.IntegerProperty(repeated=True, indexed=False)\n lowest_names = ndb.StringProperty(repeated=True, indexed=False)\n lowest_scores = ndb.IntegerProperty(repeated=True, indexed=False)\n\n\ndef all_possible_outcomes(unfinished_game_slots):\n undecided_slots = []\n for game_slot in range(127):\n if isinstance(unfinished_game_slots.get_slot(game_slot), WinnerOf):\n undecided_slots.append(game_slot)\n\n all_outcomes = [GameSlots.copy(unfinished_game_slots)]\n for undecided_slot in undecided_slots:\n # Assume all GameSlots in the list have yet to\n # decide who wins \"undecided_slot\"\n initial_game = all_outcomes[0]\n winner_of = initial_game.get_slot(undecided_slot)\n\n # We will branch each GameSlots entry in all_outcomes to\n # two different GameSlots entries with a WinnerOf member\n # swapped out for a Team instance, and will ditch\n new_outcomes = []\n for outcome in all_outcomes:\n team1 = outcome.get_slot(winner_of.game_slot1)\n slot1_winner = GameSlots.copy(outcome)\n slot1_winner.reset_slot(undecided_slot, team1)\n\n team2 = outcome.get_slot(winner_of.game_slot2)\n slot2_winner = GameSlots.copy(outcome)\n slot2_winner.reset_slot(undecided_slot, team2)\n\n new_outcomes.extend([slot1_winner, slot2_winner])\n\n all_outcomes = new_outcomes\n\n return all_outcomes\n\n\ndef assumed_winners(unfinished_master_game_slots, guess_slots):\n assumptions = []\n for game_slot in range(127):\n if not isinstance(unfinished_master_game_slots.get_slot(game_slot),\n Team):\n winner_of = unfinished_master_game_slots.get_slot(game_slot)\n opponent_ids = [\n guess_slots.get_slot(winner_of.game_slot1).team_id,\n guess_slots.get_slot(winner_of.game_slot2).team_id\n ]\n\n winning_team_id = guess_slots.get_slot(game_slot).team_id\n losing_team_ids = [id_ for id_ in opponent_ids\n if id_ != 
winning_team_id]\n if len(losing_team_ids) != 1:\n raise Exception('Losing team not a single ID: %s' % game_slot)\n losing_team_id = losing_team_ids[0]\n\n message = '%s over %s' % (TEAM_MAP[winning_team_id],\n TEAM_MAP[losing_team_id])\n assumptions.append(message)\n\n return assumptions\n\n\ndef pretty_print(assumptions, winners, last_place):\n winners_row_messages = [\n '%d: %s - %d' % (index + 1, bracket_name, score)\n for index, (bracket_name, score) in enumerate(winners)\n ]\n winners_message = '\\n'.join(winners_row_messages)\n last_place_row_messages = [\n 'Last: %s - %d' % (bracket_name, score)\n for bracket_name, score in last_place\n ]\n last_place_message = '\\n'.join(last_place_row_messages)\n assumptions_message = '\\n'.join(['Assumptions:'] + assumptions)\n\n return '%s\\n%s\\n\\n%s\\n\\n%s' % (winners_message, last_place_message,\n assumptions_message, BOTTOM_BAR)\n\n\ndef get_scenario(unfinished_master_game_slots, potential_finished_slots):\n assumptions = assumed_winners(unfinished_master_game_slots,\n potential_finished_slots)\n\n reduced = potential_finished_slots.reduced\n all_data = PotentialBracket.get_by_id(reduced)\n winners = zip(all_data.highest_names, all_data.highest_scores)\n # Pretty print actually expects the winners to be in the reverse\n # order they are stored.\n winners = reversed(winners)\n last_place = zip(all_data.lowest_names, all_data.lowest_scores)\n return pretty_print(assumptions, winners, last_place)\n", "id": "11842440", "language": "Python", "matching_score": 2.0503461360931396, "max_stars_count": 1, "path": "application/utils.py" }, { "content": "from __future__ import print_function\n\nimport json\nimport os\n\nfrom game_tree_classes import _BASE64_ALPHABET\nfrom game_tree_classes import Team\nimport get_brackets_html\nimport utils\n\n\ndef get_brackets():\n filename = utils.REDUCED_FILLED_OUT\n with open(filename, 'r') as fh:\n return json.load(fh)\n\n\ndef get_scenarios():\n filename = utils.REDUCED_SCENARIOS\n with open(filename, 'r') as fh:\n return json.load(fh)\n\n\ndef score_bracket(master, guess):\n if len(master) != 127 or len(guess) != 127:\n raise ValueError('Expected 127 slots.')\n if (master[:64] != _BASE64_ALPHABET or\n guess[:64] != _BASE64_ALPHABET):\n raise ValueError('Expected identical first 64 slots.')\n\n score = 0\n game_value = 10\n base_index = 64\n for games_per_round in (32, 16, 8, 4, 2, 1):\n next_index = base_index + games_per_round\n for position in xrange(base_index, next_index):\n if guess[position] == master[position]:\n score += game_value\n\n base_index = next_index\n # Double the score per game.\n game_value *= 2\n\n return score\n\n\ndef get_lowest(pair_list):\n \"\"\"Gets the pairs with the lowest score.\n\n Assumes pair_list[0] has the lowest score and the score\n is the first element of the pair.\n \"\"\"\n low_score = pair_list[0][0]\n result = []\n\n index = 0\n while pair_list[index][0] == low_score:\n result.append(pair_list[index])\n index += 1\n\n return result\n\n\ndef get_highest9(pair_list):\n \"\"\"Gets the pairs with the highest 9 scores.\n\n If there is a tie for the 9th highest score, will return all that\n match it.\n\n Assumes pair_list[0] has the lowest score and the score\n is the first element of the pair.\n \"\"\"\n cutoff_score = pair_list[-9][0]\n result = pair_list[-9:]\n\n index = -10\n while pair_list[index][0] == cutoff_score:\n result.insert(0, pair_list[index])\n index -= 1\n\n return result\n\n\ndef get_all_scores(master, brackets):\n results = []\n for entry_id, guess in 
brackets.items():\n curr_score = score_bracket(master, guess)\n results.append((curr_score, entry_id))\n results.sort(key=lambda pair: pair[0])\n\n lowest = get_lowest(results)\n highest = get_highest9(results)\n return lowest, highest\n\n\ndef main():\n brackets = get_brackets()\n scenarios = get_scenarios()\n\n to_store = {}\n count = 0\n for master in scenarios:\n count += 1\n if master in to_store:\n raise KeyError(master, 'already exists')\n lowest, highest = get_all_scores(master, brackets)\n to_store[master] = [lowest, highest]\n if count % 25 == 0:\n msg = 'Count: {}'.format(count)\n print(msg)\n\n filename = utils.WINNING_SCORES\n with open(filename, 'w') as fh:\n json.dump(to_store, fh, indent=2, sort_keys=True,\n separators=(',', ': '))\n msg = 'Created {}'.format(filename)\n print(msg)\n\n\nif __name__ == '__main__':\n main()\n", "id": "6103263", "language": "Python", "matching_score": 1.3933212757110596, "max_stars_count": 1, "path": "compute_all_winners.py" }, { "content": "import json\nimport logging\n\nfrom google.appengine.ext import deferred\nfrom google.appengine.ext import ndb\nimport webapp2\n\nfrom base_handler import BaseHandler\nfrom game_tree_classes import WinnerOf\nfrom main import BracketContainer\nfrom utils import all_possible_outcomes\nfrom utils import get_scenario\n\n\nwith open('team_map.json.data', 'r') as fh:\n TEAM_MAP = json.load(fh)\n\n\nclass AdminPage(BaseHandler):\n\n def get(self):\n self.render_response('admin.templ')\n\n\nclass SelectLast(BaseHandler):\n\n def handle_get(self, how_many):\n kwargs = {'incorrect_url': False, 'finished': False}\n\n last_how_many = BracketContainer.get_or_insert_n(how_many)\n if last_how_many.bracket is not None:\n kwargs['finished'] = True\n kwargs['message'] = 'Last %d already selected' % how_many\n else:\n # Need this to move on\n one_more = BracketContainer.get_or_insert_n(how_many + 1)\n if one_more.bracket is None:\n kwargs['not_possible'] = True\n kwargs['message'] = ('No Last %d Bracket to go '\n 'off of' % (how_many + 1))\n else:\n bracket = one_more.bracket\n undecided = get_undecided(bracket, range(120, 124))\n if len(undecided) + 3 != how_many:\n kwargs['not_possible'] = True\n kwargs['message'] = ('Last %d Bracket has wrong amount '\n 'undecided' % (how_many + 1))\n else:\n kwargs['how_many'] = how_many\n kwargs['matchups'] = get_matchups(bracket, undecided)\n self.render_response('select_last.templ', **kwargs)\n\n def get(self, how_many):\n how_many = how_many.strip()\n if how_many not in ('4', '5', '6', '7'):\n self.render_response('select_last.templ', incorrect_url=True)\n return\n self.handle_get(int(how_many))\n\n def handle_post(self, how_many):\n one_more = BracketContainer.get_or_insert_n(how_many + 1)\n if one_more.bracket is not None:\n bracket = one_more.bracket.copy()\n undecided = get_undecided(bracket, range(120, 124))\n selected = {}\n for game_slot in undecided:\n value = self.request.params.get(str(game_slot))\n if value != 'null':\n selected[str(game_slot)] = value\n if len(selected) == 1:\n slot_choices = map(int, selected.keys())\n validated = check_values(bracket, selected, slot_choices)\n for slot, team in validated.iteritems():\n logging.info((slot, TEAM_MAP[team.team_id]))\n bracket.reset_slot(slot, team)\n last_how_many = BracketContainer.get_or_insert_n(how_many)\n last_how_many.bracket = bracket\n last_how_many.put()\n run_it_all(last_how_many)\n self.redirect('/select-last-%d' % how_many)\n\n def post(self, how_many):\n how_many = how_many.strip()\n if how_many not in ('4', 
'5', '6', '7'):\n self.redirect('/')\n return\n self.handle_post(int(how_many))\n\n\nclass SelectElite8(BaseHandler):\n\n def get(self):\n kwargs = {'still_pending': True}\n elite_8 = BracketContainer.get_or_insert_n(8)\n if elite_8.bracket is not None:\n kwargs['still_pending'] = False\n else:\n sweet_16 = BracketContainer.get_or_insert_n(16)\n bracket = sweet_16.bracket\n if bracket is None:\n self.response.clear()\n self.response.set_status(500)\n return\n kwargs['matchups'] = get_matchups(bracket, range(112, 120))\n self.render_response('select_elite8.templ', **kwargs)\n\n def post(self):\n sweet_16 = BracketContainer.get_or_insert_n(16)\n if sweet_16.bracket is not None:\n bracket = sweet_16.bracket.copy()\n validated = check_values(bracket, self.request.params,\n range(112, 120))\n if validated is not None:\n for slot, team in validated.iteritems():\n logging.info((slot, TEAM_MAP[team.team_id]))\n bracket.reset_slot(slot, team)\n elite_8 = BracketContainer.get_or_insert_n(8)\n elite_8.bracket = bracket\n elite_8.put()\n run_it_all(elite_8)\n self.redirect('/select-elite-8')\n\n\ndef run_it_all(bracket_container, defer_now=True):\n if defer_now:\n deferred.defer(run_it_all, bracket_container, defer_now=False)\n return\n\n unfinished_game_slots = bracket_container.bracket\n all_outcomes = all_possible_outcomes(unfinished_game_slots)\n all_scenarios = [get_scenario(unfinished_game_slots, outcome)\n for outcome in all_outcomes]\n bracket_container.scenarios = ''.join(all_scenarios)\n bracket_container.put()\n\n\ndef check_values(bracket, request_params, slot_choices):\n slots = {}\n for game_slot in slot_choices:\n value = request_params.get(str(game_slot))\n\n winner_of = bracket.get_slot(game_slot)\n team1 = bracket.get_slot(winner_of.game_slot1)\n team2 = bracket.get_slot(winner_of.game_slot2)\n if team1.team_id == value:\n slots[game_slot] = team1\n elif team2.team_id == value:\n slots[game_slot] = team2\n else:\n return None\n return slots\n\n\ndef get_matchups(bracket, slot_choices):\n matchups = []\n for game_slot in slot_choices:\n winner_of = bracket.get_slot(game_slot)\n team1 = bracket.get_slot(winner_of.game_slot1)\n team2 = bracket.get_slot(winner_of.game_slot2)\n team1_name = TEAM_MAP[team1.team_id]\n team2_name = TEAM_MAP[team2.team_id]\n matchup = (game_slot,\n team1.team_id, team1_name,\n team2.team_id, team2_name)\n matchups.append(matchup)\n return matchups\n\n\ndef get_undecided(bracket, slot_choices):\n undecided = []\n for game_slot in slot_choices:\n potential = bracket.get_slot(game_slot)\n if isinstance(potential, WinnerOf):\n undecided.append(game_slot)\n return undecided\n\n\nroutes = [\n ('/admin', AdminPage),\n ('/select-elite-8', SelectElite8),\n ('/select-last-(.*)', SelectLast),\n]\napp = webapp2.WSGIApplication(routes, debug=True)\n", "id": "12453895", "language": "Python", "matching_score": 4.116584300994873, "max_stars_count": 1, "path": "application/admin.py" }, { "content": "import json\n\nfrom google.appengine.ext import ndb\nimport webapp2\n\nfrom base_handler import BaseHandler\n\n\nKEY_MAP = {\n 16: 'sweet16',\n 8: 'elite8',\n 7: 'last7',\n 6: 'last6',\n 5: 'last5',\n 4: 'final4',\n}\nUSER_PATHS = '|'.join(\n [value for value in KEY_MAP.itervalues() if value != KEY_MAP[16]])\nUSER_PATHS_ROUTE = '/(%s)' % (USER_PATHS,)\n\n\n\nclass BracketContainer(ndb.Model):\n year = ndb.IntegerProperty(default=2017, indexed=False)\n bracket = ndb.PickleProperty()\n scenarios = ndb.StringProperty(indexed=False)\n\n @classmethod\n def get_or_insert_n(cls, n):\n 
return cls.get_or_insert(KEY_MAP[n])\n\n\nclass MainPage(BaseHandler):\n\n def get(self):\n self.render_response('main.templ')\n\n\nclass ShowUserData(BaseHandler):\n\n def get(self, key):\n self.response.headers['Content-Type'] = 'text/plain'\n bracket_container = BracketContainer.get_or_insert(key)\n if bracket_container.scenarios is not None:\n self.response.write(bracket_container.scenarios)\n else:\n self.response.write('Not ready yet')\n\n\nclass RedirectHandler(webapp2.RequestHandler):\n\n def get(self):\n self.redirect('/')\n\n\nroutes = [\n (USER_PATHS_ROUTE, ShowUserData),\n ('/', MainPage),\n ('/.*', RedirectHandler),\n]\napp = webapp2.WSGIApplication(routes, debug=True)\n", "id": "6076636", "language": "Python", "matching_score": 1.9126421213150024, "max_stars_count": 1, "path": "application/main.py" }, { "content": "from __future__ import print_function\n\nimport json\nimport pickle\n\nfrom google.appengine.ext import ndb\n\n\nclass PotentialBracket(ndb.Model):\n year = ndb.IntegerProperty(default=2017, indexed=False)\n highest_names = ndb.StringProperty(repeated=True, indexed=False)\n highest_scores = ndb.IntegerProperty(repeated=True, indexed=False)\n lowest_names = ndb.StringProperty(repeated=True, indexed=False)\n lowest_scores = ndb.IntegerProperty(repeated=True, indexed=False)\n\n\nwith open('winning_scores.json', 'r') as fh:\n ALL_OUTCOMES = json.load(fh)\nwith open('bracket_links.json', 'r') as fh:\n BRACKETS = json.load(fh)\nENTRY_TO_NAME = {str(val): key for key, val in BRACKETS.items()}\nPAGE_SIZE = 100\n\n\ndef to_entity(key, outcomes):\n lowest, highest = outcomes[key]\n lowest_scores = [pair[0] for pair in lowest]\n lowest_names = [ENTRY_TO_NAME[pair[1]] for pair in lowest]\n highest_scores = [pair[0] for pair in highest]\n highest_names = [ENTRY_TO_NAME[pair[1]] for pair in highest]\n\n return PotentialBracket(\n id=key, lowest_scores=lowest_scores, lowest_names=lowest_names,\n highest_scores=highest_scores, highest_names=highest_names)\n\n\ndef store_entities():\n # Page 100 items at a time.\n base_index = 0\n\n # Use sorted so it is deterministic.\n keys = sorted(ALL_OUTCOMES.keys())\n num_keys = len(keys)\n while base_index < num_keys:\n max_index = min(num_keys, base_index + PAGE_SIZE)\n entities = [to_entity(keys[i], ALL_OUTCOMES)\n for i in xrange(base_index, max_index)]\n ndb.put_multi(entities)\n base_index += PAGE_SIZE\n msg = 'Completed {}'.format(base_index)\n print(msg)\n\n\ndef store_sweet16():\n class BracketContainer(ndb.Model):\n year = ndb.IntegerProperty(default=2017, indexed=False)\n bracket = ndb.PickleProperty()\n\n with open('complete_bracket_sweet_16.pkl', 'r') as fh:\n pickled_obj = pickle.load(fh)\n\n entity = BracketContainer(id='sweet16', bracket=pickled_obj)\n ndb.put_multi([entity])\n\n\ndef main():\n store_entities()\n store_sweet16()\n", "id": "1879149", "language": "Python", "matching_score": 4.899638652801514, "max_stars_count": 1, "path": "application/load_dev_datastore.py" }, { "content": "from __future__ import print_function\n\nimport json\n\nfrom google.cloud import datastore\n\nimport local_settings\nimport utils\n\n\nwith open(utils.WINNING_SCORES, 'r') as fh:\n ALL_OUTCOMES = json.load(fh)\nwith open(utils.BRACKET_LINKS_FILE, 'r') as fh:\n BRACKETS = json.load(fh)\nENTRY_TO_NAME = {str(val): key for key, val in BRACKETS.items()}\nPAGE_SIZE = 500\n\n\ndef to_entity(key, client, outcomes):\n lowest, highest = outcomes[key]\n lowest_scores = [pair[0] for pair in lowest]\n lowest_names = [ENTRY_TO_NAME[pair[1]] for pair in 
lowest]\n highest_scores = [pair[0] for pair in highest]\n highest_names = [ENTRY_TO_NAME[pair[1]] for pair in highest]\n\n ds_key = client.key('PotentialBracket', key)\n entity = datastore.Entity(ds_key)\n entity['year'] = 2017\n entity['lowest_scores'] = lowest_scores\n entity['lowest_names'] = lowest_names\n entity['highest_scores'] = highest_scores\n entity['highest_names'] = highest_names\n\n return entity\n\n\ndef store_entities():\n client = datastore.Client(project=local_settings.DATASET_ID)\n\n # Page ``PAGE_SIZE`` items at a time.\n base_index = 0\n\n # Use sorted so it is deterministic.\n keys = sorted(ALL_OUTCOMES.keys())\n num_keys = len(keys)\n while base_index < num_keys:\n max_index = min(num_keys, base_index + PAGE_SIZE)\n entities = [to_entity(keys[i], client, ALL_OUTCOMES)\n for i in xrange(base_index, max_index)]\n client.put_multi(entities)\n base_index += PAGE_SIZE\n msg = 'Completed {}'.format(base_index)\n print(msg)\n\n\nif __name__ == '__main__':\n store_entities()\n", "id": "9166371", "language": "Python", "matching_score": 2.3706023693084717, "max_stars_count": 1, "path": "write_winners_to_datastore.py" }, { "content": "from google.cloud import datastore\n\nimport local_settings\nimport utils\n\n\ndef main():\n client = datastore.Client(project=local_settings.DATASET_ID)\n with open(utils.SWEET16_PICKLE, 'rb') as fh:\n pickle_contents = fh.read()\n\n key = client.key('BracketContainer', 'sweet16')\n entity = datastore.Entity(key, exclude_from_indexes=('bracket',))\n entity['bracket'] = pickle_contents\n entity['year'] = 2017\n client.put(entity)\n\n\nif __name__ == '__main__':\n main()\n", "id": "8051402", "language": "Python", "matching_score": 1.8686466217041016, "max_stars_count": 1, "path": "write_sweet16_to_datastore.py" }, { "content": "import os\n\nimport local_settings\n\n\nBRACKET_LINKS_FILE = os.path.join(\n local_settings.YEAR, 'bracket_links.json')\nTEAM_MAP_FILENAME = os.path.join(\n local_settings.YEAR, 'team_map.json')\nBASE_BRACKET_PICKLE = os.path.join(\n local_settings.YEAR, 'base_bracket.pkl')\nSWEET16_PICKLE = os.path.join(\n local_settings.YEAR, 'complete_bracket_sweet_16.pkl')\nREDUCED_SCENARIOS = os.path.join(\n local_settings.YEAR, 'reduced_completed_scenarios.json')\nREDUCED_FILLED_OUT = os.path.join(\n local_settings.YEAR, 'reduced_all_filled_out.json')\nWINNING_SCORES = os.path.join(\n local_settings.YEAR, 'winning_scores.json')\n\n\ndef prepare_directory(dirname):\n if not os.path.isdir(dirname):\n msg = 'Creating {}'.format(dirname)\n print(msg)\n os.mkdir(dirname)\n else:\n msg = 'Already exists: {}'.format(dirname)\n print(msg)\n", "id": "5116764", "language": "Python", "matching_score": 0.0065586743876338005, "max_stars_count": 1, "path": "utils.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport socket\nimport threading\n\nimport tcp_h2_describe._buffer\nimport tcp_h2_describe._describe\nimport tcp_h2_describe._display\nimport tcp_h2_describe._proxy_protocol\n\n\ndef 
redirect_socket(recv_socket, send_socket, description, is_client):\n \"\"\"Redirect a TCP stream from one socket to another.\n\n This only redirects in **one** direction, i.e. it RECVs from\n ``recv_socket`` and SENDs to ``send_socket``.\n\n Args:\n recv_socket (socket.socket): The socket that will be RECV-ed from.\n send_socket (socket.socket): The socket that will be SENT to.\n description (str): A description of the RECV->SEND relationship for\n this socket pair.\n is_client (bool): Indicates if the ``recv_socket`` is a client socket.\n For a client socket, the connection **may** begin with a proxy\n protocol line and **should** begin with the client connection\n preface.\n \"\"\"\n expect_preface = False\n proxy_line = None\n if is_client:\n expect_preface = True\n proxy_line = tcp_h2_describe._proxy_protocol.consume_proxy_line(\n recv_socket, send_socket\n )\n\n tcp_chunk = tcp_h2_describe._buffer.recv(recv_socket, send_socket)\n while tcp_chunk != b\"\":\n # Describe the chunk that was just encountered\n message = tcp_h2_describe._describe.describe(\n tcp_chunk, description, expect_preface, proxy_line\n )\n tcp_h2_describe._display.display(message)\n # After the first usage, make sure ``expect_preface`` and\n # ``proxy_line`` are not set.\n expect_preface = False\n proxy_line = None\n\n tcp_h2_describe._buffer.send(send_socket, tcp_chunk)\n # Read the next chunk from the socket.\n tcp_chunk = tcp_h2_describe._buffer.recv(recv_socket, send_socket)\n\n tcp_h2_describe._display.display(\n f\"Done redirecting socket for {description}\"\n )\n recv_socket.close()\n\n\ndef connect_socket_pair(client_socket, client_addr, server_host, server_port):\n \"\"\"Connect two socket pairs for bidirectional RECV<->SEND.\n\n Since calls to RECV (both on the client and the server sockets) can block,\n this will spawn two threads that simultaneously read (via RECV) from one\n socket and write (via SEND) into the other socket.\n\n Args:\n client_socket (socket.socket): An already open socket from a client\n that has made a request directly to a running ``tcp-h2-describe``\n proxy.\n client_addr (str): The address of the client socket; used for printing\n information about the connection. Note that\n ``client_socket.getsockname()`` could be used directly to recover\n this information.\n server_host (str): The host name where the \"server\" process is running\n (i.e. 
the server that is being proxied).\n server_port (int): A port number for a running \"server\" process.\n \"\"\"\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # See: https://docs.python.org/3/library/socket.html#timeouts-and-the-accept-method\n server_socket.setblocking(0)\n indicator = server_socket.connect_ex((server_host, server_port))\n if indicator not in (0, errno.EINPROGRESS):\n err_name = errno.errorcode.get(indicator, \"UNKNOWN\")\n raise BlockingIOError(indicator, f\"Error: {err_name}\")\n\n server_addr = f\"{server_host}:{server_port}\"\n read_description = f\"client({client_addr})->proxy->server({server_addr})\"\n t_read = threading.Thread(\n target=redirect_socket,\n args=(client_socket, server_socket, read_description, True),\n )\n write_description = f\"server({server_addr})->proxy->client({client_addr})\"\n t_write = threading.Thread(\n target=redirect_socket,\n args=(server_socket, client_socket, write_description, False),\n )\n\n t_read.start()\n t_write.start()\n\n t_read.join()\n t_write.join()\n", "id": "4259540", "language": "Python", "matching_score": 5.496540069580078, "max_stars_count": 0, "path": "src/tcp_h2_describe/_connect.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport socket\nimport threading\nimport time\n\nimport _buffer\nimport _display\n\n\ndef _maybe_log_line(log_queue, tcp_chunk, client_socket, server_socket):\n \"\"\"Sent a log line to the log queue, if set.\n\n Args:\n log_queue (Optional[queue.Queue]): The queue where log lines will be\n pushed, or :data:`None`.\n tcp_chunk (bytes): Chunk of data that was proxied.\n client_socket (socket.socket): The client socket.\n server_socket (socket.socket): The server socket.\n \"\"\"\n if log_queue is None:\n return\n\n log_queue.put((time.time_ns(), tcp_chunk, client_socket, server_socket))\n\n\ndef redirect_socket(recv_socket, send_socket, description, log_queue):\n \"\"\"Redirect a TCP stream from one socket to another.\n\n This only redirects in **one** direction, i.e. 
it RECVs from\n ``recv_socket`` and SENDs to ``send_socket``.\n\n Args:\n recv_socket (socket.socket): The socket that will be RECV-ed from.\n send_socket (socket.socket): The socket that will be SENT to.\n description (str): A description of the RECV->SEND relationship for\n this socket pair.\n log_queue (Optional[queue.Queue]): The queue where log lines will be\n pushed, or :data:`None`.\n \"\"\"\n tcp_chunk = _buffer.recv(recv_socket, send_socket)\n while tcp_chunk != b\"\":\n _maybe_log_line(log_queue, tcp_chunk, recv_socket, send_socket)\n\n _buffer.send(send_socket, tcp_chunk)\n # Read the next chunk from the socket.\n tcp_chunk = _buffer.recv(recv_socket, send_socket)\n\n _display.display(f\"Done redirecting socket for {description}\")\n recv_socket.close()\n\n\ndef connect_socket_pair(\n log_queue, client_socket, client_addr, server_host, server_port\n):\n \"\"\"Connect two socket pairs for bidirectional RECV<->SEND.\n\n Since calls to RECV (both on the client and the server sockets) can block,\n this will spawn two threads that simultaneously read (via RECV) from one\n socket and write (via SEND) into the other socket.\n\n Args:\n log_queue (queue.Queue): The queue where log lines will be pushed.\n client_socket (socket.socket): An already open socket from a client\n that has made a request directly to a running\n ``tcp-replay-reverse-proxy`` proxy.\n client_addr (str): The address of the client socket; used for printing\n information about the connection. Note that\n ``client_socket.getsockname()`` could be used directly to recover\n this information.\n server_host (str): The host name where the \"server\" process is running\n (i.e. the server that is being proxied).\n server_port (int): A port number for a running \"server\" process.\n \"\"\"\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # See: https://docs.python.org/3/library/socket.html#timeouts-and-the-accept-method\n server_socket.setblocking(0)\n indicator = server_socket.connect_ex((server_host, server_port))\n if indicator not in (0, errno.EINPROGRESS):\n err_name = errno.errorcode.get(indicator, \"UNKNOWN\")\n raise BlockingIOError(indicator, f\"Error: {err_name}\")\n\n server_addr = f\"{server_host}:{server_port}\"\n read_description = f\"client({client_addr})->proxy->server({server_addr})\"\n # Only log the lines sent **to** the server.\n t_read = threading.Thread(\n target=redirect_socket,\n args=(client_socket, server_socket, read_description, log_queue),\n )\n write_description = f\"server({server_addr})->proxy->client({client_addr})\"\n t_write = threading.Thread(\n target=redirect_socket,\n args=(server_socket, client_socket, write_description, None),\n )\n\n t_read.start()\n t_write.start()\n\n t_read.join()\n t_write.join()\n", "id": "1101456", "language": "Python", "matching_score": 3.7216978073120117, "max_stars_count": 0, "path": "server/_connect.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport queue\nimport select\nimport socket\nimport threading\nimport 
time\n\nimport _connect\nimport _display\nimport _keepalive\nimport _save_replay_log\n\n\nPROXY_HOST = \"0.0.0.0\"\nBACKLOG = 5\nKEEP_ALIVE_INTERVAL = 180 # 3 minutes, in seconds\n# Number of log lines to write before puts to queue will block\nQUEUE_BUFFER = 256\n\n\ndef accept(non_blocking_socket):\n \"\"\"Accept a connection on a non-blocking socket.\n\n Since the socket is non-blocking, a **blocking** call to\n ``select.select()`` is used to wait until the socket is ready.\n\n Args:\n non_blocking_socket (socket.socket): A socket that will block to accept\n a connection.\n\n Returns:\n Tuple[socket.socket, str]: A pair of:\n * The socket of the client connection that was accepted\n * The address (IP and port) of the client socket\n\n Raises:\n ValueError: If ``non_blocking_socket`` is not readable after\n ``select.select()`` returns.\n \"\"\"\n readable, _, _ = select.select([non_blocking_socket], [], [])\n if readable != [non_blocking_socket]:\n raise ValueError(\"Socket not ready to accept connections\")\n\n client_socket, (ip_addr, port) = non_blocking_socket.accept()\n # See: https://docs.python.org/3/library/socket.html#timeouts-and-the-accept-method\n client_socket.setblocking(0)\n # Turn on KEEPALIVE for the connection.\n _keepalive.set_keepalive(client_socket, KEEP_ALIVE_INTERVAL)\n\n client_addr = f\"{ip_addr}:{port}\"\n return client_socket, client_addr\n\n\ndef _serve_proxy(all_threads, log_queue, proxy_port, server_host, server_port):\n \"\"\"Serve the proxy.\n\n This is a \"happy path\" implementation for ``serve_proxy`` that doesn't\n worry about interrupt handling (e.g. ``KeyboardInterrupt``).\n\n Args:\n all_threads (List[threading.Thread]): A list of threads to append to.\n We utilize the fact that `list.append()` is thread-safe in Python.\n log_queue (queue.Queue): The queue where log lines will be pushed.\n proxy_port (int): A legal port number that the caller has permissions\n to bind to.\n server_host (str): The host name where the server process is\n running (i.e. the server that is being proxied).\n server_port (int): A port number for a running \"server\" process.\n \"\"\"\n proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n proxy_socket.setblocking(0)\n proxy_socket.bind((PROXY_HOST, proxy_port))\n proxy_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n proxy_socket.listen(BACKLOG)\n _display.display(\n \"Starting tcp-replay-reverse-proxy proxy server on port \"\n f\"{proxy_port}\\n Proxying server located at \"\n f\"{server_host}:{server_port}\"\n )\n\n while True:\n client_socket, client_addr = accept(proxy_socket)\n _display.display(f\"Accepted connection from {client_addr}\")\n # NOTE: Nothing actually `.join()`-s this thread.\n t_handle = threading.Thread(\n target=_connect.connect_socket_pair,\n args=(\n log_queue,\n client_socket,\n client_addr,\n server_host,\n server_port,\n ),\n )\n t_handle.start()\n all_threads.append(t_handle)\n\n\ndef serve_proxy(*, proxy_port, server_host, server_port, replay_log):\n \"\"\"Serve the proxy.\n\n This should run as a top-level server and CLI invocations of\n ``tcp-replay-reverse-proxy`` will directly invoke it.\n\n Args:\n proxy_port (int): A legal port number that the caller has permissions\n to bind to.\n server_host (Optional[str]): The host name where the server process is\n running (i.e. 
the server that is being proxied).\n server_port (int): A port number for a running \"server\" process.\n replay_log (pathlib.Path): The file where the replay log will be\n written.\n \"\"\"\n # TODO: Limit the size of the thread pool.\n # e.g. see\n # https://github.com/dhermes/tcp-h2-describe/blob/73c135b37550858c322b7b67c84333381afd3c69/src/tcp_h2_describe/_serve.py#L105\n done_event = threading.Event()\n log_queue = queue.Queue(maxsize=QUEUE_BUFFER)\n save_log_thread = threading.Thread(\n target=_save_replay_log.save_log_worker,\n args=(replay_log, log_queue, done_event),\n )\n save_log_thread.start()\n all_threads = [save_log_thread]\n\n try:\n _serve_proxy(\n all_threads, log_queue, proxy_port, server_host, server_port\n )\n except KeyboardInterrupt:\n _display.display(\n \"Stopping tcp-replay-reverse-proxy proxy server \"\n f\"on port {proxy_port}\"\n )\n _display.display(\"Waiting for request handlers to complete...\")\n done_event.set()\n # TODO: Add thread shutdown (possibly via a `threading.Event`) instead\n # of just waiting for each socket to be closed.\n for t_handle in all_threads:\n t_handle.join()\n", "id": "2468175", "language": "Python", "matching_score": 5.304820537567139, "max_stars_count": 0, "path": "server/_serve.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport select\nimport socket\nimport threading\nimport time\n\nimport tcp_h2_describe._connect\nimport tcp_h2_describe._display\nimport tcp_h2_describe._keepalive\n\n\nPROXY_HOST = \"0.0.0.0\"\nDEFAULT_SERVER_HOST = \"localhost\"\nBACKLOG = 5\nKEEP_ALIVE_INTERVAL = 180 # 3 minutes, in seconds\n\n\ndef accept(non_blocking_socket):\n \"\"\"Accept a connection on a non-blocking socket.\n\n Since the socket is non-blocking, a **blocking** call to\n ``select.select()`` is used to wait until the socket is ready.\n\n Args:\n non_blocking_socket (socket.socket): A socket that will block to accept\n a connection.\n\n Returns:\n Tuple[socket.socket, str]: A pair of:\n * The socket of the client connection that was accepted\n * The address (IP and port) of the client socket\n\n Raises:\n ValueError: If ``non_blocking_socket`` is not readable after\n ``select.select()`` returns.\n \"\"\"\n readable, _, _ = select.select([non_blocking_socket], [], [])\n if readable != [non_blocking_socket]:\n raise ValueError(\"Socket not ready to accept connections\")\n\n client_socket, (ip_addr, port) = non_blocking_socket.accept()\n # See: https://docs.python.org/3/library/socket.html#timeouts-and-the-accept-method\n client_socket.setblocking(0)\n # Turn on KEEPALIVE for the connection.\n tcp_h2_describe._keepalive.set_keepalive(\n client_socket, KEEP_ALIVE_INTERVAL\n )\n\n client_addr = f\"{ip_addr}:{port}\"\n return client_socket, client_addr\n\n\ndef _serve_proxy(proxy_port, server_port, server_host, update_threads):\n \"\"\"Serve the proxy.\n\n This is a \"happy path\" implementation for ``serve_proxy`` that doesn't\n worry about interrupt handling (e.g. 
``KeyboardInterrupt``).\n\n Args:\n proxy_port (int): A legal port number that the caller has permissions\n to bind to.\n server_port (int): A port number for a running \"server\" process.\n server_host (str): The host name where the server process is\n running (i.e. the server that is being proxied). Defaults to\n ``localhost``.\n update_threads (Callable[[threading.Thread], None]): A callable that\n takes a single thread and does not return. Used to track state\n of the request handling threads by external caller.\n \"\"\"\n proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n proxy_socket.setblocking(0)\n proxy_socket.bind((PROXY_HOST, proxy_port))\n proxy_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n proxy_socket.listen(BACKLOG)\n tcp_h2_describe._display.display(\n f\"Starting tcp-h2-describe proxy server on port {proxy_port}\\n\"\n f\" Proxying server located at {server_host}:{server_port}\"\n )\n\n while True:\n client_socket, client_addr = accept(proxy_socket)\n tcp_h2_describe._display.display(\n f\"Accepted connection from {client_addr}\"\n )\n # NOTE: Nothing actually `.join()`-s this thread.\n t_handle = threading.Thread(\n target=tcp_h2_describe._connect.connect_socket_pair,\n args=(client_socket, client_addr, server_host, server_port),\n )\n t_handle.start()\n update_threads(t_handle)\n\n\nclass UpdateThreads:\n \"\"\"Closure around list of currently active threads.\n\n An instance of this class is expected to be used by ``_serve_proxy()``\n as the ``update_threads`` argument.\n\n Args:\n prune_interval (Optional[float]): Time (in seconds) between \"pruning\"\n active request threads. The pruning only occurs when ``__call__``\n is invoked, which is typically immediately after ACCEPT-ing a new\n request.\n \"\"\"\n\n def __init__(self, prune_interval=15.0):\n self.prune_interval = prune_interval\n self.active_threads = []\n self.last_prune = time.monotonic()\n\n def __call__(self, t_handle):\n \"\"\"Call this instance with a single thread.\n\n Args:\n t_handle (threading.Thread): A newly created thread that is\n handling a request.\n \"\"\"\n self.active_threads.append(t_handle)\n # Check if we should prune the active threads.\n now = time.monotonic()\n if now - self.last_prune > self.prune_interval:\n self.prune_inactive()\n self.last_prune = now\n\n def prune_inactive(self):\n \"\"\"Prune number of active request handling threads.\n\n This updates ``self.active_threads`` in place.\n \"\"\"\n self.active_threads = [\n t_handle for t_handle in self.active_threads if t_handle.is_alive()\n ]\n\n def wait_all(self):\n \"\"\"Wait until all active threads have completed.\"\"\"\n for t_handle in self.active_threads:\n t_handle.join()\n\n\ndef serve_proxy(proxy_port, server_port, server_host=DEFAULT_SERVER_HOST):\n \"\"\"Serve the proxy.\n\n This should run as a top-level server and CLI invocations of\n ``tcp-h2-describe`` will directly invoke it.\n\n Args:\n proxy_port (int): A legal port number that the caller has permissions\n to bind to.\n server_port (int): A port number for a running \"server\" process.\n server_host (Optional[str]): The host name where the server process is\n running (i.e. the server that is being proxied). 
Defaults to\n ``localhost``.\n \"\"\"\n update_threads = UpdateThreads()\n try:\n _serve_proxy(proxy_port, server_port, server_host, update_threads)\n except KeyboardInterrupt:\n tcp_h2_describe._display.display(\n f\"Stopping tcp-h2-describe proxy server on port {proxy_port}\"\n )\n tcp_h2_describe._display.display(\n \"Waiting for request handlers to complete...\"\n )\n update_threads.wait_all()\n", "id": "4387542", "language": "Python", "matching_score": 1.9480408430099487, "max_stars_count": 0, "path": "src/tcp_h2_describe/_serve.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport socket\nimport sys\n\n\nIS_MACOS = sys.platform == \"darwin\"\nIS_LINUX = sys.platform in (\"linux\", \"linux2\")\n# See: https://github.com/golang/go/blob/go1.12.9/src/syscall/zerrors_darwin_amd64.go#L1017\nMACOS_TCP_KEEPALIVE = 0x10\n\n\ndef increase_sockopt(socket_, level, option, value):\n \"\"\"Increase a socket option to ``value``.\n\n If the currently set value for that ``option`` equals or exceeds ``value``,\n this will do nothing.\n\n Args:\n level (int): The protocol level where the option should be set.\n option (int): The option to set for the protocol level.\n value (int): The value to set for the socket option.\n\n Returns:\n bool: Indicating if the value was set.\n \"\"\"\n current_value = socket_.getsockopt(level, option)\n if current_value >= value:\n return False\n\n socket_.setsockopt(level, option, value)\n return True\n\n\ndef set_keepalive(socket_, seconds):\n \"\"\"Set keepalive options on a TCP socket.\n\n Some helpful resources:\n\n https://stackoverflow.com/a/14855726\n https://github.com/golang/go/blob/go1.12.9/src/net/tcpsockopt_unix.go#L19-L22\n https://github.com/golang/go/blob/go1.12.9/src/net/sockopt_posix.go#L116-L120\n https://github.com/golang/go/blob/go1.12.9/src/net/tcpsockopt_darwin.go#L19-L22\n\n Note that ``increase_sockopt()`` is used here rather than directly calling\n ``setsockopt``. This way the keepalive interval **exceeds** ``seconds``,\n this won't downgrade the window. For example, the ``TCP_KEEPIDLE`` (on\n Linux) / ``TCP_KEEPALIVE`` (on macOS) interval is typically 2 hours by\n default so calling ``setsockopt`` may actually decrease this interval. 
On\n the other hand the ``TCP_KEEPINTVL`` value (on Linux) is 75 seconds, so\n it may be worthwhile to increase this.\n\n Args:\n socket_ (socket.socket): The socket to have keepalive set.\n seconds (int): The number of seconds to use for keepalive.\n\n Raises:\n NotImplementedError: If the current platform is not macOS or Linux.\n \"\"\"\n if IS_LINUX:\n socket_.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n increase_sockopt(\n socket_, socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, seconds\n )\n increase_sockopt(\n socket_, socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, seconds\n )\n return\n\n if IS_MACOS:\n socket_.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n increase_sockopt(\n socket_, socket.IPPROTO_TCP, MACOS_TCP_KEEPALIVE, seconds\n )\n return\n\n raise NotImplementedError(f\"Unsupported platform {sys.platform}\")\n", "id": "3170152", "language": "Python", "matching_score": 1.2729398012161255, "max_stars_count": 0, "path": "src/tcp_h2_describe/_keepalive.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport select\nimport socket\n\nimport tcp_h2_describe._buffer\nimport tcp_h2_describe._display\n\n\nPROXY_PREFIX = b\"PROXY\"\n\n\ndef verify_inet_protocol(inet_protocol):\n \"\"\"Verify the ``INET_PROTOCOL`` from a proxy protocol line.\n\n Args:\n inet_protocol (bytes): The segment from the proxy protocol line.\n\n Returns:\n socket.AddressFamily: The address family enum associated with the\n protocol.\n\n Raises:\n ValueError: If ``inet_protocol`` does not match ``TPC{4,6}``.\n \"\"\"\n if inet_protocol == b\"TCP4\":\n return socket.AF_INET\n if inet_protocol == b\"TCP6\":\n return socket.AF_INET6\n\n raise ValueError(f\"Unhandled protocol type: {inet_protocol}\")\n\n\ndef verify_ip(ip_, address_family):\n \"\"\"Verify that a string is an IP in a given family (IPv4 or IPv6).\n\n Args:\n ip_ (bytes): The IP address as a bytestring.\n address_family (socket.AddressFamily): The expected address family of\n the IP address,\n\n Returns:\n str: The parsed IP address as a string.\n\n Raises:\n ValueError: If ``ip_`` is not valid for ``address_family``.\n \"\"\"\n try:\n ip_ascii = ip_.decode(\"ascii\")\n except:\n ip_ascii = \"\"\n\n try:\n socket.inet_pton(address_family, ip_ascii)\n except OSError:\n raise ValueError(\n f\"Invalid IP {ip_} for address family {address_family}\"\n )\n\n return ip_ascii\n\n\ndef verify_port(port_str):\n \"\"\"Verify that a bytestring is a valid port.\n\n Args:\n port_str (bytes): The port as a bytestring.\n\n Returns:\n int: The parsed port.\n\n Raises:\n ValueError: If ``port_str`` is not a valid port.\n \"\"\"\n try:\n port = int(port_str.decode(\"ascii\"))\n except:\n port = -1\n\n if not 0 < port < 0x10000:\n raise ValueError(f\"Invalid port: {port_str}\")\n\n return port\n\n\ndef read_next_byte(recv_socket, send_socket):\n \"\"\"Read the next byte (via RECV) from an open socket.\n\n This assumes ``recv_socket`` is non-blocking, so first waits until the\n socket is ready via ``select.select()``.\n\n Args:\n recv_socket 
(socket.socket): A socket to RECV from.\n send_socket (socket.socket): A socket connected (on the \"other end\") to\n ``recv_socket``.\n\n Returns:\n bytes: The byte that was RECV-ed.\n\n Raises:\n RuntimeError: If ``wait_readable`` returns :data:`None`; this\n indicates that the ``send_socket`` is closed.\n RuntimeError: If the RECV returns an empty bytestring, indicating the\n TCP stream has no more data.\n \"\"\"\n recv_socket = tcp_h2_describe._buffer.wait_readable(\n recv_socket, send_socket\n )\n if recv_socket is None:\n raise RuntimeError(\"Trying to read next byte on a closed connection.\")\n\n next_byte = recv_socket.recv(1)\n if len(next_byte) != 1:\n raise RuntimeError(\n \"Trying to read next byte on stream with no more data.\"\n )\n\n return next_byte\n\n\ndef consume_proxy_line(recv_socket, send_socket):\n \"\"\"(Maybe) consume the proxy protocol (first) line of TCP packet data.\n\n .. proxy protocol: https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html\n\n This assumes ``recv_socket`` is non-blocking. Uses ``MSG_PEEK`` to check\n if the first five bytes are ``PROXY`` and exits early if not. After\n confirming the first line **is** a `proxy protocol`_ line, this reads until\n a newline byte is reached, then verifies each of the (six) parts in the\n header line.\n\n Args:\n recv_socket (socket.socket): A socket to RECV from.\n send_socket (socket.socket): A socket connected (on the \"other end\") to\n ``recv_socket``.\n\n Returns:\n Optional[bytes]: The proxy protocol line (including the CRLF) if the\n first line of TCP packet data begins with ``PROXY ...``, otherwise\n :data:`None`.\n\n Raises:\n RuntimeError: If ``wait_readable`` returns :data:`None`; this\n indicates that the ``send_socket`` is closed.\n ValueError: If the character immediately preceding the newline is not\n a carriage return (lines from TCP packet data are CRLF delimited).\n ValueError: If the proxy protocol line does not have 6\n (space-delimited) parts.\n \"\"\"\n recv_socket = tcp_h2_describe._buffer.wait_readable(\n recv_socket, send_socket\n )\n if recv_socket is None:\n raise RuntimeError(\"Socket not readable when checking for proxy line\")\n\n prefix = recv_socket.recv(len(PROXY_PREFIX), socket.MSG_PEEK)\n if prefix != PROXY_PREFIX:\n return None\n\n read_bytes = []\n next_byte = read_next_byte(recv_socket, send_socket)\n while next_byte != b\"\\n\":\n read_bytes.append(next_byte)\n next_byte = read_next_byte(recv_socket, send_socket)\n\n if read_bytes[-1] != b\"\\r\":\n raise ValueError(\"Expected first line to end in CRLF\")\n\n proxy_protocol_line = b\"\".join(read_bytes[:-1])\n proxy_parts = proxy_protocol_line.split(b\" \")\n\n if len(proxy_parts) != 6:\n raise ValueError(\n f\"Invalid header line {proxy_protocol_line}; \"\n f\"has {len(proxy_parts)} parts\"\n )\n\n _, inet_protocol, client_ip, proxy_ip, client_port, proxy_port = (\n proxy_parts\n )\n address_family = verify_inet_protocol(inet_protocol)\n client_ip = verify_ip(client_ip, address_family)\n proxy_ip = verify_ip(proxy_ip, address_family)\n client_port = verify_port(client_port)\n proxy_port = verify_port(proxy_port)\n\n return proxy_protocol_line + b\"\\r\\n\"\n", "id": "4771506", "language": "Python", "matching_score": 2.579525947570801, "max_stars_count": 0, "path": "src/tcp_h2_describe/_proxy_protocol.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the 
License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport select\n\n\nSELECT_TIMEOUT = 0.05\n\n\ndef is_closed(socket_):\n \"\"\"Determine if a socket is closed.\n\n This uses the associated file descriptor as a proxy for \"closed\".\n\n Args:\n socket_ (socket.socket): The socket to check.\n\n Returns:\n bool: Indicates if closed or open.\n \"\"\"\n return socket_.fileno() == -1\n\n\ndef wait_readable(recv_socket, send_socket):\n \"\"\"Wait until a non-blocking socket is readable.\n\n Args:\n recv_socket (socket.socket): A socket to RECV from.\n send_socket (socket.socket): A socket connected (on the \"other end\") to\n ``recv_socket``.\n\n Returns:\n Optional[socket.socket]: Either ``recv_socket`` if the connection is\n still open or :data:`None`.\n\n Raises:\n ValueError: If ``recv_socket`` is not readable after\n ``select.select()`` returns.\n \"\"\"\n while True:\n readable, _, _ = select.select([recv_socket], [], [], SELECT_TIMEOUT)\n if readable:\n break\n # If the \"other end\" of the socket is closed, ``recv_socket`` is done.\n if is_closed(send_socket):\n return None\n\n if readable != [recv_socket]:\n raise ValueError(\"Socket not ready to RECV\")\n\n return recv_socket\n\n\ndef recv(recv_socket, send_socket, buffer_size=0x10000):\n \"\"\"Call ``recv()`` on a socket; with some extra checks.\n\n This **assumes** ``recv_socket`` is non-blocking, so a **blocking** call to\n ``select.select()`` with a timoeut is used to wait until the socket is\n ready. Additionally, the ``send_socket`` is used to determine if the\n connection has been closed.\n\n .. note::\n\n Rather than using the negotiated connection (e.g.\n ``SETTINGS_MAX_FRAME_SIZE``) to set the buffer size, we just use a\n best guess of something \"large enough\" to make sure that each frame\n fits in this size.\n\n If a RECV returns a chunk from the TCP stream **equal** to the buffer\n size, we interpret this as an \"incomplete\" frame and throw a cowardly\n exception. A more robust implementation could try to make subsequent\n ``recv()`` calls to see if the chunk is an entire frame.\n\n Args:\n recv_socket (socket.socket): A socket to RECV from.\n send_socket (socket.socket): A socket connected (on the \"other end\") to\n ``recv_socket``.\n buffer_size (Optional[int]): The size of the read.\n\n Returns:\n bytes: The chunk that was read from the TCP stream.\n\n Raises:\n RuntimeError: If the TCP chunk returned is \"full size\" (i.e. has\n ``buffer_size`` bytes).\n \"\"\"\n recv_socket = wait_readable(recv_socket, send_socket)\n if recv_socket is None:\n # Indicates the \"other end\" of the socket is closed, so we\n # simulate an empty RECV.\n return b\"\"\n\n tcp_chunk = recv_socket.recv(buffer_size)\n if len(tcp_chunk) == buffer_size:\n raise RuntimeError(\n \"TCP RECV() may not have captured entire message frame\"\n )\n\n return tcp_chunk\n\n\ndef send(send_socket, tcp_chunk):\n \"\"\"Call ``send()`` on a socket; with some extra checks.\n\n .. note::\n\n This throws a cowardly exception if the entire ``tcp_chunk`` cannot be\n sent in a single SND. 
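The notes in ``recv`` and ``send`` both point at a more robust alternative; a minimal sketch of what those follow-ups could look like (the names ``recv_all``/``send_all`` are ours, not part of this module):

def recv_all(recv_socket, send_socket, buffer_size=0x10000):
    # Keep RECV-ing while full-sized chunks come back instead of raising;
    # stops on the first short chunk or when the peer socket is closed.
    chunks = []
    while True:
        ready = wait_readable(recv_socket, send_socket)
        if ready is None:
            break
        chunk = ready.recv(buffer_size)
        chunks.append(chunk)
        if len(chunk) < buffer_size:
            break
    return b"".join(chunks)


def send_all(send_socket, tcp_chunk):
    # Keep calling send() until the whole chunk is written (or use sendall()).
    total_sent = 0
    while total_sent < len(tcp_chunk):
        total_sent += send_socket.send(tcp_chunk[total_sent:])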
A more robust implementation could try to make\n subsequent ``send()`` calls or just use ``sendall()``.\n\n Args:\n send_socket (socket.socket): A socket to SEND to.\n tcp_chunk (bytes): A chunk to send to the socket.\n\n Raises:\n RuntimeError: If SEND returns a length other than the size of\n ``tcp_chunk``.\n \"\"\"\n bytes_sent = send_socket.send(tcp_chunk)\n if bytes_sent != len(tcp_chunk):\n raise RuntimeError(\"Not all bytes were sent\")\n", "id": "9252123", "language": "Python", "matching_score": 1.308380365371704, "max_stars_count": 0, "path": "server/_buffer.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProvides an implementation for a pluggable capturing reverse proxy server.\n\nThe primary goal of the reverse proxy server is to shuttle TCP packets from\na downstream client to an upstream server. However, the intent is to capture\neach TCP packet with accompanying metadata about the timestamp and an\nidentifier for the socket. Packets (with metadata) will be sent to a\nchannel; consumer(s) of this channel will persist the packets without\nblocking the proxied connection.\n\nIt is designed to be pluggable so that\n- Each new connection **to** the proxy server can be wrapped so that things\n like parsing PROXY protocol headers or unwrapping the content of a TLS\n connection (by terminating TLS directly)\n- Each new connection **to** the upstream server can be wrapped as well,\n so that TCP packets can be written into a raw TCP socket, a TLS socket or\n any other abstraction\n- The consumer of the channel with TCP packets can be customized to write\n to disk, send packets over the network, etc.\n- Minimally invasive metrics and tracing can be added as needed\n\"\"\"\n", "id": "9576536", "language": "Python", "matching_score": 0.19490791857242584, "max_stars_count": 0, "path": "server/__init__.py" }, { "content": "# If you have not yet seen the source in basic_with_auth/main.py,\n# please take a look.\n\n# In this sample we expand on authenticated insertion of a single entity\n# by showing how to insert a collection of entities at once while\n# requiring that the user is authenticated.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\nclass MyModel(EndpointsModel):\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # In standard usage, there is no default way to use request_fields\n # to turn a method that accepts an EndpointsModel subtype into one\n # that accepts a collection of items of the same subtype.\n\n # However, EndpointsModel.method accepts an alternate keyword argument\n # which allows this: request_message.\n\n # By specifying request_message (and/or response_message), the protorpc\n # definition of the request can be directly provided to the method.\n\n # In order to 
get the protorpc definition for a collection, we use the\n # EndpointsModel.ProtoCollection utility which is used to form the\n # responses in EndpointsModel.query_method.\n @MyModel.method(request_message=MyModel.ProtoCollection(),\n response_message=MyModel.ProtoCollection(),\n user_required=True,\n path='mymodel_multi',\n name='mymodel.insert_multi')\n def MyModelMultiInsert(self, my_model_collection):\n # Convert the RPC messages into the corresponding ndb entities.\n # This is necessary because using request_fields makes the request\n # object a raw ProtoRPC message of the given type.\n entities = [MyModel.FromMessage(item_msg)\n for item_msg in my_model_collection.items]\n # Efficiently write the entities to datastore.\n ndb.put_multi(entities)\n # Since the response type is hardcoded as a ProtoRPC message type, the\n # ProtoRPC message must be directly returned. This is different than\n # the typical flow using response_fields since the current method\n # doesn't know how to convert the response from a native ndb.Model\n # into a protorpc message.\n response_items = [entity.ToMessage() for entity in entities]\n response_collection = MyModel.ProtoCollection()(items=response_items)\n return response_collection\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "1682082", "language": "Python", "matching_score": 5.00639533996582, "max_stars_count": 91, "path": "examples/multi_put/main.py" }, { "content": "# If you have not yet seen the source in basic/main.py, please take a look.\n\n# In this sample we override the ProtoRPC message schema of MyModel in both the\n# request and response of MyModelInsert and in the response of MyModelList.\n\n# This is used to randomly set the value of attr2 based on attr1.\nimport random\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# These are used as extra phrases to randomly add to the value of attr1 when\n# setting attr2.\nPHRASES = ['I', 'AM', 'RANDOM', 'AND', 'ARBITRARY']\n\n\nclass MyModel(EndpointsModel):\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # In addition to the arguments used in the MyModel.method decorator in\n # basic/main.py, we also use request_fields and response_fields to override\n # the schema of the ProtoRPC request message and response message,\n # respectively.\n\n # Since request_fields is ('attr1',), instead of the three string fields\n # attr1, attr2 and created, the request message schema will contain a single\n # string field corresponding to the NDB property attr1. Similarly, since\n # response_fields is ('created',), the response message schema will contain a\n # single string field corresponding to the NDB property created.\n @MyModel.method(request_fields=('attr1',),\n response_fields=('created',),\n path='mymodel',\n http_method='POST',\n name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # We use a random value from PHRASES to set attr2 in terms of attr1. 
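A hypothetical request/response body pair for the ``mymodel.insert_multi`` method above, showing the collection shape produced by ``ProtoCollection()`` (all field values are made up):

# POST .../mymodel_multi
request_body = {
    "items": [
        {"attr1": "a-1", "attr2": "b-1"},
        {"attr1": "a-2", "attr2": "b-2"},
    ]
}
# The response echoes the inserted entities, now with "created" populated.
response_body = {
    "items": [
        {"attr1": "a-1", "attr2": "b-1", "created": "2013-01-01T00:00:00"},
        {"attr1": "a-2", "attr2": "b-2", "created": "2013-01-01T00:00:00"},
    ]
}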
Since\n # the request message can only contain a value for attr1, we need to also\n # provide a value for attr2.\n my_model.attr2 = '%s-%s' % (my_model.attr1, random.choice(PHRASES))\n # As in basic/main.py, since created is auto_now_add, the entity gets a new\n # value for created and an ID after being persisted.\n my_model.put()\n return my_model\n\n # As above, in addition to the arguments used in the MyModel.query_method\n # decorator in basic/main.py, we also use collection_fields to override\n # the schema of the ProtoRPC messages that are listed in the \"items\" fields\n # of the query response. As in basic/main.py, there are no query arguments.\n # Since collection_fields is ('attr2', 'created'), each value in the \"items\"\n # list will contain the two string fields corresponding to the NDB properties\n # attr2 and created.\n @MyModel.query_method(collection_fields=('attr2', 'created'),\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n # As in basic/main.py, no filters are applied.\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "151689", "language": "Python", "matching_score": 5.320096015930176, "max_stars_count": 91, "path": "examples/custom_api_response_messages/main.py" }, { "content": "# If you have not yet seen the source in basic/main.py, please take a look.\n\n# In this sample we add an additional method MyModelGet which allows a specific\n# entity to be retrieved.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# In this model definition, we have included _message_fields_schema to define\n# a custom ProtoRPC message schema for this model. To see a similar but\n# different way to use custom fields, check out the samples in\n# custom_api_response_messages/main.py and paging/main.py.\nclass MyModel(EndpointsModel):\n # This results in a ProtoRPC message definition with four fields, in the exact\n # order specified here: id, attr1, attr2, and created.\n # The fields corresponding to properties (attr1, attr2 and created) are string\n # fields as in basic/main.py. The field \"id\" will be an integer field\n # representing the ID of the entity in the datastore. For example if\n # my_entity.key is equal to ndb.Key(MyModel, 1), the id is the integer 1.\n\n # The property \"id\" is one of five helper properties provided by default to\n # help you perform common operations like this (retrieving by ID). In addition\n # there is an \"entityKey\" property which provides a base64 encoded version of\n # a datastore key and can be used in a similar fashion as \"id\", and three\n # properties used for queries -- limit, order, pageToken -- which are\n # described in more detail in paging/main.py.\n _message_fields_schema = ('id', 'attr1', 'attr2', 'created')\n\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n @MyModel.method(path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # Here, since the schema includes an ID, it is possible that the entity\n # my_model has an ID, hence we could be specifying a new ID in the datastore\n # or overwriting an existing entity. 
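To illustrate the effect of ``request_fields=('attr1',)`` and ``response_fields=('created',)`` on the insert method above, a hypothetical exchange might look like (values are made up):

# POST .../mymodel with the schema narrowed by request_fields/response_fields.
request_body = {"attr1": "cheese"}                  # only attr1 is accepted
response_body = {"created": "2013-01-01T00:00:00"}  # only created comes back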
If no ID is included in the ProtoRPC\n # request, then no key will be set in the model and the ID will be set after\n # the put completes, as in basic/main.py.\n\n # In either case, the datastore ID from the entity will be returned in the\n # ProtoRPC response message.\n my_model.put()\n return my_model\n\n # This method is not defined in any of the previous examples: it allows an\n # entity to be retrieved from it's ID. As in\n # custom_api_response_messages/main.py, we override the schema of the ProtoRPC\n # request message to limit to a single field: \"id\". Since \"id\" is one of\n # the helper methods provided by EndpointsModel, we may use it as one of our\n # request_fields. In general, other than these five, only properties you\n # define are allowed.\n @MyModel.method(request_fields=('id',),\n path='mymodel/{id}', http_method='GET', name='mymodel.get')\n def MyModelGet(self, my_model):\n # Since the field \"id\" is included, when it is set from the ProtoRPC\n # message, the decorator attempts to retrieve the entity by its ID. If the\n # entity was retrieved, the boolean from_datastore on the entity will be\n # True, otherwise it will be False. In this case, if the entity we attempted\n # to retrieve was not found, we return an HTTP 404 Not Found.\n\n # For more details on the behavior of setting \"id\", see the sample\n # custom_alias_properties/main.py.\n if not my_model.from_datastore:\n raise endpoints.NotFoundException('MyModel not found.')\n return my_model\n\n # This is identical to the example in basic/main.py, however since the\n # ProtoRPC schema for the model now includes \"id\", all the values in \"items\"\n # will also contain an \"id\".\n @MyModel.query_method(path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "9085662", "language": "Python", "matching_score": 6.4862284660339355, "max_stars_count": 91, "path": "examples/simple_get/main.py" }, { "content": "# If you have not yet seen the source in basic/main.py, please take a look.\n\n# In this sample we modify the query parameters in the MyModelList method to\n# allow paging through results.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\nclass MyModel(EndpointsModel):\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n @MyModel.method(path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n my_model.put()\n return my_model\n\n # To add paging functionality, we set the keyword argument query_fields in the\n # MyModel.query_method decorator. By specifying the fields \"limit\", \"order\"\n # and \"pageToken\" as the query fields, we can accept values specializing the\n # query before retrieving results from the datastore. Though \"limit\", \"order\"\n # and \"pageToken\" are not defined as properties on MyModel, they are included\n # as helper properties by the base class EndpointsModel.\n\n # The three helper properties we use here perform the following\n\n # - limit: Allows a limit to be set for the number of results retrieved by a\n # query.\n\n # - order: This allows the result set to be ordered by properties. 
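A small illustration of the "id" helper used by the get method above: for a simple key the integer ID is exactly what the ProtoRPC ``id`` field carries (the key value here is hypothetical):

from google.appengine.ext import ndb

# An entity stored under this key has ProtoRPC "id" equal to 1, so it is
# fetched with GET .../mymodel/1; an unknown ID produces the 404 above.
key = ndb.Key(MyModel, 1)
assert key.integer_id() == 1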
For\n # example, if the value of order is \"attr1\", results of the query\n # will be in ascending order, ordered by \"attr1\". Similarly, if the\n # value of order is \"-attr2\", the results of the query will be in\n # descending order, ordered by \"attr2\".\n\n # Even more complex orders can be created, such as \"attr1,-attr2\",\n # which will first order by attr1 and then within each value order by\n # attr2. However, such queries are not possible in the datastore if\n # no index has been built. See custom_alias_properties/main.py and\n # matching_queries_to_indexes/main.py for examples of how to deal\n # with complex queries.\n\n # - pageToken: This is used for paging within a result set. For example, if a\n # limit of 10 is set, but there are 12 results, then the ProtoRPC\n # response will have \"items\" with 10 values and a nextPageToken\n # which contains a string cursor for the query. By using this\n # value as pageToken in a subsequent query, the remaining 2\n # results can be retrieved and the ProtoRPC response will not\n # contain a nextPageToken since there are no more results.\n\n # For a bit more on the other helper properties provided by EndpointsModel,\n # see simple_get/main.py. To see how to define your own helper properties, see\n # custom_alias_properties/main.py, matching_queries_to_indexes/main.py and\n # keys_with_ancestors/main.py.\n\n # To see how query fields can be used to perform simple equality filters, see\n # property_filters/main.py.\n @MyModel.query_method(query_fields=('limit', 'order', 'pageToken'),\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "3519522", "language": "Python", "matching_score": 5.785694599151611, "max_stars_count": 91, "path": "examples/paging/main.py" }, { "content": "# If you have not yet seen the source in paging/main.py, please take a look.\n\n# In this sample we modify the query parameters in the MyModelList method to\n# allow querying with simple equality filters.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\nclass MyModel(EndpointsModel):\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n @MyModel.method(path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n my_model.put()\n return my_model\n\n # To add simple filters, we set the keyword argument query_fields in the\n # MyModel.query_method decorator. By specifying the fields \"attr1\" and \"attr2\"\n # as the query fields, we can filter for entities based on the values of the\n # NDB properties attr1 and/or attr2.\n\n # For example, a request /mymodels?attr1=cheese will return all entities with\n # attr1 equal to \"cheese\". The query parameters attr1 and attr2 can be used\n # individually, at the same time, or not at all.\n\n # An NDB property can only be used in query_fields to construct an equality\n # filter. For NDB properties which correspond to ProtoRPC message fields, such\n # as UserProperty or GeoPtProperty (see basic_with_auth/main.py), the values\n # of the property cannot be represented simply via /path?key=value. 
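A hypothetical paging flow against the list method above; the ``nextPageToken`` returned by the first call is fed back as ``pageToken`` (the cursor value is made up):

# First page:  GET .../mymodels?limit=10&order=attr1
first_response = {"items": ["...10 results..."], "nextPageToken": "<cursor>"}
# Second page: GET .../mymodels?limit=10&order=attr1&pageToken=<cursor>
second_response = {"items": ["...remaining results..."]}  # no nextPageToken left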
As a\n # result, such NDB properties are explicitly not allowed in query_fields and\n # if this is attempted a TypeError will be raised.\n @MyModel.query_method(query_fields=('attr1', 'attr2'),\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "11787831", "language": "Python", "matching_score": 5.754035949707031, "max_stars_count": 91, "path": "examples/property_filters/main.py" }, { "content": "import endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# Transitioning an existing model is as easy as replacing ndb.Model with\n# EndpointsModel. Since EndpointsModel inherits from ndb.Model, you will have\n# the same behavior and more functionality.\nclass MyModel(EndpointsModel):\n # By default, the ProtoRPC message schema corresponding to this model will\n # have three string fields: attr1, attr2 and created\n # in an arbitrary order (the ordering of properties in a dictionary is not\n # guaranteed).\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n\n# Use of this decorator is the same for APIs created with or without\n# endpoints-proto-datastore.\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # Instead of the endpoints.method decorator, we can use MyModel.method to\n # define a new endpoints method. Instead of having to convert a\n # ProtoRPC request message into an entity of our model and back again, we\n # start out with a MyModel entity and simply have to return one.\n # Since no overrides for the schema are specified in this decorator, the\n # request and response ProtoRPC message definition will have the three string\n # fields attr1, attr2 and created.\n @MyModel.method(path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # Though we don't actively change the model passed in, two things happen:\n # - The entity gets an ID and is persisted\n # - Since created is auto_now_add, the entity gets a new value for created\n my_model.put()\n return my_model\n\n # As MyModel.method replaces a ProtoRPC request message to an entity of our\n # model, MyModel.query_method replaces it with a query object for our model.\n # By default, this query will take no arguments (the ProtoRPC request message\n # is empty) and will return a response with two fields: items and\n # nextPageToken. \"nextPageToken\" is simply a string field for paging through\n # result sets. \"items\" is what is called a \"MessageField\", meaning its value\n # is a ProtoRPC message itself; it is also a repeated field, meaning we have\n # an array of values rather than a single value. The nested ProtoRPC message\n # in the definition of \"items\" uses the same schema in MyModel.method, so each\n # value in the \"items\" array will have the fields attr1, attr2 and created.\n # As with MyModel.method, overrides can be specified for both the schema of\n # the request that defines the query and the schema of the messages contained\n # in the \"items\" list. We'll see how to use these in further examples.\n @MyModel.query_method(path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n # We have no filters that we need to apply, so we just return the query\n # object as is. 
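The equality filters described above translate into requests like the following (the attr1 example comes from the comments; the attr2 value is made up), and roughly into the query the decorator builds from those parameters:

# Only entities with attr1 == "cheese":
#   GET .../mymodels?attr1=cheese
# Both filters at once (attr2 value is hypothetical):
#   GET .../mymodels?attr1=cheese&attr2=mystring
query = MyModel.query(MyModel.attr1 == "cheese", MyModel.attr2 == "mystring")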
As we'll see in further examples, we can augment the query\n # using environment variables and other parts of the request state.\n return query\n\n\n# Use of endpoints.api_server is the same for APIs created with or without\n# endpoints-proto-datastore.\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "11557323", "language": "Python", "matching_score": 3.566169023513794, "max_stars_count": 91, "path": "examples/basic/main.py" }, { "content": "# If you have not yet seen the source in matching_queries_to_indexes/main.py and\n# custom_alias_properties/main.py, please take a look.\n\n# In this sample we define an EndpointsAliasProperty which does not override\n# one of the helper properties provided by EndpointsModel; this is a first as\n# all the other samples have simply tweaked existing alias properties. We use\n# this property in conjuction with another alias property to define entity keys\n# which have an ancestor -- for example ndb.Key(MyParent, ..., MyModel, ...) --\n# which is slightly more complex than the keys we have seen so far.\n\n# We define an extra model MyParent to hold all the data for the ancestors being\n# used (though this is not strictly necessary, an ancestor key does not need to\n# exist in the datastore to be used). In addition, since we will be requiring\n# that a MyParent entity exists to be used as an ancestor, we provide a method\n# MyParentInsert to allow API users to create or update parent objects.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\n# See matching_queries_to_indexes/main.py for reference on this import.\nfrom endpoints_proto_datastore.ndb import EndpointsAliasProperty\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\nclass MyParent(EndpointsModel):\n # As in simple_get/main.py, by setting _message_fields_schema, we can set a\n # custom ProtoRPC message schema. We set the schema to the alias property\n # \"name\" and ignore the NDB property updated.\n _message_fields_schema = ('name',)\n\n updated = ndb.DateTimeProperty(auto_now=True)\n\n # This is a setter which will be used by the alias property \"name\".\n def NameSet(self, value):\n # The property \"name\" is a string field, so we expect a value passed in from\n # a ProtoRPC message to be a string. Since (as seen below), \"name\" is\n # required, we also need not worry about the case that the value is None.\n if not isinstance(value, basestring):\n raise TypeError('Name must be a string.')\n # We update the key using the name.\n self.UpdateFromKey(ndb.Key(MyParent, value))\n\n # This EndpointsAliasProperty is used for the property \"name\". It is required,\n # meaning that a value must always be set if the corresponding field is\n # contained in a ProtoRPC message schema.\n\n # Since no property_type is specified, the default value of\n # messages.StringField is used.\n\n # See matching_queries_to_indexes/main.py for more information on\n # EndpointsAliasProperty.\n @EndpointsAliasProperty(setter=NameSet, required=True)\n def name(self):\n # First check if the entity has a key.\n if self.key is not None:\n # If the entity has a key, return only the string_id since the property is\n # a string field.\n return self.key.string_id()\n\n\nclass MyModel(EndpointsModel):\n # These values are placeholders to be used when a key is created; the _parent\n # will be used as the ancestor and the _id as the ID. 
For example:\n # ndb.Key(MyParent, _parent, MyModel, _id)\n # Since these values will be set by alias properties which are not set\n # simultaneously, we need to hold them around until both are present before we\n # can create a key from them.\n _parent = None\n _id = None\n\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n # This is a helper method that will set the key on the entity only if both the\n # parent and ID are present. It will be used by property setters that provide\n # values for _parent and _id.\n def SetKey(self):\n # Can only set the key if both the parent and the child ID are set.\n if self._parent is not None and self._id is not None:\n key = ndb.Key(MyParent, self._parent, MyModel, self._id)\n # Will set the key and attempt to update the entity if it exists.\n self.UpdateFromKey(key)\n\n # This is a helper method that will set the _parent and _id values using the\n # entity key, if it exists. It will be used by property getters that retrieve\n # the current values of _parent and _id.\n def SetParts(self):\n # If there is no key, nothing can be set.\n if self.key is not None:\n # If there are not two tuples in the key pairs, a ValueError will occur.\n parent_pair, id_pair = self.key.pairs()\n # Each pair in key pairs will be a tuple (model kind, value) where model\n # kind is a string representing the name of the model and value is the\n # actual string or integer ID that was set.\n self._parent = parent_pair[1]\n self._id = id_pair[1]\n\n # This is a setter which will be used by the alias property \"parent\". This\n # method will be called when parent is set from a ProtoRPC request.\n def ParentSet(self, value):\n # The property \"parent\" is a string field, so we expect a value passed in\n # from a ProtoRPC message to be a string. Since (as seen below), \"parent\" is\n # required, we also need not worry about the case that the value is None.\n if not isinstance(value, basestring):\n raise TypeError('Parent name must be a string.')\n\n self._parent = value\n # After setting the value, we must make sure the parent exists before it can\n # be used as an ancestor.\n if ndb.Key(MyParent, value).get() is None:\n # If the MyParent key does not correspond to an entity in the datastore,\n # we return an HTTP 404 Not Found.\n raise endpoints.NotFoundException('Parent %s does not exist.' % value)\n # The helper method SetKey is called to set the entity key if the _id has\n # also been set already.\n self.SetKey()\n\n # If the \"parent\" property is used in a query method, we want the ancestor\n # of the query to be the parent key.\n self._endpoints_query_info.ancestor = ndb.Key(MyParent, value)\n\n # This EndpointsAliasProperty is used to get and set a parent for our entity\n # key. It is required, meaning that a value must always be set if the\n # corresponding field is contained in a ProtoRPC message schema.\n\n # Since no property_type is specified, the default value of\n # messages.StringField is used.\n\n # See matching_queries_to_indexes/main.py for more information on\n # EndpointsAliasProperty.\n @EndpointsAliasProperty(setter=ParentSet, required=True)\n def parent(self):\n # If _parent has not already been set on the entity, try to set it.\n if self._parent is None:\n # Using the helper method SetParts, _parent will be set if a valid key has\n # been set on the entity.\n self.SetParts()\n return self._parent\n\n # This is a setter which will be used by the alias property \"id\". 
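A quick check of the key shape ``SetKey`` builds and the pairs ``SetParts`` unpacks (the parent and child IDs are hypothetical):

from google.appengine.ext import ndb

key = ndb.Key(MyParent, "some-parent", MyModel, "some-id")
assert key.pairs() == (("MyParent", "some-parent"), ("MyModel", "some-id"))
assert key.parent() == ndb.Key(MyParent, "some-parent")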
This\n # method will be called when id is set from a ProtoRPC request. This replaces\n # the helper property \"id\" provided by EndpointsModel, but does not use any of\n # the functionality from that method.\n def IdSet(self, value):\n # The property \"id\" is a string field, so we expect a value passed in from a\n # ProtoRPC message to be a string. Since (as seen below), \"id\" is required,\n # we also need not worry about the case that the value is None.\n if not isinstance(value, basestring):\n raise TypeError('ID must be a string.')\n\n self._id = value\n # The helper method SetKey is called to set the entity key if the _parent\n # has also been set already.\n self.SetKey()\n\n # This EndpointsAliasProperty is used to get and set an id value for our\n # entity key. It is required, meaning that a value must always be set if the\n # corresponding field is contained in a ProtoRPC message schema.\n\n # Since no property_type is specified, the default value of\n # messages.StringField is used.\n\n # See matching_queries_to_indexes/main.py for more information on\n # EndpointsAliasProperty.\n @EndpointsAliasProperty(setter=IdSet, required=True)\n def id(self):\n # If _id has not already been set on the entity, try to set it.\n if self._id is None:\n # Using the helper method SetParts, _id will be set if a valid key has\n # been set on the entity.\n self.SetParts()\n return self._id\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # This method is not defined in any of the previous examples; it allows a\n # parent entity to be inserted so that it can be used as an ancestor. Since\n # the ProtoRPC message schema for MyParent is a single field \"name\", this will\n # be all that is contained in the request and the response.\n @MyParent.method(path='myparent', http_method='POST',\n name='myparent.insert')\n def MyParentInsert(self, my_parent):\n # Though we don't actively change the model passed in, the value of updated\n # is set to the current time. No check is performed to see if the MyParent\n # entity already exists, since the values other than the name (set in the\n # key) are not relevant.\n my_parent.put()\n return my_parent\n\n # Since we require MyModel instances also have a MyParent ancestor, we include\n # \"parent\" in the request path by setting path='mymodel/{parent}'. Since \"id\"\n # is also required, an \"id\" must be included in the request body or it will be\n # rejected by ProtoRPC before this method is called.\n @MyModel.method(path='mymodel/{parent}', http_method='POST',\n name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # If the entity already exists (as evidenced by from_datastore equal to\n # True), an HTTP 400 Bad Request is returned. Since both \"parent\" and \"id\"\n # are required fields, both _parent and _id will be set on the entity and\n # MyModel.SetKey must have been called.\n\n # Checking in this fashion is not truly safe against duplicates. To do this,\n # a datastore transaction would be necessary.\n if my_model.from_datastore:\n raise endpoints.BadRequestException(\n 'MyModel %s with parent %s already exists.' %\n (my_model.id, my_model.parent))\n my_model.put()\n return my_model\n\n # To make sure queries have a specified ancestor, we use the alias property\n # \"parent\" which we defined on MyModel and specify query_fields equal to\n # ('parent',). To specify the parent in the query, it is included in the path\n # as it was in MyModelInsert. 
So no query parameters will be required, simply\n # a request to\n # .../mymodels/someparent\n # where ... is the full path to the API.\n @MyModel.query_method(query_fields=('parent',),\n path='mymodels/{parent}', name='mymodel.list')\n def MyModelList(self, query):\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "10202404", "language": "Python", "matching_score": 8.835865020751953, "max_stars_count": 91, "path": "examples/keys_with_ancestors/main.py" }, { "content": "# If you have not yet seen the source in simple_get/main.py, please take a look.\n\n# In this sample, we override two of the helper properties provided by\n# EndpointsModel: id and order. The purpose of this sample is to understand\n# how these properties -- called alias properties -- are used. For more\n# reference on EndpointsAliasProperty, see matching_queries_to_indexes/main.py\n# and keys_with_ancestors/main.py.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\n# See matching_queries_to_indexes/main.py for reference on this import.\nfrom endpoints_proto_datastore.ndb import EndpointsAliasProperty\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# The helper property \"order\" provided by EndpointsModel has no default value,\n# but we can provide it with this one, which will result in ordering a query\n# first by attr1 and then attr2 in descending order. To ensure queries using\n# this order do not fail, we specify the equivalent index in index.yaml.\nDEFAULT_ORDER = 'attr1,-attr2'\n\n\nclass MyModel(EndpointsModel):\n # As in simple_get/main.py, by setting _message_fields_schema, we can set a\n # custom ProtoRPC message schema. We set the schema to the alias property\n # \"id\" -- which we override here -- and the three properties corresponding to\n # the NDB properties and exclude the fifth property, which is the alias\n # property \"order\".\n\n # The property \"order\" is excluded since we defined our own schema but would\n # have been included otherwise. We have observed that the helper property\n # \"order\" from EndpointsModel is not included in the ProtoRPC message schema\n # when _message_fields_schema is not present, but this case does not\n # contradict that fact. When \"order\" (or any of the other four helper\n # properties) is overridden, it is treated like any other NDB or alias\n # property and is included in the schema.\n _message_fields_schema = ('id', 'attr1', 'attr2', 'created')\n\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n\n # This is a setter which will be used by the helper property \"id\", which we\n # are overriding here. The setter used for that helper property is also named\n # IdSet. This method will be called when id is set from a ProtoRPC query\n # request.\n def IdSet(self, value):\n # By default, the property \"id\" assumes the \"id\" will be an integer in a\n # simple key -- e.g. ndb.Key(MyModel, 10) -- which is the default behavior\n # if no key is set. Instead, we wish to use a string value as the \"id\" here,\n # so first check if the value being set is a string.\n if not isinstance(value, basestring):\n raise TypeError('ID must be a string.')\n # We call UpdateFromKey, which each of EndpointsModel.IdSet and\n # EndpointsModel.EntityKeySet use, to update the current entity using a\n # datastore key. 
This method sets the key on the current entity, attempts to\n # retrieve a corresponding entity from the datastore and then patch in any\n # missing values if an entity is found in the datastore.\n self.UpdateFromKey(ndb.Key(MyModel, value))\n\n # This EndpointsAliasProperty is our own helper property and overrides the\n # original \"id\". We specify the setter as the function IdSet which we just\n # defined. We also set required=True in the EndpointsAliasProperty decorator\n # to signal that an \"id\" must always have a value if it is included in a\n # ProtoRPC message schema.\n\n # Since no property_type is specified, the default value of\n # messages.StringField is used.\n\n # See matching_queries_to_indexes/main.py for more information on\n # EndpointsAliasProperty.\n @EndpointsAliasProperty(setter=IdSet, required=True)\n def id(self):\n # First check if the entity has a key.\n if self.key is not None:\n # If the entity has a key, return only the string_id. The method id()\n # would return any value, string, integer or otherwise, but we have a\n # specific type we wish to use for the entity \"id\" and that is string.\n return self.key.string_id()\n\n # This EndpointsAliasProperty only seeks to override the default value used by\n # the helper property \"order\". Both the original getter and setter are used;\n # the first by setter=EndpointsModel.OrderSet and the second by using super\n # to call the original getter. The argument default=DEFAULT_ORDER is used to\n # augment the EndpointsAliasProperty decorator by specifying a default value.\n # This value is used by the corresponding ProtoRPC field to set a value if\n # none is set by the request. Therefore, if a query has no order, rather than\n # a basic query, the order of DEFAULT_ORDER will be used.\n\n # Since no property_type is specified, the default value of\n # messages.StringField is used.\n @EndpointsAliasProperty(setter=EndpointsModel.OrderSet, default=DEFAULT_ORDER)\n def order(self):\n # Use getter from parent class.\n return super(MyModel, self).order\n\n\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # Since \"id\" is required, we require that the request contain an \"id\" to be\n # set on the entity. Rather than being specified in the POST body, we ask that\n # the \"id\" be sent in the request by setting path='mymodel/{id}'. To insert\n # a new value with id equal to cheese we would submit a request to\n # .../mymodel/cheese\n # where ... is the full path to the API.\n @MyModel.method(path='mymodel/{id}', http_method='POST',\n name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # If the API user is trying to insert an entity which already exists in the\n # datastore (as evidenced by from_datastore being True) then we return an\n # HTTP 400 Bad request saying the entity already exists. We only want users\n # to be able to insert new entities, not to overwrite existing ones.\n\n # See simple_get/main.py for more about from_datastore.\n if my_model.from_datastore:\n # We can use the entity name by retrieving the string_id, since we know\n # our overridden definition of \"id\" ensures the string_id is set.\n name = my_model.key.string_id()\n # We raise an exception which results in an HTTP 400.\n raise endpoints.BadRequestException(\n 'MyModel of name %s already exists.' % (name,))\n # If the entity does not already exist, insert it into the datastore. 
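Since ``UpdateFromKey`` is only described here, a hedged sketch of the behaviour that description implies (not the library's actual code; the ``from_datastore`` bookkeeping is simplified away into a return value):

def update_from_key_sketch(entity, key):
    # Set the key, try to fetch a stored entity, and patch in any properties
    # the request left unset; returns roughly what from_datastore reports.
    entity.key = key
    stored = key.get()
    if stored is None:
        return False
    for name in stored._properties:
        if getattr(entity, name) is None:
            setattr(entity, name, getattr(stored, name))
    return True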
Since\n # the key is set when UpdateFromKey is called within IdSet, the \"id\" of the\n # inserted entity will be the value passed in from the request.\n my_model.put()\n return my_model\n\n # To use the helper property \"order\" that we defined, we specify query_fields\n # equal to ('order',) in the MyModel.query_method decorator. This will result\n # in a single string field in the ProtoRPC message schema. If no \"order\" is\n # specified in the query, the default value from the \"order\" property we\n # defined will be used instead.\n @MyModel.query_method(query_fields=('order',),\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n return query\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "3698997", "language": "Python", "matching_score": 7.285131454467773, "max_stars_count": 91, "path": "examples/custom_alias_properties/main.py" }, { "content": "# If you have not yet seen the source in basic_with_auth/main.py and\n# paging/main.py, please take a look.\n\n# In this sample we use a custom Enum for the \"order\" property in queries\n# to strictly control the indexes used and make sure we have corresponding\n# indexes created in index.yaml.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\n# This import allows us to define our own Enum using the ProtoRPC messages\n# library. This is not usually needed, since EndpointsModel handles message\n# definition, but in this case it is.\nfrom protorpc import messages\nfrom protorpc import remote\n\n# We import EndpointsAliasProperty so that we can define our own helper property\n# similar to the properties \"id\", \"entityKey\", \"limit\", \"order\" and \"pageToken\"\n# provided by EndpointsModel.\nfrom endpoints_proto_datastore.ndb import EndpointsAliasProperty\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# This is an Enum used to strictly define which order values are allowed.\n# In this case, we are only allowing two query orders and have an enum value\n# corresponding to each.\nclass Order(messages.Enum):\n MYFIRST = 1\n MYSECOND = 2\n\n\nclass MyModel(EndpointsModel):\n # As in simple_get/main.py, by setting _message_fields_schema, we can set a\n # custom ProtoRPC message schema. We set the schema to the four properties\n # corresponding to the NDB properties and exclude the fifth property, which is\n # the alias property \"order\". Though the helper property \"order\" from\n # EndpointsModel is not included in the message schema, since we define our\n # own \"order\", this would be included if we did not define our own schema.\n _message_fields_schema = ('attr1', 'attr2', 'owner', 'created')\n\n # The properties attr1 and attr2 are required here so that all entities will\n # have values for performing queries.\n attr1 = ndb.StringProperty(required=True)\n attr2 = ndb.StringProperty(required=True)\n created = ndb.DateTimeProperty(auto_now_add=True)\n # As in basic_with_auth/main.py, an owner property is used and each entity\n # created will have the current user saved as the owner. As with attr1 and\n # attr2 above, we are also requiring the owner field so we can use it for\n # queries too.\n owner = ndb.UserProperty(required=True)\n\n # This is a setter which will be used by the helper property \"order\", which we\n # are overriding here. The setter used for that helper property is also named\n # OrderSet. 
This method will be called when order is set from a ProtoRPC\n # query request.\n def OrderSet(self, value):\n # Since we wish to control which queries are made, we only accept values\n # from our custom Enum type Order.\n if not isinstance(value, Order):\n raise TypeError('Expected an enum, received: %s.' % (value,))\n\n # For MYFIRST, we order by attr1.\n if value == Order.MYFIRST:\n # Use the method OrderSet from the parent class to set the string value\n # based on the enum.\n super(MyModel, self).OrderSet('attr1')\n # For MYSECOND, we order by attr2, but in descending order.\n elif value == Order.MYSECOND:\n # Use the method OrderSet from the parent class to set the string value\n # based on the enum.\n super(MyModel, self).OrderSet('-attr2')\n # For either case, the order used here will be combined with an equality\n # filter based on the current user, and we have the corresponding indexes\n # specified in index.yaml so no index errors are experienced by our users.\n\n # If the value is not a valid Enum value, raise a TypeError. This should\n # never occur since value is known to be an instance of Order.\n else:\n raise TypeError('Unexpected value of Order: %s.' % (value,))\n\n # This EndpointsAliasProperty is our own helper property and overrides the\n # original \"order\". We specify the setter as the function OrderSet which we\n # just defined. The property_type is the class Order and the default value of\n # the alias property is MYFIRST.\n\n # Endpoints alias properties must have a corresponding property type, which\n # can be either a ProtoRPC field or a ProtoRPC message class or enum class.\n # Here, by providing a property type of Order, we aid in the creation of a\n # field corresponding to this property in a ProtoRPC message schema.\n\n # The EndpointsAliasProperty can be used as a decorator as is done here, or\n # can be used in the same way NDB properties are, e.g.\n # attr1 = ndb.StringProperty()\n # and the similar\n # order = EndpointsAliasProperty(OrderGet, setter=OrderSet, ...)\n # where OrderGet would be the function defined here.\n @EndpointsAliasProperty(setter=OrderSet, property_type=Order,\n default=Order.MYFIRST)\n def order(self):\n # We only need to limit the values to Order enums, so we can use the getter\n # from the helper property with no changes.\n return super(MyModel, self).order\n\n\n\n# Since we are using auth, we want to test with the Google APIs Explorer:\n# https://developers.google.com/apis-explorer/\n# By default, if allowed_client_ids is not specified, this is enabled by\n# default. If you specify allowed_client_ids, you'll need to include\n# endpoints.API_EXPLORER_CLIENT_ID in this list. This is necessary for auth\n# tokens obtained by the API Explorer (on behalf of users) to be considered\n# valid by our API.\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # We use specify that request_fields is ('attr1', 'attr2') because the\n # created value is set when the entity is put to the datastore and the owner\n # is set from the current user. 
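The two orders the enum above can produce, combined with the owner filter applied in ``MyModelList`` below (the ``user`` value assumes a signed-in user); both correspond to the indexes described as living in index.yaml:

user = endpoints.get_current_user()  # hypothetical: assumes a signed-in user
# Order.MYFIRST -> equality on owner, ascending attr1:
q_first = MyModel.query(MyModel.owner == user).order(MyModel.attr1)
# Order.MYSECOND -> equality on owner, descending attr2:
q_second = MyModel.query(MyModel.owner == user).order(-MyModel.attr2)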
As in basic_with_auth, since user_required is\n # set to True, the current user will always be valid.\n\n # Since no response_fields are set, the four fields from\n # _message_fields_schema will be sent in the response.\n @MyModel.method(request_fields=('attr1', 'attr2'),\n user_required=True,\n path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n my_model.owner = endpoints.get_current_user()\n my_model.put()\n return my_model\n\n # As in paging/main.py, we use the fields limit, order and pageToken for\n # paging, but here \"order\" is the Enum-based property we defined above. As\n # mentioned in the definition of OrderSet, these order values are coupled with\n # the filter for current user.\n\n # Since no collection_fields are set, each value in \"items\" in the response\n # will use the four fields from _message_fields_schema.\n @MyModel.query_method(query_fields=('limit', 'order', 'pageToken'),\n user_required=True,\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n # Current user is valid since user_required is set to True.\n return query.filter(MyModel.owner == endpoints.get_current_user())\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "12760763", "language": "Python", "matching_score": 6.133053779602051, "max_stars_count": 91, "path": "examples/matching_queries_to_indexes/main.py" }, { "content": "# If you have not yet seen the source in basic/main.py, please take a look.\n\nimport endpoints\n\nfrom google.appengine.ext import ndb\nfrom protorpc import remote\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\n\n# In this model definition, we have added an extra field \"owner\" to the model\n# defined in basic/main.py. Since using auth, we will save the current user and\n# query by the current user, so saving a user property on each entity will allow\n# us to do this.\nclass MyModel(EndpointsModel):\n # By default, the ProtoRPC message schema corresponding to this model will\n # have four fields: attr1, attr2, created and owner\n # in an arbitrary order (the ordering of properties in a dictionary is not\n # guaranteed).\n attr1 = ndb.StringProperty()\n attr2 = ndb.StringProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n # The three properties above are represented by string fields, but the\n # UserProperty below is represented in the ProtoRPC message schema as a\n # message field -- a field whose value is itself a message. To hold a user\n # property, a custom ProtoRPC message class is defined in\n # endpoints_proto_datastore.utils and is used to convert to and from the NDB\n # property and the corresponding ProtoRPC field.\n owner = ndb.UserProperty()\n\n\n# Since we are using auth, we want to test with the Google APIs Explorer:\n# https://developers.google.com/apis-explorer/\n# By default, if allowed_client_ids is not specified, this is enabled by\n# default. If you specify allowed_client_ids, you'll need to include\n# endpoints.API_EXPLORER_CLIENT_ID in this list. This is necessary for auth\n# tokens obtained by the API Explorer (on behalf of users) to be considered\n# valid by our API.\n@endpoints.api(name='myapi', version='v1', description='My Little API')\nclass MyApi(remote.Service):\n\n # To specify that this method requires authentication, we can simply set the\n # keyword argument user_required to True in the MyModel.method decorator. The\n # remaining arguments to the decorator are the same as in basic/main.py. 
Once\n # user_required is set, the method will first determine if a user has been\n # detected from the token sent with the request (if any was sent it all) and\n # will return an HTTP 401 Unauthorized if no valid user is detected. In the\n # case of a 401, the method will not be executed. Conversely, if method\n # execution occurs, user_required=True will guarantee that the current user is\n # valid.\n @MyModel.method(user_required=True,\n path='mymodel', http_method='POST', name='mymodel.insert')\n def MyModelInsert(self, my_model):\n # Since user_required is True, we know endpoints.get_current_user will\n # return a valid user.\n my_model.owner = endpoints.get_current_user()\n # Also note, since we don't override the default ProtoRPC message schema,\n # API users can send an owner object in the request, but we overwrite the\n # model property with the current user before the entity is inserted into\n # the datastore and this put operation will only occur if a valid token\n # identifying the user was sent in the Authorization header.\n my_model.put()\n return my_model\n\n # As above with MyModelInsert, we add user_required=True to the arguments\n # passed to the MyModel.query_method decorator in basic/main.py. Therefore,\n # only queries can be made by a valid user.\n @MyModel.query_method(user_required=True,\n path='mymodels', name='mymodel.list')\n def MyModelList(self, query):\n # We only allow users to query the MyModel entities that they have created,\n # so query using owner equal to the current user. Since user_required is\n # set, we know get_current_user will return a valid user.\n return query.filter(MyModel.owner == endpoints.get_current_user())\n\n\napplication = endpoints.api_server([MyApi], restricted=False)\n", "id": "10082190", "language": "Python", "matching_score": 2.7585296630859375, "max_stars_count": 91, "path": "examples/basic_with_auth/main.py" }, { "content": "# Copyright 2012 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom protorpc import remote\n\nimport endpoints\n\nfrom models import Board\nfrom models import Score\n\n\n@endpoints.api(name='tictactoe', version='v1',\n description='Tic Tac Toe API',\n allowed_client_ids=['YOUR-CLIENT-ID',\n endpoints.API_EXPLORER_CLIENT_ID])\nclass TicTacToeApi(remote.Service):\n\n @Board.method(path='board', http_method='POST',\n name='board.getmove')\n def BoardGetMove(self, board):\n if not (len(board.state) == 9 and set(board.state) <= set('OX-')):\n raise endpoints.BadRequestException('Invalid board.')\n board.MoveOpponent()\n return board\n\n @Score.method(request_fields=('id',),\n path='scores/{id}', http_method='GET',\n name='scores.get')\n def ScoresGet(self, score):\n if not score.from_datastore:\n raise endpoints.NotFoundException('Score not found.')\n\n if score.player != endpoints.get_current_user():\n raise endpoints.ForbiddenException(\n 'You do not have access to this score.')\n\n return score\n\n @Score.method(request_fields=('outcome',),\n path='scores', http_method='POST',\n name='scores.insert')\n def ScoresInsert(self, score):\n score.put() # score.player already set since EndpointsUserProperty\n return score\n\n @Score.query_method(query_fields=('limit', 'order', 'pageToken'),\n user_required=True,\n path='scores', name='scores.list')\n def ScoresList(self, query):\n return query.filter(Score.player == endpoints.get_current_user())\n", "id": "11799281", "language": "Python", "matching_score": 2.9591896533966064, "max_stars_count": 91, "path": "examples/tictactoe/tictactoe_api.py" }, { "content": "# Copyright 2012 Google Inc. 
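A self-contained sketch of the board handling in the Tic Tac Toe API above: the validation performed in ``BoardGetMove`` and the random opponent move implemented by ``Board.MoveOpponent`` in models.py below (the sample state is made up):

import random
import re

state = "OX-XO----"  # hypothetical 3x3 board, row by row

# The check BoardGetMove performs before moving:
assert len(state) == 9 and set(state) <= set("OX-")

# The MoveOpponent logic: drop an 'O' on a random free square.
free_indices = [match.start() for match in re.finditer("-", state)]
squares = list(state)
squares[random.choice(free_indices)] = "O"
state = "".join(squares)
assert state.count("O") == 3 and len(state) == 9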
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport re\n\nfrom google.appengine.ext import ndb\nfrom protorpc import messages\n\nfrom endpoints_proto_datastore.ndb import EndpointsAliasProperty\nfrom endpoints_proto_datastore.ndb import EndpointsModel\nfrom endpoints_proto_datastore.ndb import EndpointsUserProperty\n\n\nclass Board(EndpointsModel):\n state = ndb.StringProperty(required=True)\n\n def MoveOpponent(self):\n free_indices = [match.start() for match in re.finditer('-', self.state)]\n random_index = random.choice(free_indices)\n result = list(self.state) # Need a mutable object\n result[random_index] = 'O'\n self.state = ''.join(result)\n\n\nclass Order(messages.Enum):\n WHEN = 1\n TEXT = 2\n\n\nclass Score(EndpointsModel):\n _message_fields_schema = ('id', 'outcome', 'played', 'player')\n\n outcome = ndb.StringProperty(required=True)\n played = ndb.DateTimeProperty(auto_now_add=True)\n player = EndpointsUserProperty(required=True, raise_unauthorized=True)\n\n def OrderSet(self, value):\n if not isinstance(value, Order):\n raise TypeError('Expected an enum, received: %s.' % (value,))\n\n if value == Order.WHEN:\n super(Score, self).OrderSet('-played')\n elif value == Order.TEXT:\n super(Score, self).OrderSet('outcome')\n else:\n raise TypeError('Unexpected value of Order: %s.' 
% (value,))\n\n @EndpointsAliasProperty(setter=OrderSet, property_type=Order,\n default=Order.WHEN)\n def order(self):\n return super(Score, self).order\n", "id": "11469472", "language": "Python", "matching_score": 0.7595418691635132, "max_stars_count": 91, "path": "examples/tictactoe/models.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport copy\nimport json\nimport operator\nimport pathlib\nimport uuid\n\nimport numpy as np\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODES = {\n 1: (\"ADD\", 3),\n 2: (\"MULTIPLY\", 3),\n 3: (\"INPUT\", 1),\n 4: (\"OUTPUT\", 1),\n 5: (\"JUMP-IF-TRUE\", 2),\n 6: (\"JUMP-IF-FALSE\", 2),\n 7: (\"LESS-THAN\", 3),\n 8: (\"EQUALS\", 3),\n 9: (\"ADJUST_BASE\", 1),\n 99: (\"HALT\", 0),\n}\nPOSITION_MODE = \"0\"\nIMMEDIATE_MODE = \"1\"\nRELATIVE_MODE = \"2\"\nALL_MODES = set(\"012\")\nNO_JUMP_JUMP_INDEX = uuid.uuid4()\nTERMINAL_JUMP_INDEX = uuid.uuid4()\nTILE_DEFAULT = -1\nTILE_BLOCK = 2\nTILE_PADDLE = 3\nTILE_BALL = 4\nTILE_IDS = {\n 0: \" \", # \"EMPTY\"\n 1: \"#\", # \"WALL\"\n TILE_BLOCK: \"B\", # \"BLOCK\"\n TILE_PADDLE: \"X\", # \"PADDLE\"\n TILE_BALL: \"o\", # \"BALL\"\n}\nJOYSTICK_NEUTRAL = 0\nJOYSTICK_LEFT = -1\nJOYSTICK_RIGHT = 1\nNUM_QUARTERS = 2\n\n\nclass AdjustBase:\n def __init__(self, value):\n self.value = value\n\n\ndef less_than_binary_op(value1, value2):\n if value1 < value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef equal_binary_op(value1, value2):\n if value1 == value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef get_value(mode, param, relative_base, program):\n if mode == POSITION_MODE:\n index = param\n assert 0 <= index\n return program[index]\n\n if mode == IMMEDIATE_MODE:\n return param\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n return program[index]\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef set_value(mode, param, to_store, relative_base, program):\n if mode == POSITION_MODE:\n index = param\n assert 0 <= index\n program[index] = to_store\n return\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n program[index] = to_store\n return\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef _do_binary_op(modes, params, relative_base, program, fn):\n mode1, mode2, mode3 = modes\n param1, param2, param3 = params\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n to_store = fn(value1, value2)\n set_value(mode3, param3, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_add(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.add)\n\n\ndef do_multiply(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.mul)\n\n\ndef do_input(modes, params, relative_base, program, std_input):\n mode, = modes\n param, = params\n\n to_store = next(std_input)\n 
set_value(mode, param, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_output(modes, params, relative_base, program, std_output):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n std_output.append(value)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef _do_jump_unary_predicate(modes, params, relative_base, program, fn):\n mode1, mode2 = modes\n param1, param2 = params\n\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n if fn(value1):\n return value2\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_jump_if_true(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.truth\n )\n\n\ndef do_jump_if_false(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.not_\n )\n\n\ndef do_less_than(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, less_than_binary_op\n )\n\n\ndef do_equal(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, equal_binary_op\n )\n\n\ndef do_adjust_base(modes, params, relative_base, program):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n return AdjustBase(value)\n\n\ndef do_halt():\n return TERMINAL_JUMP_INDEX\n\n\ndef next_instruction(index, program):\n assert 0 <= index\n op_code_with_extra = program[index]\n assert op_code_with_extra >= 0\n\n mode_as_int, op_code = divmod(op_code_with_extra, 100)\n instruction, num_params = OPCODES[op_code]\n next_index = index + 1 + num_params\n if num_params == 0:\n assert mode_as_int == 0\n return instruction, (), (), next_index\n\n mode_chars = str(mode_as_int).zfill(num_params)\n assert len(mode_chars) == num_params, (mode_chars, num_params)\n assert set(mode_chars) <= ALL_MODES\n modes = tuple(reversed(mode_chars))\n\n params = tuple(program[i] for i in range(index + 1, next_index))\n assert len(params) == num_params # No partial slice\n\n return instruction, modes, params, next_index\n\n\ndef execute_instruction(\n instruction, modes, params, relative_base, program, std_input, std_output\n):\n if instruction == \"ADD\":\n return do_add(modes, params, relative_base, program)\n\n if instruction == \"MULTIPLY\":\n return do_multiply(modes, params, relative_base, program)\n\n if instruction == \"INPUT\":\n return do_input(modes, params, relative_base, program, std_input)\n\n if instruction == \"OUTPUT\":\n return do_output(modes, params, relative_base, program, std_output)\n\n if instruction == \"JUMP-IF-TRUE\":\n return do_jump_if_true(modes, params, relative_base, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, relative_base, program)\n\n if instruction == \"LESS-THAN\":\n return do_less_than(modes, params, relative_base, program)\n\n if instruction == \"EQUALS\":\n return do_equal(modes, params, relative_base, program)\n\n if instruction == \"ADJUST_BASE\":\n return do_adjust_base(modes, params, relative_base, program)\n\n if instruction == \"HALT\":\n return do_halt()\n\n raise ValueError(\"Bad instruction\", instruction, modes, params, program)\n\n\ndef run_intcode(program, std_input, std_output):\n relative_base = 0\n running_program = copy.deepcopy(program)\n\n jump_index = NO_JUMP_JUMP_INDEX\n index = 0\n while jump_index != TERMINAL_JUMP_INDEX:\n instruction, 
modes, params, index = next_instruction(\n index, running_program\n )\n jump_index = execute_instruction(\n instruction,\n modes,\n params,\n relative_base,\n running_program,\n std_input,\n std_output,\n )\n if isinstance(jump_index, AdjustBase):\n relative_base += jump_index.value\n elif jump_index in (NO_JUMP_JUMP_INDEX, TERMINAL_JUMP_INDEX):\n # Nothing to do here, all good.\n pass\n elif jump_index >= 0:\n index = jump_index\n else:\n raise ValueError(\"Invalid jump index\", jump_index)\n\n return running_program\n\n\ndef print_board(board):\n for row in board.T:\n for tile in row:\n print(TILE_IDS[tile], end=\" \")\n print(\"\\n\", end=\"\")\n\n\nclass Arcade:\n def __init__(self, seed_moves, program):\n self.program = copy.deepcopy(program)\n self.program[0] = NUM_QUARTERS\n self.index = 0\n self.std_input = [value for value in seed_moves]\n self.std_output = []\n self.score = None\n self.board = None\n self.ball_location = None\n self.paddle_location = None\n self.trajectory = None\n self.retired_output = []\n\n def __iter__(self):\n return self\n\n def __next__(self):\n curr_index = self.index\n self.index = curr_index + 1\n if self.board is None:\n assert curr_index == 0\n num_tiles, remainder = divmod(len(self.std_output), 3)\n assert remainder == 0\n # Set the score.\n assert self.std_output[-3:] == [-1, 0, 0]\n self.score = 0\n # Determine the board size.\n x_values = self.std_output[:-3:3]\n assert min(x_values) == 0\n width_x = max(x_values) + 1\n y_values = self.std_output[1:-3:3]\n assert min(y_values) == 0\n width_y = max(y_values) + 1\n # Populate the board.\n self.board = TILE_DEFAULT * np.ones((width_x, width_y), dtype=int)\n for i in range(num_tiles - 1): # Ignore last triple (-1, 0, 1)\n x, y, tile = self.std_output[3 * i : 3 * i + 3]\n assert 0 <= x < width_x\n assert 0 <= y < width_y\n assert tile in TILE_IDS\n if self.board[x, y] != TILE_DEFAULT:\n raise ValueError(x, y, self.board)\n self.board[x, y] = tile\n # Make sure the board is fully set.\n assert np.all(self.board != TILE_DEFAULT)\n # Set the location of the ball and paddle (and assert exactly one)\n self.ball_location = locate(self.board, TILE_BALL)\n self.paddle_location = locate(self.board, TILE_PADDLE)\n # Reset std_output\n self.reset_std_output()\n # Get the next move\n next_move(self.board, curr_index, self.std_input)\n else:\n new_score = update_board(self.board, self.std_output)\n if new_score is not None:\n self.score = new_score\n # Only print the new score if we are in \"USER INPUT\" mode.\n if curr_index >= len(self.std_input):\n print(f\"New score: {new_score}\")\n self.reset_std_output()\n updated = next_move(self.board, curr_index, self.std_input)\n if updated:\n with open(HERE / \"moves.json\", \"w\") as file_obj:\n json.dump(self.std_input, file_obj, indent=4)\n file_obj.write(\"\\n\")\n\n return self.std_input[curr_index]\n\n def append(self, value):\n self.std_output.append(value)\n\n def reset_std_output(self):\n self.retired_output.append(self.std_output)\n self.std_output = []\n\n\ndef locate(board, tile):\n (x,), (y,) = np.where(board == tile)\n return x, y\n\n\ndef update_board(board, std_output):\n width_x, width_y = board.shape\n size = len(std_output)\n assert size % 3 == 0\n index = 0\n new_score = None\n while index < size:\n next_index = index + 3\n x, y, tile = std_output[index:next_index]\n # For next iteration.\n index = next_index\n\n if (x, y) == (-1, 0):\n if new_score is None:\n new_score = tile\n else:\n assert tile > new_score\n new_score = tile\n continue\n\n assert 
0 <= x < width_x, (x, y, tile)\n assert 0 <= y < width_y, (x, y, tile)\n assert tile in TILE_IDS\n board[x, y] = tile\n\n return new_score\n\n\ndef next_move(board, index, std_input):\n if index < len(std_input):\n return False\n\n print_board(board)\n next_move = input(\"l/-/r? \")\n if next_move == \"l\":\n std_input.append(JOYSTICK_LEFT)\n return True\n\n if next_move == \"-\":\n std_input.append(JOYSTICK_NEUTRAL)\n return True\n\n if next_move == \"r\":\n std_input.append(JOYSTICK_RIGHT)\n return True\n\n raise ValueError(\"Invalid input\", next_move)\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = collections.defaultdict(int)\n for index, value in enumerate(content.strip().split(\",\")):\n program[index] = int(value)\n\n std_input_list = []\n std_input = iter(std_input_list)\n std_output = []\n run_intcode(program, std_input, std_output)\n assert len(std_output) % 3 == 0\n tile_ids = std_output[2::3]\n tile_id_counts = collections.Counter(tile_ids)\n print(f\"Number of blocks: {tile_id_counts[TILE_BLOCK]}\")\n\n with open(HERE / \"moves.json\", \"r\") as file_obj:\n seed_moves = json.load(file_obj)\n arcade = Arcade(seed_moves, program)\n run_intcode(arcade.program, arcade, arcade)\n assert arcade.std_output\n new_score = update_board(arcade.board, arcade.std_output)\n assert new_score is not None\n arcade.score = new_score\n arcade.reset_std_output()\n\n print(f\"Final score: {arcade.score}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "2321651", "language": "Python", "matching_score": 6.440038681030273, "max_stars_count": 0, "path": "day13/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport copy\nimport operator\nimport pathlib\nimport uuid\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODES = {\n 1: (\"ADD\", 3),\n 2: (\"MULTIPLY\", 3),\n 3: (\"INPUT\", 1),\n 4: (\"OUTPUT\", 1),\n 5: (\"JUMP-IF-TRUE\", 2),\n 6: (\"JUMP-IF-FALSE\", 2),\n 7: (\"LESS-THAN\", 3),\n 8: (\"EQUALS\", 3),\n 9: (\"ADJUST_BASE\", 1),\n 99: (\"HALT\", 0),\n}\nPOSITION_MODE = \"0\"\nIMMEDIATE_MODE = \"1\"\nRELATIVE_MODE = \"2\"\nALL_MODES = set(\"012\")\nNO_JUMP_JUMP_INDEX = uuid.uuid4()\nTERMINAL_JUMP_INDEX = uuid.uuid4()\n\n\nclass AdjustBase:\n def __init__(self, value):\n self.value = value\n\n\ndef less_than_binary_op(value1, value2):\n if value1 < value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef equal_binary_op(value1, value2):\n if value1 == value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef get_value(mode, param, relative_base, program):\n if mode == POSITION_MODE:\n index = param\n assert 0 <= index\n return program[index]\n\n if mode == IMMEDIATE_MODE:\n return param\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n return program[index]\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef set_value(mode, param, to_store, relative_base, program):\n if mode == 
POSITION_MODE:\n index = param\n assert 0 <= index\n program[index] = to_store\n return\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n program[index] = to_store\n return\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef _do_binary_op(modes, params, relative_base, program, fn):\n mode1, mode2, mode3 = modes\n param1, param2, param3 = params\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n to_store = fn(value1, value2)\n set_value(mode3, param3, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_add(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.add)\n\n\ndef do_multiply(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.mul)\n\n\ndef do_input(modes, params, relative_base, program, std_input):\n mode, = modes\n param, = params\n\n to_store = next(std_input)\n set_value(mode, param, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_output(modes, params, relative_base, program, std_output):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n std_output.append(value)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef _do_jump_unary_predicate(modes, params, relative_base, program, fn):\n mode1, mode2 = modes\n param1, param2 = params\n\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n if fn(value1):\n return value2\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_jump_if_true(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.truth\n )\n\n\ndef do_jump_if_false(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.not_\n )\n\n\ndef do_less_than(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, less_than_binary_op\n )\n\n\ndef do_equal(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, equal_binary_op\n )\n\n\ndef do_adjust_base(modes, params, relative_base, program):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n return AdjustBase(value)\n\n\ndef do_halt():\n return TERMINAL_JUMP_INDEX\n\n\ndef next_instruction(index, program):\n assert 0 <= index\n op_code_with_extra = program[index]\n assert op_code_with_extra >= 0\n\n mode_as_int, op_code = divmod(op_code_with_extra, 100)\n instruction, num_params = OPCODES[op_code]\n next_index = index + 1 + num_params\n if num_params == 0:\n assert mode_as_int == 0\n return instruction, (), (), next_index\n\n mode_chars = str(mode_as_int).zfill(num_params)\n assert len(mode_chars) == num_params, (mode_chars, num_params)\n assert set(mode_chars) <= ALL_MODES\n modes = tuple(reversed(mode_chars))\n\n params = tuple(program[i] for i in range(index + 1, next_index))\n assert len(params) == num_params # No partial slice\n\n return instruction, modes, params, next_index\n\n\ndef execute_instruction(\n instruction, modes, params, relative_base, program, std_input, std_output\n):\n if instruction == \"ADD\":\n return do_add(modes, params, relative_base, program)\n\n if instruction == \"MULTIPLY\":\n return do_multiply(modes, params, relative_base, program)\n\n if instruction == 
\"INPUT\":\n return do_input(modes, params, relative_base, program, std_input)\n\n if instruction == \"OUTPUT\":\n return do_output(modes, params, relative_base, program, std_output)\n\n if instruction == \"JUMP-IF-TRUE\":\n return do_jump_if_true(modes, params, relative_base, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, relative_base, program)\n\n if instruction == \"LESS-THAN\":\n return do_less_than(modes, params, relative_base, program)\n\n if instruction == \"EQUALS\":\n return do_equal(modes, params, relative_base, program)\n\n if instruction == \"ADJUST_BASE\":\n return do_adjust_base(modes, params, relative_base, program)\n\n if instruction == \"HALT\":\n return do_halt()\n\n raise ValueError(\"Bad instruction\", instruction, modes, params, program)\n\n\ndef run_intcode(program, std_input, std_output):\n relative_base = 0\n running_program = copy.deepcopy(program)\n\n jump_index = NO_JUMP_JUMP_INDEX\n index = 0\n while jump_index != TERMINAL_JUMP_INDEX:\n instruction, modes, params, index = next_instruction(\n index, running_program\n )\n jump_index = execute_instruction(\n instruction,\n modes,\n params,\n relative_base,\n running_program,\n std_input,\n std_output,\n )\n if isinstance(jump_index, AdjustBase):\n relative_base += jump_index.value\n elif jump_index in (NO_JUMP_JUMP_INDEX, TERMINAL_JUMP_INDEX):\n # Nothing to do here, all good.\n pass\n elif jump_index >= 0:\n index = jump_index\n else:\n raise ValueError(\"Invalid jump index\", jump_index)\n\n return running_program\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = collections.defaultdict(int)\n for index, value in enumerate(content.strip().split(\",\")):\n program[index] = int(value)\n\n for input_val in (1, 2):\n std_input_list = [input_val]\n std_input = iter(std_input_list)\n std_output = []\n run_intcode(program, std_input, std_output)\n print(std_output)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "12519271", "language": "Python", "matching_score": 6.413967609405518, "max_stars_count": 0, "path": "day09/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport copy\nimport operator\nimport pathlib\nimport uuid\n\nimport numpy as np\nimport PIL.Image\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODES = {\n 1: (\"ADD\", 3),\n 2: (\"MULTIPLY\", 3),\n 3: (\"INPUT\", 1),\n 4: (\"OUTPUT\", 1),\n 5: (\"JUMP-IF-TRUE\", 2),\n 6: (\"JUMP-IF-FALSE\", 2),\n 7: (\"LESS-THAN\", 3),\n 8: (\"EQUALS\", 3),\n 9: (\"ADJUST_BASE\", 1),\n 99: (\"HALT\", 0),\n}\nPOSITION_MODE = \"0\"\nIMMEDIATE_MODE = \"1\"\nRELATIVE_MODE = \"2\"\nALL_MODES = set(\"012\")\nNO_JUMP_JUMP_INDEX = uuid.uuid4()\nTERMINAL_JUMP_INDEX = uuid.uuid4()\nCOLOR_BLACK = 0\nCOLOR_WHITE = 1\nMAX_PIXEL = 255\nTURN_LEFT = np.array([[0, -1], [1, 0]])\nTURN_RIGHT = np.array([[0, 1], [-1, 0]])\n\n\nclass AdjustBase:\n def __init__(self, value):\n self.value = value\n\n\ndef 
less_than_binary_op(value1, value2):\n if value1 < value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef equal_binary_op(value1, value2):\n if value1 == value2:\n to_store = 1\n else:\n to_store = 0\n\n return to_store\n\n\ndef get_value(mode, param, relative_base, program):\n if mode == POSITION_MODE:\n index = param\n assert 0 <= index\n return program[index]\n\n if mode == IMMEDIATE_MODE:\n return param\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n return program[index]\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef set_value(mode, param, to_store, relative_base, program):\n if mode == POSITION_MODE:\n index = param\n assert 0 <= index\n program[index] = to_store\n return\n\n if mode == RELATIVE_MODE:\n index = relative_base + param\n assert 0 <= index\n program[index] = to_store\n return\n\n raise ValueError(\"Invalid mode\", mode)\n\n\ndef _do_binary_op(modes, params, relative_base, program, fn):\n mode1, mode2, mode3 = modes\n param1, param2, param3 = params\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n to_store = fn(value1, value2)\n set_value(mode3, param3, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_add(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.add)\n\n\ndef do_multiply(modes, params, relative_base, program):\n return _do_binary_op(modes, params, relative_base, program, operator.mul)\n\n\ndef do_input(modes, params, relative_base, program, std_input):\n mode, = modes\n param, = params\n\n to_store = next(std_input)\n set_value(mode, param, to_store, relative_base, program)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_output(modes, params, relative_base, program, std_output):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n std_output.append(value)\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef _do_jump_unary_predicate(modes, params, relative_base, program, fn):\n mode1, mode2 = modes\n param1, param2 = params\n\n value1 = get_value(mode1, param1, relative_base, program)\n value2 = get_value(mode2, param2, relative_base, program)\n\n if fn(value1):\n return value2\n\n return NO_JUMP_JUMP_INDEX\n\n\ndef do_jump_if_true(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.truth\n )\n\n\ndef do_jump_if_false(modes, params, relative_base, program):\n return _do_jump_unary_predicate(\n modes, params, relative_base, program, operator.not_\n )\n\n\ndef do_less_than(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, less_than_binary_op\n )\n\n\ndef do_equal(modes, params, relative_base, program):\n return _do_binary_op(\n modes, params, relative_base, program, equal_binary_op\n )\n\n\ndef do_adjust_base(modes, params, relative_base, program):\n mode, = modes\n param, = params\n\n value = get_value(mode, param, relative_base, program)\n return AdjustBase(value)\n\n\ndef do_halt():\n return TERMINAL_JUMP_INDEX\n\n\ndef next_instruction(index, program):\n assert 0 <= index\n op_code_with_extra = program[index]\n assert op_code_with_extra >= 0\n\n mode_as_int, op_code = divmod(op_code_with_extra, 100)\n instruction, num_params = OPCODES[op_code]\n next_index = index + 1 + num_params\n if num_params == 0:\n assert mode_as_int == 0\n return instruction, (), (), next_index\n\n mode_chars = 
str(mode_as_int).zfill(num_params)\n assert len(mode_chars) == num_params, (mode_chars, num_params)\n assert set(mode_chars) <= ALL_MODES\n modes = tuple(reversed(mode_chars))\n\n params = tuple(program[i] for i in range(index + 1, next_index))\n assert len(params) == num_params # No partial slice\n\n return instruction, modes, params, next_index\n\n\ndef execute_instruction(\n instruction, modes, params, relative_base, program, std_input, std_output\n):\n if instruction == \"ADD\":\n return do_add(modes, params, relative_base, program)\n\n if instruction == \"MULTIPLY\":\n return do_multiply(modes, params, relative_base, program)\n\n if instruction == \"INPUT\":\n return do_input(modes, params, relative_base, program, std_input)\n\n if instruction == \"OUTPUT\":\n return do_output(modes, params, relative_base, program, std_output)\n\n if instruction == \"JUMP-IF-TRUE\":\n return do_jump_if_true(modes, params, relative_base, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, relative_base, program)\n\n if instruction == \"LESS-THAN\":\n return do_less_than(modes, params, relative_base, program)\n\n if instruction == \"EQUALS\":\n return do_equal(modes, params, relative_base, program)\n\n if instruction == \"ADJUST_BASE\":\n return do_adjust_base(modes, params, relative_base, program)\n\n if instruction == \"HALT\":\n return do_halt()\n\n raise ValueError(\"Bad instruction\", instruction, modes, params, program)\n\n\ndef run_intcode(program, std_input, std_output):\n relative_base = 0\n running_program = copy.deepcopy(program)\n\n jump_index = NO_JUMP_JUMP_INDEX\n index = 0\n while jump_index != TERMINAL_JUMP_INDEX:\n instruction, modes, params, index = next_instruction(\n index, running_program\n )\n jump_index = execute_instruction(\n instruction,\n modes,\n params,\n relative_base,\n running_program,\n std_input,\n std_output,\n )\n if isinstance(jump_index, AdjustBase):\n relative_base += jump_index.value\n elif jump_index in (NO_JUMP_JUMP_INDEX, TERMINAL_JUMP_INDEX):\n # Nothing to do here, all good.\n pass\n elif jump_index >= 0:\n index = jump_index\n else:\n raise ValueError(\"Invalid jump index\", jump_index)\n\n return running_program\n\n\nclass Robot:\n def __init__(self, start_color):\n assert start_color in (COLOR_BLACK, COLOR_WHITE)\n self.input_index = 0\n self.std_input = [start_color] # Seed first panel\n self.std_output = []\n self.panels = collections.defaultdict(list)\n self.position = np.array([[0], [0]])\n self.direction = np.array([[0], [1]])\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # NOTE: This is not thread-safe\n curr_index = self.input_index\n self.input_index = curr_index + 1\n return self.std_input[curr_index]\n\n def append(self, value):\n # NOTE: This is not thread-safe\n self.std_output.append(value)\n if len(self.std_output) % 2 == 0:\n color, direction_int = self.std_output[-2:]\n assert color in (COLOR_BLACK, COLOR_WHITE)\n # Paint the current panel.\n self.panels[tuple(self.position.flatten())].append(color)\n # Turn the robot\n if direction_int == 0:\n self.direction = TURN_LEFT.dot(self.direction)\n elif direction_int == 1:\n self.direction = TURN_RIGHT.dot(self.direction)\n else:\n raise ValueError(\"Invalid direction\", direction_int)\n # Advance the robot\n self.position += self.direction\n # Get current paint color of new position\n colors = self.panels[tuple(self.position.flatten())]\n if colors:\n curr_color = colors[-1]\n else:\n curr_color = COLOR_BLACK\n # Add the color to inputs.\n 
self.std_input.append(curr_color)\n\n\ndef paint_hull(program, start_color):\n robot = Robot(start_color)\n run_intcode(program, robot, robot)\n return robot\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = collections.defaultdict(int)\n for index, value in enumerate(content.strip().split(\",\")):\n program[index] = int(value)\n\n robot = paint_hull(program, COLOR_BLACK)\n count = sum(1 for colors in robot.panels.values() if colors)\n print(f\"Number of painted panels when starting with Black: {count}\")\n\n robot = paint_hull(program, COLOR_WHITE)\n all_indices = np.array(list(robot.panels.keys()))\n min_x = min(all_indices[:, 0])\n max_x = max(all_indices[:, 0])\n min_y = min(all_indices[:, 1])\n max_y = max(all_indices[:, 1])\n width_x = max_x - min_x + 1\n width_y = max_y - min_y + 1\n\n painted = COLOR_BLACK * np.ones((width_x, width_y), dtype=np.uint8)\n for position, colors in robot.panels.items():\n if not colors:\n continue\n assert len(colors) == 1\n color = colors[0]\n assert color in (COLOR_BLACK, COLOR_WHITE)\n x, y = position\n shifted_x = x - min_x\n shifted_y = y - min_y\n assert 0 <= shifted_x < width_x\n assert 0 <= shifted_y < width_y\n painted[shifted_x, shifted_y] = color\n\n # Swap rows and columns\n painted = painted.T\n # Invert rows\n painted = painted[::-1, :]\n\n # Swap white and black and scale up to highest pixel intensity.\n image = PIL.Image.fromarray(MAX_PIXEL - MAX_PIXEL * painted)\n image.save(HERE / \"image.png\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "8132090", "language": "Python", "matching_score": 5.448893070220947, "max_stars_count": 0, "path": "day11/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport itertools\nimport pathlib\nimport threading\nimport time\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODES = {\n 1: (\"ADD\", 3),\n 2: (\"MULTIPLY\", 3),\n 3: (\"INPUT\", 1),\n 4: (\"OUTPUT\", 1),\n 5: (\"JUMP-IF-TRUE\", 2),\n 6: (\"JUMP-IF-FALSE\", 2),\n 7: (\"LESS-THAN\", 3),\n 8: (\"EQUALS\", 3),\n 99: (\"HALT\", 0),\n}\nPOSITION_MODE = \"0\"\nIMMEDIATE_MODE = \"1\"\nALL_MODES = set(\"01\")\n\n\ndef do_add(modes, params, program):\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n assert 0 <= param3 < len(program)\n program[param3] = value1 + value2\n\n return -1\n\n\ndef do_multiply(modes, params, program):\n # TODO: Re-factor into `do_add()`\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = 
params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n assert 0 <= param3 < len(program)\n program[param3] = value1 * value2\n\n return -1\n\n\ndef do_input(modes, params, program, std_input):\n assert modes == (POSITION_MODE,), modes\n param, = params\n\n assert 0 <= param < len(program)\n program[param] = next(std_input)\n\n return -1\n\n\ndef do_output(modes, params, program, std_output):\n mode, = modes\n param, = params\n\n if mode == POSITION_MODE:\n assert 0 <= param < len(program)\n value = program[param]\n elif mode == IMMEDIATE_MODE:\n value = param\n else:\n raise ValueError(\"Bad mode\", modes, params, program)\n\n std_output.append(value)\n\n return -1\n\n\ndef next_instruction(index, program):\n assert 0 <= index < len(program)\n op_code_with_extra = program[index]\n assert op_code_with_extra >= 0\n\n mode_as_int, op_code = divmod(op_code_with_extra, 100)\n instruction, num_params = OPCODES[op_code]\n next_index = index + 1 + num_params\n if num_params == 0:\n assert mode_as_int == 0\n return instruction, (), (), next_index\n\n mode_chars = str(mode_as_int).zfill(num_params)\n assert len(mode_chars) == num_params, (mode_chars, num_params)\n assert set(mode_chars) <= ALL_MODES\n modes = tuple(reversed(mode_chars))\n\n params = tuple(program[index + 1 : next_index])\n assert len(params) == num_params # No partial slice\n\n return instruction, modes, params, next_index\n\n\ndef do_jump_if_true(modes, params, program):\n mode1, mode2 = modes\n param1, param2 = params\n\n # TODO: This may be incorrect interpretation.\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 != 0:\n return value2\n\n return -1\n\n\ndef do_jump_if_false(modes, params, program):\n # TODO: Fold this into `do_jump_if_true`\n mode1, mode2 = modes\n param1, param2 = params\n\n # TODO: This may be incorrect interpretation.\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 == 0: # Only difference from `do_jump_if_true`\n return value2\n\n return -1\n\n\ndef do_less_than(modes, params, program):\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif 
mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 < value2:\n to_store = 1\n else:\n to_store = 0\n\n assert 0 <= param3 < len(program)\n program[param3] = to_store\n return -1\n\n\ndef do_equal(modes, params, program):\n # TODO: Factor into `do_less_than`\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 == value2: # Only difference from `do_less_than`\n to_store = 1\n else:\n to_store = 0\n\n assert 0 <= param3 < len(program)\n program[param3] = to_store\n return -1\n\n\ndef do_halt():\n return -2\n\n\ndef execute_instruction(\n instruction, modes, params, program, std_input, std_output\n):\n if instruction == \"ADD\":\n return do_add(modes, params, program)\n\n if instruction == \"MULTIPLY\":\n return do_multiply(modes, params, program)\n\n if instruction == \"INPUT\":\n return do_input(modes, params, program, std_input)\n\n if instruction == \"OUTPUT\":\n return do_output(modes, params, program, std_output)\n\n if instruction == \"JUMP-IF-TRUE\":\n return do_jump_if_true(modes, params, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, program)\n\n if instruction == \"LESS-THAN\":\n return do_less_than(modes, params, program)\n\n if instruction == \"EQUALS\":\n return do_equal(modes, params, program)\n\n if instruction == \"HALT\":\n return do_halt()\n\n raise ValueError(\"Bad instruction\", instruction, modes, params, program)\n\n\ndef run_intcode(program, std_input, std_output):\n running_program = copy.deepcopy(program)\n\n jump_index = -1\n index = 0\n while jump_index != -2:\n assert jump_index >= -1\n instruction, modes, params, index = next_instruction(\n index, running_program\n )\n jump_index = execute_instruction(\n instruction, modes, params, running_program, std_input, std_output\n )\n if jump_index >= 0:\n index = jump_index\n\n return running_program\n\n\ndef run_it(program, input_, std_output=None):\n std_input = iter(input_)\n if std_output is None:\n std_output = []\n run_intcode(program, std_input, std_output)\n return std_output\n\n\ndef run_sequence(program, sequence):\n output_value = 0\n for sequence_value in sequence:\n output_value, = run_it(program, [sequence_value, output_value])\n return output_value\n\n\nclass BlockingStream:\n def __init__(self, initial_values):\n self.lock = threading.Lock()\n self.values = [value for value in initial_values]\n self.index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self.lock:\n curr_index = self.index\n self.index = curr_index + 1\n\n # NOTE: This spinlock is suboptimal, but it could be worse.\n while curr_index >= len(self.values):\n time.sleep(1e-3)\n\n return self.values[curr_index]\n\n def append(self, value):\n with self.lock:\n self.values.append(value)\n\n\ndef run_sequence_connected(program, sequence):\n vA, vB, vC, vD, vE = sequence\n sA = BlockingStream([vA, 0])\n sB = BlockingStream([vB])\n sC = 
BlockingStream([vC])\n sD = BlockingStream([vD])\n sE = BlockingStream([vE])\n\n tAB = threading.Thread(target=run_it, args=(program, sA, sB))\n tBC = threading.Thread(target=run_it, args=(program, sB, sC))\n tCD = threading.Thread(target=run_it, args=(program, sC, sD))\n tDE = threading.Thread(target=run_it, args=(program, sD, sE))\n tEA = threading.Thread(target=run_it, args=(program, sE, sA))\n tAB.start()\n tBC.start()\n tCD.start()\n tDE.start()\n tEA.start()\n\n tAB.join()\n tBC.join()\n tCD.join()\n tDE.join()\n tEA.join()\n\n return sA.values[-1]\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = [int(value) for value in content.strip().split(\",\")]\n\n max_value = 0\n max_permutation = None\n for permutation in itertools.permutations((0, 1, 2, 3, 4)):\n value = run_sequence(program, permutation)\n if value > max_value:\n max_value = value\n max_permutation = permutation\n\n print(f\"Serial I/O: {max_permutation} -> {max_value}\")\n\n max_value = 0\n max_permutation = None\n for permutation in itertools.permutations((5, 6, 7, 8, 9)):\n value = run_sequence_connected(program, permutation)\n if value > max_value:\n max_value = value\n max_permutation = permutation\n\n print(f\"Feedback I/O: {max_permutation} -> {max_value}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "5670633", "language": "Python", "matching_score": 5.17002010345459, "max_stars_count": 0, "path": "day07/main.py" }, { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport doctest\nimport pathlib\n\n\nHERE = pathlib.Path(__file__).resolve().parent\nOPCODES = {\n 1: (\"ADD\", 3),\n 2: (\"MULTIPLY\", 3),\n 3: (\"INPUT\", 1),\n 4: (\"OUTPUT\", 1),\n 5: (\"JUMP-IF-TRUE\", 2),\n 6: (\"JUMP-IF-FALSE\", 2),\n 7: (\"LESS-THAN\", 3),\n 8: (\"EQUALS\", 3),\n 99: (\"HALT\", 0),\n}\nPOSITION_MODE = \"0\"\nIMMEDIATE_MODE = \"1\"\nALL_MODES = set(\"01\")\n\n\ndef do_add(modes, params, program):\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n assert 0 <= param3 < len(program)\n program[param3] = value1 + value2\n\n return -1\n\n\ndef do_multiply(modes, params, program):\n # TODO: Re-factor into `do_add()`\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= 
param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n assert 0 <= param3 < len(program)\n program[param3] = value1 * value2\n\n return -1\n\n\ndef do_input(modes, params, program, std_input):\n assert modes == (POSITION_MODE,), modes\n param, = params\n\n assert 0 <= param < len(program)\n program[param] = next(std_input)\n\n return -1\n\n\ndef do_output(modes, params, program, std_output):\n mode, = modes\n param, = params\n\n if mode == POSITION_MODE:\n assert 0 <= param < len(program)\n value = program[param]\n elif mode == IMMEDIATE_MODE:\n value = param\n else:\n raise ValueError(\"Bad mode\", modes, params, program)\n\n std_output.append(value)\n\n return -1\n\n\ndef next_instruction(index, program):\n assert 0 <= index < len(program)\n op_code_with_extra = program[index]\n assert op_code_with_extra >= 0\n\n mode_as_int, op_code = divmod(op_code_with_extra, 100)\n instruction, num_params = OPCODES[op_code]\n next_index = index + 1 + num_params\n if num_params == 0:\n assert mode_as_int == 0\n return instruction, (), (), next_index\n\n mode_chars = str(mode_as_int).zfill(num_params)\n assert len(mode_chars) == num_params, (mode_chars, num_params)\n assert set(mode_chars) <= ALL_MODES\n modes = tuple(reversed(mode_chars))\n\n params = tuple(program[index + 1 : next_index])\n assert len(params) == num_params # No partial slice\n\n return instruction, modes, params, next_index\n\n\ndef do_jump_if_true(modes, params, program):\n mode1, mode2 = modes\n param1, param2 = params\n\n # TODO: This may be incorrect interpretation.\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 != 0:\n return value2\n\n return -1\n\n\ndef do_jump_if_false(modes, params, program):\n # TODO: Fold this into `do_jump_if_true`\n mode1, mode2 = modes\n param1, param2 = params\n\n # TODO: This may be incorrect interpretation.\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 == 0: # Only difference from `do_jump_if_true`\n return value2\n\n return -1\n\n\ndef do_less_than(modes, params, program):\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 < value2:\n to_store = 1\n else:\n to_store = 0\n\n assert 0 <= param3 < len(program)\n program[param3] = to_store\n return -1\n\n\ndef 
do_equal(modes, params, program):\n # TODO: Factor into `do_less_than`\n mode1, mode2, mode3 = modes\n assert mode3 == POSITION_MODE\n param1, param2, param3 = params\n if mode1 == POSITION_MODE:\n assert 0 <= param1 < len(program)\n value1 = program[param1]\n elif mode1 == IMMEDIATE_MODE:\n value1 = param1\n else:\n raise ValueError(\"Bad mode 1\", modes, params, program)\n\n if mode2 == POSITION_MODE:\n assert 0 <= param2 < len(program)\n value2 = program[param2]\n elif mode2 == IMMEDIATE_MODE:\n value2 = param2\n else:\n raise ValueError(\"Bad mode 2\", modes, params, program)\n\n if value1 == value2: # Only difference from `do_less_than`\n to_store = 1\n else:\n to_store = 0\n\n assert 0 <= param3 < len(program)\n program[param3] = to_store\n return -1\n\n\ndef do_halt():\n return -2\n\n\ndef execute_instruction(\n instruction, modes, params, program, std_input, std_output\n):\n if instruction == \"ADD\":\n return do_add(modes, params, program)\n\n if instruction == \"MULTIPLY\":\n return do_multiply(modes, params, program)\n\n if instruction == \"INPUT\":\n return do_input(modes, params, program, std_input)\n\n if instruction == \"OUTPUT\":\n return do_output(modes, params, program, std_output)\n\n if instruction == \"JUMP-IF-TRUE\":\n return do_jump_if_true(modes, params, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, program)\n\n if instruction == \"JUMP-IF-FALSE\":\n return do_jump_if_false(modes, params, program)\n\n if instruction == \"LESS-THAN\":\n return do_less_than(modes, params, program)\n\n if instruction == \"EQUALS\":\n return do_equal(modes, params, program)\n\n if instruction == \"HALT\":\n return do_halt()\n\n raise ValueError(\"Bad instruction\", instruction, modes, params, program)\n\n\ndef run_intcode(program, std_input, std_output):\n running_program = copy.deepcopy(program)\n\n jump_index = -1\n index = 0\n while jump_index != -2:\n assert jump_index >= -1\n instruction, modes, params, index = next_instruction(\n index, running_program\n )\n jump_index = execute_instruction(\n instruction, modes, params, running_program, std_input, std_output\n )\n if jump_index >= 0:\n index = jump_index\n\n return running_program\n\n\ndef main():\n filename = HERE / \"input.txt\"\n with open(filename, \"r\") as file_obj:\n content = file_obj.read()\n\n program = [int(value) for value in content.strip().split(\",\")]\n\n std_input_list = [1]\n std_input = iter(std_input_list)\n std_output = []\n run_intcode(program, std_input, std_output)\n print(std_output)\n\n std_input_list = [5]\n std_input = iter(std_input_list)\n std_output = []\n run_intcode(program, std_input, std_output)\n print(std_output)\n\n\nif __name__ == \"__main__\":\n # doctest.testmod()\n main()\n", "id": "3143182", "language": "Python", "matching_score": 1.0791668891906738, "max_stars_count": 0, "path": "day05/main.py" }, { "content": "#!/usr/bin/env python\n\n# Given the positive integers, x, y, and z, are consecutive terms of an\n# arithmetic progression, the least value of the positive integer, n,\n# for which the equation, x^2 - y^2 - z^2 = n, has exactly two solutions\n# is n = 27:\n\n# 34^2 - 27^2 - 20^2 = 12^2 - 9^2 - 6^2 = 27\n\n# It turns out that n = 1155 is the least value which has exactly\n# ten solutions.\n\n# How many values of n less than one million have exactly\n# ten distinct solutions?\n\n###############################\n# Positive integers, a > k > 0\n# n = (a + k)^2 - a^2 - (a - k)^2\n# n = a(4k - a)\n\n# Pick a | n, then k = (n + a**2)/(4*a)\n# 
We need k < a, so n = a(4k - a) < 3a**2\n# We need k integral, so (n/a + a) % 4 == 0\n\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import factors\nfrom python.functions import sieve\n\n\ndef num_solutions(factor_list):\n n = max(factor_list)\n choices_a = [factor for factor in factor_list if n < 3 * (factor ** 2)]\n return [a for a in choices_a if (n / a + a) % 4 == 0]\n\n\ndef main(verbose=False):\n MAX_n = 10 ** 6 - 1\n distinct_solutions = 10\n PRIMES = sieve(int(sqrt(MAX_n)) + 1)\n factor_hash = {}\n\n count = 0\n for n in range(1, MAX_n + 1):\n factor_list = factors(n, factor_hash, PRIMES)\n if len(num_solutions(factor_list)) == distinct_solutions:\n count += 1\n return count\n\nif __name__ == '__main__':\n print euler_timer(135)(main)(verbose=True)\n", "id": "7101523", "language": "Python", "matching_score": 2.1697206497192383, "max_stars_count": 7, "path": "python/complete/no135.py" }, { "content": "#!/usr/bin/env python\n\n# There are ten composites below thirty containing precisely two, not\n# necessarily distinct, prime factors:\n# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.\n\n# How many composite integers, n < 10**8, have precisely two, not necessarily\n# distinct, prime factors?\n\nfrom bisect import bisect\nfrom math import sqrt\n\nfrom python.decorators import euler_timer\nfrom python.functions import sieve\n\n\ndef main(verbose=False):\n MAX_n = 10 ** 8\n # q <= p, q**2 <= pq = n < max_n, q < sqrt(max_n)\n # 2 <= q, 2p <= pq < max_n, p < max_n/2\n # Given q, pq < max_n, p < max_n/q\n PRIMES = sieve(MAX_n / 2) # integer division intended\n result = 0\n q_max_index = bisect(PRIMES, sqrt(MAX_n))\n for q_index in range(q_max_index + 1):\n p_min_index = q_index\n p_max_index = bisect(PRIMES, MAX_n * 1.0 / PRIMES[q_index])\n result += p_max_index - p_min_index\n return result\n\nif __name__ == '__main__':\n print euler_timer(187)(main)(verbose=True)\n", "id": "1254205", "language": "Python", "matching_score": 1.2181133031845093, "max_stars_count": 7, "path": "python/complete/no187.py" }, { "content": "#!/usr/bin/env python\n\n# The first two consecutive numbers to have two distinct prime factors are:\n# 14 = 2 X 7, 15 = 3 X 5\n\n# The first three consecutive numbers to have three\n# distinct prime factors are:\n# 644 = 2^2 X 7 X 23, 645 = 3 X 5 X 43, 646 = 2 X 17 X 19\n\n# Find the first four consecutive integers to have four distinct\n# primes factors. What is the first of these numbers?\n\nfrom python.decorators import euler_timer\nfrom python.functions import prime_factors\n\n\ndef increment(value, list_):\n \"\"\"\n This updates the value according to the list. Since we seek 4\n consecutive numbers with exactly 4 prime factors, we can jump\n 4 numbers if the last doesn't have 4 factors, can jump 3 if\n the second to last doesn't have 4 factors, and so on\n \"\"\"\n if list_[-1] != 4:\n return value + 4\n\n # We can assume the last element is a 4\n if list_[-2:] != [4, 4]:\n return value + 3\n\n # We can assume the last 2 elements are [4,4]\n if list_[-3:] != [4, 4, 4]:\n return value + 2\n\n # We can assume the last 3 elements are [4,4,4]\n return value + 1\n\n\ndef main(verbose=False):\n # Find the first four consecutive integers to have four distinct\n # primes factors. 
What is the first of these numbers?\n\n factor_hash = {1: [], 2: [2]}\n # Smallest product of 4 primes is 2*3*5*7 = 210\n # We need to update the hash to get to this point\n for i in range(3, 210 + 1):\n prime_factors(i, hash_=factor_hash)\n\n smallest = 210 # The smallest integer of the four\n num_factors = [len(prime_factors(smallest + i,\n unique=True,\n hash_=factor_hash))\n for i in range(4)]\n while num_factors != [4, 4, 4, 4]:\n smallest = increment(smallest, num_factors)\n num_factors = [len(prime_factors(smallest + i,\n unique=True,\n hash_=factor_hash))\n for i in range(4)]\n return smallest\n\nif __name__ == '__main__':\n print euler_timer(47)(main)(verbose=True)\n", "id": "5822664", "language": "Python", "matching_score": 1.9005000591278076, "max_stars_count": 7, "path": "python/too_slow/no047.py" }, { "content": "#!/usr/bin/env python\n\n# What is the value of the first triangle number to have over\n# five hundred divisors?\n\nfrom python.decorators import euler_timer\nfrom python.functions import prime_factors\n\n\ndef list_frequencies(list_):\n result = {}\n for element in list_:\n # if element is not in result, sets to 1 (default 0 returned by get)\n result[element] = result.get(element, 0) + 1\n return result.items()\n\n\ndef special_num_factors(a, b, hash_):\n factors = (prime_factors(a, unique=False, hash_=hash_) +\n prime_factors(b, unique=False, hash_=hash_))\n factors = list_frequencies(factors)\n\n prod = 1\n for factor in factors:\n prod *= factor[1] + 1\n return prod\n\n\ndef num_factors_nth_triangular(n, hash_):\n if n % 2 == 0:\n return special_num_factors(n / 2, n + 1, hash_)\n else:\n return special_num_factors(n, (n + 1) / 2, hash_)\n\n\ndef main(verbose=False):\n n = 1\n h = {}\n num_fac = num_factors_nth_triangular(n, h)\n while num_fac <= 500:\n n += 1\n num_fac = num_factors_nth_triangular(n, h)\n if verbose:\n return \"%s.\\nIt is the %sth triangular number and has %s divisors.\" % (\n (n * (n + 1)) / 2, n, num_fac)\n else:\n return (n * (n + 1)) / 2\n\nif __name__ == '__main__':\n print euler_timer(12)(main)(verbose=True)\n", "id": "8606427", "language": "Python", "matching_score": 1.7932982444763184, "max_stars_count": 7, "path": "python/complete/no012.py" }, { "content": "#!/usr/bin/env python\n\n# What is the largest prime factor of the number 600851475143\n\nfrom python.decorators import euler_timer\nfrom python.functions import prime_factors\n\n\ndef main(verbose=False):\n return max(prime_factors(600851475143))\n\nif __name__ == '__main__':\n print euler_timer(3)(main)(verbose=True)\n", "id": "5710060", "language": "Python", "matching_score": 1.1651912927627563, "max_stars_count": 7, "path": "python/complete/no003.py" } ]
1.778367
piotrek-k
[ { "content": "import os\nfrom setuptools import setup, find_packages\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept:\n long_description = \"\"\"\n Forked from https://github.com/FaBoPlatform/FaBo9AXIS-MPU9250-Python .\n Changes are meant to enable Python 3 support. \n Original package is called `FaBo9Axis_MPU9250`.\n Author of original package: FaBo\"\"\"\n\nclassifiers = ['Development Status :: 4 - Beta',\n 'Operating System :: POSIX :: Linux',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: System :: Hardware']\n\nsetup(\n name='FaBo9Axis_MPU9250_Python3',\n version='1.0.3',\n author='RandomUser1',\n description=\"Fork of `FaBo9Axis_MPU9250`. This is a library for the FaBo 9AXIS I2C Brick.\",\n long_description=long_description,\n url='https://github.com/piotrek-k/FaBo9AXIS-MPU9250-Python3',\n license='Apache License 2.0',\n classifiers=classifiers,\n packages=find_packages(),\n install_requires=[\n 'smbus'\n ]\n)\n", "id": "10261688", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "setup.py" }, { "content": "# https://github.com/shoji9x9/CIFAR-10-By-small-ResNet/blob/master/ResNet-for-CIFAR-10-with-Keras.ipynb\n\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation, MaxPool2D, GlobalAveragePooling2D, Add, Input, Flatten\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.regularizers import l2\n\n\ncifar = tf.keras.datasets.cifar10\n\nclass RESNET_Model:\n def __init__(self):\n self.num_channels = 3\n self.image_size = 32\n self.num_labels = 10\n self.model = None\n\n def build_resnet(self):\n n = 9 # 56 layers\n channels = [16, 32, 64]\n\n inputs = Input(shape=(32, 32, 3))\n x = Conv2D(channels[0], kernel_size=(3, 3), padding=\"same\", kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(1e-4))(inputs)\n x = BatchNormalization()(x)\n x = Activation(tf.nn.relu)(x)\n\n for c in channels:\n for i in range(n):\n subsampling = i == 0 and c > 16\n strides = (2, 2) if subsampling else (1, 1)\n y = Conv2D(c, kernel_size=(3, 3), padding=\"same\", strides=strides, kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(1e-4))(x)\n y = BatchNormalization()(y)\n y = Activation(tf.nn.relu)(y)\n y = Conv2D(c, kernel_size=(3, 3), padding=\"same\", kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(1e-4))(y)\n y = BatchNormalization()(y)\n if subsampling:\n x = Conv2D(c, kernel_size=(1, 1), strides=(2, 2), padding=\"same\", kernel_initializer=\"he_normal\",\n kernel_regularizer=l2(1e-4))(x)\n x = Add()([x, y])\n x = Activation(tf.nn.relu)(x)\n\n x = GlobalAveragePooling2D()(x)\n x = Flatten()(x)\n outputs = Dense(10, activation=tf.nn.softmax, kernel_initializer=\"he_normal\")(x)\n\n self.model = Model(inputs=inputs, outputs=outputs)\n self.model.type = \"resnet\" + str(6 * n + 2)\n\n def train(self):\n (x_train, y_train), (x_test, y_test) = cifar.load_data()\n x_train, x_test = x_train / 255.0, x_test / 255.0\n\n self.model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n self.model.fit(x_train, y_train, epochs=5)\n self.model.evaluate(x_test, y_test)\n\nresnet_model = 
RESNET_Model()\nresnet_model.build_resnet()\n\nresnet_model.model.summary()\n\nresnet_model.train()", "id": "4363447", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "setup_resnet.py" } ]
0
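The setup_resnet.py file in the piotrek-k entry above builds its 6n+2-layer CIFAR-10 network from repeated two-convolution residual blocks, with a strided 1x1 projection on the shortcut whenever the channel count doubles. A stripped-down sketch of that block structure, assuming only tensorflow.keras (residual_block and the tiny two-block model below are illustrative, not part of the repo):

import tensorflow as tf
from tensorflow.keras import layers

def residual_block(x, channels, downsample=False):
    """Basic conv-BN-ReLU-conv-BN block with an additive shortcut."""
    strides = (2, 2) if downsample else (1, 1)
    y = layers.Conv2D(channels, 3, strides=strides, padding="same")(x)
    y = layers.BatchNormalization()(y)
    y = layers.Activation("relu")(y)
    y = layers.Conv2D(channels, 3, padding="same")(y)
    y = layers.BatchNormalization()(y)
    if downsample:
        # 1x1 strided convolution so the shortcut matches the new shape
        x = layers.Conv2D(channels, 1, strides=(2, 2), padding="same")(x)
    out = layers.Add()([x, y])
    return layers.Activation("relu")(out)

inputs = tf.keras.Input(shape=(32, 32, 3))
x = layers.Conv2D(16, 3, padding="same")(inputs)
x = residual_block(x, 16)                  # same shape: identity shortcut
x = residual_block(x, 32, downsample=True) # halve spatial size, double channels
model = tf.keras.Model(inputs, x)

The original file repeats this block n=9 times per channel stage (16, 32, 64) before global average pooling, which is where the 56-layer count comes from.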
JulieLenzer
[ { "content": "import re\nimport string\n\ndef normalize_tokens(tokenlist):\n # Input: list of tokens as strings, e.g. ['I', ' ', 'saw', ' ', '@psresnik', ' ', 'on', ' ','Twitter']\n # Output: list of tokens where\n # - All tokens are lowercased\n # - All tokens starting with a whitespace character have been filtered out\n # - All handles (tokens starting with @) have been filtered out\n # - Any underscores have been replaced with + (since we use _ as a special character in bigrams)\n normalized_tokens = [token.lower().replace('_','+') for token in tokenlist # lowercase, _ => +\n if re.search('[^\\s]', token) is not None # ignore whitespace tokens\n and not token.startswith(\"@\") # ignore handles\n ]\n return normalized_tokens \n\n# Take a list of string tokens and return all ngrams of length n,\n# representing each ngram as a list of tokens.\n# E.g. ngrams(['the','quick','brown','fox'], 2)\n# returns [['the','quick'], ['quick','brown'], ['brown','fox']]\n# Note that this should work for any n, not just unigrams and bigrams\ndef ngrams(tokens, n):\n # Returns all ngrams of size n in sentence, where an ngram is itself a list of tokens\n return [tokens[i:i+n] for i in range(len(tokens)-n+1)]\n\ndef filter_punctuation_bigrams(ngrams):\n # Input: assume ngrams is a list of ['token1','token2'] bigrams\n # Removes ngrams like ['today','.'] where either token is a punctuation character\n # Returns list with the items that were not removed\n punct = string.punctuation\n return [ngram for ngram in ngrams if ngram[0] not in punct and ngram[1] not in punct]\n\ndef filter_stopword_bigrams(ngrams, stopwords):\n # Input: assume ngrams is a list of ['token1','token2'] bigrams, stopwords is a set of words like 'the'\n # Removes ngrams like ['in','the'] and ['senator','from'] where either word is a stopword\n # Returns list with the items that were not removed\n result = [ngram for ngram in ngrams if ngram[0] not in stopwords and ngram[1] not in stopwords]\n return result\n", "id": "7581557", "language": "Python", "matching_score": 1.3075790405273438, "max_stars_count": 0, "path": "assignment2/assignment1_fns.py" }, { "content": "################################################################\n##\n## Demonstrations of CKY control structure\n##\n## CKY minus the grammar. :)\n## \n################################################################\nimport argparse\n\n################################################################\n# Convenience functions\n################################################################\ndef ordinal(n):\n if n == 1:\n return(\"1st\")\n elif n == 2:\n return(\"2nd\")\n elif n == 3:\n return(\"3rd\")\n else:\n return(\"{}th\".format(n))\n\n# Human-readable span\ndef span(words,start,end):\n return( \"{}-{}({})\".format(start,end,\" \".join(words[start:end])))\n\n################################################################\n# SLP version of CKY\n################################################################\ndef cky_by_start_and_end(wordstring):\n words = wordstring.split()\n n = len(words)\n\n print(\"CKY with the control structure in SLP figure 13.5\\nCell [i,j] means word span from i to j\\n\")\n # for j from 1 to length(words)\n for j in range(1,n+1): \n print(\"Looking for A's to put in cell [i={},j={}] span starting at {} and ending at {}, i.e. {}\".format(j-1,j,j-1,j,span(words,j-1,j)))\n # for i from j-2 down to 0\n for i in range(j-2, -1, -1):\n print(\" Looking for A's to put in cell [i={},j={}] (span starting at {} and ending at {}, i.e. 
{})\".format(i,j,i,j,span(words,i,j)))\n # for k from i+1 to j-1\n for k in range(i+1,j-1+1):\n print(\" Considering split point k={}, for combining B in [i={},j={}] with a C in [i={},j={}], i.e. {} + {}\".format(k,i,k,k,j,span(words,i,k),span(words,k,j)))\n print(\"Checking for an S in cell [{},{}]\".format(0,n))\n\n################################################################\n# Wikipedia/lecture version of CKY\n################################################################\ndef cky_by_length_and_start(wordstring):\n words = wordstring.split()\n n = len(words)\n\n print(\"CKY with control structure in the Wikipedia entry at https://en.wikipedia.org/wiki/CYK_algorithm\")\n print(\"Cell [l,i] means span of length len=l starting at position i\\n\")\n\n # for j from 1 to length(words)\n for s in range(1,len(words)+1):\n print(\"Looking for A's to put in cell [len={},start={}], i.e. {}\".format(1,s-1,span(words,s-1,s)))\n\n # for each l = 2 to n -- Length of span\n for l in range(2,len(words)+1):\n print(\"Building spans of length {} (row numbered {} in table)\".format(l,l))\n # for each s = 1 to n-l+1 -- Start of span\n for i in range(0,n-l+1):\n print(\" Looking for A's to put in cell [len={},start={}], i.e. {}\".format(l,i,span(words,i,i+l)))\n # for each p = 1 to l-1 -- Partition of span\n for p in range(1,l-1+1):\n print(\" Considering partition with p={} (split after {} word in the span), for combining B in [len={},start={}] with C in [len={},start={}], i.e. {} + {}\".format(p,ordinal(p),p,i,l-p,i+p,span(words,i,i+p),span(words,i+p,i+l)))\n print(\"Checking for an S in cell [len={},start={}]\".format(n,0))\n\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser(description='Demonstrate CKY control structure')\n parser.add_argument('--version', default='Wikipedia', action='store', help=\"SLP or Wikipedia\")\n parser.add_argument('--string', default='this is a string', action='store', help=\"String of tokens to parse, e.g. 'this is a test'\")\n args = parser.parse_args()\n\n print(\"\\nRunning CKY on: {}\\n\".format(args.string))\n \n if args.version == 'SLP':\n cky_by_start_and_end(args.string)\n elif args.version == 'Wikipedia':\n cky_by_length_and_start(args.string)\n else:\n print(\"Unknown value for --version\")\n", "id": "7608106", "language": "Python", "matching_score": 0.46612754464149475, "max_stars_count": 0, "path": "assignment3/cky_ordering.py" }, { "content": "import sys\nimport spacy\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\n# Word clustering using k-means, using spacy's vector representations of words,\n# which are 300-dimensional GloVe vectors trained on the Common Crawl; see https://spacy.io/models/en\n# and https://nlp.stanford.edu/projects/glove/. This function:\n# Converts a list of words into a list of vectors using the spacy nlp object\n# Does k-means clustering to generate numclusters clusters\n# Returns a dictionary that maps from numeric cluster label to the list of words in that cluster\n#\n# Note: this function silently ignores words that don't have a vector representation in spacy.\n#\n# See https://github.com/danielwilentz/Cuisine-Classifier/blob/master/topic_modeling/clustering.ipynb\n# for an example of clustering that uses word2vec vectors, which you can find online by searching\n# for GoogleNews-vectors-negative300.bin. 
That example also illustrates a common manual method for\n# deciding on how many clusters to use.\ndef cluster_words(nlp, words, numclusters):\n \n # Convert words into spacy tokens and from there into vectors\n # Only include tokens that actually have a vector in spacy\n tokens = [nlp(word) for word in words]\n vectors = [token.vector for token in tokens if token.has_vector]\n wordlist = [token.text for token in tokens if token.has_vector]\n\n # Do k-means clustering on the set of vectors\n # Result of clustering is item_labels, a numpy array with one entry per item that was clustered\n # That is, item_labels[0] is the cluster label for the first vector,\n # item_labels[1] is the cluster label for the next vector, etc.\n sys.stderr.write(\"Running KMeans...\\n\")\n df_vectors = pd.DataFrame(vectors)\n km = KMeans(n_clusters=numclusters, init='k-means++', random_state=10, n_init=1)\n km.fit(df_vectors)\n item_labels = km.predict(df_vectors) \n\n # Convert k-means result into a dictionary (clusterdict) mapping from\n # a cluster label to list of the words in that cluster\n clusterdict = {}\n item_number = 0\n for item_label in item_labels:\n if item_label in clusterdict:\n clusterdict[item_label].append(wordlist[item_number])\n else:\n clusterdict[item_label] = [wordlist[item_number]]\n item_number +=1\n \n return clusterdict\n\n\n\nif __name__ == \"__main__\":\n \n # Illustrate k-means clustering using spacy's vector representations.\n # The result, clusterdict, is a mapping from integer cluster labels (0, 1, etc.)\n # to words in the cluster. What do you expect the words to be in the two clusters?\n \n print(\"Initializing spaCy\")\n nlp = spacy.load('en_core_web_sm')\n \n print(\"Clustering\")\n wordlist = ['eat', 'drink', 'consume', 'dog', 'cat', 'bear', 'pig']\n clusterdict = cluster_words(nlp, wordlist, 2)\n\n print(\"Clustering result\")\n print(clusterdict)\n", "id": "6912613", "language": "Python", "matching_score": 0.893919050693512, "max_stars_count": 0, "path": "assignment3/cluster_words.py" }, { "content": "import os\nimport sys\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\nimport gzip\nimport csv, re\nimport string\nfrom tqdm import tqdm\nimport codecs\nimport argparse\nfrom collections import Counter\nfrom spacy.lang.en import English\nfrom assignment1_fns import *\n\n\n# Convenient for debugging but feel free to comment out\nfrom traceback_with_variables import activate_by_import\n\n# Hard-wired variables\ninput_speechfile = \"./speeches2020_jan_to_jun.jsonl.gz\"\nstopwords_file = \"./mallet_en_stoplist.txt\"\n\n\n# This is the similar to read_and_clean_lines in the previous assignment, but\n# rather than just returning a list of cleaned lines of text, we should return\n# returns two lists (of the same length): the cleaned lines and the party of the person who was speaking\n#\n# Make sure to replace line-internal whitespace (newlines, tabs, etc.) 
in text with a space.\n#\n# For information on how to read from a gzipped file, rather than uncompressing and reading, see\n# https://stackoverflow.com/questions/10566558/python-read-lines-from-compressed-text-files#30868178\n#\n# For info on parsing jsonlines, see https://www.geeksforgeeks.org/json-loads-in-python/.\n# (There are other ways of doing it, of course.)\ndef read_and_clean_lines(infile):\n print(\"\\nReading and cleaning text from {}\".format(infile))\n lines = []\n parties = []\n # TO DO: Your code goes here\n print(\"Read {} documents\".format(len(lines)))\n print(\"Read {} labels\".format(len(parties)))\n return lines, parties\n\n# Read a set of stoplist words from filename, assuming it contains one word per line\n# Return a python Set data structure (https://www.w3schools.com/python/python_sets.asp)\ndef load_stopwords(filename):\n stopwords = []\n with codecs.open(filename, 'r', encoding='ascii', errors='ignore') as fp:\n stopwords = fp.read().split('\\n')\n return set(stopwords)\n\n\n# Call sklearn's train_test_split function to split the dataset into training items/labels\n# and test items/labels. See https://realpython.com/train-test-split-python-data/\n# (or Google train_test_split) for how to make this call.\n#\n# Note that the train_test_split function returns four sequences: X_train, X_test, y_train, y_test\n# X_train and y_train are the training items and labels, respectively\n# X_test and y_test are the test items and labels, respectively\n#\n# This function should return those four values\ndef split_training_set(lines, labels, test_size=0.3, random_seed=42):\n # TO DO: replace this line with a call to train_test_split\n X_train, X_test, y_train, y_test = np.array([]), np.array([]), np.array([]), np.array([]) \n print(\"Training set label counts: {}\".format(Counter(y_train)))\n print(\"Test set label counts: {}\".format(Counter(y_test)))\n return X_train, X_test, y_train, y_test\n\n# Converting text into features.\n# Inputs:\n# X - a sequence of raw text strings to be processed\n# analyzefn - either built-in (see CountVectorizer documentation), or a function we provide from strings to feature-lists\n#\n# Arguments used by the words analyzer\n# stopwords - set of stopwords (used by \"word\" analyzer\")\n# lowercase - true if normalizing by lowercasing\n# ngram_range - (N,M) for using ngrams of sizes N up to M as features, e.g. (1,2) for unigrams and bigrams\n#\n# Outputs:\n# X_features - corresponding feature vector for each raw text item in X\n# training_vectorizer - vectorizer object that can now be applied to some new X', e.g. 
containing test texts\n# \n# You can find documentation at https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\n# and there's a nice, readable discussion at https://medium.com/swlh/understanding-count-vectorizer-5dd71530c1b\n#\ndef convert_text_into_features(X, stopwords_arg, analyzefn=\"word\", range=(1,2)):\n training_vectorizer = CountVectorizer(stop_words=stopwords_arg,\n analyzer=analyzefn,\n lowercase=True,\n ngram_range=range)\n X_features = training_vectorizer.fit_transform(X)\n return X_features, training_vectorizer\n\n# Input:\n# lines - a raw text corpus, where each element in the list is a string\n# stopwords - a set of strings that are stopwords\n# remove_stopword_bigrams = True or False\n#\n# Output: a corresponding list converting the raw strings to space-separated features\n#\n# The features extracted should include non-stopword, non-punctuation unigrams,\n# plus the bigram features that were counted in collect_bigram_counts from the previous assignment\n# represented as underscore_separated tokens.\n# Example:\n# Input: [\"This is Remy's dinner.\",\n# \"Remy will eat it.\"]\n# Output: [\"remy 's dinner remy_'s 's_dinner\",\n# \"remy eat\"]\ndef convert_lines_to_feature_strings(lines, stopwords, remove_stopword_bigrams=True):\n\n print(\" Converting from raw text to unigram and bigram features\")\n if remove_stopword_bigrams:\n print(\" Includes filtering stopword bigrams\")\n \n print(\" Initializing\")\n nlp = English(parser=False)\n all_features = []\n print(\" Iterating through documents extracting unigram and bigram features\")\n for line in tqdm(lines):\n \n # Get spacy tokenization and normalize the tokens\n spacy_analysis = nlp(line)\n spacy_tokens = [token.orth_ for token in spacy_analysis]\n normalized_tokens = normalize_tokens(spacy_tokens)\n\n # Collect unigram tokens as features\n # Exclude unigrams that are stopwords or are punctuation strings (e.g. '.' or ',')\n unigrams = [token for token in normalized_tokens\n if token not in stopwords and token not in string.punctuation]\n\n # Collect string bigram tokens as features\n bigrams = []\n bigram_tokens = [\"_\".join(bigram) for bigram in bigrams]\n bigrams = ngrams(normalized_tokens, 2) \n bigrams = filter_punctuation_bigrams(bigrams)\n if remove_stopword_bigrams:\n bigrams = filter_stopword_bigrams(bigrams, stopwords)\n bigram_tokens = [\"_\".join(bigram) for bigram in bigrams]\n\n # Conjoin the feature lists and turn into a space-separated string of features.\n # E.g. 
if unigrams is ['coffee', 'cup'] and bigrams is ['coffee_cup', 'white_house']\n # then feature_string should be 'coffee cup coffee_cup white_house'\n\n # TO DO: replace this line with your code\n feature_string = [] \n\n # Add this feature string to the output\n all_features.append(feature_string)\n\n\n print(\" Feature string for first document: '{}'\".format(all_features[0]))\n \n return all_features\n\n# For both classes, print the n most heavily weighted features in this classifier.\ndef most_informative_features(vectorizer, classifier, n=20):\n # Adapted from https://stackoverflow.com/questions/11116697/how-to-get-most-informative-features-for-scikit-learn-classifiers#11116960\n feature_names = vectorizer.get_feature_names()\n coefs_with_features = sorted(zip(classifier.coef_[0], feature_names))\n top = zip(coefs_with_features[:n], coefs_with_features[:-(n + 1):-1])\n for (coef_1, feature_1), (coef_2, feature_2) in top:\n print(\"\\t%.4f\\t%-15s\\t\\t%.4f\\t%-15s\" % (coef_1, feature_1, coef_2, feature_2))\n\n# Split on whitespace, e.g. \"a b_c d\" returns tokens ['a','b_c','d']\ndef whitespace_tokenizer(line):\n return line.split()\n \ndef main(use_sklearn_feature_extraction, num_most_informative, plot_metrics):\n stop_words = load_stopwords(stopwords_file)\n\n # Read the dataset in and split it into training documents/labels (X) and test documents/labels (y)\n X_train, X_test, y_train, y_test = split_training_set(*read_and_clean_lines(input_speechfile))\n \n if use_sklearn_feature_extraction:\n # Use sklearn CountVectorizer's built-in tokenization to get unigrams and bigrams as features\n X_features_train, training_vectorizer = convert_text_into_features(X_train, stop_words, \"word\", range=(1,2))\n X_test_documents = X_test\n else:\n # Roll your own feature extraction.\n # Call convert_lines_to_feature_strings() to get your features\n # as a whitespace-separated string that will now represent the document.\n print(\"Creating feature strings for training data\")\n X_train_feature_strings = convert_lines_to_feature_strings(X_train, stop_words)\n print(\"Creating feature strings for test data\")\n X_test_documents = convert_lines_to_feature_strings(X_test, stop_words)\n \n # Call CountVectorizer with whitespace-based tokenization as the analyzer, so that it uses exactly your features,\n # but without doing any of its own analysis/feature-extraction.\n X_features_train, training_vectorizer = convert_text_into_features(X_train_feature_strings, stop_words, whitespace_tokenizer)\n \n # Create a logistic regression classifier trained on the featurized training data\n lr_classifier = LogisticRegression(solver='liblinear')\n lr_classifier.fit(X_features_train, y_train)\n\n # Show which features have the highest-value logistic regression coefficients\n print(\"Most informative features\")\n most_informative_features(training_vectorizer, lr_classifier, num_most_informative)\n\n # Apply the \"vectorizer\" created using the training data to the test documents, to create testset feature vectors\n X_test_features = training_vectorizer.transform(X_test_documents)\n\n # Classify the test data and see how well you perform\n # For various evaluation scores see https://scikit-learn.org/stable/modules/model_evaluation.html\n print(\"Classifying test data\")\n predicted_labels = lr_classifier.predict(X_test_features)\n print('Accuracy = {}'.format(metrics.accuracy_score(predicted_labels, y_test)))\n for label in ['Republican', 'Democrat']:\n print('Precision for label {} = {}'.format(label, 
metrics.precision_score(predicted_labels, y_test, pos_label=label)))\n print('Recall for label {} = {}'.format(label, metrics.recall_score(predicted_labels, y_test, pos_label=label)))\n \n if plot_metrics:\n print(\"Generating plots\")\n metrics.plot_confusion_matrix(lr_classifier, X_test_features, y_test, normalize='true')\n metrics.plot_roc_curve(lr_classifier, X_test_features, y_test)\n plt.show()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Options for running this script')\n parser.add_argument('--use_sklearn_features', default=False, action='store_true', help=\"Use sklearn's feature extraction\")\n parser.add_argument('--plot_metrics', default=False, action='store_true', help=\"Generate figures for evaluation\")\n parser.add_argument('--num_most_informative', default=10, action='store', help=\"Number of most-informative features to show\")\n args = parser.parse_args()\n main(args.use_sklearn_features, int(args.num_most_informative), args.plot_metrics)\n\n", "id": "382014", "language": "Python", "matching_score": 10.179485321044922, "max_stars_count": 0, "path": "assignment2/assignment.py" }, { "content": "################################################################\n# Imports\n################################################################\n\n# System imports\nimport os\nimport sys\nimport codecs\nimport argparse\nimport json\nimport gzip\nimport re\nimport string\nfrom collections import Counter\n\n# Helper imports\n# - Verbose info for debugging\nfrom traceback_with_variables import activate_by_import\n# - Progress bars\nfrom tqdm import tqdm # Runtime progress bar\n\n# Machine learning imports\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# NLP imports\nfrom spacy.lang.en import English\n\n# Other imports\nfrom assignment1_fns import *\n\n################################################################\n# Functions from previous assignments\n################################################################\n\n# Read from speeches file, returning parallel lists of documents and their labels\ndef read_and_clean_lines(infile, chamber='Senate'):\n print(\"\\nReading and cleaning text from {}\".format(infile))\n lines = []\n parties = []\n with gzip.open(infile,'rt') as f:\n for line in tqdm(f):\n j = json.loads(line)\n if (j['chamber'] == chamber):\n party = j['party']\n text = j['text']\n clean_text = re.sub(r\"\\s+\",\" \",text)\n lines.append(clean_text)\n parties.append(party)\n print(\"Read {} documents\".format(len(lines)))\n print(\"Read {} labels\".format(len(parties)))\n return lines, parties\n\n\n# Read a set of stoplist words from filename, assuming it contains one word per line\n# Return a python Set data structure (https://www.w3schools.com/python/python_sets.asp)\ndef load_stopwords(filename):\n stopwords = []\n with codecs.open(filename, 'r', encoding='ascii', errors='ignore') as fp:\n stopwords = fp.read().split('\\n')\n return set(stopwords)\n\n\n# Call sklearn's train_test_split function to split the dataset into training items/labels\n# and test items/labels. 
See https://realpython.com/train-test-split-python-data/\n# (or Google train_test_split) for how to make this call.\n#\n# Note that the train_test_split function returns four sequences: X_train, X_test, y_train, y_test\n# X_train and y_train are the training items and labels, respectively\n# X_test and y_test are the test items and labels, respectively\n#\n# This function should return those four values\ndef split_training_set(lines, labels, test_size=0.3, random_seed=42):\n X_train, X_test, y_train, y_test = train_test_split(lines, labels, test_size=test_size, random_state=random_seed, stratify=labels)\n print(\"Training set label counts: {}\".format(Counter(y_train)))\n print(\"Test set label counts: {}\".format(Counter(y_test)))\n return X_train, X_test, y_train, y_test\n\n# Converting text into features.\n# Inputs:\n# X - a sequence of raw text strings to be processed\n# analyzefn - either built-in (see CountVectorizer documentation), or a function we provide from strings to feature-lists\n#\n# Arguments used by the words analyzer\n# stopwords - set of stopwords (used by \"word\" analyzer\")\n# lowercase - true if normalizing by lowercasing\n# ngram_range - (N,M) for using ngrams of sizes N up to M as features, e.g. (1,2) for unigrams and bigrams\n#\n# Outputs:\n# X_features - corresponding feature vector for each raw text item in X\n# training_vectorizer - vectorizer object that can now be applied to some new X', e.g. containing test texts\n# \n# You can find documentation at https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html\n# and there's a nice, readable discussion at https://medium.com/swlh/understanding-count-vectorizer-5dd71530c1b\n#\ndef convert_text_into_features(X, stopwords_arg, analyzefn=\"word\", range=(1,2)):\n training_vectorizer = CountVectorizer(stop_words=stopwords_arg,\n analyzer=analyzefn,\n lowercase=True,\n ngram_range=range)\n X_features = training_vectorizer.fit_transform(X)\n return X_features, training_vectorizer\n\n# Input:\n# lines - a raw text corpus, where each element in the list is a string\n# stopwords - a set of strings that are stopwords\n# remove_stopword_bigrams = True or False\n#\n# Output: a corresponding list converting the raw strings to space-separated features\n#\n# The features extracted should include non-stopword, non-punctuation unigrams,\n# plus the bigram features that were counted in collect_bigram_counts from the previous assignment\n# represented as underscore_separated tokens.\n# Example:\n# Input: [\"This is Remy's dinner.\",\n# \"Remy will eat it.\"]\n# Output: [\"remy 's dinner remy_'s 's_dinner\",\n# \"remy eat\"]\ndef convert_lines_to_feature_strings(lines, stopwords, remove_stopword_bigrams=True):\n\n print(\" Converting from raw text to unigram and bigram features\")\n if remove_stopword_bigrams:\n print(\" Includes filtering stopword bigrams\")\n \n print(\" Initializing\")\n nlp = English(parser=False)\n all_features = []\n print(\" Iterating through documents extracting unigram and bigram features\")\n for line in tqdm(lines):\n\n # Get spacy tokenization and normalize the tokens\n spacy_analysis = nlp(line)\n spacy_tokens = [token.orth_ for token in spacy_analysis]\n normalized_tokens = normalize_tokens(spacy_tokens)\n\n # Collect unigram tokens as features\n # Exclude unigrams that are stopwords or are punctuation strings (e.g. '.' 
or ',')\n unigrams = [token for token in normalized_tokens\n if token not in stopwords and token not in string.punctuation]\n\n # Collect string bigram tokens as features\n bigrams = ngrams(normalized_tokens, 2) \n bigrams = filter_punctuation_bigrams(bigrams)\n if remove_stopword_bigrams:\n bigrams = filter_stopword_bigrams(bigrams, stopwords)\n bigram_tokens = [\"_\".join(bigram) for bigram in bigrams]\n\n # Conjoin the feature lists and turn into a space-separated string of features.\n # E.g. if unigrams is ['coffee', 'cup'] and bigrams is ['coffee_cup', 'white_house']\n # then feature_string should be 'coffee cup coffee_cup white_house'\n\n # TO DO: replace this line with your code\n feature_list = unigrams + bigram_tokens\n feature_string = \" \".join(feature_list)\n\n # Add this feature string to the output\n all_features.append(feature_string)\n\n\n # print(\" Feature string for first document: '{}'\".format(all_features[0]))\n \n return all_features\n\n# For both classes, print the n most heavily weighted features in this classifier.\ndef most_informative_features(vectorizer, classifier, n=20):\n # Adapted from https://stackoverflow.com/questions/11116697/how-to-get-most-informative-features-for-scikit-learn-classifiers#11116960\n feature_names = vectorizer.get_feature_names()\n coefs_with_features = sorted(zip(classifier.coef_[0], feature_names))\n top = zip(coefs_with_features[:n], coefs_with_features[:-(n + 1):-1])\n for (coef_1, feature_1), (coef_2, feature_2) in top:\n print(\"\\t%.4f\\t%-15s\\t\\t%.4f\\t%-15s\" % (coef_1, feature_1, coef_2, feature_2))\n\n# Split on whitespace, e.g. \"a b_c d\" returns tokens ['a','b_c','d']\ndef whitespace_tokenizer(line):\n return line.split()\n\n\n", "id": "7390852", "language": "Python", "matching_score": 5.639723300933838, "max_stars_count": 0, "path": "assignment4/assignment4_fns.py" }, { "content": "import sys\nimport numpy as np\nimport argparse\n\n\n# Import functions from other assignments\nfrom assignment4_fns import *\n \n# Convenient for debugging but feel free to comment out\nfrom traceback_with_variables import activate_by_import\n\n\n###################################################################################################################\n# Adaptation of the main logistic regression experiment from Assignment 2, to use cross-validation.\n###################################################################################################################\ndef run_experiment(input_speechfile, stopwords_file, use_sklearn_feature_extraction, test_size, num_folds, stratify, random_seed):\n\n # Load stopwords\n stop_words = load_stopwords(stopwords_file)\n\n # Read the dataset in and split it into training documents/labels (X) and test documents/labels (y)\n # Note that for this assignment, we are then going to ignore X_test and y_test.\n # This simulates real-world experimentation where one might successively improve the system\n # using cross-validation on the training set, e.g. 
trying different features or doing hyperparameter tuning,\n # while being careful not to test on the test data until the end.\n X, y = read_and_clean_lines(input_speechfile)\n X_train, X_test, y_train, y_test = split_training_set(X, y, test_size)\n\n # Feature extraction is the same as done previously\n if use_sklearn_feature_extraction:\n # Use sklearn CountVectorizer's built-in tokenization to get unigrams and bigrams as features\n X_features_train, training_vectorizer = convert_text_into_features(X_train, stop_words, \"word\", range=(1,2))\n X_test_documents = X_test\n else:\n # Roll your own feature extraction.\n # Call convert_lines_to_feature_strings() to get your features\n # as a whitespace-separated string that will now represent the document.\n print(\"Creating feature strings for training data\")\n X_train_feature_strings = convert_lines_to_feature_strings(X_train, stop_words)\n\n # Commenting out feature extraction for final test data, since we're not going to use it\n # (See earlier comment about X_test and y_test.)\n # print(\"Creating feature strings for final test data\")\n # X_test_documents = convert_lines_to_feature_strings(X_test, stop_words)\n \n # Call CountVectorizer with whitespace-based tokenization as the analyzer, so that it uses exactly your features,\n # but without doing any of its own analysis/feature-extraction.\n # Again, this is the same as previously\n X_features_train, training_vectorizer = convert_text_into_features(X_train_feature_strings, stop_words, whitespace_tokenizer)\n\n # Create a k-fold cross validation object.\n # Use the shuffle parameter (shuffle before splitting) and pass in random_seed.\n # Use either Kfold or StratifiedKFold - the latter makes sure the ratio of labels in each fold is the same as the original data.\n # See https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html\n # and https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html\n # Note that if you use stratified split() it needs to include y_train (i.e. labels) so it knows the proportion of labels in the original training set.\n print(\"Doing cross-validation splitting with stratify={}. 
Showing 10 indexes for items in train/test splits in {} folds.\".format(stratify,num_folds))\n kfold = None # Replace this with appropriate function call to create cross-validation object\n \n # Create the classifier object\n classifier = LogisticRegression(solver='liblinear')\n\n # Do cross-validation and look at mean/stdev of scores by calling cross_val_score()\n # See https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html\n # Nice tutorial: https://machinelearningmastery.com/how-to-configure-k-fold-cross-validation/\n # The cross_val_score expects classifier, feature-vectorized docs, labels, evaluation score to use ('accuracy'), and the kfold object\n # Use accuracy, but for other evaluation scores you can see https://scikit-learn.org/stable/modules/model_evaluation.html\n print(\"Running {}-fold cross-validation on {}% of the data, still holding out the rest for final testing.\".format(num_folds,(1-test_size)*100))\n accuracy_scores = [0]*num_folds # Replace this line with your call to cross_val_score()\n print(\"accuracy scores = {}, mean = {}, stdev = {}\".format(accuracy_scores, np.mean(accuracy_scores), np.std(accuracy_scores)))\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser(description='Classification with cross validation')\n parser.add_argument('--use_sklearn_features', default=False, action='store_true', help=\"Use sklearn's feature extraction\")\n parser.add_argument('--test_size', default=0.3, action='store', help=\"Proportion (from 0 to 1) of items held out for final testing\")\n parser.add_argument('--num_folds', default=5, action='store', help=\"Number of folds for cross-validation (use 2 for just a train/test split)\")\n parser.add_argument('--stratify', default=False, action='store_true', help=\"Use stratified rather than plain cross-validation\")\n parser.add_argument('--seed', default=13, action='store', help=\"Random seed\")\n parser.add_argument('--stopwords', default=\"./mallet_en_stoplist.txt\", action='store', help=\"Stopwords file\")\n parser.add_argument('--infile', default=None, action='store', help=\"Input jsonlines.gz file\")\n args = parser.parse_args()\n if args.infile == None:\n print(\"Argument --infile is required.\")\n sys.exit(1)\n run_experiment(args.infile,\n args.stopwords,\n args.use_sklearn_features,\n float(args.test_size),\n int(args.num_folds),\n args.stratify,\n int(args.seed))\n\n", "id": "688508", "language": "Python", "matching_score": 3.0890729427337646, "max_stars_count": 0, "path": "assignment4/assignment4.py" }, { "content": "import os\nimport sys\nimport numpy as np\nimport json\nimport csv, re\nimport string\nfrom tqdm import tqdm\nimport codecs\nimport argparse\nfrom collections import Counter\nimport spacy\nfrom assgn2_fns import *\n\n\n# Convenient for debugging but feel free to comment out\nfrom traceback_with_variables import activate_by_import\n\n# Hard-wired variables\ninput_speechfile = \"./speeches2020_jan_to_jun.jsonl.gz\"\n\n# Input: dictionary mapping items to numbers\n# Prints n items, either the top n or the bottom n\n# depending on the order.\ndef print_sorted_items(dict, n=10, order='ascending'):\n if order == 'descending':\n multiplier = -1\n else:\n multiplier = 1\n ranked = sorted(dict.items(), key=lambda x: x[1] * multiplier)\n for key, value in ranked[:n] :\n print(key, value)\n\n \ndef main(chamber, party, verb, maxlines, num_to_show):\n\n # Some initializations\n lines_processed = 0\n counts = Counter()\n\n # Read in the speeches that we want to 
analyze\n print(\"Reading {}\".format(input_speechfile))\n lines, parties = read_and_clean_lines(input_speechfile, chamber)\n\n # Make sure maxlines is no longer than the corpus itself\n if maxlines > len(lines):\n maxlines = len(lines)\n\n # Initialize spacy\n print(\"Initializing spaCy\")\n nlp = spacy.load('en_core_web_sm')\n\n # Main loop\n # If you're not familiar with this use of 'for x,y in zip(list1,list2)'\n # see https://stackoverflow.com/questions/21098350/python-iterate-over-two-lists-simultaneously\n print(\"Iterating through {} documents\".format(maxlines))\n for line, line_party in tqdm( zip(lines,parties), total=maxlines ):\n\n # Stop after maxlines lines have been processed\n if lines_processed >= maxlines:\n break\n \n # Skip this speech if it's not the party we're interested in\n if line_party != party:\n continue\n \n # Do the NLP analysis using spacy\n # There are many tutorials out there for things you can do with spacy;\n # the one at https://www.machinelearningplus.com/spacy-tutorial-nlp/ is particularly good.\n analysis = nlp(line)\n \n # For each token, see if it's the object of the verb we're interested in\n # and if so, increment its count (normalizing using its lowercased lemma)\n for token in analysis:\n if token.head.lemma_ == verb and token.dep_ == 'dobj':\n counts[token.lemma_.lower()] += 1\n if False: # Set to True for debugging to see info spacy gives you for dependencies\n print(\"{3}/{4} --{2}--> {0}/{1}\".format(\n token.text, # Text of this token\n token.tag_, # POS tag for this token\n token.dep_, # Label for dependency link\n token.head.text, # Head that this token modifies\n token.head.tag_)) # POS tag for the head this token modifies\n\n \n lines_processed += 1\n\n print(\"For {}s in the {}, top direct objects for the verb '{}'\".format(party, chamber, verb))\n print_sorted_items(counts, num_to_show, 'descending')\n\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Find most frequent objects for a specified verb in political speeches')\n parser.add_argument('--chamber', default='Senate', action='store', help=\"Senate or House\")\n parser.add_argument('--party', default='Republican', action='store', help=\"Democrat or Republican\")\n parser.add_argument('--verb', default='eat', action='store', help=\"verb of interest\")\n parser.add_argument('--maxlines', default=1000000, action='store', help=\"maximum number of speeches to analyze\")\n parser.add_argument('--num_to_show', default=20, action='store', help=\"top-n items to show\")\n args = parser.parse_args()\n main(args.chamber, args.party, args.verb, int(args.maxlines), int(args.num_to_show))\n", "id": "3398877", "language": "Python", "matching_score": 2.609539270401001, "max_stars_count": 0, "path": "assignment3/assignment3.py" }, { "content": "import gzip\nimport json\nimport re\nfrom tqdm import tqdm\n\n# Input: gzip'd jsonlines file containing Congressional speeches with (at least)\n# elements for chamber (House or Senate), party, and text of the speech.\n#\n# For each input line,\n# Grabs the json using json.loads()\n# If the chamber is the one we're interested in:\n# Extracts the party and text elements\n# Cleans the line of text by replacing runs of whitespace (\\s+) with a single space\n# Adds cleaned line to list of lines\n# Adds party for this speech to list of parties\n# At end, returns two values: lines and parties\n# Note that these are parallel lists, i.e. 
parties[i] is the part for the legislator\n# who gave the speech in lines[i].\ndef read_and_clean_lines(infile, chamber='Senate'):\n print(\"\\nReading and cleaning text from {}\".format(infile))\n lines = []\n parties = []\n with gzip.open(infile,'rt') as f:\n for line in tqdm(f):\n j = json.loads(line)\n if (j['chamber'] == chamber):\n party = j['party']\n text = j['text']\n clean_text = re.sub(r\"\\s+\",\" \",text)\n lines.append(clean_text)\n parties.append(party)\n print(\"Read {} documents\".format(len(lines)))\n print(\"Read {} labels\".format(len(parties)))\n return lines, parties\n", "id": "4670415", "language": "Python", "matching_score": 1.2570194005966187, "max_stars_count": 0, "path": "assignment3/assgn2_fns.py" } ]
1.958559
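The assignment files in the JulieLenzer entry above convert each document to a whitespace-separated string of unigram and underscore-joined bigram features, then feed those strings to CountVectorizer with a whitespace-only tokenizer so the vectorizer uses exactly the hand-built features, and finally fit a logistic regression classifier. A compressed sketch of that pipeline on a hypothetical two-document corpus (the labels and feature strings below are made up for illustration):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

# Pre-tokenized documents: unigrams plus underscore-joined bigrams,
# mirroring the "feature string" representation built in assignment.py
docs = ["remy 's dinner remy_'s 's_dinner",
        "remy eat remy_eat"]
labels = ["Democrat", "Republican"]

# Whitespace-only analyzer so CountVectorizer performs no analysis of its own
vectorizer = CountVectorizer(analyzer=lambda line: line.split())
X = vectorizer.fit_transform(docs)

clf = LogisticRegression(solver="liblinear").fit(X, labels)
print(clf.predict(vectorizer.transform(["remy eat"])))

Passing a callable as the analyzer is the same trick the assignment's whitespace_tokenizer plays: feature extraction happens once, up front, and the vectorizer only counts.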
vpadillar
[ { "content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\n\n\nclass ReporteProducto(models.Model):\n nombre = models.CharField(null=True, blank=True, max_length=100)\n inicio = models.DateField()\n fin = models.DateField()\n tipo = models.IntegerField(\n choices=[(1, 'Diaria'), (2, 'Semana'), (3, 'Mensual'), (4, 'Anual')])\n\n def __unicode__(self):\n i = 0\n men = ''\n while i < 10 - len(str(self.pk)):\n men = men + '0'\n i = i+1\n # end ford\n return '%s%d' % (men, self.pk)\n # end def\n\n def __str__(self):\n i = 0\n men = ''\n while i < 10 - len(str(self.pk)):\n men = men + '0'\n i = i+1\n # end ford\n return '%s%d' % (men, self.pk)\n # end def\n\n class Meta:\n verbose_name = \"Reporte Producto\"\n verbose_name_plural = \"Reporte Productos\"\n # end class\n# end class\n", "id": "7581118", "language": "Python", "matching_score": 2.541208267211914, "max_stars_count": 0, "path": "reporte/models.py" }, { "content": "from django.contrib import admin\nimport models\nfrom daterange_filter.filter import DateRangeFilter\nfrom django.utils.html import format_html\n\n# Register your models here.\n\n\nclass ReporteProductoAdmin(admin.ModelAdmin):\n list_display = ['id_reporte', 'nombre', 'inicio', 'fin', 'accion_reporte']\n search_fields = ['id ', 'nombre', 'inicio', 'fin']\n list_display_links = ('id_reporte',)\n\n def id_reporte(self, obj):\n i = 0\n men = ''\n while i < 10 - len(str(obj.pk)):\n men = men + '0'\n i = i+1\n # end ford\n return '%s%d' % (men, obj.pk)\n # end def\n\n class Media:\n js = ('/static/reporte/js/jquery-3.1.1.js', '/static/reporte/js/reporte.js',)\n # end class\n\n def accion_reporte(self, obj):\n return format_html(\"<a href='{0}' class='generar addlink'>Imprimir</a>\", obj.id)\n # end def\n id_reporte.allow_tags = True\n id_reporte.short_description = 'Reporte Id'\n accion_reporte.allow_tags = True\n accion_reporte.short_description = 'Generar'\n# end class\nadmin.site.register(models.ReporteProducto, ReporteProductoAdmin)\n", "id": "619133", "language": "Python", "matching_score": 0.9033781886100769, "max_stars_count": 0, "path": "reporte/admin.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import HttpResponse\nfrom datetime import date, timedelta, datetime\nfrom django.views.generic import View\nfrom django.db import connection\nimport csv\nimport models\nfrom datetime import date\n# Create your views here.\n\n\nclass Reporte(View):\n\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n # do something\n return super(Reporte, self).dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n id = request.GET.get('id', 0)\n orden = models.ReporteProducto.objects.filter(id=id).first()\n response = HttpResponse(content_type='text/csv')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"Reporte Empleados.csv\"'\n writer = csv.writer(response)\n writer.writerow(['Express del norte'.encode('utf-8')])\n lista = list()\n if orden:\n writer.writerow(['Fecha de inicio para el reporte'.encode('utf-8'), str(orden.inicio).encode('utf-8')])\n writer.writerow(['Fecha de fin para el reporte'.encode('utf-8'), str(orden.fin).encode('utf-8')])\n if orden.tipo == 3:\n writer.writerow(['Reporte Mensual'.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n 
lista.append(u'Mes'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'cantidad'.encode('utf-8'))\n lista.append(u'Precio'.encode('utf-8'))\n lista.append(u'Venta'.encode('utf-8'))\n writer.writerow(lista)\n sql = \"select * from (select year_,id_mes,nom_mes,name,count(name) as total,\"\n sql = sql + \"sum(case when cantidad is null then 0 else cantidad end) as articulos,\"\n sql = sql + \"sum(case when cantidad is not null and producto_precio is not null then producto_precio*cantidad else 0 end) as venta\"\n sql = sql + \",producto_precio from informes_total where date(fecha)>= date('\"+str(orden.inicio)+\"') and date(fecha)<= date('\"+str(orden.fin)+\"') group by year_,id_mes,nom_mes,name) as t\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchall()\n r = 0\n while r < len(row):\n li = list()\n print row[r]\n li.append(row[r][0])\n li.append((row[r][2]).encode('utf-8'))\n li.append((row[r][3]).encode('utf-8'))\n li.append(row[r][5])\n li.append(row[r][7])\n li.append(row[r][6])\n writer.writerow(li)\n r = r + 1\n # end for\n return response\n elif orden.tipo == 4:\n writer.writerow(['Reporte Anual'.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n lista.append(u'Total articulos'.encode('utf-8'))\n lista.append(u'Cantidad de Productos'.encode('utf-8'))\n lista.append(u'Total Venta'.encode('utf-8'))\n writer.writerow(lista)\n sql = \"select * from (select year_,id_mes,nom_mes,name,count(name) as total,\"\n sql = sql + \"sum(case when cantidad is null then 0 else cantidad end) as articulos,\"\n sql = sql + \"sum(case when cantidad is not null and producto_precio is not null then producto_precio*cantidad else 0 end) as venta\"\n sql = sql + \",producto_precio from informes_total where date(fecha)>= date('\"+str(orden.inicio)+\"') and date(fecha)<= date('\"+str(orden.fin)+\"') group by year_) as t\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchall()\n r = 0\n while r < len(row):\n li = list()\n li.append(row[r][0])\n li.append(row[r][5])\n li.append(row[r][6])\n writer.writerow(li)\n r = r + 1\n # end for\n return response\n elif orden.tipo == 2:\n writer.writerow(['Reporte Semana'.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n lista.append(u'Mes'.encode('utf-8'))\n lista.append(u'Dia'.encode('utf-8'))\n lista.append(u'Semana'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'Total articulos'.encode('utf-8'))\n lista.append(u'Valor unitario'.encode('utf-8'))\n lista.append(u'Total Venta'.encode('utf-8'))\n writer.writerow(lista)\n sql = \"select * from (select year_,id_mes,dia_mes,nom_mes,name,count(name) as total,\"\n sql = sql + \"sum(case when cantidad is null then 0 else cantidad end) as articulos,\"\n sql = sql + \"sum(case when cantidad is not null and producto_precio is not null then producto_precio*cantidad else 0 end) as venta,producto_precio,strftime('%W',fecha) as semana,fecha\"\n sql = sql + \",producto_precio from informes_total where date(fecha)>= date('\"+str(orden.inicio)+\"') and date(fecha)<= date('\"+str(orden.fin)+\"') group by year_,id_mes,nom_mes,strftime('%W',fecha) ,name) as t\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchall()\n r = 0\n while r < len(row):\n li = list()\n li.append(row[r][0])\n li.append(str(row[r][3]).encode('utf-8'))\n li.append(row[r][2])\n li.append(row[r][9])\n li.append(str(row[r][4]).encode('utf-8'))\n li.append(row[r][6])\n li.append(row[r][8])\n li.append(row[r][7])\n 
writer.writerow(li)\n r = r + 1\n # end for\n return response\n elif orden.tipo == 1:\n writer.writerow(['Reporte Dias'.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n lista.append(u'Mes'.encode('utf-8'))\n lista.append(u'Dia'.encode('utf-8'))\n lista.append(u'Fecha'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'Total articulos'.encode('utf-8'))\n lista.append(u'Valor unitario'.encode('utf-8'))\n lista.append(u'Total Venta'.encode('utf-8'))\n writer.writerow(lista)\n sql = \"select * from (select year_,id_mes,dia_mes,nom_mes,name,count(name) as total,\"\n sql = sql + \"sum(case when cantidad is null then 0 else cantidad end) as articulos,\"\n sql = sql + \"sum(case when cantidad is not null and producto_precio is not null then producto_precio*cantidad else 0 end) as venta,producto_precio,strftime('%W',fecha) as semana,date(fecha)\"\n sql = sql + \",producto_precio from informes_total where date(fecha)>= date('\"+str(orden.inicio)+\"') and date(fecha)<= date('\"+str(orden.fin)+\"') group by year_,id_mes,nom_mes,fecha ,name) as t\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchall()\n r = 0\n while r < len(row):\n li = list()\n li.append(row[r][0])\n li.append(str(row[r][3]).encode('utf-8'))\n li.append(row[r][2])\n li.append(row[r][10])\n li.append(str(row[r][4]).encode('utf-8'))\n li.append(row[r][6])\n li.append(row[r][8])\n li.append(row[r][7])\n writer.writerow(li)\n r = r + 1\n # end for\n return response\n # end if\n # end if\n # end for\n return render(request, 'reporte/reporte.html', {})\n # end class\n# end class\n\n\nclass ReporteMensual(View):\n\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n # do something\n return super(ReporteMensual, self).dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return render(request, 'reporte/reporte.html', {})\n # end class\n# end class\n\n\nclass ExcelMensual(View):\n\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n # do something\n return super(ExcelMensual, self).dispatch(*args, **kwargs)\n # end def\n\n def get(self, request):\n print request.GET\n id_emp=request.GET.get('id', '0')\n ini=request.GET.get('ini', '2015-01-01')\n fin=request.GET.get('fin', '%s-%s-%s' %\n (date.today().year, date.today().month, date.today().day))\n f1=ini.split('-')\n f2=fin.split('-')\n d1='%s-%s-%s' % (f1[2], f1[0], f1[1])\n d2='%s-%s-%s' % (f2[2], f2[0], f2[1])\n estado=request.GET.get('estado', False)\n r=0\n lista=list()\n response=HttpResponse(content_type='text/csv')\n response[\n 'Content-Disposition']='attachment; filename=\"Reporte Empleados.csv\"'\n writer=csv.writer(response)\n writer.writerow(['Express del norte'.encode('utf-8')])\n writer.writerow(['Fecha de inicio para el reporte'.encode('utf-8'), d1.encode('utf-8'), ''.encode(\n 'utf-8'), ''.encode('utf-8'), 'Fecha de fin para el reporte'.encode('utf-8'), d2.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n lista.append(u'Mes'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'Vendidos'.encode('utf-8'))\n lista.append(u'Venta'.encode('utf-8'))\n writer.writerow(lista)\n sql='''select * from (select year_,id,nombre,name,count(name) as total,\n sum(case when cantidad is null then 0 else cantidad end) as articulos,\n sum(case when cantidad is not null and producto_precio is not null then\n producto_precio*cantidad else 0 end) as venta\n from informes_total group by 
year_,id,nombre,name) as t\n '''\n cursor=connection.cursor()\n cursor.execute(sql)\n row=cursor.fetchall()\n cursor2=connection.cursor()\n r=0\n while r < len(row):\n li=list()\n print row[r]\n li.append(row[r][0])\n li.append((row[r][2]).encode('utf-8'))\n li.append((row[r][3]).encode('utf-8'))\n li.append(row[r][5])\n li.append(row[r][6])\n writer.writerow(li)\n r=r + 1\n # end for\n return response\n # end class\n# end class\n\n\nclass ExcelReporteDia(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n # do something\n return super(ExcelReporteDia, self).dispatch(*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n response = HttpResponse(content_type='text/csv')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"Reporte Empleados.csv\"'\n writer = csv.writer(response)\n writer.writerow(['Express del norte'.encode('utf-8')])\n lista = list()\n writer.writerow(['Reporte del Dias'.encode('utf-8')])\n lista.append(u'Año'.encode('utf-8'))\n lista.append(u'Mes'.encode('utf-8'))\n lista.append(u'Dia'.encode('utf-8'))\n lista.append(u'Fecha'.encode('utf-8'))\n lista.append(u'Producto'.encode('utf-8'))\n lista.append(u'Total articulos'.encode('utf-8'))\n lista.append(u'Valor unitario'.encode('utf-8'))\n lista.append(u'Total Venta'.encode('utf-8'))\n writer.writerow(lista)\n d = date.today()\n f = '%d-%d-%d' % (d.year, d.month, d.day)\n sql = \"select * from (select year_,id_mes,dia_mes,nom_mes,name,count(name) as total,\"\n sql = sql + \"sum(case when cantidad is null then 0 else cantidad end) as articulos,\"\n sql = sql + \"sum(case when cantidad is not null and producto_precio is not null then producto_precio*cantidad else 0 end) as venta,producto_precio,strftime('%W',fecha) as semana,date(fecha)\"\n sql = sql + \",producto_precio from informes_total where date(fecha)>= date('\"+str(f)+\"') and date(fecha)<= date('\"+str(f)+\"') group by year_,id_mes,nom_mes,fecha ,name) as t\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchall()\n r = 0\n while r < len(row):\n li = list()\n li.append(row[r][0])\n li.append(str(row[r][3]).encode('utf-8'))\n li.append(row[r][2])\n li.append(row[r][10])\n li.append(str(row[r][4]).encode('utf-8'))\n li.append(row[r][6])\n li.append(row[r][8])\n li.append(row[r][7])\n writer.writerow(li)\n r = r + 1\n # end for\n return response\n # end class\n# end class\n", "id": "2539647", "language": "Python", "matching_score": 1.7755169868469238, "max_stars_count": 0, "path": "reporte/views.py" }, { "content": "from django.conf.urls import url\nimport views\n\n\nurlpatterns = [\n url(r'^get/reporte/$', views.Reporte.as_view(), name='reporte_producto'),\n url(r'^get/reporte/mensual/$', views.ReporteMensual.as_view(), name='reporte_producto_mensual'),\n url(r'^get/reporte/meses/$', views.ExcelMensual.as_view(), name='reporte_producto_mes'),\n url(r'^get/reporte/dia/$', views.ExcelReporteDia.as_view(), name='reporte_producto_dia'),\n]\n", "id": "4425059", "language": "Python", "matching_score": 0.7310070395469666, "max_stars_count": 0, "path": "reporte/urls.py" } ]
1.339448
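The report views in the vpadillar entry above all follow the same Django pattern: build an HttpResponse with a text/csv content type, set Content-Disposition so the browser downloads it as an attachment, and write rows straight into the response with csv.writer. A minimal sketch of that pattern (the view name, header row, and data row are illustrative, not from the repo):

import csv
from django.http import HttpResponse

def report_csv(request):
    # HttpResponse is file-like, so csv.writer can write rows to it directly
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="reporte.csv"'
    writer = csv.writer(response)
    writer.writerow(["Producto", "Cantidad", "Venta"])
    writer.writerow(["cafe", 3, 12.50])
    return response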
zihaooo9
[ { "content": "from django.shortcuts import render, redirect\n\n\ndef index(request):\n if request.user.is_authenticated:\n return redirect(\"dashboard\")\n else:\n return render(request, 'pages/index.html')\n", "id": "11158396", "language": "Python", "matching_score": 1.3273366689682007, "max_stars_count": 0, "path": "pages/views.py" }, { "content": "import datetime\n\nimport cloudinary\nimport requests\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.password_validation import (\n password_validators_help_texts, validate_password)\nfrom django.contrib.auth.tokens import PasswordResetTokenGenerator\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import EmailMessage\nfrom django.db.models import Sum\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\nfrom environs import Env\nfrom notifications.signals import notify\n\nfrom .models import (Data, Deliveries, Group, Shipping, Transaction,\n UserExtension)\nfrom .utils import account_activation_token\n\n# Set up environ\nenv = Env()\nenv.read_env()\n\n# Cloudinary config\ncloudinary.config(\n cloud_name=env.str('CLOUD_NAME'),\n api_key=env.str('API_KEY'),\n api_secret=env.str('API_SECRET'),\n secure=True\n)\n\n\ndef logout(request):\n auth.logout(request)\n return redirect(\"index\")\n\n\n@login_required(login_url='/accounts/login')\ndef dashboard(request):\n mygroups = Group.objects.filter(members__contains=[request.user.username])\n user = UserExtension.objects.filter(user=request.user)\n platform = list()\n for i in range(len(mygroups)):\n g = Shipping.objects.filter(group_name=mygroups[i].group_name)\n platform.append(g[0].platform)\n deliveries = Deliveries.objects.filter(user_id=request.user.id)\n expense = Transaction.objects.filter(user_id=request.user.id)\n\n if len(user) == 0:\n UserExtension.objects.create(user=User.objects.get(username=request.user.username), first_time_user=True)\n\n first_time = True\n if len(user) != 0 and not user[0].first_time_user:\n first_time = False\n\n platforms = UserExtension.objects.get(user=request.user).platforms\n\n context = {\n 'deliveries': deliveries if len(deliveries) != 0 else None,\n 'mygroups': zip(mygroups, platform) if len(mygroups) != 0 else None,\n 'first_time': first_time,\n 'transactions': True if len(expense) != 0 else False,\n 'platforms': platforms,\n }\n return render(request, \"accounts/dashboard.html\", context=context)\n\n\ndef register(request):\n if request.user.is_authenticated:\n return redirect(\"dashboard\")\n else:\n if request.method == \"POST\":\n # Get form values\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n\n try:\n validate_password(password)\n except:\n messages.error(request, password_validators_help_texts(\n password_validators=None))\n return redirect(\"register\")\n\n # Check email\n if User.objects.filter(email=email).exists():\n messages.error(\n request, \"Account already created, please login instead.\")\n return redirect(\"register\")\n elif User.objects.filter(username=username).exists():\n messages.error(request, \"Username has already been taken.\")\n return redirect(\"register\")\n else:\n user = User.objects.create_user(\n 
username=username,\n email=email,\n password=password,\n )\n user.is_active = False\n user.save()\n current_site = get_current_site(request).domain\n email_body = {\n 'user': user,\n 'domain': current_site,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n }\n\n link = reverse('activate', kwargs={\n 'uidb64': email_body['uid'], 'token': email_body['token']})\n\n email_subject = 'Activate your account'\n\n activate_url = 'http://shopbud.herokuapp.com'+link\n\n email = EmailMessage(\n email_subject,\n 'Hi ' + user.username +\n ', Please click the link below to activate your account \\n' + activate_url,\n '<EMAIL>',\n [email],\n )\n email.send(fail_silently=False)\n\n UserExtension.objects.create(user=user, first_time_user=True)\n\n messages.success(\n request, 'An email has been sent to you to activate your account')\n return redirect(\"register\")\n else:\n return render(request, \"accounts/register.html\")\n\n\ndef activate(request, uidb64, token):\n try:\n id = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=id)\n\n if not account_activation_token.check_token(user, token):\n messages.error(request, 'User already active')\n return redirect('login')\n\n if user.is_active:\n return redirect('login')\n user.is_active = True\n user.save()\n\n messages.success(request, 'Account activated successfully')\n return redirect('login')\n\n except Exception:\n pass\n\n return redirect('login')\n\n\ndef login(request):\n if request.user.is_authenticated:\n return redirect(\"dashboard\")\n else:\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n\n user = auth.authenticate(\n request, username=username, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect(\"dashboard\")\n else:\n if len(User.objects.filter(username=username)) == 0:\n messages.error(request, \"Invalid credentials\")\n return redirect(\"login\")\n elif not User.objects.get(username=username).is_active:\n messages.error(\n request, 'Account is not active, please check your email')\n return redirect(\"login\")\n else:\n messages.error(request, \"Invalid credentials\")\n return redirect(\"login\")\n else:\n return render(request, \"accounts/login.html\")\n\n\n@login_required(login_url='/accounts/login')\ndef transaction(request):\n # month = datetime.datetime.now()\n transactions = Transaction.objects.filter(user_id=request.user.id)\n userext = UserExtension.objects.get(user_id=request.user.id)\n context = {\n # 'month': month,\n 'transactions': transactions,\n 'platforms': userext.platforms,\n }\n if request.method == \"POST\":\n name = request.POST[\"name\"].lower()\n date = request.POST[\"date\"]\n company = request.POST[\"company\"].lower()\n\n if company not in userext.platforms:\n userext.platforms.append(company)\n userext.save()\n\n # Formats price to be in 2 decimal place\n price = request.POST[\"price\"]\n price_formatted = float(\"{:.2f}\".format(float(price)))\n\n # Formats date to be in the form of dd/mm/yyyy\n datelist = date.split(\"-\")\n datelist.reverse()\n date = '{}/{}/{}'.format(*datelist)\n\n # Create and save new transaction\n Transaction.objects.create(\n item=name, user_id=request.user.id, date=date, price=price_formatted, company=company)\n\n return redirect(\"transaction\")\n else:\n return render(request, \"accounts/transaction.html\", context=context)\n\n\n@login_required(login_url='/accounts/login')\ndef delivery(request):\n deliveries 
= Deliveries.objects.filter(user_id=request.user.id)\n context = {\n 'deliveries': deliveries,\n }\n\n if request.method == \"POST\":\n name = request.POST[\"name\"].lower()\n tkg_number = request.POST[\"tkg_number\"]\n courier_code = request.POST[\"courier\"].split(\",\")[0]\n courier_name = request.POST[\"courier\"].split(\",\")[1]\n\n header = {\n \"Content-Type\": \"application/json\",\n \"Tracking-Api-Key\": env.str('TRACKING_API_KEY'),\n }\n\n params = {\n \"tracking_number\": tkg_number,\n \"courier_code\": courier_code,\n }\n\n r = requests.post(\n url=\"https://api.trackingmore.com/v3/trackings/realtime\", headers=header, json=params)\n\n if r.json()[\"data\"][\"delivery_status\"] == \"notfound\":\n messages.error(request, \"Tracking number not found, please check the details and try again\")\n return redirect(\"delivery\")\n else:\n Deliveries.objects.create(name=name, user_id=request.user.id, tkg_number=tkg_number,\n courier_code=courier_code, courier_name=courier_name)\n return redirect(\"delivery\")\n\n else:\n return render(request, \"accounts/delivery.html\", context=context)\n\n\n@login_required(login_url='/accounts/login')\ndef ship(request):\n if request.method == \"POST\":\n name = request.POST[\"name\"]\n platform = request.POST[\"platform\"]\n location = request.POST[\"location\"]\n contact = request.POST['contact']\n base_shipping = request.POST['base_shipping_fee']\n free_shipping_min = request.POST['freeshipping']\n description = request.POST['description']\n owner = request.user.username\n\n grp = Group.objects.create(\n group_name=name, description=description, members=[owner], contacts=[contact], owner=owner)\n Shipping.objects.create(group=grp, group_name=name, platform=platform, location=location,\n base_shipping=base_shipping, free_shipping_min=free_shipping_min, member_count=1)\n user = UserExtension.objects.filter(user=request.user)\n\n if len(user) != 0:\n user[0].phone_number = contact\n user[0].save()\n else:\n UserExtension.objects.create(\n user=request.user, phone_number=contact)\n\n return redirect(f'ship/{name}')\n else:\n groups = Shipping.objects.all()\n mygroups = Group.objects.filter(\n members__contains=[request.user.username])\n user = UserExtension.objects.filter(user=request.user)\n context = {\n 'groups': groups,\n 'mygroups': mygroups,\n 'contact': user[0].phone_number if len(user) != 0 else ''\n }\n return render(request, \"accounts/ship.html\", context)\n\n\ndef joinGroup(request):\n if request.method == 'POST':\n contact = request.POST['contact']\n group_name = request.POST['group_name']\n grp = Group.objects.get(pk=group_name)\n user = UserExtension.objects.filter(user=request.user)\n group_shipping = Shipping.objects.get(pk=group_name)\n grp.members.append(request.user.username)\n grp.contacts.append(contact)\n grp.save()\n group_shipping.member_count += 1\n group_shipping.save()\n\n if len(user) != 0:\n user[0].phone_number = contact\n user[0].save()\n else:\n UserExtension.objects.create(\n user=request.user, phone_number=contact)\n\n return redirect(f'ship/{group_name}')\n\n\n@login_required(login_url='/accounts/login')\ndef groupmainpage(request, group_name):\n group = Group.objects.get(group_name=group_name)\n data = Data.objects.filter(group_name=group_name)\n user = UserExtension.objects.filter(user=request.user)\n # Ensures that the group is not locked before allowing members to access this page\n if group.is_locked:\n return redirect('grouplocked', group_name=group_name)\n tabledata = None\n if len(data) != 0:\n tabledata = 
zip(data[0].users, data[0].items,\n data[0].quantity, data[0].prices, data[0].urls)\n context = {\n 'info': group,\n 'shipping': Shipping.objects.filter(group_name=group_name)[0],\n 'data': data[0] if len(data) != 0 else None,\n 'table_data': tabledata,\n 'contact': user[0].phone_number if len(user) != 0 else ''\n }\n if request.method == 'POST':\n user = request.user.username\n name = request.POST['name']\n quantity = request.POST['quantity']\n price = request.POST['price']\n url = request.POST['url']\n adddata = None\n if len(data) == 0:\n if request.user.username == group.owner:\n adddata = Data.objects.create(group_name=group, users=[user], items=[name], prices=[\n price], urls=[url], quantity=[quantity], paid=[True])\n else:\n adddata = Data.objects.create(group_name=group, users=[user], items=[name], prices=[\n price], urls=[url], quantity=[quantity], paid=[False])\n adddata.save()\n else:\n adddata = Data.objects.filter(group_name=group)[0]\n adddata.users.append(user)\n adddata.items.append(name)\n adddata.prices.append(price)\n adddata.urls.append(url)\n adddata.quantity.append(quantity)\n adddata.paid.append(\n False if request.user.username != group.owner else True)\n adddata.save()\n return redirect('groupmainpage', group_name=group_name)\n else:\n return render(request, \"accounts/groupmainpage.html\", context)\n\n\n@login_required(login_url='/accounts/login')\ndef grouplocked(request, group_name):\n group = Group.objects.get(group_name=group_name)\n data = Data.objects.filter(group_name=group)\n user_total = 0\n\n # Ensures that the group is locked before allowing members to access this page\n if not group.is_locked:\n return redirect('groupmainpage', group_name=group_name)\n\n tabledata = None\n if len(data) != 0:\n tabledata = zip(data[0].users, data[0].items, data[0].quantity,\n data[0].prices, data[0].urls, data[0].paid)\n for i in range(len(data[0].users)):\n if data[0].users[i] == request.user.username:\n user_total += data[0].prices[i] * data[0].quantity[i]\n\n context = {\n 'info': group,\n 'shipping': Shipping.objects.filter(group_name=group_name)[0],\n 'member_details': zip(group.members, group.contacts),\n 'table_data': tabledata,\n 'date': group.meeting_date.isoformat(),\n 'user_total': user_total,\n 'data': data[0] if len(data) != 0 else None,\n }\n if request.method == \"POST\":\n tkg_number = request.POST['tkg_number']\n courier = request.POST['courier']\n meetup = request.POST['date']\n date = datetime.date.fromisoformat(meetup)\n address = request.POST['address']\n\n group.tkg_number = tkg_number\n group.courier = courier\n group.address = address\n group.meeting_date = date\n group.save()\n\n return redirect('grouplocked', group_name=group_name)\n else:\n return render(request, \"accounts/grouplocked.html\", context)\n\n\ndef sendUpdate(request):\n if request.method == 'POST':\n msg = request.POST['message']\n group_name = request.POST['group']\n grp = Group.objects.get(pk=group_name)\n users = grp.members\n for user in users:\n if user != request.user.username:\n notify.send(request.user, recipient=User.objects.get(\n username=user), verb=msg, description=\"message\", group=group_name)\n\n return redirect('grouplocked', group_name=group_name)\n\n\ndef uploadImage(request):\n if request.method == 'POST':\n name = request.POST['group_name']\n img = request.FILES['img']\n group = Group.objects.get(group_name=name)\n group.scrnshot = img\n group.save()\n return redirect('grouplocked', group_name=name)\n\n\n@login_required(login_url='/accounts/login')\ndef 
settings(request):\n u = User.objects.get(username=request.user.username)\n if request.method == \"POST\":\n # Get form values\n currpw = request.POST[\"currpw\"]\n password = request.POST[\"password\"]\n user = auth.authenticate(\n request, username=request.user.username, password=currpw)\n if user is not None:\n u.set_password(password)\n u.save()\n auth.login(\n request, u, backend=\"django.contrib.auth.backends.ModelBackend\"\n )\n messages.success(request, \"Your profile was updated successfully\")\n return redirect(\"settings\")\n else:\n messages.error(request, \"Invalid credentials\")\n return redirect(\"settings\")\n else:\n return render(request, \"accounts/settings.html\")\n\n\n@login_required(login_url='/accounts/login')\ndef report(request):\n return render(request, \"accounts/report.html\")\n\n\ndef forgetpassword(request):\n if request.method == \"POST\":\n email = request.POST[\"email\"]\n # Check email\n if not User.objects.filter(email=email).exists():\n messages.error(\n request, \"Email does not exist.\")\n return redirect(\"forgetpassword\")\n else:\n user = User.objects.filter(email=email)[0]\n current_site = get_current_site(request)\n email_body = {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': PasswordResetTokenGenerator().make_token(user),\n }\n\n link = reverse('resetpw', kwargs={\n 'uidb64': email_body['uid'], 'token': email_body['token']})\n\n email_subject = 'Reset your Password'\n\n reset_url = 'http://shopbud.herokuapp.com'+link\n\n email = EmailMessage(\n email_subject,\n 'Hi there, Please click the link below to reset your password \\n'+reset_url,\n '<EMAIL>',\n [email],\n )\n email.send(fail_silently=False)\n return redirect(\"resetpasswordsuccess\")\n\n return render(request, \"accounts/forgetpassword.html\")\n\n\ndef resetpw(request, uidb64, token):\n context = {'uidb64': uidb64, 'token': token}\n if request.method == \"POST\":\n newpw = request.POST[\"password\"]\n user_id = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=user_id)\n\n try:\n validate_password(newpw)\n except Exception:\n messages.error(request, password_validators_help_texts(\n password_validators=None))\n return redirect(\"resetpw\", uidb64=uidb64, token=token)\n user.set_password(newpw)\n user.save()\n messages.success(request, \"Password reset successfully\")\n return redirect(\"login\")\n\n return render(request, \"accounts/resetpw.html\", context)\n\n\ndef resetpasswordsuccess(request):\n return render(request, \"accounts/resetpasswordsuccess.html\")\n\n# Handles AJAX Requests\ndef deleteTransaction(request):\n if request.method == \"POST\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n item = request.POST.get(\"name\").lower()\n date = request.POST.get(\"date\")\n price = request.POST.get(\"price\")[1:]\n company = request.POST.get(\"company\").lower()\n\n dlt = Transaction.objects.filter(\n item=item, date=date, price=price, company=company, user_id=request.user.id)[0]\n dlt.delete()\n\n if len(Transaction.objects.filter(company=company, user_id=request.user.id)) == 0:\n userext = UserExtension.objects.get(user_id=request.user.id)\n userext.platforms.remove(company)\n userext.save()\n\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef displayExpenses(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n transactions = Transaction.objects.filter(user_id=request.user.id)\n platforms = 
UserExtension.objects.get(user_id=request.user.id).platforms\n response = {}\n for platform in platforms:\n response[platform] = transactions.filter(company=platform).aggregate(\n Sum('price'))['price__sum']\n\n return JsonResponse(response, status=200)\n\n\ndef editTransaction(request):\n if request.method == \"POST\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n oItem = request.POST.get(\"oItem\").lower()\n oDate = request.POST.get(\"oDate\")\n oPrice = request.POST.get(\"oPrice\")[1:]\n oCom = request.POST.get(\"oCom\").lower()\n oEntry = Transaction.objects.get(\n item=oItem, date=oDate, price=oPrice, company=oCom)\n\n oEntry.item = request.POST.get(\"nItem\").lower()\n oEntry.date = request.POST.get(\"nDate\")\n oEntry.price = float(\"{:.2f}\".format(\n float(request.POST.get(\"nPrice\"))))\n oEntry.company = request.POST.get(\"nCom\").lower()\n oEntry.save()\n\n if len(Transaction.objects.filter(company=oCom)) == 0:\n userext = UserExtension.objects.get(user_id=request.user.id)\n userext.platforms.remove(oCom)\n if request.POST.get(\"nCom\").lower() not in userext.platforms:\n userext.platforms.append(request.POST.get(\"nCom\").lower())\n userext.save()\n\n if request.POST.get(\"nCom\").lower() not in UserExtension.objects.get(user_id=request.user.id).platforms:\n userext = UserExtension.objects.get(user_id=request.user.id)\n userext.platforms.append(request.POST.get(\"nCom\").lower())\n userext.save()\n\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef displayDeliveries(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n # Extract all tracking numbers\n deliveries = Deliveries.objects.filter(user_id=request.user.id)\n tkg_numbers = [delivery.tkg_number for delivery in deliveries]\n tracking_numbers = \"\"\n for number in tkg_numbers:\n tracking_numbers += number + \",\"\n\n header = {\n \"Content-Type\": \"application/json\",\n \"Tracking-Api-Key\": env.str('TRACKING_API_KEY'),\n }\n\n params = {\n \"tracking_numbers\": tracking_numbers\n }\n\n r = requests.get(\n url=\"https://api.trackingmore.com/v3/trackings/get\", headers=header, params=params)\n\n return JsonResponse({\n \"response\": r.json()['data'],\n }, status=200)\n\n\ndef deleteDelivery(request):\n if request.method == \"POST\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n name = request.POST.get(\"name\").lower()\n tkg_number = request.POST.get(\"tkg_number\")\n\n dlt = Deliveries.objects.filter(\n name=name, tkg_number=tkg_number, user_id=request.user.id)\n dlt.delete()\n\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef changePaidStatus(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n index = int(request.GET['index'])\n paid = True if request.GET['paid'] == 'true' else False\n data = Data.objects.get(group_name=group_name)\n data.paid[index] = paid\n data.save()\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef deleteItem(request):\n if request.method == \"POST\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.POST['name']\n index = int(request.POST['index'])\n\n data = Data.objects.filter(group_name=group_name)[0]\n data.users.pop(index)\n data.items.pop(index)\n data.quantity.pop(index)\n data.prices.pop(index)\n data.urls.pop(index)\n data.paid.pop(index)\n data.save()\n\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef 
leaveGroup(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n grp = Group.objects.get(pk=group_name)\n data = Data.objects.filter(group_name=group_name)\n grp_shipping = Shipping.objects.get(pk=group_name)\n index = grp.members.index(request.user.username)\n grp.contacts.pop(index)\n grp.members.remove(request.user.username)\n grp.save()\n grp_shipping.member_count -= 1\n grp_shipping.save()\n\n # Deletes user's data when user leaves group\n if len(data) != 0:\n while request.user.username in data[0].users:\n index = data[0].users.index(request.user.username)\n data[0].items.pop(index)\n data[0].urls.pop(index)\n data[0].prices.pop(index)\n data[0].quantity.pop(index)\n data[0].paid.pop(index)\n data[0].users.remove(request.user.username)\n data[0].save()\n\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef lockGroup(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n grp = Group.objects.get(pk=group_name)\n grp.is_locked = True\n grp.save()\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef sendNotification_locked(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n grp = Group.objects.get(pk=group_name)\n users = grp.members\n for user in users:\n if user != request.user.username:\n notify.send(request.user, recipient=User.objects.get(\n username=user), verb=grp.group_name + ' has been locked!', description=\"info\")\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef deleteGroup(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n grp_ship = Shipping.objects.get(pk=group_name)\n grp_ship.delete()\n grp = Group.objects.get(pk=group_name)\n grp.delete()\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef unlockGroup(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n group_name = request.GET['name']\n grp = Group.objects.get(pk=group_name)\n grp.is_locked = False\n grp.save()\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef onboardingFin(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n user = UserExtension.objects.filter(user=request.user)\n\n if len(user) != 0:\n user[0].first_time_user = False\n user[0].save()\n return JsonResponse({\"success\": \"\"}, status=200)\n\n\ndef clearNotifications(request):\n if request.method == \"GET\" and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n user = User.objects.get(username=request.user.username)\n user.notifications.mark_all_as_read()\n return JsonResponse({\"success\": \"\"}, status=200)", "id": "792531", "language": "Python", "matching_score": 8.055489540100098, "max_stars_count": 0, "path": "accounts/views.py" }, { "content": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('logout', views.logout, name='logout'),\n path('dashboard', views.dashboard, name='dashboard'),\n path('register', views.register, name='register'),\n path('login', views.login, name='login'),\n path('transaction', views.transaction, name='transaction'),\n path('delivery', views.delivery, name='delivery'),\n path('ship', views.ship, name='ship'),\n path('settings', views.settings, name='settings'),\n path('forgetpassword', views.forgetpassword, name='forgetpassword'),\n path('resetpasswordsuccess', views.resetpasswordsuccess,\n name='resetpasswordsuccess'),\n path('ship/<group_name>', views.groupmainpage, name='groupmainpage'),\n path('deleteGroup', views.deleteGroup, name='deleteGroup'),\n path('joinGroup', views.joinGroup, name='joinGroup'),\n path('leaveGroup', views.leaveGroup, name='leaveGroup'),\n path('ship/<group_name>/locked', views.grouplocked, name='grouplocked'),\n path('lockGroup', views.lockGroup, name='lockGroup'),\n path('unlockGroup', views.unlockGroup, name='unlockGroup'),\n path('report', views.report, name='report'),\n path('uploadImage', views.uploadImage, name='uploadImage'),\n path('activate/<uidb64>/<token>', views.activate, name='activate'),\n path('resetpw/<uidb64>/<token>', views.resetpw, name='resetpw'),\n path('sendUpdate',views.sendUpdate, name='sendUpdate'),\n\n # Handles AJAX requests\n path('deleteTransaction', views.deleteTransaction, name='deleteTransaction'),\n path('displayExpenses', views.displayExpenses, name='displayExpenses'),\n path('editTransaction', views.editTransaction, name='editTransaction'),\n path('displayDeliveries', views.displayDeliveries, name='displayDeliveries'),\n path('deleteDelivery', views.deleteDelivery, name='deleteDelivery'),\n path('changePaidStatus', views.changePaidStatus, name='changePaidStatus'),\n path('deleteItem', views.deleteItem, name='deleteItem'),\n path('onboardingFin', views.onboardingFin, name='onboardingFin'),\n path('sendNotification_locked', views.sendNotification_locked, name='sendNotification_locked'),\n path('clearNotifications', views.clearNotifications, name='clearNotifications'),\n]\n", "id": "7371742", "language": "Python", "matching_score": 0.2683609426021576, "max_stars_count": 0, "path": "accounts/urls.py" }, { "content": "from django.contrib import admin\nfrom .models import Price\n\n\nclass PriceAdmin(admin.ModelAdmin):\n list_display = ('user', 'name', 'company', 'url')\n list_filter = ('user', 'company')\n search_fields = ('user__username', 'name', 'url')\n list_per_page = 20\n\n\n# admin.site.register(Price, PriceAdmin)\n", "id": "6676762", "language": "Python", "matching_score": 3.4519026279449463, "max_stars_count": 0, "path": "scraping/admin.py" }, { "content": "from django.contrib import admin\nfrom django.db import models\n\nfrom .models import Deliveries, Group, Shipping, Transaction, Data, UserExtension\n\n\nclass TransactionAdmin(admin.ModelAdmin):\n # Changes what is displayed in the admin page for Transaction model\n list_display = ('user', 'item', 'date', 'company', 'price')\n list_filter = ('user',) # Sort by user\n search_fields = ('user__username', 'item', 'date', 'price')\n list_per_page = 20\n\n\nclass DeliveriesAdmin(admin.ModelAdmin):\n list_display = ('user', 'name', 'tkg_number', 'courier_name')\n list_filter = ('user', 'courier_name')\n search_fields = ('user__username', 'name', 'tkg_number', 'courier_name')\n list_per_page = 20\n\n\nclass ShippingAdmin(admin.ModelAdmin):\n list_display = ('group_name', 'platform', 'location')\n 
list_filter = ('platform', 'location')\n search_fields = ('group_name', 'platform', 'location')\n list_per_page = 20\n\n\nclass DataAdmin(admin.ModelAdmin):\n list_display = ('group_name',)\n list_filter = ('group_name',)\n search_fields = ('group_name',)\n list_per_page = 20\n\n\nclass DataTabularInline(admin.TabularInline):\n model = Data\n\n\nclass GroupAdmin(admin.ModelAdmin):\n inlines = [DataTabularInline]\n list_display = ('group_name', 'owner', 'is_locked')\n list_filter = ('owner', 'group_name')\n list_editable = ('is_locked',)\n search_fields = ('group_name', 'owner')\n list_per_page = 20\n\n\nclass UserExtensionAdmin(admin.ModelAdmin):\n list_display = ('user', 'first_time_user', 'phone_number', 'platforms')\n list_editable = ('first_time_user', 'platforms')\n search_fields = ('user', 'phone_number')\n list_per_page = 20\n\nadmin.site.register(Transaction, TransactionAdmin)\nadmin.site.register(Deliveries, DeliveriesAdmin)\nadmin.site.register(Shipping, ShippingAdmin)\nadmin.site.register(Group, GroupAdmin)\nadmin.site.register(Data, DataAdmin)\nadmin.site.register(UserExtension, UserExtensionAdmin)\n", "id": "3285980", "language": "Python", "matching_score": 1.007441759109497, "max_stars_count": 0, "path": "accounts/admin.py" }, { "content": "# Generated by Django 3.2.2 on 2021-06-11 13:27\n\nfrom django.conf import settings\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Price',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('url', models.CharField(max_length=250)),\n ('name', models.CharField(max_length=200)),\n ('company', models.CharField(max_length=20)),\n ('priceArr', django.contrib.postgres.fields.ArrayField(base_field=models.DecimalField(decimal_places=2, max_digits=6), size=None)),\n ('dateArr', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=15), size=None)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n", "id": "894685", "language": "Python", "matching_score": 4.4149394035339355, "max_stars_count": 0, "path": "scraping/migrations/0001_initial.py" }, { "content": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.db import models\nfrom django.db.models.deletion import CASCADE\n\n\nclass Price(models.Model):\n user = models.ForeignKey(User, on_delete=CASCADE)\n url = models.CharField(max_length=500)\n name = models.CharField(max_length=200)\n company = models.CharField(max_length=20)\n priceArr = ArrayField(models.DecimalField(max_digits=6, decimal_places=2))\n dateArr = ArrayField(models.CharField(max_length=15))\n\n def __str__(self) -> str:\n return self.url\n", "id": "4482677", "language": "Python", "matching_score": 4.187845706939697, "max_stars_count": 0, "path": "scraping/models.py" }, { "content": "from cloudinary.models import CloudinaryField\nfrom django.contrib.auth.models import User\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.db import models\nfrom django.db.models.deletion import CASCADE\nfrom django.db.models.fields import BooleanField\nfrom django.utils import timezone\n\n\nclass Transaction(models.Model):\n 
user = models.ForeignKey(User, on_delete=CASCADE)\n item = models.CharField(max_length=255)\n date = models.CharField(max_length=15)\n company = models.CharField(max_length=20)\n price = models.DecimalField(max_digits=6, decimal_places=2)\n\n def __str__(self) -> str:\n return self.item\n\n\nclass Deliveries(models.Model):\n user = models.ForeignKey(User, on_delete=CASCADE)\n name = models.CharField(max_length=200)\n tkg_number = models.CharField(max_length=25)\n courier_code = models.CharField(max_length=25)\n courier_name = models.CharField(max_length=25)\n\n class Meta:\n verbose_name = \"Deliveries\"\n verbose_name_plural = \"Deliveries\"\n\n def __str__(self) -> str:\n return self.tkg_number\n\n\nclass Group(models.Model):\n group_name = models.CharField(max_length=200, primary_key=True)\n description = models.TextField()\n contacts = ArrayField(models.PositiveIntegerField())\n members = ArrayField(models.CharField(max_length=100))\n scrnshot = CloudinaryField('image')\n tkg_number = models.CharField(max_length=25, default='')\n courier = models.CharField(max_length=25, default='')\n meeting_date = models.DateField(default=timezone.now)\n owner = models.CharField(max_length=50)\n address = models.CharField(max_length=255, default='')\n is_locked = models.BooleanField(default=False)\n\n def __str__(self) -> str:\n return self.group_name\n\n\nclass Shipping(models.Model):\n group = models.ForeignKey(Group, on_delete=CASCADE)\n group_name = models.CharField(max_length=200, primary_key=True)\n platform = models.CharField(max_length=50)\n location = models.CharField(max_length=200)\n base_shipping = models.DecimalField(max_digits=4, decimal_places=2)\n free_shipping_min = models.DecimalField(\n max_digits=5, decimal_places=2, null=True)\n member_count = models.PositiveSmallIntegerField()\n\n def __str__(self) -> str:\n return self.group_name\n\n\nclass Data(models.Model):\n group_name = models.OneToOneField(Group, on_delete=CASCADE)\n users = ArrayField(models.CharField(max_length=50), default=list)\n items = ArrayField(models.CharField(max_length=200), default=list)\n prices = ArrayField(models.DecimalField(\n max_digits=6, decimal_places=2), default=list)\n urls = ArrayField(models.URLField(max_length=500), default=list)\n quantity = ArrayField(models.PositiveSmallIntegerField(), default=list)\n paid = ArrayField(models.BooleanField(), default=list)\n\n def total_price(self):\n total = 0\n for i in range(len(self.prices)):\n total += self.quantity[i] * self.prices[i]\n return total\n\n def __str__(self) -> str:\n return self.group_name.group_name\n\nclass UserExtension(models.Model):\n user = models.OneToOneField(User, on_delete=CASCADE)\n first_time_user = BooleanField(default=False)\n phone_number = models.PositiveIntegerField(blank=True, null=True)\n platforms = ArrayField(models.CharField(max_length=20), default=list, null=True, blank=True)\n\n def __str__(self) -> str:\n return self.user.username", "id": "10956593", "language": "Python", "matching_score": 5.706295490264893, "max_stars_count": 0, "path": "accounts/models.py" }, { "content": "# Generated by Django 3.2.4 on 2021-06-23 12:33\n\nimport cloudinary.models\nfrom django.conf import settings\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='Group',\n fields=[\n ('group_name', models.CharField(max_length=200, primary_key=True, serialize=False)),\n ('description', models.TextField()),\n ('contacts', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveIntegerField(), size=None)),\n ('members', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)),\n ('scrnshot', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),\n ('tkg_number', models.CharField(default='', max_length=25)),\n ('courier', models.CharField(default='', max_length=25)),\n ('meeting_date', models.DateField(default=django.utils.timezone.now)),\n ('owner', models.CharField(max_length=50)),\n ('address', models.CharField(default='', max_length=255)),\n ('is_locked', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('item', models.CharField(max_length=255)),\n ('date', models.CharField(max_length=15)),\n ('company', models.CharField(max_length=20)),\n ('price', models.DecimalField(decimal_places=2, max_digits=6)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Shipping',\n fields=[\n ('group_name', models.CharField(max_length=200, primary_key=True, serialize=False)),\n ('platform', models.CharField(max_length=50)),\n ('location', models.CharField(max_length=200)),\n ('base_shipping', models.DecimalField(decimal_places=2, max_digits=4)),\n ('free_shipping_min', models.DecimalField(decimal_places=2, max_digits=5, null=True)),\n ('member_count', models.PositiveSmallIntegerField()),\n ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.group')),\n ],\n ),\n migrations.CreateModel(\n name='Deliveries',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('tkg_number', models.CharField(max_length=25)),\n ('courier_code', models.CharField(max_length=25)),\n ('courier_name', models.CharField(max_length=25)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Deliveries',\n 'verbose_name_plural': 'Deliveries',\n },\n ),\n migrations.CreateModel(\n name='Data',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('users', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=50), default=list, size=None)),\n ('items', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), default=list, size=None)),\n ('prices', django.contrib.postgres.fields.ArrayField(base_field=models.DecimalField(decimal_places=2, max_digits=6), default=list, size=None)),\n ('urls', django.contrib.postgres.fields.ArrayField(base_field=models.URLField(max_length=500), default=list, size=None)),\n ('quantity', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), default=list, size=None)),\n ('paid', django.contrib.postgres.fields.ArrayField(base_field=models.BooleanField(), default=list, size=None)),\n ('group_name', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='accounts.group')),\n ],\n ),\n ]\n", "id": "6683382", "language": "Python", "matching_score": 
6.074182987213135, "max_stars_count": 0, "path": "accounts/migrations/0001_initial.py" }, { "content": "# Generated by Django 3.2.4 on 2021-07-06 13:19\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('accounts', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserExtension',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_time_user', models.BooleanField(default=False)),\n ('phone_number', models.PositiveIntegerField(blank=True)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n", "id": "3342089", "language": "Python", "matching_score": 2.331008195877075, "max_stars_count": 0, "path": "accounts/migrations/0002_userextension.py" }, { "content": "# Generated by Django 3.2.4 on 2021-07-14 06:31\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0003_alter_userextension_phone_number'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userextension',\n name='platforms',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=20), blank=True, default=list, null=True, size=None),\n ),\n ]\n", "id": "5599221", "language": "Python", "matching_score": 3.354724645614624, "max_stars_count": 0, "path": "accounts/migrations/0004_userextension_platforms.py" }, { "content": "# Generated by Django 3.2.4 on 2021-07-06 17:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0002_userextension'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userextension',\n name='phone_number',\n field=models.PositiveIntegerField(blank=True, null=True),\n ),\n ]\n", "id": "11703542", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "accounts/migrations/0003_alter_userextension_phone_number.py" }, { "content": "from requests_html import HTMLSession\n\n\ndef checkShopeePrice(url):\n found = False\n while not found:\n try:\n session = HTMLSession()\n r = session.get(url)\n r.html.render()\n price_html = r.html.find('._3e_UQT', first=True)\n price = price_html.text\n found = True\n return price\n except AttributeError:\n pass\n", "id": "2041496", "language": "Python", "matching_score": 3.002347946166992, "max_stars_count": 0, "path": "scraping/shopee.py" }, { "content": "from requests_html import HTMLSession\n\n\ndef checkLazadaPrice(url):\n found = False\n while not found:\n try:\n session = HTMLSession()\n r = session.get(url)\n price_html = r.html.find(\n 'span.pdp-price.pdp-price_type_normal.pdp-price_color_orange.pdp-price_size_xl', first=True)\n price = price_html.text\n found = True\n return price\n except AttributeError:\n pass\n", "id": "4436332", "language": "Python", "matching_score": 3.0015153884887695, "max_stars_count": 0, "path": "scraping/lazada.py" }, { "content": "from requests_html import HTMLSession, AsyncHTMLSession\n\n\ndef checkAmazonPrice(url):\n found = False\n while not found:\n try:\n session = HTMLSession()\n r = session.get(url)\n price_html = r.html.find('#priceblock_ourprice', first=True)\n price = price_html.text\n found = True\n return price\n except 
AttributeError:\n pass\n", "id": "1902365", "language": "Python", "matching_score": 0.6586583852767944, "max_stars_count": 0, "path": "scraping/amazon.py" }, { "content": "from datetime import datetime\n\nfrom django.core.management.base import BaseCommand\nfrom scraping.amazon import checkAmazonPrice\nfrom scraping.lazada import checkLazadaPrice\nfrom scraping.models import Price\nfrom scraping.shopee import checkShopeePrice\n\n\nclass Command(BaseCommand):\n help = \"collect price from shopping platforms\"\n\n # define logic of command\n def handle(self, *args, **options):\n # Update DB\n entries = Price.objects.all()\n curr_date = datetime.now().strftime(\"%m/%d/%Y\")\n\n # Loops through each entry in the database and updates them\n for entry in entries:\n if \"lazada\" in entry.url:\n price = float(\"{:.2f}\".format(\n float(checkLazadaPrice(entry.url)[1:])))\n elif \"shopee\" in entry.url:\n price = float(\"{:.2f}\".format(\n float(checkShopeePrice(entry.url)[1:])))\n elif \"amazon\" in entry.url:\n price = float(\"{:.2f}\".format(\n float(checkAmazonPrice(entry.url)[2:])))\n else:\n pass\n\n entry.priceArr.append(price)\n entry.dateArr.append(curr_date)\n entry.save()\n\n self.stdout.write('job complete')\n", "id": "7898181", "language": "Python", "matching_score": 3.6525330543518066, "max_stars_count": 0, "path": "scraping/management/commands/scrape.py" }, { "content": "from scraping.amazon import checkAmazonPrice\nfrom scraping.lazada import checkLazadaPrice\nfrom scraping.shopee import checkShopeePrice\n\n\ndef checkPrice(url):\n if \"lazada\" in url:\n price = float(\"{:.2f}\".format(float(checkLazadaPrice(url)[1:])))\n elif \"shopee\" in url:\n price = float(\"{:.2f}\".format(float(checkShopeePrice(url)[1:])))\n elif \"amazon\" in url:\n price = float(\"{:.2f}\".format(float(checkAmazonPrice(url)[2:])))\n else:\n pass\n\n return price\n", "id": "661216", "language": "Python", "matching_score": 1.613144874572754, "max_stars_count": 0, "path": "scraping/checkPrice.py" } ]
3.002348
eringrant
[ { "content": "import numpy as np\nimport os\nimport pickle\nimport re\nimport sys\nimport argparse\n\n\nclass FileNotFoundError(OSError):\n pass\n\n\nclass Preprocess():\n def __init__(self, path_to_dataset, c_max_len):\n # path_to_dataset example: '././babi_original'\n self.path_to_dataset = path_to_dataset\n self.train_paths = None\n self.val_paths = None\n self.test_paths = None\n self.all_paths = None\n self._c_word_set = set()\n self._q_word_set = set()\n self._a_word_set = set()\n self._cqa_word_set = set()\n self._all_word_set = set()\n self.c_max_len = c_max_len\n self.s_max_len = 0\n self.q_max_len = 0\n self.mask_index = 0\n\n def set_path(self, path_to_dataset, all_paths_to_babi):\n \"\"\"Set list of train, val, and test dataset paths.\"\"\"\n self.path_to_dataset = path_to_dataset\n\n train_paths = []\n val_paths = []\n test_paths = []\n for dirpath, dirnames, filenames in os.walk(path_to_dataset):\n for filename in filenames:\n if filename.endswith('.txt'):\n if 'train' in filename:\n train_paths.append(os.path.join(dirpath, filename))\n elif 'val' in filename:\n val_paths.append(os.path.join(dirpath, filename))\n else:\n assert 'test' in filename\n test_paths.append(os.path.join(dirpath, filename))\n else:\n print(\"Ignored file: {}\".format(filename))\n self.train_paths = sorted(train_paths)\n self.val_paths = sorted(val_paths)\n self.test_paths = sorted(test_paths)\n\n all_paths = []\n for dirpath, dirnames, filenames in os.walk(all_paths_to_babi):\n for filename in filenames:\n if filename.endswith('.txt'):\n all_paths.append(os.path.join(dirpath, filename))\n else:\n print(\"Ignored file: {}\".format(filename))\n self.all_paths = sorted(all_paths)\n\n def _split_paragraphs(self, path_to_file):\n \"\"\"Split into paragraphs.\"\"\"\n with open(path_to_file, 'r') as f:\n babi = f.readlines()\n paragraph = []\n paragraphs = []\n alphabet = re.compile('[a-zA-Z]')\n for d in babi:\n if d.startswith('1 '):\n if paragraph:\n paragraphs.append(paragraph)\n paragraph = []\n mark = re.search(alphabet, d).span()[0]\n paragraph.append(d[mark:])\n return paragraphs\n\n def _split_clqa(self, paragraphs, show_print=True):\n \"\"\"For each paragraph, split into context, label, question and answer.\n\n Args:\n paragraphs: list of paragraphs\n\n Returns:\n context: list of contexts\n label: list of labels\n question: list of questions\n answer: list of answers\n \"\"\"\n context = []\n label = []\n question = []\n answer = []\n for paragraph in paragraphs:\n for i, sent in enumerate(paragraph):\n if '?' in sent:\n related_para = [para.strip().lower() for para in paragraph[:i] if '?' not in para][::-1]\n # Get rid of tab symbol\n related_para = [para.split('\\t')[0] for para in related_para]\n if len(related_para) > 20:\n related_para = related_para[:20]\n context.append(related_para)\n label.append([i for i in range(len(related_para))])\n q_a_ah = sent.split('\\t')\n question.append(q_a_ah[0].strip().lower())\n answer.append(q_a_ah[1].strip().lower())\n # check\n if show_print:\n if (len(question) == len(answer)) & (len(answer) == len(context)) & (len(context) == len(label)):\n print(\"bAbI is well separated into question, answer, context, and label!\")\n print(\"total: {}\".format(len(label)))\n else:\n print(\"Something is missing! 
check again\")\n print(\"the number of questions: {}\".format(len(question)))\n print(\"the number of answers: {}\".format(len(answer)))\n print(\"the number of contexts: {}\".format(len(context)))\n print(\"the number of labels: {}\".format(len(label)))\n return context, label, question, answer\n\n def split_all_clqa(self, paths, show_print=True):\n \"\"\"Merge all tasks into one dataset.\n\n Args:\n paths: list of path to tasks\n\n Returns:\n contexts: list of contexts of all tasks\n labels: list of labels of all tasks\n questions: list of questions of all tasks\n answers: list of answers of all tasks\n \"\"\"\n if paths is None:\n print('path is None, run set_path() first!')\n else:\n contexts = []\n labels = []\n questions = []\n answers = []\n for path in paths:\n if show_print:\n print('=================')\n paragraphs = self._split_paragraphs(path)\n if show_print:\n print(\"data: {}\".format(os.path.basename(path)))\n context, label, question, answer = self._split_clqa(paragraphs, show_print=show_print)\n contexts.extend(context)\n labels.extend(label)\n questions.extend(question)\n answers.extend(answer)\n return contexts, labels, questions, answers\n\n def set_word_set(self, word_set_path):\n\n try:\n c_word_set, q_word_set, a_word_set = np.load(word_set_path)\n\n except Exception as e:\n # Create the word set from the training, validation, and test data\n c_word_set = set()\n q_word_set = set()\n a_word_set = set()\n\n # Global vocabulary across multiple datasets\n all_contexts, all_labels, all_questions, all_answers = self.split_all_clqa(\n self.all_paths, show_print=False)\n\n for para in all_contexts:\n for sent in para:\n sent = sent.replace(\".\", \" .\")\n sent = sent.replace(\"?\", \" ?\")\n sent = sent.split()\n c_word_set.update(sent)\n\n for sent in all_questions:\n sent = sent.replace(\".\", \" .\")\n sent = sent.replace(\"?\", \" ?\")\n sent = sent.split()\n q_word_set.update(sent)\n\n for answer in all_answers:\n answer = answer.split(',')\n a_word_set.update(answer)\n\n a_word_set.add(',')\n\n # Save the word set if requested\n if word_set_path is not None and isinstance(e, FileNotFoundError):\n np.save(word_set_path, (c_word_set, q_word_set, a_word_set))\n\n self._c_word_set = c_word_set\n self._q_word_set = q_word_set\n self._a_word_set = a_word_set\n self._cqa_word_set = c_word_set.union(q_word_set).union(a_word_set)\n self._qa_word_set = c_word_set.union(q_word_set).union(a_word_set)\n\n def _index_context(self, contexts):\n c_word_index = dict()\n for i, word in enumerate(self._c_word_set):\n c_word_index[word] = i+1 # index 0 for zero padding\n indexed_cs = []\n for context in contexts:\n indexed_c = []\n for sentence in context:\n sentence = sentence.replace(\".\", \" .\")\n sentence = sentence.replace(\"?\", \" ?\")\n sentence = sentence.split()\n indexed_s = []\n for word in sentence:\n indexed_s.append(c_word_index[word])\n indexed_c.append(indexed_s)\n indexed_cs.append(np.array(indexed_c))\n return indexed_cs\n\n def _index_label(self, labels):\n indexed_ls = []\n for label in labels:\n indexed_ls.append(np.eye(self.c_max_len)[label])\n return indexed_ls\n\n def _index_question(self, questions):\n q_word_index = dict()\n for i, word in enumerate(self._q_word_set):\n q_word_index[word] = i+1 # index 0 for zero padding\n indexed_qs = []\n for sentence in questions:\n sentence = sentence.replace(\".\", \" .\")\n sentence = sentence.replace(\"?\", \" ?\")\n sentence = sentence.split()\n indexed_s = []\n for word in sentence:\n 
indexed_s.append(q_word_index[word])\n indexed_qs.append(np.array(indexed_s))\n return indexed_qs\n\n def _index_answer(self, answers):\n a_word_index = dict()\n a_word_dict = dict()\n for i, word in enumerate(self._cqa_word_set):\n a_word_dict[i] = word\n if word in self._a_word_set:\n answer_one_hot = np.zeros(len(self._cqa_word_set), dtype=np.float32)\n answer_one_hot[i] = 1\n a_word_index[word] = answer_one_hot\n indexed_as = []\n for answer in answers:\n if ',' in answer:\n multiple_answer = [a_word_index[',']]\n for a in answer.split(','):\n indexed_a = a_word_index[a]\n multiple_answer.append(indexed_a)\n indexed_as.append(np.sum(multiple_answer, axis=0))\n else:\n indexed_a = a_word_index[answer]\n indexed_as.append(indexed_a)\n\n return indexed_as\n\n def masking(self, context_index, label_index, question_index):\n context_masked = []\n question_masked = []\n label_masked = []\n context_real_len = []\n question_real_len = []\n # cs: one context\n for cs, l, q in zip(context_index, label_index, question_index):\n context_masked_tmp = []\n context_real_length_tmp = []\n # cs: many sentences\n for context in cs:\n context_real_length_tmp.append(len(context))\n diff = self.s_max_len - len(context)\n if (diff > 0):\n context_mask = np.append(context, [self.mask_index]*diff, axis=0)\n context_masked_tmp.append(context_mask.tolist())\n else:\n context_masked_tmp.append(context)\n diff_c = self.c_max_len - len(cs)\n context_masked_tmp.extend([[0]*self.s_max_len]*diff_c)\n context_masked.append(context_masked_tmp)\n\n diff_q = self.q_max_len - len(q)\n question_real_len.append(len(q))\n question_masked_tmp = np.array(np.append(q, [self.mask_index]*diff_q, axis=0))\n question_masked.append(question_masked_tmp.tolist())\n\n diff_l = self.c_max_len - len(l)\n label_masked_tmp = np.append(l, np.zeros((diff_l, self.c_max_len)), axis=0)\n label_masked.append(label_masked_tmp.tolist())\n context_real_length_tmp.extend([0] * diff_l)\n context_real_len.append(context_real_length_tmp)\n\n return context_masked, question_masked, label_masked, context_real_len, question_real_len\n\n def load(self, mode, path):\n\n assert mode in ['train', 'val', 'test']\n\n contexts, labels, questions, answers = self.split_all_clqa([path])\n\n context_index = self._index_context(contexts)\n label_index = self._index_label(labels)\n question_index = self._index_question(questions)\n answer_index = self._index_answer(answers)\n\n if mode == 'train':\n # check max sentence length\n for context in context_index:\n for sentence in context:\n if len(sentence) > self.s_max_len:\n self.s_max_len = len(sentence)\n # check max question length\n for question in question_index:\n if len(question) > self.q_max_len:\n self.q_max_len = len(question)\n\n assert self.s_max_len > 0\n assert self.q_max_len > 0\n\n self.path_to_processed = '_'.join([\n self.output_path,\n str(self.c_max_len),\n str(self.s_max_len),\n str(self.q_max_len),\n str(len(self._c_word_set)),\n str(len(self._q_word_set)),\n str(len(self._a_word_set)),\n ])\n if not os.path.exists(self.path_to_processed):\n os.makedirs(self.path_to_processed)\n\n context_masked, question_masked, label_masked, context_real_len, question_real_len = self.masking(context_index, label_index, question_index)\n # check masking\n cnt = 0\n for c, q, l in zip(context_masked, question_masked, label_masked):\n for context in c:\n if (len(context) != self.s_max_len) | (len(q) != self.q_max_len) | (len(l) != self.c_max_len):\n cnt += 1\n if cnt == 0:\n print(\"Masking success!\")\n else:\n 
print(\"Masking process error\")\n dataset = (question_masked, answer_index, context_masked, label_masked, context_real_len, question_real_len)\n\n dump_path = os.path.basename(path) + '.pkl'\n with open(os.path.join(self.path_to_processed, dump_path), 'wb') as f:\n pickle.dump(dataset, f)\n\n\ndef get_args_parser():\n _parser = argparse.ArgumentParser()\n _parser.add_argument('--path', required=True)\n _parser.add_argument('--c_max_len', type=int, required=True)\n _parser.add_argument('--all', '--all_paths', required=True)\n _parser.add_argument('--word_set', '--word_set_path', default=None,\n help='Optional word set. If not specified, generated from'\n 'the union of training, validation, and test data.')\n _parser.add_argument('--output_path', required=True)\n\n return _parser\n\n\ndef default_write(f, string, default_value):\n if string is None:\n f.write(str(default_value) + \"\\t\")\n else:\n f.write(str(string) + \"\\t\")\n\n\ndef main():\n args = get_args_parser().parse_args()\n\n preprocess = Preprocess(args.path, args.c_max_len)\n\n preprocess.output_path = args.output_path\n preprocess.set_path(args.path, args.all)\n preprocess.set_word_set(args.word_set)\n\n for train_path in preprocess.train_paths:\n preprocess.load('train', train_path)\n for val_path in preprocess.val_paths:\n preprocess.load('val', val_path)\n for test_path in preprocess.test_paths:\n preprocess.load('test', test_path)\n\n\nif __name__ == '__main__':\n main()\n", "id": "8929925", "language": "Python", "matching_score": 3.1259682178497314, "max_stars_count": 1, "path": "preprocessing.py" }, { "content": "import argparse\r\nfrom datetime import datetime\r\nimport errno\r\nfrom glob import glob\r\nimport numpy as np\r\nimport os\r\nimport pickle\r\nfrom time import time\r\n\r\nimport tensorflow as tf\r\n\r\nfrom model import Model\r\n\r\n\r\ndef mkdir_p(path):\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc: # Python >2.5\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise\r\n\r\n\r\ndef read_data(train_file_path, test_file_path):\r\n\r\n train, val, test = [], [], []\r\n for file_path_i in glob(os.path.join(train_file_path, '*.pkl')):\r\n with open(os.path.join(file_path_i), 'rb') as f:\r\n if 'train' in file_path_i:\r\n train += [pickle.load(f) + (file_path_i,)]\r\n elif 'val' in file_path_i:\r\n val += [pickle.load(f) + (file_path_i,)]\r\n else:\r\n assert 'test' in file_path_i\r\n\r\n for file_path_i in glob(os.path.join(test_file_path, '*.pkl')):\r\n with open(os.path.join(file_path_i), 'rb') as f:\r\n if 'train' not in file_path_i and 'val' not in file_path_i:\r\n assert 'test' in file_path_i\r\n test += [pickle.load(f) + (file_path_i,)]\r\n\r\n return train, val, test\r\n\r\n\r\ndef batch_iter(c, q, l, a, c_real_len, q_real_len,\r\n batch_size, num_epochs, shuffle=True):\r\n \"\"\"\r\n Generates a batch iterator for a dataset.\r\n \"\"\"\r\n c = np.array(c)\r\n q = np.array(q)\r\n l = np.array(l)\r\n a = np.array(a)\r\n c_real_len = np.array(c_real_len)\r\n q_real_len = np.array(q_real_len)\r\n data_size = len(q)\r\n num_batches_per_epoch = int(data_size / batch_size) + 1\r\n for epoch in range(num_epochs):\r\n print(\"num_epochs\")\r\n print(\"In epoch >> \" + str(epoch + 1))\r\n print(\"num batches per epoch is: \" + str(num_batches_per_epoch))\r\n # Shuffle the data at each epoch\r\n if shuffle:\r\n shuffle_indices = np.random.permutation(np.arange(data_size))\r\n c_shuffled = c[shuffle_indices]\r\n q_shuffled = q[shuffle_indices]\r\n l_shuffled = 
l[shuffle_indices]\r\n a_shuffled = a[shuffle_indices]\r\n c_real_len_shuffled = c_real_len[shuffle_indices]\r\n q_real_len_shuffled = q_real_len[shuffle_indices]\r\n else:\r\n c_shuffled = c\r\n q_shuffled = q\r\n l_shuffled = l\r\n a_shuffled = a\r\n c_real_len_shuffled = c_real_len\r\n q_real_len_shuffled = q_real_len\r\n\r\n for batch_num in range(num_batches_per_epoch):\r\n start_index = batch_num * batch_size\r\n end_index = (batch_num + 1) * batch_size\r\n if end_index < data_size:\r\n c_batch = c_shuffled[start_index:end_index]\r\n q_batch = q_shuffled[start_index:end_index]\r\n l_batch = l_shuffled[start_index:end_index]\r\n a_batch = a_shuffled[start_index:end_index]\r\n c_real_len_batch = c_real_len_shuffled[start_index:end_index]\r\n q_real_len_batch = q_real_len_shuffled[start_index:end_index]\r\n yield list(zip(c_batch, q_batch, l_batch, a_batch,\r\n c_real_len_batch, q_real_len_batch))\r\n\r\n\r\ndef concatenate_datasets(datasets):\r\n q, a, c, l, c_real_len, q_real_len = [], [], [], [], [], []\r\n for dataset in datasets:\r\n q += dataset[0]\r\n a += dataset[1]\r\n c += dataset[2]\r\n l += dataset[3]\r\n c_real_len += dataset[4]\r\n q_real_len += dataset[5]\r\n return q, a, c, l, c_real_len, q_real_len\r\n\r\n\r\ndef main(args):\r\n\r\n (c_max_len,\r\n s_max_len,\r\n q_max_len,\r\n c_vocab_size,\r\n q_vocab_size,\r\n a_vocab_size,\r\n ) = [int(x) for x in args.train_data_path.split(\"_\")[-6:]]\r\n\r\n (train_datasets,\r\n val_datasets, test_datasets) = read_data(args.train_data_path,\r\n args.test_data_path)\r\n\r\n # Concatenate training datasets\r\n train_dataset = concatenate_datasets(train_datasets)\r\n\r\n # Concatenate validation datasets\r\n val_dataset = concatenate_datasets(val_datasets)\r\n\r\n assert q_max_len == len(train_dataset[0][0])\r\n a_vocab_size = len(train_dataset[1][0])\r\n assert c_max_len == len(train_dataset[2][0])\r\n\r\n date = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M:%S')\r\n model_id = \"RN-s_hidden-{}-q_hidden-{}-context_size-{}-lr-{}-batch_size-{}-{}\".format(\r\n args.s_hidden_size,\r\n args.q_hidden_size,\r\n c_max_len,\r\n args.learning_rate,\r\n args.batch_size,\r\n date,\r\n )\r\n\r\n child = args.train_data_path.split('/')[-1]\r\n save_dir = os.path.join(args.save_path, child, model_id)\r\n save_summary_path = save_dir\r\n save_variable_path = os.path.join(save_dir, 'model')\r\n\r\n if not os.path.exists(save_dir):\r\n mkdir_p(os.path.join(args.save_path))\r\n mkdir_p(os.path.join(args.save_path, child))\r\n mkdir_p(os.path.join(args.save_path, child, model_id))\r\n\r\n config = {\r\n 'batch_size': args.batch_size,\r\n 's_hidden': args.s_hidden_size,\r\n 'q_hidden': args.q_hidden_size,\r\n 'context_vocab_size': c_vocab_size,\r\n 'question_vocab_size': q_vocab_size,\r\n 'answer_vocab_size': a_vocab_size,\r\n 'c_max_len': c_max_len,\r\n 'q_max_len': q_max_len,\r\n 's_max_len': s_max_len,\r\n 'iter_time': int(args.iter_time),\r\n 'display_step': int(args.display_step),\r\n }\r\n\r\n with tf.Graph().as_default():\r\n\r\n config_proto = tf.ConfigProto()\r\n config_proto.gpu_options.allow_growth = True\r\n sess = tf.Session(config=config_proto)\r\n start_time = time()\r\n with sess.as_default():\r\n\r\n rn = Model(config)\r\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=4)\r\n\r\n if args.max_train_iters > 0:\r\n global_step = tf.Variable(0, name='global_step', trainable=False)\r\n opt = tf.train.AdamOptimizer(args.learning_rate)\r\n optimizer = opt.minimize(rn.loss, 
global_step=global_step)\r\n sess.run(tf.global_variables_initializer())\r\n\r\n if args.max_train_iters > 0:\r\n # Define training procedure\r\n\r\n loss_train = tf.summary.scalar(\"loss_train\", rn.loss)\r\n accuracy_train = tf.summary.scalar(\"accuracy_train\", rn.accuracy)\r\n train_summary_ops = tf.summary.merge([loss_train, accuracy_train])\r\n\r\n loss_val = tf.summary.scalar(\"loss_val\", rn.loss)\r\n accuracy_val = tf.summary.scalar(\"accuracy_val\", rn.accuracy)\r\n val_summary_ops = tf.summary.merge([loss_val, accuracy_val])\r\n\r\n summary_writer = tf.summary.FileWriter(save_summary_path, sess.graph)\r\n batch_train = batch_iter(c=train_dataset[2],\r\n q=train_dataset[0],\r\n l=train_dataset[3],\r\n a=train_dataset[1],\r\n c_real_len=train_dataset[4],\r\n q_real_len=train_dataset[5],\r\n num_epochs=config['iter_time'],\r\n batch_size=config['batch_size'])\r\n for i, train in enumerate(batch_train):\r\n if i > args.max_train_iters:\r\n print(\"Maximum training iterations reached.\")\r\n break\r\n c_batch, q_batch, l_batch, a_batch, \\\r\n c_real_len_batch, q_real_len_batch = zip(*train)\r\n feed_dict = {rn.context: c_batch,\r\n rn.question: q_batch,\r\n rn.label: l_batch,\r\n rn.answer: a_batch,\r\n rn.context_real_len: c_real_len_batch,\r\n rn.question_real_len: q_real_len_batch,\r\n rn.is_training: True}\r\n current_step = sess.run(global_step, feed_dict=feed_dict)\r\n optimizer.run(feed_dict=feed_dict)\r\n train_summary = sess.run(train_summary_ops, feed_dict=feed_dict)\r\n summary_writer.add_summary(train_summary, current_step)\r\n if current_step % (args.display_step) == 0:\r\n print(\"step: {}\".format(current_step))\r\n print(\"====validation start====\")\r\n batch_val = batch_iter(c=val_dataset[2],\r\n q=val_dataset[0],\r\n l=val_dataset[3],\r\n a=val_dataset[1],\r\n c_real_len=val_dataset[4],\r\n q_real_len=val_dataset[5],\r\n num_epochs=1,\r\n batch_size=args.batch_size)\r\n accs = []\r\n for val in batch_val:\r\n c_val, q_val, l_val, a_val, \\\r\n c_real_len_val, q_real_len_val = zip(*val)\r\n feed_dict = {rn.context: c_val,\r\n rn.question: q_val,\r\n rn.label: l_val,\r\n rn.answer: a_val,\r\n rn.context_real_len: c_real_len_val,\r\n rn.question_real_len: q_real_len_val,\r\n rn.is_training: False}\r\n acc = rn.accuracy.eval(feed_dict=feed_dict)\r\n accs.append(acc)\r\n val_summary = sess.run(val_summary_ops, feed_dict=feed_dict)\r\n summary_writer.add_summary(val_summary, current_step)\r\n print(\"Mean accuracy=\" + str(sum(accs) / len(accs)))\r\n saver.save(sess, save_path=save_variable_path, global_step=current_step)\r\n print(\"====training====\")\r\n end_time = time()\r\n print(\"Training finished in {}sec\".format(end_time-start_time))\r\n\r\n config['batch_size'] = 1\r\n tf.get_variable_scope().reuse_variables()\r\n rn = Model(config)\r\n\r\n if args.model_path is not None:\r\n saver.restore(sess, tf.train.latest_checkpoint(args.model_path))\r\n\r\n mean_accs = []\r\n for test_dataset in test_datasets:\r\n test_dataset_name = test_dataset[6]\r\n batch_test = batch_iter(c=test_dataset[2],\r\n q=test_dataset[0],\r\n l=test_dataset[3],\r\n a=test_dataset[1],\r\n c_real_len=test_dataset[4],\r\n q_real_len=test_dataset[5],\r\n num_epochs=1,\r\n batch_size=1, # for testing\r\n )\r\n\r\n accs = []\r\n for test in batch_test:\r\n c_test, q_test, l_test, a_test, \\\r\n c_real_len_test, q_real_len_test = zip(*test)\r\n feed_dict = {rn.context: c_test,\r\n rn.question: q_test,\r\n rn.label: l_test,\r\n rn.answer: a_test,\r\n rn.context_real_len: c_real_len_test,\r\n 
rn.question_real_len: q_real_len_test,\r\n rn.is_training: False}\r\n acc = rn.accuracy.eval(feed_dict=feed_dict)\r\n accs.append(acc)\r\n mean_acc = np.mean(accs)\r\n print(\"Test dataset: {}\".format(test_dataset_name))\r\n print(\"Mean accuracy= {}\".format(mean_acc))\r\n mean_accs += [mean_acc]\r\n print(\"Accuracy across test datasets: {}\".format(np.mean(mean_accs)))\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--train_data_path')\r\n parser.add_argument('--test_data_path')\r\n parser.add_argument('--save_path', default='results')\r\n parser.add_argument('--model_path', default=None)\r\n parser.add_argument('--batch_size', type=int, nargs='?', const=64)\r\n parser.add_argument('--q_hidden_size', type=int, nargs='?', const=32)\r\n parser.add_argument('--s_hidden_size', type=int, nargs='?', const=32)\r\n parser.add_argument('--learning_rate', type=float, nargs='?', const=2e-4)\r\n parser.add_argument('--iter_time', type=int, nargs='?', const=10)\r\n parser.add_argument('--max_train_iters', type=int, nargs='?', const=2500)\r\n parser.add_argument('--display_step', type=int, nargs='?', const=100)\r\n main(parser.parse_args())\r\n", "id": "7002834", "language": "Python", "matching_score": 1.0582749843597412, "max_stars_count": 1, "path": "train.py" }, { "content": "class A:\n def foo():\n result = type(message)(\"\")\n\n\n# Don't merge multiline (e.g. triple-quoted) strings.\ndef foo():\n query = (\n \"\"\"SELECT xxxxxxxxxxxxxxxxxxxx(xxx)\"\"\"\n \"\"\" FROM xxxxxxxxxxxxxxxx WHERE xxxxxxxxxx AND xxx <> xxxxxxxxxxxxxx()\"\"\")\n\n# There was a bug where tuples were being identified as long strings.\nlong_tuple = ('Apple', 'Berry', 'Cherry', 'Dill', 'Evergreen', 'Fig',\n 'Grape', 'Harry', 'Iglu', 'Jaguar')\n\nstupid_format_method_bug = \"Some really long string that just so happens to be the {} {} to force the 'format' method to hang over the line length boundary. This is pretty annoying.\".format(\"perfect\", \"length\")\n\nclass A:\n def foo():\n os.system(\"This is a regression test. 
xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxxx.\".format(\"xxxxxxxxxx\", \"xxxxxx\", \"xxxxxxxxxx\"))\n\n\nclass A:\n def foo():\n XXXXXXXXXXXX.append(\n (\n \"xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx, xxxx_xxxx_xxxxxxxxxx={})\".format(\n xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx\n ),\n my_var,\n my_other_var,\n )\n )\n\nclass A:\n class B:\n def foo():\n bar(\n (\n \"[{}]: xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx={}\"\n \" xxxx_xxxx_xxxxxxxxxx={}, xxxx={})\"\n .format(xxxx._xxxxxxxxxxxxxx, xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx, xxxxxxx)\n ),\n varX,\n varY,\n varZ,\n )\n\ndef foo(xxxx):\n for (xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx) in xxxx:\n for xxx in xxx_xxxx:\n assert (\"x\" in xxx) or (\n xxx in xxx_xxx_xxxxx\n ), \"{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}\".format(\n xxx_xxxx, xxx, xxxxxx.xxxxxxx(xxx_xxx_xxxxx)\n )\n\nclass A:\n def disappearing_comment():\n return (\n ( # xx -x xxxxxxx xx xxx xxxxxxx.\n '{{xxx_xxxxxxxxxx_xxxxxxxx}} xxx xxxx'\n ' {} {{xxxx}} >&2'\n .format(\n \"{xxxx} {xxxxxx}\"\n if xxxxx.xx_xxxxxxxxxx\n else ( # Disappearing Comment\n \"--xxxxxxx --xxxxxx=x --xxxxxx-xxxxx=xxxxxx\"\n \" --xxxxxx-xxxx=xxxxxxxxxxx.xxx\"\n )\n )\n ),\n (x, y, z),\n )\n\nclass A:\n class B:\n def foo():\n xxxxx_xxxx(\n xx, \"\\t\"\n \"@xxxxxx '{xxxx_xxx}\\t' > {xxxxxx_xxxx}.xxxxxxx;\"\n \"{xxxx_xxx} >> {xxxxxx_xxxx}.xxxxxxx 2>&1; xx=$$?;\"\n \"xxxx $$xx\"\n .format(xxxx_xxx=xxxx_xxxxxxx, xxxxxx_xxxx=xxxxxxx + \"/\" + xxxx_xxx_xxxx, x=xxx_xxxxx_xxxxx_xxx),\n x,\n y,\n z,\n )\n\nfunc_call_where_string_arg_has_method_call_and_bad_parens(\n (\n \"A long string with {}. This string is so long that it is ridiculous. It can't fit on one line at alllll.\".format(\"formatting\")\n ),\n)\n\nfunc_call_where_string_arg_has_old_fmt_and_bad_parens(\n (\n \"A long string with {}. This string is so long that it is ridiculous. It can't fit on one line at alllll.\" % \"formatting\"\n ),\n)\n\nfunc_call_where_string_arg_has_old_fmt_and_bad_parens(\n (\n \"A long string with {}. This {} is so long that it is ridiculous. 
It can't fit on one line at alllll.\" % (\"formatting\", \"string\")\n ),\n)\n\nclass A:\n def append(self):\n if True:\n xxxx.xxxxxxx.xxxxx( ('xxxxxxxxxx xxxx xx xxxxxx(%x) xx %x xxxx xx xxx %x.xx'\n % (len(self) + 1,\n xxxx.xxxxxxxxxx,\n xxxx.xxxxxxxxxx))\n + (' %.3f (%s) to %.3f (%s).\\n'\n % (xxxx.xxxxxxxxx,\n xxxx.xxxxxxxxxxxxxx(xxxx.xxxxxxxxx),\n x,\n xxxx.xxxxxxxxxxxxxx( xx)\n )))\n\nclass A:\n def foo():\n some_func_call(\n 'xxxxxxxxxx',\n (\n \"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x \"\n \"\\\"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; \"\n \"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\\\" \"\n ),\n None,\n ('xxxxxxxxxxx',),\n ),\n\nclass A:\n def foo():\n some_func_call(\n (\n \"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x \"\n \"xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; \"\n \"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\\\" \"\n ),\n None,\n ('xxxxxxxxxxx',),\n ),\n\nxxxxxxx = { 'xx' : 'xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} \\\n-xx {1} -xx xxx=xxx_xxxx,xxx_xx,xxx_xxx,xxx_xxxx,xxx_xx,xxx_xxx |\\\n xxxxxx -x xxxxxxxx -x xxxxxxxx -x',\n 'xx' : 'xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} \\\n-xx {1} -xx xxx=xxx_xxxx_xxx_xxxx,xxx_xx_xxx_xxxx,xxx_xxxx_xxx_xxxx,\\\nxxx_xx_xxxx_xxxx,xxx_xxx_xxxx,xxx_xxx_xxxx xxxx=xxx | xxxxxx -x xxxxxxxx -x xxxxxxxx -x'\n}\n\nclass A:\n def foo(self):\n if True:\n xxxxx_xxxxxxxxxxxx('xxx xxxxxx xxx xxxxxxxxx.xx xx xxxxxxxx. xxx xxxxxxxxxxxxx.xx xxxxxxx '\n + 'xx xxxxxx xxxxxx xxxxxx xx xxxxxxx xxx xxx ${0} xx x xxxxxxxx xxxxx'.xxxxxx(xxxxxx_xxxxxx_xxx))\n\nclass A:\n class B:\n def foo():\n row = {\n 'xxxxxxxxxxxxxxx' : xxxxxx_xxxxx_xxxx,\n # 'xxxxxxxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxx'\n 'xxxxxxxxxx' : xxxxx_xxxxx,\n }\n\nclass A:\n def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx):\n xxxxxxxx = [\n xxxxxxxxxxxxxxxx(\n 'xxxx',\n xxxxxxxxxxx={\n 'xxxx' : 1.0,\n },\n xxxxxx={'xxxxxx 1' : xxxxxx(xxxx='xxxxxx 1', xxxxxx=600.0)},\n xxxxxxxx_xxxxxxx=0.0,\n ),\n xxxxxxxxxxxxxxxx(\n 'xxxxxxx',\n xxxxxxxxxxx={\n 'xxxx' : 1.0,\n },\n xxxxxx={'xxxxxx 1' : xxxxxx(xxxx='xxxxxx 1', xxxxxx=200.0)},\n xxxxxxxx_xxxxxxx=0.0,\n ),\n xxxxxxxxxxxxxxxx(\n 'xxxx',\n ),\n ]\n\nsome_dictionary = {\n 'xxxxx006': ['xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx== xxxxx000 xxxxxxxxxx\\n',\n 'xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx== xxxxx010 xxxxxxxxxx\\n'],\n 'xxxxx016': ['xxx-xxx 
xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx== xxxxx000 xxxxxxxxxx\\n',\n 'xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx== xxxxx010 xxxxxxxxxx\\n']\n}\n\ndef foo():\n xxx_xxx = (\n 'xxxx xxx xxxxxxxx_xxxx xx \"xxxxxxxxxx\".'\n '\\n xxx: xxxxxx xxxxxxxx_xxxx=xxxxxxxxxx'\n ) # xxxx xxxxxxxxxx xxxx xx xxxx xx xxx xxxxxxxx xxxxxx xxxxx.\n\nsome_tuple = (\"some string\", \"some string\" \" which should be joined\")\n\nsome_commented_string = (\n \"This string is long but not so long that it needs hahahah toooooo be so greatttt\" # This comment gets thrown to the top.\n \" {} that I just can't think of any more good words to say about it at\"\n \" allllllllllll\".format(\"ha\") # comments here are fine\n)\n\nsome_commented_string = (\n \"This string is long but not so long that it needs hahahah toooooo be so greatttt\" # But these\n \" {} that I just can't think of any more good words to say about it at\" # comments will stay\n \" allllllllllll\".format(\"ha\") # comments here are fine\n)\n\nlpar_and_rpar_have_comments = func_call( # LPAR Comment\n \"Long really ridiculous type of string that shouldn't really even exist at all. I mean commmme onnn!!!\", # Comma Comment\n) # RPAR Comment\n\ncmd_fstring = (\n f\"sudo -E deluge-console info --detailed --sort-reverse=time_added \"\n f\"{'' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n)\n\ncmd_fstring = f\"sudo -E deluge-console info --detailed --sort-reverse=time_added {'' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n\ncmd_fstring = f\"sudo -E deluge-console info --detailed --sort-reverse=time_added {'{{}}' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n\ncmd_fstring = f\"sudo -E deluge-console info --detailed --sort-reverse=time_added {{'' if ID is None else ID}} | perl -nE 'print if /^{field}:/'\"\n\nfstring = f\"This string really doesn't need to be an {{{{fstring}}}}, but this one most certainly, absolutely {does}.\"\n\nfstring = (\n f\"We have to remember to escape {braces}.\"\n \" Like {these}.\"\n f\" But not {this}.\"\n)\n\nclass A:\n class B:\n def foo():\n st_error = STError(\n f\"This string ({string_leaf.value}) appears to be pointless (i.e. 
has\"\n \" no parent).\"\n )\n\ndef foo():\n user_regex = _lazy_re_compile(\n r\"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\\Z\" # dot-atom\n r'|^\"([\\001-\\010\\013\\014\\016-\\037!#-\\[\\]-\\177]|\\\\[\\001-\\011\\013\\014\\016-\\177])*\"\\Z)', # quoted-string\n re.IGNORECASE)\n\ndef foo():\n user_regex = _lazy_re_compile(\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # dot-atom\n 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', # quoted-string\n xyz\n )\n\ndef foo():\n user_regex = _lazy_re_compile(\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # dot-atom\n 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', # quoted-string\n xyz\n )\n\nclass A:\n class B:\n def foo():\n if not hasattr(module, name):\n raise ValueError(\n \"Could not find object %s in %s.\\n\"\n \"Please note that you cannot serialize things like inner \"\n \"classes. Please move the object into the main module \"\n \"body to use migrations.\\n\"\n \"For more information, see \"\n \"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values\"\n % (name, module_name, get_docs_version()))\n\nclass A:\n class B:\n def foo():\n if not hasattr(module, name):\n raise ValueError(\n \"Could not find object %s in %s.\\nPlease note that you cannot serialize things like inner classes. Please move the object into the main module body to use migrations.\\nFor more information, see https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values\"\n % (name, module_name, get_docs_version()))\n\nx = (\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n)\n\nclass Step(StepBase):\n def who(self):\n self.cmd = 'SR AAAA-CORRECT NAME IS {last_name} {first_name}{middle_name} {title}/P{passenger_association}'.format(\n last_name=last_name,\n first_name=first_name,\n middle_name=middle_name,\n title=title,\n passenger_association=passenger_association,\n )\n\nxxxxxxx_xxxxxx_xxxxxxx = xxx(\n [\n xxxxxxxxxxxx(\n xxxxxx_xxxxxxx=(\n '((x.aaaaaaaaa = \"xxxxxx.xxxxxxxxxxxxxxxxxxxxx\") || (x.xxxxxxxxx = \"xxxxxxxxxxxx\")) && '\n # xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx.\n \"(x.bbbbbbbbbbbb.xxx != \"\n '\"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx\") && '\n )\n )\n ]\n)\n\nif __name__ == \"__main__\":\n for i in range(4, 8):\n cmd = (\n r\"for pid in $(ps aux | grep paster | grep -v grep | grep '\\-%d' | awk '{print $2}'); do kill $pid; done\"\n % (i)\n )\n\ndef A():\n def B():\n def C():\n def D():\n def E():\n def F():\n def G():\n assert (\n c_float(val[0][0] / val[0][1]).value\n == c_float(value[0][0] / value[0][1]).value\n ), \"%s didn't roundtrip\" % tag\n\nclass xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx):\n def xxxxxxx_xxxxxx(xxxx):\n assert xxxxxxx_xxxx in [\n x.xxxxx.xxxxxx.xxxxx.xxxxxx,\n x.xxxxx.xxxxxx.xxxxx.xxxx,\n ], (\"xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx\" % xxxxxxx_xxxx)\n\n# output\n\n\nclass A:\n def foo():\n result = type(message)(\"\")\n\n\n# Don't merge multiline (e.g. 
triple-quoted) strings.\ndef foo():\n query = (\n \"\"\"SELECT xxxxxxxxxxxxxxxxxxxx(xxx)\"\"\"\n \"\"\" FROM xxxxxxxxxxxxxxxx WHERE xxxxxxxxxx AND xxx <> xxxxxxxxxxxxxx()\"\"\"\n )\n\n\n# There was a bug where tuples were being identified as long strings.\nlong_tuple = (\n \"Apple\",\n \"Berry\",\n \"Cherry\",\n \"Dill\",\n \"Evergreen\",\n \"Fig\",\n \"Grape\",\n \"Harry\",\n \"Iglu\",\n \"Jaguar\",\n)\n\nstupid_format_method_bug = (\n \"Some really long string that just so happens to be the {} {} to force the 'format'\"\n \" method to hang over the line length boundary. This is pretty annoying.\".format(\n \"perfect\", \"length\"\n )\n)\n\n\nclass A:\n def foo():\n os.system(\n \"This is a regression test. xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx\"\n \" xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx\"\n \" xxxx.\".format(\"xxxxxxxxxx\", \"xxxxxx\", \"xxxxxxxxxx\")\n )\n\n\nclass A:\n def foo():\n XXXXXXXXXXXX.append(\n (\n \"xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx, xxxx_xxxx_xxxxxxxxxx={})\"\n .format(xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx),\n my_var,\n my_other_var,\n )\n )\n\n\nclass A:\n class B:\n def foo():\n bar(\n \"[{}]: xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx={}\"\n \" xxxx_xxxx_xxxxxxxxxx={}, xxxx={})\".format(\n xxxx._xxxxxxxxxxxxxx, xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx, xxxxxxx\n ),\n varX,\n varY,\n varZ,\n )\n\n\ndef foo(xxxx):\n for (xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx) in xxxx:\n for xxx in xxx_xxxx:\n assert (\"x\" in xxx) or (xxx in xxx_xxx_xxxxx), (\n \"{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}\"\n .format(xxx_xxxx, xxx, xxxxxx.xxxxxxx(xxx_xxx_xxxxx))\n )\n\n\nclass A:\n def disappearing_comment():\n return (\n ( # xx -x xxxxxxx xx xxx xxxxxxx.\n \"{{xxx_xxxxxxxxxx_xxxxxxxx}} xxx xxxx {} {{xxxx}} >&2\".format(\n \"{xxxx} {xxxxxx}\"\n if xxxxx.xx_xxxxxxxxxx\n else ( # Disappearing Comment\n \"--xxxxxxx --xxxxxx=x --xxxxxx-xxxxx=xxxxxx\"\n \" --xxxxxx-xxxx=xxxxxxxxxxx.xxx\"\n )\n )\n ),\n (x, y, z),\n )\n\n\nclass A:\n class B:\n def foo():\n xxxxx_xxxx(\n xx,\n \"\\t\"\n \"@xxxxxx '{xxxx_xxx}\\t' > {xxxxxx_xxxx}.xxxxxxx;\"\n \"{xxxx_xxx} >> {xxxxxx_xxxx}.xxxxxxx 2>&1; xx=$$?;\"\n \"xxxx $$xx\".format(\n xxxx_xxx=xxxx_xxxxxxx,\n xxxxxx_xxxx=xxxxxxx + \"/\" + xxxx_xxx_xxxx,\n x=xxx_xxxxx_xxxxx_xxx,\n ),\n x,\n y,\n z,\n )\n\n\nfunc_call_where_string_arg_has_method_call_and_bad_parens(\n \"A long string with {}. This string is so long that it is ridiculous. It can't fit\"\n \" on one line at alllll.\".format(\"formatting\"),\n)\n\nfunc_call_where_string_arg_has_old_fmt_and_bad_parens(\n \"A long string with {}. This string is so long that it is ridiculous. It can't fit\"\n \" on one line at alllll.\" % \"formatting\",\n)\n\nfunc_call_where_string_arg_has_old_fmt_and_bad_parens(\n \"A long string with {}. This {} is so long that it is ridiculous. 
It can't fit on\"\n \" one line at alllll.\" % (\"formatting\", \"string\"),\n)\n\n\nclass A:\n def append(self):\n if True:\n xxxx.xxxxxxx.xxxxx(\n \"xxxxxxxxxx xxxx xx xxxxxx(%x) xx %x xxxx xx xxx %x.xx\"\n % (len(self) + 1, xxxx.xxxxxxxxxx, xxxx.xxxxxxxxxx)\n + \" %.3f (%s) to %.3f (%s).\\n\"\n % (\n xxxx.xxxxxxxxx,\n xxxx.xxxxxxxxxxxxxx(xxxx.xxxxxxxxx),\n x,\n xxxx.xxxxxxxxxxxxxx(xx),\n )\n )\n\n\nclass A:\n def foo():\n some_func_call(\n \"xxxxxxxxxx\",\n \"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x \"\n '\"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; '\n \"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\\\" \",\n None,\n (\"xxxxxxxxxxx\",),\n ),\n\n\nclass A:\n def foo():\n some_func_call(\n \"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x \"\n \"xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; \"\n \"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\\\" \",\n None,\n (\"xxxxxxxxxxx\",),\n ),\n\n\nxxxxxxx = {\n \"xx\": (\n \"xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} -xx {1} -xx\"\n \" xxx=xxx_xxxx,xxx_xx,xxx_xxx,xxx_xxxx,xxx_xx,xxx_xxx | xxxxxx -x xxxxxxxx -x\"\n \" xxxxxxxx -x\"\n ),\n \"xx\": (\n \"xxxx xxxxxxx xxxxxxxxx -x xxx -x /xxx/{0} -x xxx,xxx -xx {1} -xx {1} -xx\"\n \" xxx=xxx_xxxx_xxx_xxxx,xxx_xx_xxx_xxxx,xxx_xxxx_xxx_xxxx,xxx_xx_xxxx_xxxx,xxx_xxx_xxxx,xxx_xxx_xxxx\"\n \" xxxx=xxx | xxxxxx -x xxxxxxxx -x xxxxxxxx -x\"\n ),\n}\n\n\nclass A:\n def foo(self):\n if True:\n xxxxx_xxxxxxxxxxxx(\n \"xxx xxxxxx xxx xxxxxxxxx.xx xx xxxxxxxx. xxx xxxxxxxxxxxxx.xx\"\n \" xxxxxxx \"\n + \"xx xxxxxx xxxxxx xxxxxx xx xxxxxxx xxx xxx ${0} xx x xxxxxxxx xxxxx\"\n .xxxxxx(xxxxxx_xxxxxx_xxx)\n )\n\n\nclass A:\n class B:\n def foo():\n row = {\n \"xxxxxxxxxxxxxxx\": xxxxxx_xxxxx_xxxx,\n # 'xxxxxxxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxxx'\n # 'xxxxxxxxxxxxxxxxx'\n \"xxxxxxxxxx\": xxxxx_xxxxx,\n }\n\n\nclass A:\n def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx):\n xxxxxxxx = [\n xxxxxxxxxxxxxxxx(\n \"xxxx\",\n xxxxxxxxxxx={\n \"xxxx\": 1.0,\n },\n xxxxxx={\"xxxxxx 1\": xxxxxx(xxxx=\"xxxxxx 1\", xxxxxx=600.0)},\n xxxxxxxx_xxxxxxx=0.0,\n ),\n xxxxxxxxxxxxxxxx(\n \"xxxxxxx\",\n xxxxxxxxxxx={\n \"xxxx\": 1.0,\n },\n xxxxxx={\"xxxxxx 1\": xxxxxx(xxxx=\"xxxxxx 1\", xxxxxx=200.0)},\n xxxxxxxx_xxxxxxx=0.0,\n ),\n xxxxxxxxxxxxxxxx(\n \"xxxx\",\n ),\n ]\n\n\nsome_dictionary = {\n \"xxxxx006\": [\n \"xxx-xxx\"\n \" xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==\"\n \" xxxxx000 xxxxxxxxxx\\n\",\n \"xxx-xxx\"\n \" xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==\"\n \" xxxxx010 xxxxxxxxxx\\n\",\n ],\n \"xxxxx016\": [\n \"xxx-xxx\"\n \" 
xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==\"\n \" xxxxx000 xxxxxxxxxx\\n\",\n \"xxx-xxx\"\n \" xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==\"\n \" xxxxx010 xxxxxxxxxx\\n\",\n ],\n}\n\n\ndef foo():\n xxx_xxx = ( # xxxx xxxxxxxxxx xxxx xx xxxx xx xxx xxxxxxxx xxxxxx xxxxx.\n 'xxxx xxx xxxxxxxx_xxxx xx \"xxxxxxxxxx\".\\n xxx: xxxxxx xxxxxxxx_xxxx=xxxxxxxxxx'\n )\n\n\nsome_tuple = (\"some string\", \"some string which should be joined\")\n\nsome_commented_string = ( # This comment gets thrown to the top.\n \"This string is long but not so long that it needs hahahah toooooo be so greatttt\"\n \" {} that I just can't think of any more good words to say about it at\"\n \" allllllllllll\".format(\"ha\") # comments here are fine\n)\n\nsome_commented_string = (\n \"This string is long but not so long that it needs hahahah toooooo be so greatttt\" # But these\n \" {} that I just can't think of any more good words to say about it at\" # comments will stay\n \" allllllllllll\".format(\"ha\") # comments here are fine\n)\n\nlpar_and_rpar_have_comments = func_call( # LPAR Comment\n \"Long really ridiculous type of string that shouldn't really even exist at all. I\"\n \" mean commmme onnn!!!\", # Comma Comment\n) # RPAR Comment\n\ncmd_fstring = (\n \"sudo -E deluge-console info --detailed --sort-reverse=time_added \"\n f\"{'' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n)\n\ncmd_fstring = (\n \"sudo -E deluge-console info --detailed --sort-reverse=time_added\"\n f\" {'' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n)\n\ncmd_fstring = (\n \"sudo -E deluge-console info --detailed --sort-reverse=time_added\"\n f\" {'{{}}' if ID is None else ID} | perl -nE 'print if /^{field}:/'\"\n)\n\ncmd_fstring = (\n \"sudo -E deluge-console info --detailed --sort-reverse=time_added {'' if ID is\"\n f\" None else ID}} | perl -nE 'print if /^{field}:/'\"\n)\n\nfstring = (\n \"This string really doesn't need to be an {{fstring}}, but this one most\"\n f\" certainly, absolutely {does}.\"\n)\n\nfstring = f\"We have to remember to escape {braces}. Like {{these}}. But not {this}.\"\n\n\nclass A:\n class B:\n def foo():\n st_error = STError(\n f\"This string ({string_leaf.value}) appears to be pointless (i.e. 
has\"\n \" no parent).\"\n )\n\n\ndef foo():\n user_regex = _lazy_re_compile(\n r\"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\\Z\" # dot-atom\n r'|^\"([\\001-\\010\\013\\014\\016-\\037!#-\\[\\]-\\177]|\\\\[\\001-\\011\\013\\014\\016-\\177])*\"\\Z)', # quoted-string\n re.IGNORECASE,\n )\n\n\ndef foo():\n user_regex = _lazy_re_compile(\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # dot-atom\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\", # quoted-string\n xyz,\n )\n\n\ndef foo():\n user_regex = _lazy_re_compile(\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # dot-atom\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\", # quoted-string\n xyz,\n )\n\n\nclass A:\n class B:\n def foo():\n if not hasattr(module, name):\n raise ValueError(\n \"Could not find object %s in %s.\\n\"\n \"Please note that you cannot serialize things like inner \"\n \"classes. Please move the object into the main module \"\n \"body to use migrations.\\n\"\n \"For more information, see \"\n \"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values\"\n % (name, module_name, get_docs_version())\n )\n\n\nclass A:\n class B:\n def foo():\n if not hasattr(module, name):\n raise ValueError(\n \"Could not find object %s in %s.\\nPlease note that you cannot\"\n \" serialize things like inner classes. Please move the object into\"\n \" the main module body to use migrations.\\nFor more information,\"\n \" see https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values\"\n % (name, module_name, get_docs_version())\n )\n\n\nx = (\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n)\n\n\nclass Step(StepBase):\n def who(self):\n self.cmd = (\n \"SR AAAA-CORRECT NAME IS {last_name} {first_name}{middle_name}\"\n \" {title}/P{passenger_association}\".format(\n last_name=last_name,\n first_name=first_name,\n middle_name=middle_name,\n title=title,\n passenger_association=passenger_association,\n )\n )\n\n\nxxxxxxx_xxxxxx_xxxxxxx = xxx(\n [\n xxxxxxxxxxxx(\n xxxxxx_xxxxxxx=(\n '((x.aaaaaaaaa = \"xxxxxx.xxxxxxxxxxxxxxxxxxxxx\") || (x.xxxxxxxxx ='\n ' \"xxxxxxxxxxxx\")) && '\n # xxxxx xxxxxxxxxxxx xxxx xxx (xxxxxxxxxxxxxxxx) xx x xxxxxxxxx xx xxxxxx.\n \"(x.bbbbbbbbbbbb.xxx != \"\n '\"xxx:xxx:xxx::cccccccccccc:xxxxxxx-xxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxx\") && '\n )\n )\n ]\n)\n\nif __name__ == \"__main__\":\n for i in range(4, 8):\n cmd = (\n r\"for pid in $(ps aux | grep paster | grep -v grep | grep '\\-%d' | awk\"\n r\" '{print $2}'); do kill $pid; done\" % (i)\n )\n\n\ndef A():\n def B():\n def C():\n def D():\n def E():\n def F():\n def G():\n assert (\n c_float(val[0][0] / val[0][1]).value\n == c_float(value[0][0] / value[0][1]).value\n ), \"%s didn't roundtrip\" % tag\n\n\nclass xxxxxxxxxxxxxxxxxxxxx(xxxx.xxxxxxxxxxxxx):\n def xxxxxxx_xxxxxx(xxxx):\n assert xxxxxxx_xxxx in [\n x.xxxxx.xxxxxx.xxxxx.xxxxxx,\n x.xxxxx.xxxxxx.xxxxx.xxxx,\n ], (\n \"xxxxxxxxxxx xxxxxxx xxxx (xxxxxx xxxx) %x xxx xxxxx\" % xxxxxxx_xxxx\n )\n", "id": "8691812", "language": "Python", "matching_score": 0.29212450981140137, "max_stars_count": 0, "path": 
"tests/data/long_strings__regression.py" } ]
1.058275
DSI-Universite-Rennes2
[ { "content": "# -*-coding:utf-8 -*\nclass BSSConnexionException(Exception):\n \"\"\"\n Exception levée lors d'une erreur de connexion\n\n :ivar message: message à afficher\n \"\"\"\n def __init__(self,code, message):\n self.msg = str(code)+\" : \"+str(message)", "id": "11388071", "language": "Python", "matching_score": 2.4353997707366943, "max_stars_count": 3, "path": "lib_Partage_BSS/exceptions/BSSConnexionException.py" }, { "content": "# -*-coding:utf-8 -*\nclass DomainException(Exception):\n \"\"\"\n Exception levée lors d'une erreur sur le nom de domaine\n\n :ivar message: message à afficher\n \"\"\"\n def __init__(self, message):\n self.msg = message\n", "id": "6125452", "language": "Python", "matching_score": 0.04247629642486572, "max_stars_count": 3, "path": "lib_Partage_BSS/exceptions/DomainException.py" }, { "content": "import pytest\nfrom lib_Partage_BSS.models.Group import Group\nfrom lib_Partage_BSS.exceptions.ServiceException import ServiceException\nfrom lib_Partage_BSS.exceptions.NotFoundException import NotFoundException\nfrom lib_Partage_BSS.services import AccountService, GroupService, BSSConnexion\nimport time as timer\n\n\ndef create_account(name):\n account = AccountService.getAccount(name)\n if account == None:\n AccountService.createAccount(name,\"{ssha}BIDON\")\n\ndef delete_group(name):\n group = GroupService.getGroup(name)\n if group != None:\n GroupService.deleteGroup(name)\n\ndef create_connexion(config):\n timer.sleep(1)\n con = BSSConnexion()\n if 'bss_url' in config:\n con.url = config['bss_url']\n con.setDomainKey({config['bss_domain']: config['bss_domain_key']})\n return BSSConnexion()\n\ndef test_cleanup_bss_environment(test_config):\n print(\"Cleanup BSS environment before running tests...\")\n create_account(test_config['accountname'])\n create_account(test_config['autre_accountname'])\n delete_group(test_config['groupname'])\n\ndef test_createGroup_cas_normal(test_config):\n newGroup = GroupService.createGroup(test_config['groupname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert group.name == test_config['groupname']\n\n\ndef test_createGroup_cas_groupeExistant(test_config):\n with pytest.raises(ServiceException):\n newGroup = GroupService.createGroup(test_config['groupname'])\n\ndef test_getGroup_cas_normal(test_config):\n group = GroupService.getGroup(test_config['groupname'])\n assert group.name == test_config['groupname']\n\ndef test_getGroup_cas_groupe_inexistant(test_config):\n group = GroupService.getGroup(\"inexistant\" + '@' + test_config['bss_domain'])\n assert group == None\n\ndef test_addGroupAliases_cas_Normal(test_config):\n GroupService.addGroupAliases(test_config['groupname'], test_config['groupalias'])\n group = GroupService.getGroup(test_config['groupname'])\n assert test_config['groupalias'] in group.aliases\n\ndef test_addGroupAliases_cas_groupe_existant(test_config):\n with pytest.raises(ServiceException):\n GroupService.addGroupAliases(test_config['groupname'], test_config['groupalias'])\n\ndef test_updateGroupAliases_cas_Normal(test_config):\n GroupService.updateGroupAliases(test_config['groupname'], test_config['autre_groupalias'])\n group = GroupService.getGroup(test_config['groupname'])\n assert (test_config['autre_groupalias'] in group.aliases) and (test_config['groupalias'] not in group.aliases)\n\ndef test_removeGroupAliases_cas_Normal(test_config):\n GroupService.removeGroupAliases(test_config['groupname'], test_config['autre_groupalias'])\n group = GroupService.getGroup(test_config['groupname'], 
full_info = True)\n assert test_config['autre_groupalias'] not in group.aliases\n\ndef test_updateGroupAliases_cas_domaine_incorrect(test_config):\n with pytest.raises(ServiceException):\n GroupService.updateGroupAliases(test_config['groupname'], \"<EMAIL>\")\n\ndef test_removeGroupAliases_cas_alias_inconnu(test_config):\n with pytest.raises(ServiceException):\n GroupService.removeGroupAliases(test_config['groupname'], test_config['autre_groupalias'])\n\ndef test_addGroupMember_cas_Normal(test_config):\n GroupService.addGroupMembers(test_config['groupname'], test_config['accountname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert (test_config['accountname'] in group.members)\n\ndef test_updateGroupMembers_cas_Normal(test_config):\n GroupService.updateGroupMembers(test_config['groupname'], test_config['autre_accountname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert (test_config['autre_accountname'] in group.members) and (test_config['accountname'] not in group.members)\n\ndef test_removeGroupMembers_cas_Normal(test_config):\n GroupService.removeGroupMembers(test_config['groupname'], test_config['autre_accountname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert test_config['autre_accountname'] not in group.members\n\ndef test_removeGroupMembers_cas_alias_inconnu(test_config):\n with pytest.raises(ServiceException):\n GroupService.removeGroupMembers(test_config['groupname'], test_config['autre_accountname'])\n\ndef test_addGroupSenders_cas_Normal(test_config):\n GroupService.addGroupSenders(test_config['groupname'], test_config['accountname'])\n group = GroupService.getGroup(test_config['groupname'], full_info = True)\n assert (test_config['accountname'] in group.senders)\n\ndef test_addGroupSenders_cas_compte_inconnu(test_config):\n with pytest.raises(NotFoundException):\n GroupService.addGroupSenders(test_config['groupname'], \"inexistant\" + '@' + test_config['bss_domain'])\n\ndef test_updateGroupSenders_cas_Normal(test_config):\n GroupService.updateGroupSenders(test_config['groupname'], test_config['autre_accountname'])\n group = GroupService.getGroup(test_config['groupname'], full_info = True)\n assert (test_config['autre_accountname'] in group.senders) and (test_config['accountname'] not in group.senders)\n\ndef test_removeGroupSenders_cas_Normal(test_config):\n GroupService.removeGroupSenders(test_config['groupname'], test_config['autre_accountname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert test_config['autre_accountname'] not in group.senders\n\ndef test_removeGroupSenders_cas_alias_inconnu(test_config):\n with pytest.raises(ServiceException):\n GroupService.removeGroupSenders(test_config['groupname'], test_config['autre_accountname'])\n\ndef test_deleteGroup_cas_normal(test_config):\n newGroup = GroupService.deleteGroup(test_config['groupname'])\n group = GroupService.getGroup(test_config['groupname'])\n assert group == None\n\ndef test_deleteGroup_cas_groupe_inexistant(test_config):\n with pytest.raises(ServiceException):\n newGroup = GroupService.deleteGroup(test_config['groupname'])\n\n", "id": "4477024", "language": "Python", "matching_score": 3.588968515396118, "max_stars_count": 3, "path": "test_integration/lib_Partage_BSS/services/test_ServiceGroup.py" }, { "content": "import pytest\nfrom lib_Partage_BSS.exceptions.ServiceException import ServiceException\nfrom lib_Partage_BSS.services import AccountService\n\ndef create_account(name):\n account = AccountService.getAccount(name)\n if 
account == None:\n AccountService.createAccount(name,\"{ssha}BIDON\")\n\ndef delete_account(name):\n account = AccountService.getAccount(name)\n if account != None:\n AccountService.deleteAccount(name)\n\ndef test_cleanup_bss_environment(test_config):\n print(\"Cleanup BSS environment before running tests...\")\n delete_account(test_config['accountname'])\n delete_account(test_config['autre_accountname'])\n\ndef test_createAccount_cas_normal(test_config):\n AccountService.createAccount(test_config['accountname'], \"{ssha}BIDON\")\n account = AccountService.getAccount(test_config['accountname'])\n assert account.name == test_config['accountname']\n\ndef test_createAccount_cas_compteExistant(test_config):\n with pytest.raises(ServiceException):\n AccountService.createAccount(test_config['accountname'], \"{ssha}BIDON\")\n\ndef test_getAccount_cas_normal(test_config):\n account = AccountService.getAccount(test_config['accountname'])\n assert account.name == test_config['accountname']\n\ndef test_getAccount_cas_compte_inexistant(test_config):\n account = AccountService.getAccount(\"inexistant\" + '@' + test_config['bss_domain'])\n assert account == None\n\ndef test_modifyAccount_cas_Normal(test_config):\n account_as_dict = { 'displayName': \"Test2\",\n 'telephoneNumber': \"0223232323\",\n 'carLicense': \"test@DOMAIN\",\n 'givenName': \"prénom\",\n 'sn': \"nom accentué\",\n }\n account = AccountService.getAccount(test_config['accountname'])\n for attribute in account_as_dict:\n setattr(account, \"_\" + attribute, account_as_dict[attribute])\n\n AccountService.modifyAccount(account)\n account = AccountService.getAccount(test_config['accountname'])\n errors = 0\n for attribute in account_as_dict:\n if getattr(account, \"_\" + attribute) != account_as_dict[attribute]:\n errors = errors + 1\n assert errors == 0\n\ndef test_modifyAccount_cas_addZimlet(test_config):\n account = AccountService.getAccount(test_config['accountname'])\n account.addZimbraZimletAvailableZimlets(\"com_zimbra_emaildownloader\")\n AccountService.modifyAccount(account)\n account = AccountService.getAccount(test_config['accountname'])\n assert \"com_zimbra_emaildownloader\" in account.zimbraZimletAvailableZimlets\n\ndef test_modifyAccount_cas_resetZimlet(test_config):\n account = AccountService.getAccount(test_config['accountname'])\n account.resetZimbraZimletAvailableZimlets()\n AccountService.modifyAccount(account)\n account = AccountService.getAccount(test_config['accountname'])\n assert \"com_zimbra_emaildownloader\" not in account.zimbraZimletAvailableZimlets\n\ndef test_modifyAliases_cas_departVideAjout1Alias(test_config):\n AccountService.modifyAccountAliases(test_config['accountname'], [test_config['accountalias']])\n account = AccountService.getAccount(test_config['accountname'])\n assert account.zimbraMailAlias == test_config['accountalias']\n\ndef test_modifyAliases_cas_depart1AliasPassageA2Alias(test_config):\n AccountService.modifyAccountAliases(test_config['accountname'], [test_config['accountalias'], test_config['autre_accountalias']])\n account = AccountService.getAccount(test_config['accountname'])\n assert (test_config['accountalias'] in account.zimbraMailAlias) and (test_config['autre_accountalias'] in account.zimbraMailAlias)\n\ndef test_modifyAliases_cas_depart2AliasPassageA1Alias(test_config):\n AccountService.modifyAccountAliases(test_config['accountname'], [test_config['autre_accountalias']])\n account = AccountService.getAccount(test_config['accountname'])\n assert account.zimbraMailAlias == 
test_config['autre_accountalias']\n\ndef test_deleteAccount_cas_Normal(test_config):\n AccountService.deleteAccount(test_config['accountname'])\n account = AccountService.getAccount(test_config['accountname'])\n assert account == None\n\ndef test_deleteAccount_cas_compteInexistant(test_config):\n with pytest.raises(ServiceException):\n AccountService.deleteAccount(test_config['accountname'])\n\n", "id": "8343046", "language": "Python", "matching_score": 1.810657262802124, "max_stars_count": 3, "path": "test_integration/lib_Partage_BSS/services/test_ServiceAccount.py" }, { "content": "import pytest\n\nfrom lib_Partage_BSS.exceptions import DomainException\nfrom lib_Partage_BSS.exceptions.BSSConnexionException import BSSConnexionException\nimport re\nimport time as timer\nfrom lib_Partage_BSS.services import BSSConnexion\n\ndef create_connexion(config):\n timer.sleep(1)\n con = BSSConnexion()\n if 'bss_url' in config:\n con.url = config['bss_url']\n con.setDomainKey({config['bss_domain']: config['bss_domain_key']})\n return BSSConnexion()\n\ndef close_connexion(con):\n con.instance = None\n\ndef test_getToken_casNormal(test_config):\n con = create_connexion(test_config)\n assert re.match(\"[0-9a-z]{32}\", con.token(test_config['bss_domain']))\n close_connexion(con)\n\ndef test_getToken_casDomainFaux(test_config):\n con = create_connexion(test_config)\n with pytest.raises(DomainException):\n con.token(\"not_a_domain\")\n close_connexion(con)\n\ndef test_getToken_casDomainNonPresent(test_config):\n con = create_connexion(test_config)\n with pytest.raises(DomainException):\n token = con.token(\"unknown_domain\")\n\ndef test_getToken_8sApresCreation(test_config):\n con = create_connexion(test_config)\n token = con.token(test_config['bss_domain'])\n timer.sleep(8)\n token2 = con.token(test_config['bss_domain'])\n assert token == token2\n close_connexion(con)\n\n@pytest.mark.skip(reason=\"ça prend trop de temps...\")\ndef test_getToken_5minApresCreation(test_config):\n con = create_connexion(test_config)\n token = con.token(test_config['bss_domain'])\n timer.sleep(5 * 60)\n token2 = con.token(test_config['bss_domain'])\n assert token != token2\n close_connexion(con)\n\n\n", "id": "7054801", "language": "Python", "matching_score": 1.929011583328247, "max_stars_count": 3, "path": "test_integration/lib_Partage_BSS/services/test_BSSConnexion.py" }, { "content": "import pytest\nfrom lib_Partage_BSS.services import COSService\nfrom lib_Partage_BSS.services import DomainService\n\n\n\ndef test_get_all_cos(test_config):\n all_cos = COSService.getAllCOS(test_config['bss_domain'])\n assert len(all_cos) > 0\n\ndef test_get_cos(test_config):\n all_cos = COSService.getAllCOS(test_config['bss_domain'])\n for one_cos in all_cos:\n cos = COSService.getCOS(test_config['bss_domain'], one_cos.name)\n assert cos.name == one_cos.name\n\ndef test_count_objects(test_config):\n count = DomainService.countObjects(test_config['bss_domain'], \"userAccount\")\n assert int(count) > 0\n\ndef test_get_domain(test_config):\n domain = DomainService.getDomain(test_config['bss_domain'])\n assert domain['name'] == test_config['bss_domain']\n\n", "id": "5257775", "language": "Python", "matching_score": 1.0458983182907104, "max_stars_count": 3, "path": "test_integration/lib_Partage_BSS/services/test_ServiceGLobal.py" }, { "content": "from collections import OrderedDict\n\nimport pytest\n\nfrom lib_Partage_BSS.utils import checkIsNum, checkIsMailAddress, checkIsDomain, checkIsPreDeleteAccount, \\\n changeBooleanToString, 
changeStringToBoolean, changeToInt, changeTimestampToDate, \\\n changeDateToTimestamp\n\n\ndef test_checkIsNum_casTrueSansSeparateur():\n assert checkIsNum(\"0123456789\")\n\n\ndef test_checkIsNum_casTrueAvecTiret():\n assert checkIsNum(\"01-23-45-67-89\")\n\n\ndef test_checkIsNum_casTrueAvecPoint():\n assert checkIsNum(\"01.23.45.67.89\")\n\n\ndef test_checkIsNum_casTrueAvecEspace():\n assert checkIsNum(\"01 23 45 67 89\")\n\ndef test_checkIsNum_casTrueAvecUnderscore():\n assert checkIsNum(\"01_23_45_67_89\")\n\n\ndef test_checkIsNum_casTrueAvecSlash():\n assert checkIsNum(\"01/23/45/67/89\")\n\n\ndef test_checkIsNum_casTrueVide():\n assert checkIsNum(\"\")\n\n\ndef test_checkIsNum_casFalseAvecLettre():\n assert not checkIsNum(\"01/23/45/67/89a\")\n\n\ndef test_checkIsNum_casFalseAvecCaractereSpecial():\n assert not checkIsNum(\"01/23/45/67/89{\")\n\n\ndef test_checkIsMailAddress_casTrueAvecDebutEn1Partie():\n assert checkIsMailAddress(\"<EMAIL>\")\n\n\ndef test_checkIsMailAddress_casTrueAvecDebutEn2Parties():\n assert checkIsMailAddress(\"<EMAIL>\")\n\n\ndef test_checkIsMailAddress_casTrueAvecApostrophe():\n assert checkIsMailAddress(\"super't<EMAIL>\")\n\n\ndef test_checkIsMailAddress_casTrueAvecPlus():\n assert checkIsMailAddress(\"<EMAIL>+<EMAIL>@<EMAIL>\")\n\n\ndef test_checkIsMailAddress_casTrueVide():\n assert checkIsMailAddress(\"\")\n\n\ndef test_checkIsMailAddress_casFalseSansDomain():\n assert not checkIsMailAddress(\"super.test\")\n\n\ndef test_checkIsMailAddress_casFalseSansExtensionDeDomain():\n assert not checkIsMailAddress(\"super.test@domain\")\n\n\ndef test_checkIsMailAddress_casFalseSansAdresseMaisAvecDomaine():\n assert not checkIsMailAddress(\"@domain.fr\")\n\n\ndef test_checkIsDomain_casTrueDomainAvecExtension2caracteres():\n assert checkIsDomain(\"domain.fr\")\n\n\ndef test_checkIsDomain_casTrueDomainAvecExtension4caracteres():\n assert checkIsDomain(\"domain.fran\")\n\n\ndef test_checkIsDomain_casTrueDomainAvecSousDomaine():\n assert checkIsDomain(\"test.domain.fr\")\n\n\ndef test_checkIsDomain_casFalseDomainAvecExtension5caracteres():\n assert not checkIsDomain(\"domain.franc\")\n\n\ndef test_checkIsDomain_casFalseDomainAvecExtension1caractere():\n assert not checkIsDomain(\"domain.f\")\n\n\ndef test_checkIsPreDeleteAccount_casTrue():\n assert checkIsPreDeleteAccount(\"readytodelete_2018-03-09-12-00-00_test<EMAIL>.fr\")\n\n\ndef test_checkIsPreDeleteAccount_casFalsePasreadytodeleteAuDebut():\n assert not checkIsPreDeleteAccount(\"readytodelet_2018-03-09-12-00-00_test@<EMAIL>.fr\")\n\n\ndef test_checkIsPreDeleteAccount_casFalseMauvaisFormatDate():\n assert not checkIsPreDeleteAccount(\"readytodelete_2018/03/09/12:00:00_test@domain.fr\")\n\n\ndef test_checkIsPreDeleteAccount_casFalseDateIncomplete():\n assert not checkIsPreDeleteAccount(\"readytodelete_2018-03-09-12-00_<EMAIL>\")\n\n\ndef test_checkIsPreDeleteAccount_casFalsePasAdresseMailALaFin():\n assert not checkIsPreDeleteAccount(\"readytodelete_2018-03-09-12-00_test\")\n\ndef test_changeBooleanToString_casTrueParamTrue():\n assert changeBooleanToString(True) == \"TRUE\"\n\n\ndef test_changeBooleanToString_casTrueParamFalse():\n assert changeBooleanToString(False) == \"FALSE\"\n\n\ndef test_changeStringToBoolean_casTrueParamTRUE():\n assert changeStringToBoolean(\"TRUE\")\n\n\ndef test_changeStringToBoolean_casTrueParamFALSE():\n assert not changeStringToBoolean(\"FALSE\")\n\n\ndef test_changeStringToBoolean_casNoneParamAUTRE():\n assert changeStringToBoolean(\"AUTRE\") is None\n\n\ndef 
test_changeToInt_casTrueInteger():\n test_int = OrderedDict()\n test_int[\"type\"] = \"integer\"\n test_int[\"content\"] = \"1\"\n assert isinstance(changeToInt(test_int), int)\n\n\ndef test_changeToInt_casException():\n with pytest.raises(TypeError):\n test_int = OrderedDict()\n test_int[\"type\"] = \"string\"\n test_int[\"content\"] = \"1\"\n changeToInt(test_int)\n\n\ndef test_changeTimestampToDate_casOk():\n assert changeTimestampToDate(1520512483) == \"2018-03-08-13-34-43\"\n\n\ndef test_changeDateToTimestamp_casOk():\n assert changeDateToTimestamp(\"2018-03-08-13-34-43\") == 1520512483", "id": "1442233", "language": "Python", "matching_score": 3.9380059242248535, "max_stars_count": 3, "path": "test_unitaire/lib_Partage_BSS/utils/test_CheckMethods.py" }, { "content": "# -*-coding:utf-8 -*\n\"\"\"\nModule contenant les méthodes de vérification de paramètres et de conversion de paramètres\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom time import mktime\n\n\ndef checkIsNum(value):\n \"\"\"\n Vérifie si la valeur passée en paramètre est un nombre\n\n :param value: la valeur a tester\n :return: True si c'est un nombre False sinon\n :raises TypeError: Exception levée si le paramètre n'est pas un str\n \"\"\"\n if value is None:\n return False\n if isinstance(value, str):\n if value == \"\" or re.match(\"^[0-9 .\\-_/]*$\", value):\n return True\n else:\n return False\n else:\n raise TypeError\n\n\ndef checkIsMailAddress(value):\n \"\"\"\n Vérifie si la valeur passée en paramètre est une adresse mail\n\n :param value: la valeur à tester\n :return: True si c'est une adresse mail ou vide False sinon\n :raises TypeError: Exception levée si le paramètre n'est pas un str\n \"\"\"\n if value is None:\n return True\n if isinstance(value, str):\n # Expression régulière publiée sur https://www.regular-expressions.info/email.html\n if value == \"\" or re.match(\"\\A[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$\", value):\n return True\n else:\n return False\n else:\n raise TypeError\n\n\ndef checkIsDomain(value):\n \"\"\"\n Vérifie si la valeur passée en paramètre est un nom de domaine\n\n :param value: la valeur a tester\n :return: True si c'est un domain False sinon\n :raises TypeError: Exception levée si le paramètre n'est pas un str\n \"\"\"\n if value is None:\n return False\n if isinstance(value, str):\n if value == \"\" or re.match(\"^[a-zA-Z0-9_\\-]+(\\.[a-zA-Z0-9_\\-]+)*\\.[a-zA-Z]{2,4}$\", value):\n return True\n else:\n return False\n else:\n raise TypeError\n\n\ndef checkIsPreDeleteAccount(value):\n \"\"\"\n Vérifie si la valeur passée en paramètre est un nom de compte en pré instance de suppression (deleted_timestamp_nom)\n\n :param value: l'identifiant du compte\n :return: True si c'est un compte en instance de suppression False sinon\n :raises TypeError: Exception levée si le paramètre n'est pas un str\n \"\"\"\n if isinstance(value, str):\n if re.match(\"^readytodelete_\\d{4}[\\-]\\d{2}[\\-]\\d{2}[\\-]\\d{2}[\\-]\\d{2}[\\-]\\d{2}_.*\", value):\n if checkIsMailAddress(value.split(\"_\")[2]):\n return True\n else:\n return False\n else:\n return False\n else:\n raise TypeError\n\n\ndef changeBooleanToString(boolean):\n \"\"\"\n Permet de changer les booleen True et False en String.\n\n :param booleanString: le booléen à changer en String\n :return: \"TRUE\" ou \"FALSE\"\n :raises TypeError: Exception levée si le paramètre n'est pas un bool\n \"\"\"\n if 
boolean is not None:\n if isinstance(boolean, bool):\n if boolean:\n return \"TRUE\"\n else:\n return \"FALSE\"\n else:\n raise TypeError()\n else:\n return None\n\n\ndef changeStringToBoolean(booleanString):\n \"\"\"\n Permet de changer les chaînes TRUE et FALSE (quelque soit leurs casse) en booléen.\n Renvoie un TypeErreur sinon\n\n :param booleanString: \"TRUE\" ou \"FALSE\"\n :return: renvoie le booleen correspondant\n :raises TypeError: Exception levée si le paramètre n'est pas un String\n \"\"\"\n if booleanString is not None:\n if isinstance(booleanString, str):\n if booleanString.upper() == \"TRUE\":\n return True\n elif booleanString.upper() == \"FALSE\":\n return False\n else:\n return None\n else:\n raise TypeError()\n else:\n return None\n\n\ndef changeToInt(value):\n \"\"\"\n Permet de changer les réponses qui contiennent le type integer en int\n\n :param value: la valeur de la réponse à changer en int\n :return: renvoie le int correspondant\n :raises TypeError: Exception levée si le paramètre n'est pas un OrderedDict et si il ne possède pas un champs type avec la valeur integer\n \"\"\"\n if value is not None:\n if isinstance(value, OrderedDict):\n if value[\"type\"] == \"integer\":\n return int(value[\"content\"])\n else:\n raise TypeError\n elif isinstance (value, int):\n return value\n else:\n raise TypeError\n else:\n return None\n\n\ndef changeTimestampToDate(timestamp):\n \"\"\"\n Méthode permettant de changer un timestamp en date de forme AAAA-MM-JJ-HH-MM-SS\n\n :param timestamp: le timestamp à convertir\n :return: la date obtenue\n :raises TypeError: Exception levée si le paramètre n'est pas un integer\n \"\"\"\n if isinstance(timestamp, int):\n return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d-%H-%M-%S')\n else:\n raise TypeError\n\n\ndef changeDateToTimestamp(strDate):\n \"\"\"\n éthode permetant de changer un string date de forme AAAA-MM-JJ-HH-MM-SS en timestamp\n :param date: la date à convertir\n :return: le timestamp obtenue\n :raises TypeError: Exception levée si le paramètre n'est pas un String\n \"\"\"\n if isinstance(strDate, str):\n return mktime(datetime.strptime(strDate, '%Y-%m-%d-%H-%M-%S').timetuple())\n else:\n raise TypeError\n\ndef checkBoolean( v ):\n \"\"\"\n Vérifie si une valeur est un booléen ou peut être convertie en booléen. 
Il\n est possible de convertir:\n\n * les chaînes contenant ``t``, ``true``, ``1``, ``y``, ``yes``, ``on``,\n ``o``, ``oui``, ``v``, ``vrai``, ``f``, ``false``, ``0``, ``n``, ``no``,\n ``non``, ``faux`` ou ``off``;\n * les valeurs numériques;\n * les instances d'une classe possédant une méthode ``__bool__``\n \"\"\"\n if v is None:\n return False\n if isinstance( v , str ):\n return v.lower( ) in [\n 't' , 'true' , '1' , 'y' , 'yes' , 'on' ,\n 'o', 'oui', 'v', 'vrai' ,\n 'f' , 'false' , 'faux' , '0' , 'n' , 'no' , 'non' , 'off' ]\n if type( v ) in ( bool , int , float ):\n return True\n return callable( getattr( v , '__bool__' , None ) )\n\ndef convertToBoolean( v ):\n \"\"\"\n Convertit une valeur en booléen.\n\n Si la valeur à convertir est une chaîne, les valeurs ``true``, ``1``, ``t``,\n ``y``, ``yes``, ``o``, ``oui``, ``v``, ``vrai`` et ``on`` seront considérées\n comme vraies.\n\n Si la valeur à convertir n'est pas une chaîne, la conversion par défaut\n sera appliquée.\n\n :param v: la valeur à convertir\n\n :return: le booléen résultant de la conversion\n \"\"\"\n if isinstance( v , str ):\n return v.lower( ) in [\n 't' , 'true' , '1' , 'y' , 'yes' , 'on' ,\n 'o', 'oui', 'v', 'vrai' ]\n return bool( v )\n\n", "id": "1099737", "language": "Python", "matching_score": 3.1470913887023926, "max_stars_count": 3, "path": "lib_Partage_BSS/utils/CheckMethods.py" }, { "content": "# -*-coding:utf-8 -*\n\"\"\"\nModule contenant les méthodes permettant d'appeler les services de l'API BSS concernant les comptes\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom time import time\n\nfrom lib_Partage_BSS import models, utils, services\nfrom lib_Partage_BSS.exceptions import NameException, DomainException, ServiceException, TmpServiceException, NotFoundException\nfrom .GlobalService import callMethod, checkResponseStatus\n\n\ndef fillAccount(accountResponse):\n \"\"\"\n Permet de remplir un objet compte depuis une réponse de l'API BSS\n\n :param accountResponse: l'objet account renvoyé par l'API\n :return: l'objet account créé\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n \"\"\"\n if not utils.checkIsMailAddress(accountResponse[\"name\"]):\n raise NameException(\"L'adresse mail \" + accountResponse[\"name\"] + \" n'est pas valide\")\n\n retAccount = models.Account(accountResponse[\"name\"])\n accountKeys = accountResponse.keys()\n for attr in accountKeys:\n if accountResponse[attr] is not None:\n if isinstance(accountResponse[attr], str):\n if accountResponse[attr] == \"TRUE\" or accountResponse[attr] == \"FALSE\":\n retAccount.__setattr__(\"_\" + attr, utils.changeStringToBoolean(accountResponse[attr]))\n else:\n retAccount.__setattr__(\"_\" + attr, accountResponse[attr])\n elif isinstance(accountResponse[attr], OrderedDict):\n if \"type\" in accountResponse[attr].keys():\n if accountResponse[attr][\"type\"] == \"integer\":\n retAccount.__setattr__(\"_\" + attr, int(accountResponse[attr][\"content\"]))\n elif accountResponse[attr][\"type\"] == \"array\":\n if attr == \"zimbraZimletAvailableZimlets\":\n retAccount.__setattr__(\"_\" + attr, accountResponse[attr][\"zimbraZimletAvailableZimlet\"])\n elif attr == \"zimbraMailAlias\":\n retAccount.__setattr__(\"_\" + attr, accountResponse[attr][\"zimbraMailAlias\"])\n return retAccount\n\n\ndef getAccount(name):\n \"\"\"\n Méthode permettant de récupérer les informations d'un compte via l'API BSS\n\n :return: Le compte récupéré ou None si le compte n'existe pas\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n data = {\n \"name\": name\n }\n response = callMethod(services.extractDomain(name), \"GetAccount\", data)\n\n try:\n checkResponseStatus(response)\n except NotFoundException:\n return None\n\n account = response[\"account\"]\n return fillAccount(account)\n\n\ndef getAllAccounts(domain, limit=100, offset=0, ldapQuery=\"\", attrs=\"\", sortBy=\"\"):\n \"\"\"\n Permet de rechercher tous les comptes mail d'un domain\n\n :param domain: le domaine de la recherche\n :param limit: le nombre de résultats renvoyés (optionnel)\n :param offset: le nombre à partir duquel les comptes sont renvoyés (optionnel)\n :param ldapQuery: un filtre ldap pour affiner la rechercher (optionnel)\n :param sortBy: tri des résultat (mail, givenName, sn, displayName)\n :param attrs: la liste des attributs demandés (par défaut: used , quota, admin, cos_name) (optionnel)\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises DomainException: Exception levée si le domaine n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsDomain(domain):\n raise DomainException(domain + \" n'est pas un nom de domain valide\")\n data = {\n \"limit\": limit,\n \"offset\": offset,\n \"ldap_query\": ldapQuery,\n \"attrs\": attrs,\n \"sortby\": sortBy\n }\n response = callMethod(domain, \"GetAllAccounts\", data)\n checkResponseStatus(response)\n\n if len(response[\"accounts\"]) == 1:\n return []\n else:\n accounts = response[\"accounts\"][\"account\"]\n retAccounts = []\n if isinstance(accounts, list):\n for account in accounts:\n retAccounts.append(fillAccount(account))\n else:\n retAccounts.append(fillAccount(accounts))\n return retAccounts\n\n\n\ndef createAccount(name,userPassword, cosId = None, account = None):\n \"\"\"\n Méthode permettant de créer un compte via l'API BSS en lui passant en paramètre l'empreinte du mot de passe (SSHA) et le cosId\n\n :param userPassword: l'empreine du mot de passe de l'utilisateur\n :param cosId: l'identifiant du cosId à appliquer pour le compte\n :param account: objet account contenant les informations à ajouter dans le compte (optionnel)\n :return: Le compte créé\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n\n if not re.search(r'^\\{\\S+\\}', userPassword):\n raise NameException(\"Le format de l'empreinte du mot de passe n'est pas correcte ; format attendu : {algo}empreinte\")\n\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n\n # Les attributs issus de l'objet account\n data = {}\n if account is not None:\n data = account.toData()\n\n # Les attributs obligatoires\n data.update({\n \"name\": name,\n \"password\": \"\",\n \"userPassword\": <PASSWORD>,\n \"zimbraHideInGal\": \"FALSE\"\n })\n if cosId is not None:\n data[\"zimbraCOSId\"]= cosId\n\n response = callMethod(services.extractDomain(name), \"CreateAccount\", data)\n\n checkResponseStatus(response)\n\n # if account is not None:\n # modifyAccount(account)\n\n return getAccount(name)\n\n\ndef createAccountExt(account , password):\n \"\"\"\n Méthode permettant de créer un compte via l'API BSS en lui passant en\n paramètre les informations concernant un compte ainsi qu'une empreinte de\n mot de passe.\n\n :param Account account: l'objet contenant les informations du compte \\\n utilisateur\n :param str password: l'empreinte du mot de passe de l'utilisateur\n\n :raises ServiceException: la requête vers l'API a echoué. 
L'exception \\\n contient le code de l'erreur et le message.\n :raises NameException: le nom du compte n'est pas une adresse mail valide, \\\n ou le mot de passe spécifié n'est pas une empreinte.\n :raises DomainException: le domaine de l'adresse mail n'est pas un domaine \\\n valide.\n \"\"\"\n\n if not re.search(r'^\\{\\S+\\}', password):\n raise NameException(\"Le format de l'empreinte du mot de passe \"\n + \"n'est pas correcte ; format attendu : {algo}empreinte\")\n\n data = account.toData( )\n data.update({\n 'password': '',\n 'userPassword': password,\n })\n response = callMethod( services.extractDomain( account.name ) ,\n 'CreateAccount' , data )\n checkResponseStatus(response)\n\n\ndef deleteAccount(name):\n \"\"\"\n Permet de supprimer un compte\n\n :param name: Nom du compte à supprimer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n data = {\n \"name\": name\n }\n response = callMethod(services.extractDomain(name), \"DeleteAccount\", data)\n checkResponseStatus(response)\n\n\ndef preDeleteAccount(name):\n \"\"\"\n Permet de mettre un compte dans un état de préSuppression\n Cette méthode désactive le compte puis le renomme (ajout d'un préfixe 'deleted_timestampactuel_name')\n\n :param name: nom du compte à préSupprimer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n closeAccount(name)\n newname = \"readytodelete_\"+utils.changeTimestampToDate(round(time()))+\"_\"+name\n renameAccount(name, newname)\n return newname\n\n\n\n\ndef restorePreDeleteAccount(name):\n \"\"\"\n Permet d'annuler la préSuppression d'un compte\n\n :param name: le nom du compte preSupprimé à restaurer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsPreDeleteAccount(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas une adresse mail preSupprimé\")\n activateAccount(name)\n renameAccount(name, name.split(\"_\")[2])\n\n\n\ndef modifyAccount(account):\n \"\"\"\n Permet de modifier un compte via l'API\n\n :param account: un objets compte avec les attributs à changer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n response = callMethod(services.extractDomain(account.name), \"ModifyAccount\", account.toData())\n checkResponseStatus(response)\n\n\ndef setPassword(name, newPassword):\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n data={\n \"name\": name,\n \"password\": <PASSWORD>\n }\n response = callMethod(services.extractDomain(name), \"SetPassword\", data)\n checkResponseStatus(response)\n\ndef modifyPassword(name, newUserPassword):\n \"\"\"\n Pour modifier le mot de passe on n'accepte que l'empreinte du mot de passe.\n On commence par faire un SetPassword avec une valeur factice pour forcer la déconnexion des sessions en cours\n On passe ensuite via ModifyAccount l'empreinte du nouveau mot de passe\n\n :param newUserPassword:\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not re.search(r'^\\{\\S+\\}', newUserPassword):\n raise NameException(\"Le format de l'empreinte du mot de passe n'est pas correcte ; format attendu : {algo}empreinte\")\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n setPassword(name,\"<PASSWORD>\")\n data = {\n \"name\": name,\n \"userPassword\": <PASSWORD>\n }\n response = callMethod(services.extractDomain(name), \"ModifyAccount\", data)\n checkResponseStatus(response)\n\n\n\ndef addAccountAlias(name, newAlias):\n \"\"\"\n Méthode permettant d'ajouter un alias d'un compte\n\n :param name: le nom du compte\n :param aliasToDelete: l'alias a ajouter\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name) or not utils.checkIsMailAddress(newAlias):\n raise NameException(\"L'adresse mail \" + name + \" ou \" + newAlias + \" n'est pas valide\")\n data = {\n \"name\": name,\n \"alias\": newAlias\n }\n response = callMethod(services.extractDomain(name), \"AddAccountAlias\", data)\n checkResponseStatus(response)\n\n\n\ndef removeAccountAlias(name, aliasToDelete):\n \"\"\"\n Méthode permettant de supprimer un alias d'un compte\n\n :param name: le nom du compte\n :param aliasToDelete: l'alias a supprimer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name) or not utils.checkIsMailAddress(aliasToDelete):\n raise NameException(\"L'adresse mail \" + name +\" ou \"+aliasToDelete+\" n'est pas valide\")\n data = {\n \"name\": name,\n \"alias\": aliasToDelete\n }\n response = callMethod(services.extractDomain(name), \"RemoveAccountAlias\", data)\n checkResponseStatus(response)\n\n\n\ndef modifyAccountAliases(name, listOfAliases):\n \"\"\"\n Méthode permettant de changer l'ensemble des alias d'un compte par ceux passés en paramètre\n\n :param name: le nom du compte\n :param listOfAliases: la liste des alias pour le compte\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n :raises TypeError: Exception levée si le parametre listOfAliases n'est pas une liste\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n if not isinstance(listOfAliases, list):\n raise TypeError\n account = getAccount(name)\n #On vérifie que les adresses mail passées en paramètres sont des adresses valide\n for alias in listOfAliases:\n if not utils.checkIsMailAddress(alias):\n raise NameException(\"L'adresse mail \" + alias + \" n'est pas valide\")\n #On parcour la liste passé en paramètre\n for alias in listOfAliases:\n if isinstance(account.zimbraMailAlias, list) or isinstance(account.zimbraMailAlias, str):\n #si la l'adresse mail n'est pas présente dans partage on la rajoute\n if alias not in account.zimbraMailAlias:\n addAccountAlias(name, alias)\n #si la liste partage est vide on rajoute l'adresse\n elif account.zimbraMailAlias is None:\n addAccountAlias(name, alias)\n if isinstance(account.zimbraMailAlias, list):\n #On parcours la liste des adresses partages\n for alias in account.zimbraMailAlias:\n #Si l'adresse n'est pas présente dans la liste passé en parametre on supprime l'adresse de partage\n if alias not in listOfAliases:\n removeAccountAlias(name, alias)\n #Si le compte n'a qu'un alias on test si il est présent ou pas dans la liste passé en paramètre\n elif isinstance(account.zimbraMailAlias, str):\n if account.zimbraMailAlias not in listOfAliases:\n removeAccountAlias(name, account.zimbraMailAlias)\n\n\ndef activateAccount(name):\n \"\"\"\n Méthode permettant de passer l'état d'un compte à activer\n\n :param name: le nom du compte à (ré)activer\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n account = models.Account(name)\n account.zimbraAccountStatus = \"active\"\n account.zimbraHideInGal = False\n modifyAccount(account)\n\n\ndef lockAccount(name):\n \"\"\"\n Méthode permettant de passer l'état d'un compte à lock\n Cette état déconnecte toutes les instances du compte et empêche la connexion à celui-ci.\n Le compte sera toujours visible dans la GAL et les mails seront toujours acheminés vers cette boîte\n\n :param name: le nom du compte à verrouiller\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail \" + name + \" n'est pas valide\")\n setPassword(name, \"<PASSWORD>\")\n account = models.Account(name)\n account.zimbraAccountStatus = \"locked\"\n modifyAccount(account)\n\n\ndef closeAccount(name):\n \"\"\"\n Cette méthode déconnecte toutes les instances du compte et empêche la connexion à celui-ci.\n Le compte ne sera plus visible dans la GAL et les mails entrants seront rejetés\n\n :param name: le nom du compte à Désactiver\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name):\n raise NameException(\"L'adresse mail n'est pas valide\")\n setPassword(name, \"<PASSWORD>\")\n account = models.Account(name)\n account.zimbraAccountStatus = \"closed\"\n account.zimbraHideInGal = True\n modifyAccount(account)\n\n\ndef renameAccount(name, newName):\n \"\"\"\n Permet de renommer un compte :param name: nom du compte à renommer :param newName: le nouveau nom du compte\n\n :param name: le nom du compte à renommer\n :param newName: le nouveau nom du compte\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail preSupprimé\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsMailAddress(name) or not utils.checkIsMailAddress(newName):\n raise NameException(\"L'adresse mail n'est pas valide\")\n data = {\n \"name\": name,\n \"newname\": newName\n }\n response = callMethod(services.extractDomain(name), \"RenameAccount\", data)\n checkResponseStatus(response)\n", "id": "10801381", "language": "Python", "matching_score": 5.641854763031006, "max_stars_count": 0, "path": "lib_Partage_BSS/services/AccountService.py" }, { "content": "# -*-coding:utf-8 -*\n\"\"\"\nModule contenant les méthodes permettant d'appeler les services de l'API BSS concernant les classes de service\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom time import time\n\nfrom lib_Partage_BSS import models, utils, services\nfrom lib_Partage_BSS.exceptions import NameException, DomainException, ServiceException, TmpServiceException, NotFoundException\nfrom .GlobalService import callMethod, checkResponseStatus\n\n\ndef fillCOS(cosResponse):\n \"\"\"\n Permet de remplir un objet COS depuis une réponse de l'API BSS\n\n :param cosResponse: l'objet COS renvoyé par l'API\n :return: l'objet COS créé\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n \"\"\"\n\n retCOS = models.COS(cosResponse[\"name\"])\n cosKeys = cosResponse.keys()\n for attr in cosKeys:\n if cosResponse[attr] is not None:\n if isinstance(cosResponse[attr], str):\n if cosResponse[attr] == \"TRUE\" or cosResponse[attr] == \"FALSE\":\n retCOS.__setattr__(attr, utils.changeStringToBoolean(cosResponse[attr]))\n else:\n retCOS.__setattr__(attr, cosResponse[attr])\n elif isinstance(cosResponse[attr], OrderedDict):\n if \"type\" in cosResponse[attr].keys():\n if cosResponse[attr][\"type\"] == \"integer\":\n retCOS.__setattr__(attr, int(cosResponse[attr][\"content\"]))\n elif cosResponse[attr][\"type\"] == \"array\":\n if attr == \"zimbraZimletAvailableZimlets\":\n retCOS.__setattr__(attr, cosResponse[attr][\"zimbraZimletAvailableZimlet\"])\n elif attr == \"zimbraMailAlias\":\n retCOS.__setattr__(attr, cosResponse[attr][\"zimbraMailAlias\"])\n return retCOS\n\n\ndef getCOS(domain, name):\n \"\"\"\n Méthode permettant de récupérer les informations d'une classe de service via l'API BSS\n\n :return: La classe de service récupérée ou None si la classe de service n'existe pas\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n\n data = {\n \"name\": name\n }\n response = callMethod(domain, \"GetCos\", data)\n\n try:\n checkResponseStatus(response)\n except NotFoundException:\n return None\n\n cos = response[\"cos\"]\n return fillCOS(cos)\n\n\ndef getAllCOS(domain):\n \"\"\"\n Permet de rechercher toutes les classes de service d'un domain\n\n :param domain: le domaine de la recherche\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises DomainException: Exception levée si le domaine n'est pas un domaine valide\n \"\"\"\n if not utils.checkIsDomain(domain):\n raise DomainException(domain + \" n'est pas un nom de domain valide\")\n response = callMethod(domain, \"GetAllCos\", { } )\n checkResponseStatus(response)\n if len(response[\"coses\"]) == 1:\n return []\n else:\n coses = response[\"coses\"][\"cose\"]\n retCoses = []\n if isinstance(coses, list):\n for cos in coses:\n retCoses.append(fillCOS(cos))\n else:\n retCoses.append(fillCOS(coses))\n return retCoses\n\n", "id": "5338055", "language": "Python", "matching_score": 6.455554008483887, "max_stars_count": 3, "path": "lib_Partage_BSS/services/COSService.py" }, { "content": "# -*-coding:utf-8 -*\n\"\"\"\nModule contenant les méthodes permettant d'appeler les services de l'API BSS concernant les domaines\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom time import time\n\nfrom lib_Partage_BSS import models, utils, services\nfrom lib_Partage_BSS.exceptions import NameException, DomainException, ServiceException, TmpServiceException, NotFoundException\nfrom .GlobalService import callMethod, checkResponseStatus\n\n\ndef getDomain(domain):\n \"\"\"\n Méthode permettant de récupérer les informations d'un domaine via l'API BSS\n\n :return: Les informations sur le domaine\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n\n data = {\n }\n response = callMethod(domain, \"GetDomain\", data)\n\n try:\n checkResponseStatus(response)\n except NotFoundException:\n return None\n\n domain = response[\"domain\"]\n return domain\n\n\ndef countObjects(domain, type):\n \"\"\"\n Méthode permettant de récupérer le nombre d'objets (userAccount, alias, dl, calresource) d'un domaine via l'API BSS\n\n :return: Le nombre d'objets dans le domaine\n :raises ServiceException: Exception levée si la requête vers l'API à echoué. 
L'exception contient le code de l'erreur et le message\n :raises NameException: Exception levée si le nom n'est pas une adresse mail valide\n :raises DomainException: Exception levée si le domaine de l'adresse mail n'est pas un domaine valide\n \"\"\"\n\n data = {\n \"type\": type\n }\n response = callMethod(domain, \"CountObjects\", data)\n\n try:\n checkResponseStatus(response)\n except NotFoundException:\n return None\n\n count = response[\"count\"][\"content\"]\n return count\n", "id": "12675111", "language": "Python", "matching_score": 1.2584648132324219, "max_stars_count": 3, "path": "lib_Partage_BSS/services/DomainService.py" }, { "content": "# -*-coding:utf-8 -*\nimport json\n\nfrom lib_Partage_BSS import utils\nfrom lib_Partage_BSS.exceptions.NameException import NameException\nfrom lib_Partage_BSS.models.GlobalModel import GlobalModel\n\n\nclass Group( GlobalModel ):\n \"\"\"\n Classe représentant un groupe ou une liste de distribution dans Partage.\n\n :ivar _description: description du groupe ou de la liste\n :ivar _displayName: nom d'affichage du groupe\n :ivar _zimbraDistributionListSendShareMessageToNewMembers: détermine \\\n si la liste des partages devrait être envoyée par mail aux \\\n nouveaux membres\n :ivar _zimbraHideInGal: masquer dans la GAL ?\n :ivar _zimbraMailStatus: ce groupe peut-il recevoir du mail? Si oui, \\\n il s'agit d'une liste de distribution.\n :ivar _zimbraNotes: notes concernant le groupe ou la liste.\n :ivar _members: l'ensemble des adresses des membres du groupe ou \\\n de la liste\n :ivar _senders: l'ensemble des comptes autorisés à envoyer du mail en \\\n utilisant la liste comme adresse d'expédition\n :ivar _aliases: l'ensemble des alias de la liste\n \"\"\"\n\n # Attributs utilisés dans {Create,Modify}Account\n ATTRIBUTES = (\n 'description' , 'displayName' ,\n 'zimbraDistributionListSendShareMessageToNewMembers' ,\n 'zimbraHideInGal' , 'zimbraMailStatus' , 'zimbraNotes'\n )\n\n # Attributs synthétiques sous la forme d'ensembles\n SETS = ( 'members' , 'senders' , 'aliases' )\n\n def __init__( self , name = None ):\n if name is not None and not isinstance( name , str ):\n raise TypeError\n if name is not None and not utils.checkIsMailAddress( name ):\n raise NameException( \"Adresse mail {} invalide\".format( name ) )\n\n GlobalModel.__init__( self , name )\n\n for a in Group.ATTRIBUTES:\n setattr( self , '_{}'.format( a ) , None )\n self._members = set( )\n self._senders = set( )\n self._aliases = set( )\n\n #---------------------------------------------------------------------------\n\n @staticmethod\n def _get_set( output , data , name , sub = None ):\n \"\"\"\n Récupère les données correspondant à un attribut de type ensemble depuis\n la réponse du serveur BSS.\n\n :param output: l'instance à mettre à jour\n :param data: les données reçues du serveur\n :param name: le nom du champ contenant la liste\n :param sub: le nom des éléments de la liste, s'ils diffèrent du nom \\\n de celle-ci\n \"\"\"\n if name not in data: return\n od = data[ name ]\n if sub is None:\n sub = name\n if sub in od:\n if isinstance( od[ sub ] , str ):\n output.add( od[ sub ] )\n else:\n output.update( od[ sub ] )\n\n @staticmethod\n def _from_bool( value , true_value , false_value , xform ):\n \"\"\"\n Vérifie et retourne la valeur à utiliser pour un champ 'booléen' mais\n encodé sous la forme de chaînes.\n\n :param value: la nouvelle valeur du champ\n :param true_value: la chaîne correspondant à une valeur vraie\n :param false_value: la chaîne correspondant à une 
valeur fausse\n :param xform: une fonction qui transforme la chaîne d'entrée si \\\n nécessaire\n\n :raises TypeError: la valeur n'est ni une chaîne ni un booléen, ou \\\n sa valeur ne correspond pas à l'une des chaînes indiquées\n\n :return: la nouvelle valeur du champ\n \"\"\"\n if value is None:\n return None\n v = None\n if isinstance( value , str ) and xform( value ) in ( true_value ,\n false_value ):\n v = xform( value )\n elif isinstance( value , bool ):\n v = true_value if value else false_value\n if v is None:\n raise TypeError\n return v\n\n #---------------------------------------------------------------------------\n\n @staticmethod\n def from_bss( data ):\n \"\"\"\n Crée une instance en se basant sur des données reçues du serveur\n Partage, soit via GetGroup soit via GetAllGroups. Dans le premier cas,\n tous les champs à l'exception de la liste des utilisateurs autorisés à\n expédier avec l'adresse du groupe seront mis à jour.\n\n :param data: les données du compte reçues depuis le serveur Partage\n\n :raises TypeError: un champ n'a pas le format attendu\n\n :return: l'instance de Group créée, avec ses champs renseignés\n \"\"\"\n group = Group( data[ 'name' ] )\n group.from_dict( data )\n Group._get_set( group._members , data , 'members' , 'member' )\n Group._get_set( group._aliases , data , 'zimbraMailAlias' )\n return group\n\n def from_dict( self , data , allow_name = False ):\n \"\"\"\n Met à jour les champs d'une instance à partir d'un dictionnaire. Seuls\n les attributs, et optionellement le nom, peuvent être modifiés par cette\n méthode.\n\n :param data: le dictionnaire à partir duquel on veut mettre à jour les \\\n données\n :param allow_name: permettre la modification du champ 'name' à partir \\\n du dictionnaire; si False, une entrée 'name' dans le \\\n dictionnaire sera ignorée\n\n :raises TypeError: un champ n'a pas le format attendu\n \"\"\"\n attrs = (\n ( 'name' , *Group.ATTRIBUTES ) if allow_name\n else Group.ATTRIBUTES\n )\n for a in attrs:\n if a in data:\n setattr( self , a , data[ a ] )\n\n def senders_from_bss( self , data ):\n \"\"\"\n Remplace la liste des utilisateurs autorisés à expédier avec l'adresse\n de ce groupe à partir de données fournies par le serveur Partage.\n\n :param data: les données reçues du serveur Partage\n\n :return: l'ensemble des adresses autorisées\n \"\"\"\n self._senders.clear( )\n Group._get_set( self._senders , data , 'accounts' , 'account' )\n return self.senders\n\n def to_bss( self ):\n \"\"\"\n Génère un dictionnaire pouvant être utilisé pour créer ou modifier un\n groupe sur le serveur.\n\n :return: le dictionnaire contenant les attributs\n \"\"\"\n rv = { }\n for a in ( 'name' , *Group.ATTRIBUTES ):\n value = getattr( self , a )\n if value is not None:\n rv[ a ] = value\n return rv\n\n #---------------------------------------------------------------------------\n\n @staticmethod\n def from_json( source , is_file = False ):\n \"\"\"\n Génère une instance à partir de données au format JSON.\n\n :param source: la source des données à partir desquelles on doit créer \\\n une instance. Il peut s'agir de source JSON ou bien d'un \\\n fichier, en fonction de la valeur du paramètre is_file. 
Dans \\\n le second cas, on peut passer aussi bien le chemin du fichier \\\n qu'une instance (par exemple de file) permettant le chargement \\\n du JSON.\n :param is_file: un booléen qui indique si le paramètre précédent est \\\n un fichier (True) ou du source JSON (False).\n\n :raises TypeError: si certains des champs ont des types invalides\n :raises NameException: si l'adresse contenue dans le champ name, ou \\\n l'une des adresses de membres, l'un des alias ou l'une des \\\n entrées d'autorisation sont invalides\n\n :return: l'instance créée\n \"\"\"\n if is_file:\n if isinstance( source , str ):\n with open( source ) as json_file:\n data = json.load( json_file )\n else:\n data = json.load( source )\n else:\n data = json.loads( source )\n return Group.from_json_record( data )\n\n @staticmethod\n def from_json_record( record ):\n \"\"\"\n Génère une instance à partir de données JSON décodées dans un\n dictionnaire Python.\n\n :param record: le dictionnaire dans lequel les information ont été \\\n décodées\n\n :raises TypeError: si certains des champs ont des types invalides\n :raises NameException: si l'adresse contenue dans le champ name, ou \\\n l'une des adresses de membres, l'un des alias ou l'une des \\\n entrées d'autorisation sont invalides\n\n :return: l'instance créée\n \"\"\"\n group = Group( record[ 'name' ] if 'name' in record else None )\n for a in Group.ATTRIBUTES:\n if a in record:\n setattr( group , a , record[ a ] )\n for s in Group.SETS:\n if s in record:\n bad_addr = set([ a for a in record[ s ]\n if not utils.checkIsMailAddress( a ) ])\n if not bad_addr:\n getattr( group , '_{}'.format( s ) ).update( record[ s ] )\n continue\n raise NameException( \"Adresse(s) mail {} invalide(s)\".format(\n ', '.join( bad_addr ) ) )\n return group\n\n def to_json_record( self ):\n \"\"\"\n Génère les données (sous la forme d'un dictionnaire Python) pour un\n enregistrement JSON décrivant l'instance.\n\n :return: un dictionnaire contenant les champs appropriés pour \\\n sauvegarde au format JSON\n \"\"\"\n rv = {\n a : getattr( self , a )\n for a in ( 'name' , *Group.ATTRIBUTES )\n if getattr( self , a ) is not None\n }\n rv.update({\n s : list( getattr( self , '_{}'.format( s ) ) )\n for s in Group.SETS\n if getattr( self , '_{}'.format( s ) )\n })\n return rv\n\n #---------------------------------------------------------------------------\n\n @property\n def members( self ):\n \"\"\"\n La liste des membres, triée par ordre alphabétique.\n \"\"\"\n return sorted( self._members )\n\n @property\n def members_set( self ):\n \"\"\"\n L'ensemble des membres, modifiable.\n \"\"\"\n return self._members\n\n @property\n def has_members( self ):\n \"\"\"\n La présence, ou non, de membres dans le groupe\n \"\"\"\n return bool( self._members )\n\n @property\n def senders( self ):\n \"\"\"\n La liste des expéditeurs autorisés, triée par ordre alphabétique.\n \"\"\"\n return sorted( self._senders )\n\n @property\n def senders_set( self ):\n \"\"\"\n L'ensemble des expéditeurs autorisés, modifiable.\n \"\"\"\n return self._senders\n\n @property\n def has_senders( self ):\n \"\"\"\n La présence, ou non, d'expéditeurs autorisés\n \"\"\"\n return bool( self._senders )\n\n @property\n def aliases( self ):\n \"\"\"\n La liste des alias du groupe, triée par ordre alphabétique.\n \"\"\"\n return sorted( self._aliases )\n\n @property\n def aliases_set( self ):\n \"\"\"\n L'ensemble des alias du groupe, modifiable\n \"\"\"\n return self._aliases\n\n @property\n def has_aliases( self ):\n \"\"\"\n La 
présence, ou non, d'alias pour ce groupe\n \"\"\"\n return bool( self._aliases )\n\n #---------------------------------------------------------------------------\n\n @property\n def description( self ):\n return self._description\n\n @description.setter\n def description( self , value ):\n if isinstance( value , str ) or value is None:\n self._description = value\n else:\n raise TypeError\n\n #---------------------------------------------------------------------------\n\n @property\n def displayName( self ):\n return self._displayName\n\n @displayName.setter\n def displayName( self , value ):\n if isinstance( value , str ) or value is None:\n self._displayName = value\n else:\n raise TypeError\n\n #---------------------------------------------------------------------------\n\n @property\n def zimbraDistributionListSendShareMessageToNewMembers( self ):\n return self._zimbraDistributionListSendShareMessageToNewMembers\n\n @zimbraDistributionListSendShareMessageToNewMembers.setter\n def zimbraDistributionListSendShareMessageToNewMembers( self , value ):\n v = Group._from_bool( value , 'TRUE' , 'FALSE' , lambda x : x.upper( ) )\n self._zimbraDistributionListSendShareMessageToNewMembers = v\n\n #---------------------------------------------------------------------------\n\n @property\n def zimbraHideInGal( self ):\n return self._zimbraHideInGal\n\n @zimbraHideInGal.setter\n def zimbraHideInGal( self , value ):\n v = Group._from_bool( value , 'TRUE' , 'FALSE' , lambda x : x.upper( ) )\n self._zimbraHideInGal = v\n\n #---------------------------------------------------------------------------\n\n @property\n def zimbraMailStatus( self ):\n return self._zimbraMailStatus\n\n @zimbraMailStatus.setter\n def zimbraMailStatus( self , value ):\n self._zimbraMailStatus = Group._from_bool( value ,\n 'enabled' , 'disabled' , lambda x : x.lower( ) )\n\n #---------------------------------------------------------------------------\n\n @property\n def zimbraNotes( self ):\n return self._zimbraNotes\n\n @zimbraNotes.setter\n def zimbraNotes( self , value ):\n if isinstance( value , str ) or value is None:\n self._zimbraNotes = value\n else:\n raise TypeError\n", "id": "7120323", "language": "Python", "matching_score": 3.276909351348877, "max_stars_count": 3, "path": "lib_Partage_BSS/models/Group.py" }, { "content": "# -*-coding:utf-8 -*\nimport json\n\n\nclass GlobalModel:\n \"\"\"\n Classe générale regroupant les méthodes communes des différents modèles\n \"\"\"\n def __init__(self, name):\n self._name = name\n\n def showAttr(self):\n \"\"\"\n Méthode permettant d'avoir un string listant tous les attributs non null du modèle\n\n :return: string contenant la liste des attributs du modèle\n \"\"\"\n ret = \"\"\n for key in self.__dict__.keys():\n if self.__dict__[key] is not None:\n ret += (key+\" : \"+str(self.__dict__[key])+\"\\n\")\n return ret\n\n def __repr__( self ):\n \"\"\"\n Transforme les données du compte en une chaîne pouvant être utilisée\n pour le débogage.\n \"\"\"\n return '{}({})'.format( self.__class__.__name__ , ','.join( [\n '{}={}'.format( k , repr( v ) )\n for k,v in self.__dict__.items( )\n if v is not None\n ] ) )\n\n @property\n def name(self):\n \"\"\"\n Getter de name\n\n :return: le nom du modèle\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, newValue):\n \"\"\"\n Setter de name\n\n :param newValue: le nouveau nom du modèle\n \"\"\"\n self._name = newValue\n\n def exportJsonAccount(self):\n json_data = {}\n for key in self.__dict__.keys():\n json_data[key[1:]]= 
self.__dict__[key]\n with open(self._name+\".json\", \"w\") as json_file:\n json_file.write(json.dumps(json_data, indent=4))\n\n\n\n\n\n", "id": "1294292", "language": "Python", "matching_score": 0.5957275629043579, "max_stars_count": 3, "path": "lib_Partage_BSS/models/GlobalModel.py" }, { "content": "#!/usr/bin/env python3\n# vim: set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:\n\n\"\"\"\nLogger (singleton)\n\nLogger pour les consommateurs RMQ\nDSI plamaizi 25/04/2017\n\n\"\"\"\nimport syslog\n\n\nclass Logger(object):\n class __Logger:\n def __init__(self):\n self.ident = None\n\n def loginfo(self, text):\n print(self.ident)\n syslog.openlog(ident=self.ident, facility=syslog.LOG_USER)\n syslog.syslog(syslog.LOG_INFO, str(text))\n print(text)\n\n def logerror(self, text):\n syslog.openlog(ident=self.ident, facility=syslog.LOG_USER)\n syslog.syslog(syslog.LOG_ERR, str(text))\n print(text)\n\n instance = None\n\n def __new__(cls):\n if not Logger.instance:\n Logger.instance = Logger.__Logger()\n return Logger.instance\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\n def __setattr__(self, name):\n return setattr(self.instance, name)\n", "id": "280589", "language": "Python", "matching_score": 0.0030492939986288548, "max_stars_count": 3, "path": "lib_Partage_BSS/utils/Log.py" }, { "content": "# -*-coding:utf-8 -*\nimport json\n\nfrom lib_Partage_BSS import utils\nfrom lib_Partage_BSS.exceptions.NameException import NameException\nfrom lib_Partage_BSS.models.GlobalModel import GlobalModel\n\n\nclass COS(GlobalModel):\n \"\"\"\n Classe représentant une classe de service dans Partage\n\n :ivar _zimbraDumpsterEnabled: ...\n :ivar _zimbraExternalSharingEnabled: ...\n :ivar _zimbraFeatureBriefcasesEnabled: ...\n :ivar _zimbraFeatureCalendarEnabled: ...\n :ivar _zimbraFeatureChangePasswordEnabled: ...\n :ivar _zimbraFeatureContactsEnabled: ...\n :ivar _zimbraFeatureConversationsEnabled: ...\n :ivar _zimbraFeatureDistributionListFolderEnabled: ...\n :ivar _zimbraFeatureExportFolderEnabled: ...\n :ivar _zimbraFeatureFiltersEnabled: ...\n :ivar _zimbraFeatureFlaggingEnabled: ...\n :ivar _zimbraFeatureGalAutoCompleteEnabled: ...\n :ivar _zimbraFeatureGalEnabled: ...\n :ivar _zimbraFeatureGroupCalendarEnabled: ...\n :ivar _zimbraFeatureHtmlComposeEnabled: ...\n :ivar _zimbraFeatureIdentitiesEnabled: ...\n :ivar _zimbraFeatureImapDataSourceEnabled: ...\n :ivar _zimbraFeatureImportFolderEnabled: ...\n :ivar _zimbraFeatureMailEnabled: ...\n :ivar _zimbraFeatureMailForwardingEnabled: ...\n :ivar _zimbraFeatureMailPriorityEnabled: ...\n :ivar _zimbraFeatureMailSendLaterEnabled: ...\n :ivar _zimbraFeatureManageZimlets: ...\n :ivar _zimbraFeatureMAPIConnectorEnabled: ...\n :ivar _zimbraFeatureMobileSyncEnabled: ...\n :ivar _zimbraFeatureNewMailNotificationEnabled: ...\n :ivar _zimbraFeatureOptionsEnabled: ...\n :ivar _zimbraFeatureOutOfOfficeReplyEnabled: ...\n :ivar _zimbraFeaturePop3DataSourceEnabled: ...\n :ivar _zimbraFeatureReadReceiptsEnabled: ...\n :ivar _zimbraFeatureSavedSearchesEnabled: ...\n :ivar _zimbraFeatureSharingEnabled: ...\n :ivar _zimbraFeatureSkinChangeEnabled: ...\n :ivar _zimbraFeatureTaggingEnabled: ...\n :ivar _zimbraFeatureTasksEnabled: ...\n :ivar _zimbraId: identifiant de la classe de service\n :ivar _zimbraImapEnabled: ...\n :ivar _zimbraMailQuota: quota de la classe de service\n :ivar _zimbraNotes: commentaire\n :ivar _zimbraPop3Enabled: ...\n :ivar _zimbraPublicSharingEnabled: ...\n :ivar _zimbraZimletAvailableZimlets: Array ...\n \"\"\"\n def 
__init__(self, name):\n GlobalModel.__init__(self, name)\n for attr in ['zimbraDumpsterEnabled','zimbraExternalSharingEnabled','zimbraFeatureCalendarEnabled','zimbraFeatureChangePasswordEnabled',\n 'zimbraFeatureContactsEnabled','zimbraFeatureConversationsEnabled','zimbraFeatureDistributionListFolderEnabled',\n 'zimbraFeatureExportFolderEnabled','zimbraFeatureFiltersEnabled','zimbraFeatureFlaggingEnabled','zimbraFeatureGalAutoCompleteEnabled',\n 'zimbraFeatureGalEnabled','zimbraFeatureGroupCalendarEnabled','zimbraFeatureHtmlComposeEnabled','zimbraFeatureIdentitiesEnabled',\n 'zimbraFeatureImapDataSourceEnabled','zimbraFeatureImportFolderEnabled','zimbraFeatureMailEnabled','zimbraFeatureMailForwardingEnabled',\n 'zimbraFeatureMailPriorityEnabled','zimbraFeatureMailSendLaterEnabled','zimbraFeatureManageZimlets','zimbraFeatureMAPIConnectorEnabled',\n 'zimbraFeatureMobileSyncEnabled','zimbraFeatureNewMailNotificationEnabled','zimbraFeatureOptionsEnabled','zimbraFeatureOutOfOfficeReplyEnabled',\n 'zimbraFeaturePop3DataSourceEnabled','zimbraFeatureReadReceiptsEnabled','zimbraFeatureSavedSearchesEnabled','zimbraFeatureSharingEnabled',\n 'zimbraFeatureSkinChangeEnabled','zimbraFeatureTaggingEnabled','zimbraFeatureTasksEnabled','zimbraId','zimbraImapEnabled','_zimbraMailQuota',\n 'zimbraNotes','zimbraPop3Enabled','zimbraPublicSharingEnabled','zimbraZimletAvailableZimlets']:\n setattr(self, attr, None)\n\n def fillCOS(self, listOfAttr):\n if not isinstance(listOfAttr, dict):\n raise TypeError\n\n for key, value in listOfAttr.items():\n setattr(self, key, value)\n\n\n\n", "id": "12741020", "language": "Python", "matching_score": 5.8831305503845215, "max_stars_count": 3, "path": "lib_Partage_BSS/models/COS.py" }, { "content": "# -*-coding:utf-8 -*\nimport json\n\nfrom lib_Partage_BSS import utils\nfrom lib_Partage_BSS.exceptions.NameException import NameException\nfrom lib_Partage_BSS.models.GlobalModel import GlobalModel\nfrom collections import OrderedDict\n\nclass Account(GlobalModel):\n \"\"\"\n Classe représentant un compte dans Partage\n\n :ivar _id: l'identifiant du compte\n :ivar _admin: le niveau d'administrateur du compte (ADMIN,...)\n :ivar _co: ...\n :ivar _company: ...\n :ivar _description: Description du compte\n :ivar _facsimileTelephoneNumber: Numéro de Fax du compte\n :ivar _homePhone: ...\n :ivar _initials: ...\n :ivar _l: ...\n :ivar _mavTransformation: ...\n :ivar _mavRedirection: ...\n :ivar _mobile: Numéro de mobile associé au compte\n :ivar _pager: ...\n :ivar _postalCode: Code postal associé au compte\n :ivar _used: Espace utilisé par le compte (en octet)\n :ivar _quota: Espace disponible pour le compte (en octet)\n :ivar _carLicense: EduPersonPrincipalName l'identifiant fondation du compte\n :ivar _givenName: Prénom de la personne\n :ivar _displayName: Nom complet de la personne\n :ivar _businessCategory: ...\n :ivar _sn: Nom de la personne\n :ivar _st:\n :ivar _street: Rue de la personne\n :ivar _telephoneNumber: Numéro de téléphone de la personne\n :ivar _title: ...\n :ivar _zimbraAccountStatus: Etat du compte défaut active (active,closed)\n :ivar _zimbraFeatureBriefcasesEnabled: ...\n :ivar _zimbraFeatureCalendarEnabled: ...\n :ivar _zimbraFeatureMailEnabled: ...\n :ivar _zimbraFeatureMailForwardingEnabled: Permettre à l'utilisateur d'indiquer une adresse de redirection (TRUE,FALSE)\n :ivar _zimbraFeatureOptionsEnabled: ...\n :ivar _zimbraFeatureTasksEnabled: ...\n :ivar _zimbraHideInGal: Masquer dans la GAL (TRUE,FALSE)\n :ivar _zimbraLastLogonTimestamp: Timestamp de la 
dernière connection au compte\n :ivar _zimbraMailQuota: ...\n :ivar _zimbraNotes: ...\n :ivar _zimbraPasswordMustChange: Forcer le changement de mot de passe à la prochaine connection (TRUE,FALSE)\n :ivar _zimbraPrefMailForwardingAddress: Adresse de redirection saisie par l?utilisateur\n :ivar _zimbraPrefMailLocalDeliveryDisabled: Ne pas conserver de copie des mails sur le client local\n :ivar _zimbraMailAlias: Liste des alias du compte\n :ivar _zimbraMailCanonicalAddress: Adresse email visible pour les messages sortants\n :ivar _zimbraPrefFromDisplay: Adresse email visible pour les messages sortants\n :ivar _zimbraCOSId: Id de la classe de Service du compte\n :ivar _zimbraZimletAvailableZimlets: Les zimlets disponible pour le compte\n \"\"\"\n def __init__(self, name):\n if utils.checkIsMailAddress(name):\n GlobalModel.__init__(self, name)\n self._id = None\n self._admin = None\n self._businessCategory = None\n self._co = None\n self._company = None\n self._description = None\n self._displayName = None\n self._carLicense = None\n self._facsimileTelephoneNumber = None\n self._givenName = None\n self._homePhone = None\n self._initials = None\n self._l = None\n self._mavTransformation = None\n self._mavRedirection = None\n self._mobile = None\n self._pager = None\n self._postalCode = None\n self._quota = None\n self._sn = None\n self._st = None\n self._street = None\n self._telephoneNumber = None\n self._title = None\n self._used = None\n self._zimbraAccountStatus = None\n self._zimbraFeatureBriefcasesEnabled = None\n self._zimbraFeatureCalendarEnabled = None\n self._zimbraFeatureMailEnabled = None\n self._zimbraFeatureMailForwardingEnabled = None\n self._zimbraFeatureOptionsEnabled = None\n self._zimbraFeatureTasksEnabled = None\n self._zimbraHideInGal = None\n self._zimbraLastLogonTimestamp = None\n self._zimbraMailQuota = None\n self._zimbraNotes = None\n self._zimbraPasswordMustChange = None\n self._zimbraPrefMailForwardingAddress = None\n self._zimbraPrefMailLocalDeliveryDisabled = None\n self._zimbraMailAlias = None\n self._zimbraMailCanonicalAddress = None\n self._zimbraPrefFromDisplay = None\n self._zimbraCOSId = None\n self._zimbraZimletAvailableZimlets = None\n\n else:\n raise NameException(\"Le nom donné n'est pas une adresse mail\")\n\n @property\n def id(self):\n return self._id\n\n @property\n def admin(self):\n return self._admin\n\n @property\n def businessCategory(self):\n return self._businessCategory\n\n @property\n def co(self):\n return self._co\n\n @property\n def company(self):\n return self._company\n\n @property\n def description(self):\n return self._description\n\n @property\n def displayName(self):\n return self._displayName\n\n @property\n def carLicense(self):\n return self._carLicense\n\n @property\n def facsimileTelephoneNumber(self):\n return self._facsimileTelephoneNumber\n\n @property\n def givenName(self):\n return self._givenName\n\n @property\n def homePhone(self):\n return self._homePhone\n\n @property\n def initials(self):\n return self._initials\n\n @property\n def l(self):\n return self._l\n\n @property\n def mavTransformation(self):\n return self._mavTransformation\n\n @property\n def mavRedirection(self):\n return self._mavRedirection\n\n @property\n def mobile(self):\n return self._mobile\n\n @property\n def pager(self):\n return self._pager\n\n @property\n def postalCode(self):\n return self._postalCode\n\n @property\n def quota(self):\n return self._quota\n\n @property\n def sn(self):\n return self._sn\n\n @property\n def st(self):\n return 
self._st\n\n @property\n def street(self):\n return self._street\n\n @property\n def telephoneNumber(self):\n return self._telephoneNumber\n\n @property\n def title(self):\n return self._title\n\n @property\n def used(self):\n return self._used\n\n @property\n def zimbraAccountStatus(self):\n return self._zimbraAccountStatus\n\n @property\n def zimbraFeatureBriefcasesEnabled(self):\n return self._zimbraFeatureBriefcasesEnabled\n\n @property\n def zimbraFeatureCalendarEnabled(self):\n return self._zimbraFeatureCalendarEnabled\n\n @property\n def zimbraFeatureMailEnabled(self):\n return self._zimbraFeatureMailEnabled\n\n @property\n def zimbraFeatureMailForwardingEnabled(self):\n return self._zimbraFeatureMailForwardingEnabled\n\n @property\n def zimbraFeatureOptionsEnabled(self):\n return self._zimbraFeatureOptionsEnabled\n\n @property\n def zimbraFeatureTasksEnabled(self):\n return self._zimbraFeatureTasksEnabled\n\n @property\n def zimbraHideInGal(self):\n return self._zimbraHideInGal\n\n @property\n def zimbraLastLogonTimestamp(self):\n return self._zimbraLastLogonTimestamp\n\n @property\n def zimbraMailAlias(self):\n return self._zimbraMailAlias\n\n @property\n def zimbraMailQuota(self):\n return self._zimbraMailQuota\n\n @property\n def zimbraMailCanonicalAddress(self):\n return self._zimbraMailCanonicalAddress\n\n @property\n def zimbraNotes(self):\n return self._zimbraNotes\n\n @property\n def zimbraPasswordMustChange(self):\n return self._zimbraPasswordMustChange\n\n @property\n def zimbraPrefFromDisplay(self):\n return self._zimbraPrefFromDisplay\n\n @property\n def zimbraPrefMailForwardingAddress(self):\n return self._zimbraPrefMailForwardingAddress\n\n @property\n def zimbraPrefMailLocalDeliveryDisabled(self):\n return self._zimbraPrefMailLocalDeliveryDisabled\n\n @property\n def zimbraCOSId(self):\n return self._zimbraCOSId\n\n @property\n def zimbraZimletAvailableZimlets(self):\n return self._zimbraZimletAvailableZimlets\n\n @admin.setter\n def admin(self, value):\n if isinstance(value, str) or value is None:\n self._admin = value\n else:\n raise TypeError\n\n @businessCategory.setter\n def businessCategory(self, value):\n if isinstance(value, str) or value is None:\n self._businessCategory = value\n else:\n raise TypeError\n\n @co.setter\n def co(self, value):\n if isinstance(value, str) or value is None:\n self._co = value\n else:\n raise TypeError\n\n @company.setter\n def company(self, value):\n if isinstance(value, str) or value is None:\n self._company = value\n else:\n raise TypeError\n\n @description.setter\n def description(self, value):\n if isinstance(value, str) or value is None:\n self._description = value\n else:\n raise TypeError\n\n @displayName.setter\n def displayName(self, value):\n if isinstance(value, str) or value is None:\n self._displayName = value\n else:\n raise TypeError\n\n @carLicense.setter\n def carLicense(self, value):\n if isinstance(value, str) or value is None:\n self._carLicense = value\n else:\n raise TypeError\n\n @facsimileTelephoneNumber.setter\n def facsimileTelephoneNumber(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsNum(value):\n self._facsimileTelephoneNumber = value\n else:\n raise TypeError\n\n @givenName.setter\n def givenName(self, value):\n if isinstance(value, str) or value is None:\n self._givenName = value\n else:\n raise TypeError\n\n @homePhone.setter\n def homePhone(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsNum(value):\n self._homePhone = value\n else:\n 
raise TypeError\n\n @initials.setter\n def initials(self, value):\n if isinstance(value, str) or value is None:\n self._initials = value\n else:\n raise TypeError\n\n @l.setter\n def l(self, value):\n if isinstance(value, str) or value is None:\n self._l = value\n else:\n raise TypeError\n\n @mavTransformation.setter\n def mavTransformation(self, value):\n if value is None:\n self._mavTransformation = None\n elif utils.checkBoolean( value ):\n self._mavTransformation = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @mavRedirection.setter\n def mavRedirection(self, value):\n\n if isinstance(value, str) or value is None:\n self._mavRedirection = value\n else:\n raise TypeError\n\n @mobile.setter\n def mobile(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsNum(value):\n self._mobile = value\n else:\n raise TypeError\n\n @pager.setter\n def pager(self, value):\n if isinstance(value, str) or value is None:\n self._pager = value\n else:\n raise TypeError\n\n @postalCode.setter\n def postalCode(self, value):\n if isinstance(value, int):\n self._postalCode = str(value)\n elif isinstance(value, str) or value is None:\n if utils.checkIsNum(value):\n self._postalCode = value\n else:\n raise TypeError\n\n @quota.setter\n def quota(self, value):\n if isinstance(value, int) or value is None:\n self._quota = value\n elif isinstance(value , str):\n try:\n self._quota = int(value)\n except ValueError:\n raise TypeError\n else:\n raise TypeError\n\n @sn.setter\n def sn(self, value):\n if isinstance(value, str) or value is None:\n self._sn = value\n else:\n raise TypeError\n\n @st.setter\n def st(self, value):\n if isinstance(value, str) or value is None:\n self._st = value\n else:\n raise TypeError\n\n @street.setter\n def street(self, value):\n if isinstance(value, str) or value is None:\n self._street = value\n else:\n raise TypeError\n\n @telephoneNumber.setter\n def telephoneNumber(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsNum(value):\n self._telephoneNumber = value\n else:\n raise TypeError\n\n @title.setter\n def title(self, value):\n if isinstance(value, str) or value is None:\n self._title = value\n else:\n raise TypeError\n\n @used.setter\n def used(self, value):\n if isinstance(value, int) or value is None:\n self._used = value\n elif isinstance( value , str ):\n try:\n self._used = int( value )\n except ValueError:\n raise TypeError\n else:\n raise TypeError\n\n @zimbraAccountStatus.setter\n def zimbraAccountStatus(self, value):\n if value == \"active\" or value == \"closed\" or value == \"locked\":\n self._zimbraAccountStatus = value\n else:\n raise TypeError\n\n @zimbraFeatureBriefcasesEnabled.setter\n def zimbraFeatureBriefcasesEnabled(self, value):\n if value is None:\n self._zimbraFeatureBriefcasesEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureBriefcasesEnabled = utils.convertToBoolean(\n value )\n else:\n raise TypeError\n\n @zimbraFeatureCalendarEnabled.setter\n def zimbraFeatureCalendarEnabled(self, value):\n if value is None:\n self._zimbraFeatureCalendarEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureCalendarEnabled = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @zimbraFeatureMailEnabled.setter\n def zimbraFeatureMailEnabled(self, value):\n if value is None:\n self._zimbraFeatureMailEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureMailEnabled = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n 
@zimbraFeatureMailForwardingEnabled.setter\n def zimbraFeatureMailForwardingEnabled(self, value):\n if value is None:\n self._zimbraFeatureMailForwardingEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureMailForwardingEnabled = utils.convertToBoolean(\n value )\n else:\n raise TypeError\n\n @zimbraFeatureOptionsEnabled.setter\n def zimbraFeatureOptionsEnabled(self, value):\n if value is None:\n self._zimbraFeatureOptionsEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureOptionsEnabled = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @zimbraFeatureTasksEnabled.setter\n def zimbraFeatureTasksEnabled(self, value):\n if value is None:\n self._zimbraFeatureTasksEnabled = None\n elif utils.checkBoolean( value ):\n self._zimbraFeatureTasksEnabled = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @zimbraHideInGal.setter\n def zimbraHideInGal(self, value):\n if value is None:\n self._zimbraHideInGal = None\n elif utils.checkBoolean( value ):\n self._zimbraHideInGal = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @zimbraMailQuota.setter\n def zimbraMailQuota(self, value):\n if isinstance(value, int) or value is None:\n self._zimbraMailQuota = value\n elif isinstance(value, str):\n try:\n self._zimbraMailQuota = int( value )\n except ValueError:\n raise TypeError\n else:\n raise TypeError\n\n @zimbraMailAlias.setter\n def zimbraMailAlias(self, value):\n if isinstance(value, list) or value is None:\n self._zimbraMailAlias = value\n else:\n raise TypeError\n\n @zimbraMailCanonicalAddress.setter\n def zimbraMailCanonicalAddress(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsMailAddress(value):\n self._zimbraMailCanonicalAddress = value\n else:\n raise NameException(\"L'adresse mail \" + value + \" n'est pas une adresse mail valide\")\n else:\n raise TypeError\n\n @zimbraLastLogonTimestamp.setter\n def zimbraLastLogonTimestamp(self, value):\n if isinstance(value, str) or value is None:\n self._zimbraLastLogonTimestamp = value\n else:\n raise TypeError\n\n @zimbraNotes.setter\n def zimbraNotes(self, value):\n if isinstance(value, str) or value is None:\n self._zimbraNotes = value\n else:\n raise TypeError\n\n @zimbraPasswordMustChange.setter\n def zimbraPasswordMustChange(self, value):\n if value is None:\n self._zimbraPasswordMustChange = None\n elif utils.checkBoolean( value ):\n self._zimbraPasswordMustChange = utils.convertToBoolean( value )\n else:\n raise TypeError\n\n @zimbraPrefFromDisplay.setter\n def zimbraPrefFromDisplay(self, value):\n if isinstance(value, str) or value is None:\n self._zimbraPrefFromDisplay = value\n else:\n raise TypeError\n\n @zimbraPrefMailForwardingAddress.setter\n def zimbraPrefMailForwardingAddress(self, value):\n if isinstance(value, str) or value is None:\n if utils.checkIsMailAddress(value):\n self._zimbraPrefMailForwardingAddress = value\n else:\n raise NameException(\"L'adresse mail \" + value + \" n'est pas une adresse mail valide\")\n else:\n raise TypeError\n\n @zimbraPrefMailLocalDeliveryDisabled.setter\n def zimbraPrefMailLocalDeliveryDisabled(self, value):\n if value is None:\n self._zimbraPrefMailLocalDeliveryDisabled = None\n elif utils.checkBoolean( value ):\n self._zimbraPrefMailLocalDeliveryDisabled = utils.convertToBoolean(\n value )\n else:\n raise TypeError\n\n @zimbraCOSId.setter\n def zimbraCOSId(self, value):\n if isinstance(value, str) or value is None:\n self._zimbraCOSId = value\n else:\n raise TypeError\n\n def 
addZimbraZimletAvailableZimlets(self, value):\n if isinstance(value, str):\n if self._zimbraZimletAvailableZimlets is None:\n self._zimbraZimletAvailableZimlets = []\n if value not in self._zimbraZimletAvailableZimlets:\n self._zimbraZimletAvailableZimlets.append(value)\n else:\n raise TypeError\n\n def removeZimbraZimletAvailableZimlets(self, valueToRemove):\n if isinstance(valueToRemove, str):\n if valueToRemove in self._zimbraZimletAvailableZimlets:\n self._zimbraZimletAvailableZimlets.remove(valueToRemove)\n else:\n raise TypeError\n\n def resetZimbraZimletAvailableZimlets(self):\n self._zimbraZimletAvailableZimlets = 'DELETE_ARRAY'\n\n def fillAccount(self, listOfAttr, allowNameChange=False):\n if not isinstance(listOfAttr, dict) and not isinstance(listOfAttr, list):\n raise TypeError\n for attr in listOfAttr:\n if attr == \"name\" and not allowNameChange:\n continue\n propattr = getattr(self.__class__, attr, None)\n if isinstance(propattr, property) and propattr.fset is not None:\n if listOfAttr[attr] == \"None\":\n propattr.fset(self, None)\n else:\n propattr.fset(self, listOfAttr[attr])\n\n def toData(self, checkName = True):\n \"\"\"\n Transforme les données du compte en un dictionnaire pouvant être\n utilisé avec l'API BSS, après avoir éventuellement vérifié\n l'adresse.\n\n :param bool checkName: vérifie l'adresse associée au compte\n\n :raises NameException: exception levée si le nom n'est pas une \\\n adresse mail valide\n\n :return: le dictionnaire contenant les informations au sujet du \\\n compte et pouvant être passé à l'API BSS.\n \"\"\"\n if self.name is None:\n raise NameException( 'Aucune adresse mail spécifiée.' )\n if checkName and not utils.checkIsMailAddress( self.name ):\n raise NameException(\"L'adresse mail \" + self.name\n + \" n'est pas valide\")\n data = {}\n for attr in self.__dict__:\n attrValue = self.__getattribute__(attr)\n\n # On ne prend pas le préfixe '_'\n attrKey = attr[1:]\n\n if (self.__getattribute__(attr) is None ):\n continue\n\n if isinstance(attrValue, list) or attrValue == 'DELETE_ARRAY':\n # On prévoit une valeur spéciale 'DELETE_ARRAY' pour effacer un attribut de type tableau\n if attrValue == 'DELETE_ARRAY':\n attrValue = ''\n\n attrKey = attrKey+'[]'\n\n if isinstance(attrValue, bool):\n attrValue = utils.changeBooleanToString(attrValue)\n\n data[attrKey] = attrValue\n return data\n\n\ndef importJsonAccount(jsonAccount):\n json_data = open(jsonAccount)\n data = json.load(json_data)\n\n if \"name\" not in data:\n raise NameException(\"Adresse mail non présent dans le fichier json\")\n account = Account(data[\"name\"])\n for attr in data:\n if attr == \"name\":\n continue\n propattr = getattr(account.__class__, attr, None)\n if isinstance(propattr, property):\n #and propattr.fset is not None:\n if data[attr] == \"None\":\n delattr(account,attr)\n #propattr.fset(account, None)\n else:\n propattr.fset(account, data[attr])\n return account\n", "id": "8454060", "language": "Python", "matching_score": 6.456500053405762, "max_stars_count": 3, "path": "lib_Partage_BSS/models/Account.py" }, { "content": "from unittest.mock import MagicMock\n\nimport pytest\nfrom requests import Response\n\nfrom lib_Partage_BSS.models.Account import Account\nfrom lib_Partage_BSS.exceptions.NameException import NameException\nfrom lib_Partage_BSS.exceptions.DomainException import DomainException\nfrom lib_Partage_BSS.exceptions.ServiceException import ServiceException\nfrom lib_Partage_BSS.exceptions.TmpServiceException import TmpServiceException\nfrom 
lib_Partage_BSS.exceptions.NotFoundException import NotFoundException\nfrom lib_Partage_BSS.services import AccountService, BSSConnexion, BSSConnexionService\n\n\n@pytest.fixture()\ndef initGoodResponse():\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\" \\\n \"<Response>\" \\\n \" <status type=\\\"integer\\\">0</status>\" \\\n \" <message>Opération réalisée avec succès !</message>\" \\\n \" <account>\" \\\n \" <name><EMAIL></name>\" \\\n \" <id>idTest</id>\" \\\n \" <admin>DOMAIN</admin>\" \\\n \" <mav-transformation>FALSE</mav-transformation>\" \\\n \" <mav-redirection></mav-redirection>\" \\\n \" <used type=\\\"integer\\\">0</used>\" \\\n \" <quota type=\\\"integer\\\">0</quota>\" \\\n \" <carLicense>EPPN</carLicense>\" \\\n \" <givenName>prenomTest</givenName>\" \\\n \" <zimbraFeatureMailForwardingEnabled>TRUE</zimbraFeatureMailForwardingEnabled>\" \\\n \" <displayName><NAME></displayName>\" \\\n \" <businessCategory>1</businessCategory>\" \\\n \" <zimbraFeatureCalendarEnabled>TRUE</zimbraFeatureCalendarEnabled>\" \\\n \" <zimbraAccountStatus>active</zimbraAccountStatus>\" \\\n \" <zimbraFeatureContactsEnabled>TRUE</zimbraFeatureContactsEnabled>\" \\\n \" <zimbraLastLogonTimestamp>20180131091551Z</zimbraLastLogonTimestamp>\" \\\n \" <zimbraFeatureOptionsEnabled>TRUE</zimbraFeatureOptionsEnabled>\" \\\n \" <zimbraFeatureTasksEnabled>TRUE</zimbraFeatureTasksEnabled>\" \\\n \" <zimbraPrefMailLocalDeliveryDisabled>FALSE</zimbraPrefMailLocalDeliveryDisabled>\" \\\n \" <zimbraMailQuota>0</zimbraMailQuota>\" \\\n \" <sn>nomTest</sn>\" \\\n \" <zimbraCOSId>testCOSId</zimbraCOSId>\" \\\n \" <zimbraZimletAvailableZimlets type=\\\"array\\\">\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_attachmail</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_srchhighlighter</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_url</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_email</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_ymemoticons</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_date</zimbraZimletAvailableZimlet>\" \\\n \" <zimbraZimletAvailableZimlet>com_zimbra_attachcontacts</zimbraZimletAvailableZimlet>\" \\\n \" </zimbraZimletAvailableZimlets>\" \\\n \" <zimbraFeatureBriefcasesEnabled>TRUE</zimbraFeatureBriefcasesEnabled>\" \\\n \" <zimbraHideInGal>FALSE</zimbraHideInGal>\" \\\n \" <zimbraFeatureMailEnabled>TRUE</zimbraFeatureMailEnabled>\" \\\n \" </account>\" \\\n \"</Response>\"\n return response\n\n\n@pytest.fixture()\ndef initBadResponse():\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\" \\\n \"<Response>\" \\\n \" <status type=\\\"integer\\\">2</status>\" \\\n \" <message>Opération réalisée avec succès !</message>\" \\\n \"</Response>\"\n return response\n\n@pytest.fixture()\ndef create_connexion():\n con = BSSConnexion()\n con.setDomainKey({\"domain.com\": \"keyDeTest\"})\n return con\n\ndef test_init_cas_nom_vallide():\n account = Account(\"<EMAIL>\")\n assert account.name == \"<EMAIL>\"\n\n\ndef test_init_cas_nom_non_vallide():\n with pytest.raises(NameException):\n account = Account(\"test\")\n\n\ndef test_getAccount_cas_compte_existant(mocker):\n response = initGoodResponse()\n con = create_connexion()\n\n with mocker.patch('requests.post', return_value=response):\n with mocker.patch.object(con, 'token', 
return_value=\"test\"):\n account = AccountService.getAccount(\"<EMAIL>\")\n assert account.name == \"<EMAIL>\"\n print(account.carLicense)\n assert account.carLicense == \"EPPN\"\n assert account.zimbraCOSId == \"testCOSId\"\n\n\ndef test_getAccount_cas_compte_inexistant(mocker):\n with pytest.raises(ServiceException):\n response = initBadResponse()\n con = create_connexion()\n with mocker.patch('requests.post', return_value=response):\n with mocker.patch.object(con, 'token', return_value=\"test\"):\n AccountService.getAccount(\"<EMAIL>\")\n\n\n", "id": "6270515", "language": "Python", "matching_score": 4.9419121742248535, "max_stars_count": 3, "path": "test_unitaire/lib_Partage_BSS/services/test_ServiceAccount.py" }, { "content": "import pytest\nfrom requests.models import Response\nfrom unittest.mock import MagicMock\n\nfrom lib_Partage_BSS.exceptions import DomainException\nfrom lib_Partage_BSS.exceptions.BSSConnexionException import BSSConnexionException\n\nimport time as timer\nimport hmac\n\nfrom lib_Partage_BSS.services import BSSConnexion\n\n\n@pytest.fixture()\ndef create_connexion():\n con = BSSConnexion()\n con.setDomainKey({\"domain.com\": \"keyDeTest\"})\n con.setDomainKey({\"autre.com\": \"keyDeTest\"})\n con.ttl = 10\n return con\n\n\ndef mock_response():\n class MockResponse(Response):\n\n def __init__(self):\n Response.__init__(self)\n self.status_code = 200\n self._text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">0</status>\\n <message>Op\\xc3\\xa9ration r\\xc3\\xa9alis\\xc3\\xa9e avec succ\\xc3\\xa8s !</message>\\n <token>BSSToken</token>\\n</Response>\\n\"\n\n return MockResponse\n\n\ndef test_getToken_casNormal(mocker):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">0</status>\\n <message>Op\\xc3\\xa9ration r\\xc3\\xa9alis\\xc3\\xa9e avec succ\\xc3\\xa8s !</message>\\n <token>tokenDeTest</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n assert con.token(\"domain.com\") == \"tokenDeTest\"\n BSSConnexion.instance = None\n\n\ndef test_getToken_casNormalSurAutreDomain(mocker):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">0</status>\\n <message>Op\\xc3\\xa9ration r\\xc3\\xa9alis\\xc3\\xa9e avec succ\\xc3\\xa8s !</message>\\n <token>tokenDeTest</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n assert con.token(\"autre.com\") == \"tokenDeTest\"\n BSSConnexion.instance = None\n\n\ndef test_getToken_casPreAuthEchec(mocker):\n with pytest.raises(BSSConnexionException):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">2</status>\\n <message>Echec de la preauthentification</message>\\n <token>tokenDeTestEchec</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(\"domain.com\")\n print(token)\n BSSConnexion.instance = None\n\n\ndef test_getToken_casDomainNonString(mocker):\n with pytest.raises(TypeError):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">2</status>\\n <message>Echec de la 
preauthentification</message>\\n <token>tokenDeTestEchec</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(0)\n BSSConnexion.instance = None\n\n\ndef test_getToken_casDomainNonValide(mocker):\n with pytest.raises(DomainException):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">2</status>\\n <message>Echec de la preauthentification</message>\\n <token>tokenDeTestEchec</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(\"domain\")\n BSSConnexion.instance = None\n\n\ndef test_getToken_casDomainNonPresentDansConfig(mocker):\n with pytest.raises(DomainException):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">2</status>\\n <message>Echec de la preauthentification</message>\\n <token>tokenDeTestEchec</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(\"domain.fr\")\n BSSConnexion.instance = None\n\ndef test_getToken_4minApresCreation(mocker):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">0</status>\\n <message>Op\\xc3\\xa9ration r\\xc3\\xa9alis\\xc3\\xa9e avec succ\\xc3\\xa8s !</message>\\n <token>tokenDeTest</token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(\"domain.com\")\n mocker.spy(hmac, 'new')\n timer.sleep(int( con.ttl * .8 ))\n token = con.token(\"domain.com\")\n assert hmac.new.call_count == 0\n BSSConnexion.instance = None\n\n\ndef test_getToken_5minApresCreation(mocker):\n con = create_connexion()\n response = MagicMock(Response)\n response.text = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Response>\\n <status type=\\\"integer\\\">0</status>\\n <message>Op\\xc3\\xa9ration r\\xc3\\xa9alis\\xc3\\xa9e avec succ\\xc3\\xa8s !</message>\\n <token>token<PASSWORD></token>\\n</Response>\\n\"\n with mocker.patch('requests.post', return_value=response):\n token = con.token(\"domain.com\")\n mocker.spy(hmac, 'new')\n timer.sleep(con.ttl)\n token = con.token(\"domain.com\")\n assert hmac.new.call_count == 1\n BSSConnexion.instance = None\n", "id": "2432706", "language": "Python", "matching_score": 3.131808280944824, "max_stars_count": 3, "path": "test_unitaire/lib_Partage_BSS/services/test_BSSConnexion.py" }, { "content": "# -*-coding:utf-8 -*\nimport json\nimport hmac\nimport hashlib\nfrom time import time\n\nfrom lib_Partage_BSS import utils\nfrom lib_Partage_BSS.exceptions import BSSConnexionException, DomainException\nfrom lib_Partage_BSS.utils.BSSRequest import postBSS\n\n\nclass BSSConnexion(object):\n \"\"\"\n Classe permettant de récuperer un token d'une durée de vie de 5min auprès de l'API BSS Partage.\n Elle regenère un token lorsque celui-ci est sur le point d'expirer\n\n :ivar _domain: Le domaine cible\n :ivar _key: La clé associée au domaine\n :ivar _timestampOfLastToken: Le timestamp auquel on à obtenue notre dernier token. 
Permet de renouveller le token avant expiration\n :ivar _token: Le token obtenu via l'API pour utiliser les autres méthodes de l'API\n :ivar _url: L'url vers l'API BSS Partage (https://api.partage.renater.fr/service/domain/)\n :ivar _ttl: le délai d'expiration des tokens reçus (300 secondes par défaut)\n \"\"\"\n class __BSSConnexion:\n\n def __init__(self):\n \"\"\"Constructeur de BSS connexion\n\n Arguments :\n domain(string): Le domaine cible\n key(string): La clé associée au domaine\n\n Retour :\n BBSConnexion : l'objet contenant tous les paramètres de connexion\n\n Exemple d'utilisation :\n >>>BSSConnexion(\"domain.com\",\"6b7ead4bd425836e8cf0079cd6c1a05acc127acd07c8ee4b61023e19250e929c\")\n \"\"\"\n self._domain = \"\"\n self._key = {}\n \"\"\"La clé associés au domaine\"\"\"\n self._timestampOfLastToken = {}\n \"\"\"Le timestamp auquel on a obtenu le dernier token. Permet de renouveller le token avant expiration\"\"\"\n self._token = {}\n \"\"\"Le token obtenu via l'API pour utiliser les autres méthodes\"\"\"\n self._url = \"https://api.partage.renater.fr/service/domain/\"\n \"\"\"L'url vers l'API BSS Partage\"\"\"\n self._ttl = 300\n\n @property\n def url(self):\n \"\"\"Getter de l'url\n\n Arguments : Aucun\n\n Retour :\n string : url de l'API BSS Partage\n\n Example d'utilisation :\n >>>con = BSSConnexion(\"domain.com\",\"6b7ead4bd425836e8cf0079cd6c1a05acc127acd07c8ee4b61023e19250e929c\")\n >>>url = con.url\n >>>print(url)\n https://api.partage.renater.fr/service/domain/\n \"\"\"\n return self._url\n\n @url.setter\n def url(self,url):\n \"\"\"Setter de l'url\n\n :param url:\n L'url vars l'api BSS par default https://api.partage.renater.fr/service/domaine\n \"\"\"\n self._url = url\n\n @property\n def domain(self):\n \"\"\"Getter du domaine\n\n Arguments : Aucun\n\n Retour :\n string : domaine cible\n\n Example d'utilisation :\n >>>con = BSSConnexion(\"domain.com\",\"6b7ead4bd425836e8cf0079cd6c1a05acc127acd07c8ee4b61023e19250e929c\")\n >>>domain = con.domain\n >>>print(domain)\n domain.com\n \"\"\"\n return self._domain\n\n @property\n def ttl(self):\n \"\"\"\n Lecture de la durée de vie des tokens.\n\n :return: la durée de vie des tokens, en secondes.\n \"\"\"\n return self._ttl\n\n @ttl.setter\n def ttl(self, value):\n self._ttl = value\n\n def setDomainKey(self, listDomainKey):\n \"\"\"Getter du domaine\n\n :param listDomainKey: une liste conntenant l'emsemble des domaine à initialiser. Formate de la liste\n {\"domaine1\" : \"keydomain1\", \"domaine2\" : \"keydomain2\"}\n\n\n Example d'utilisation :\n >>>con = BSSConnexion()\n >>>con.setDomainKey({\"domaine1\" : \"keydomain1\", \"domaine2\" : \"keydomain2\"} )\n \"\"\"\n if not isinstance(listDomainKey, dict):\n raise TypeError\n for domain in listDomainKey:\n if not utils.checkIsDomain(domain):\n raise DomainException(domain + \" n'est pas un nom de domain valide\")\n self._key[domain] = listDomainKey[domain]\n self._timestampOfLastToken[domain] = 0\n self._token[domain] = \"\"\n\n def token(self, domain):\n \"\"\"Getter du Token\n\n Arguments : Aucun\n\n Retour :\n string : token pour connexion à l'API\n\n Exception:\n BSSConnexion en cas d'Erreur lors de la récupération du token\n\n Example d'utilisation :\n >>>con = BSSConnexion(\"domain.com\",\"6b7ead4bd425836e8cf0079cd6c1a05acc127acd07c8ee4b61023e19250e929c\")\n >>>token = con.token\n >>>try:\n ... print(token) #doctest: +ELLIPSIS\n ...except BSSConnexionException as err:\n ... 
print(\"BSS Erreur: {0}\".format(err))\n ...\n Description :\n Le token ayant une durée de vie de 5min on le regénère si il est plus vieux que 4min30s\n Si l'ecart entre le timestamp actuel et le timestamp de l'obtention du dernier token est de moins de 270 secondes (4min30s)\n on renvoie le token actuel. Au delà on génère un nouveau token\n \"\"\"\n if isinstance(domain, str):\n if utils.checkIsDomain(domain):\n\n self._domain = domain\n \"\"\"Le domaine sur lequel on souhaite travailler\"\"\"\n if domain not in self._key:\n raise DomainException(domain + \" : Domaine non initialisé\")\n actualTimestamp = round(time())\n if (actualTimestamp - self._timestampOfLastToken[domain]) < int( self._ttl * .9 ):\n return self._token[domain]\n else:\n self._timestampOfLastToken[domain] = actualTimestamp\n msg = domain + \"|\" + str(actualTimestamp)\n preAuth = hmac.new(self._key[domain].encode(\"utf-8\"), msg.encode(\"utf-8\"), hashlib.sha1).hexdigest()\n data = {\n \"domain\": domain,\n \"timestamp\": str(round(time())),\n \"preauth\": preAuth\n }\n response = postBSS(self._url + \"/Auth\", data)\n status_code = utils.changeToInt(response[\"status\"])\n message = response[\"message\"]\n if status_code == 0:\n self._token[domain] = response[\"token\"]\n else:\n raise BSSConnexionException(status_code, message)\n return self._token[domain]\n else:\n raise DomainException(domain+\" n'est pas un nom de domain valide\")\n else:\n raise TypeError\n\n instance = None\n\n def __new__(cls): # _new_ est toujours une méthode de classe\n if not BSSConnexion.instance:\n BSSConnexion.instance = BSSConnexion.__BSSConnexion()\n return BSSConnexion.instance\n\n def __getattr__(self, attr):\n return getattr(self.instance, attr)\n\n def __setattr__(self, attr, val):\n return setattr(self.instance, attr, val)\n\n", "id": "7342427", "language": "Python", "matching_score": 3.655181884765625, "max_stars_count": 3, "path": "lib_Partage_BSS/services/BSSConnexionService.py" }, { "content": "# -*-coding:utf-8 -*\nfrom lib_Partage_BSS import utils\nclass TmpServiceException(Exception):\n \"\"\"\n Exception levée lorsqu'un appel à l'API génère une erreur temporaire\n\n :ivar code: code de l'erreur\n :ivar message: message à afficher\n \"\"\"\n def __init__(self,code, message):\n self.code = utils.changeToInt(code)\n self.msg = str(utils.changeToInt(code))+\" : \"+message\n", "id": "282589", "language": "Python", "matching_score": 1.962425708770752, "max_stars_count": 3, "path": "lib_Partage_BSS/exceptions/TmpServiceException.py" }, { "content": "# -*-coding:utf-8 -*\nclass NameException(Exception):\n \"\"\"\n Exception levée lorsqu'un nom d'objet n'est pas conforme\n\n :ivar message: message à afficher\n \"\"\"\n def __init__(self, message):\n self.msg = message\n", "id": "10201820", "language": "Python", "matching_score": 1.0847265720367432, "max_stars_count": 3, "path": "lib_Partage_BSS/exceptions/NameException.py" }, { "content": "\"\"\"Package exceptions\"\"\"\nfrom .ServiceException import ServiceException\nfrom .TmpServiceException import TmpServiceException\nfrom .NameException import NameException\nfrom .BSSConnexionException import BSSConnexionException\nfrom .DomainException import DomainException\nfrom .NotFoundException import NotFoundException\n", "id": "12582420", "language": "Python", "matching_score": 1.831381916999817, "max_stars_count": 3, "path": "lib_Partage_BSS/exceptions/__init__.py" }, { "content": "\"\"\"Package services\"\"\"\nfrom .BSSConnexionService import BSSConnexion\nfrom .GlobalService import 
*\nfrom .AccountService import *\nfrom .COSService import *\n\n", "id": "9655310", "language": "Python", "matching_score": 2.1943106651306152, "max_stars_count": 0, "path": "lib_Partage_BSS/services/__init__.py" }, { "content": "\"\"\"Package models\"\"\"\nfrom .Account import Account\nfrom .COS import COS", "id": "9341918", "language": "Python", "matching_score": 0.3498478829860687, "max_stars_count": 0, "path": "lib_Partage_BSS/models/__init__.py" }, { "content": "\"\"\"Package utils\"\"\"\nfrom .CheckMethods import *\nfrom .BSSRequest import *", "id": "1981748", "language": "Python", "matching_score": 0.5812841057777405, "max_stars_count": 3, "path": "lib_Partage_BSS/utils/__init__.py" }, { "content": "from . import exceptions\nfrom . import models\nfrom . import services\nfrom . import utils\n", "id": "2229612", "language": "Python", "matching_score": 0.21248126029968262, "max_stars_count": 3, "path": "lib_Partage_BSS/__init__.py" } ]
2.078368
KrishMunot
[ { "content": "from sklearn import preprocessing\nimport numpy as np\ndef derivative(x):\n return x * (1.0 - x)\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\nX = []\nY = []\n\nwith open('Train.csv') as f:\n for line in f:\n curr = line.split(',')\n new_curr = [1]\n for item in curr[:len(curr) - 1]:\n new_curr.append(float(item))\n X.append(new_curr)\n Y.append([float(curr[-1])])\nX = np.array(X)\nX = preprocessing.scale(X) # feature scaling\nY = np.array(Y)\n\nX_train = X[0:2500]\nY_train = Y[0:2500]\n\nX_test = X[2500:]\ny_test = Y[2500:]\nX = X_train\ny = Y_train\n\n# input layer has 57 nodes (1 for each feature)\n# hidden layer has 4 nodes\n# output layer has 1 node\ndim1 = len(X_train[0])\ndim2 = 4\nnp.random.seed(1)\nweight0 = 2 * np.random.random((dim1, dim2)) - 1\nweight1 = 2 * np.random.random((dim2, 1)) - 1\nfor j in xrange(25000):\n # first evaluate the output for each training email\n layer_0 = X_train\n layer_1 = sigmoid(np.dot(layer_0,weight0))\n layer_2 = sigmoid(np.dot(layer_1,weight1))\n # calculate the error\n layer_2_error = Y_train - layer_2\n # perform back propagation\n layer_2_delta = layer_2_error * derivative(layer_2)\n layer_1_error = layer_2_delta.dot(weight1.T)\n layer_1_delta = layer_1_error * derivative(layer_1)\n # update the weight vectors\n weight1 += layer_1.T.dot(layer_2_delta)\n weight0 += layer_0.T.dot(layer_1_delta)\n# evaluation on the testing data\nlayer_0 = X_test\nlayer_1 = sigmoid(np.dot(layer_0,weight0))\nlayer_2 = sigmoid(np.dot(layer_1,weight1))\ncorrect = 0\n\n# if the output is > 0.5, then label as spam else no spam\nfor i in xrange(len(layer_2)):\n if(layer_2[i][0] > 0.5):\n layer_2[i][0] = 1\n else:\n layer_2[i][0] = 0\n if(layer_2[i][0] == y_test[i][0]):\n correct += 1\nprint \"total = \", len(layer_2)\nprint \"correct = \", correct\nprint \"accuracy = \", correct * 100.0 / len(layer_2)\n", "id": "2400143", "language": "Python", "matching_score": 4.758877277374268, "max_stars_count": 3, "path": "check.py" }, { "content": "from sklearn import preprocessing\nimport numpy as np\n\ndef derivative(x):\n return x * (1.0 - x)\n\ndef sigmoid(x):\n\treturn 1.0 / (1.0 + np.exp(-x))\n\nX = []\nY = []\n\n# read the training data\nwith open('Train.csv') as f:\n\tfor line in f:\n\t\tcurr = line.split(',')\n\t\tnew_curr = [1]\n\t\tfor item in curr[:len(curr) - 1]:\n\t\t\tnew_curr.append(float(item))\n\t\tX.append(new_curr)\n\t\tY.append([float(curr[-1])])\n\nX = np.array(X)\nX = preprocessing.scale(X)\t\t# feature scaling\nY = np.array(Y)\n\n# the first 2500 out of 3000 emails will serve as training data\nX_train = X[0:2500]\nY_train = Y[0:2500]\n\n# the rest 500 emails will serve as testing data\nX_test = X[2500:]\ny_test = Y[2500:]\n\nX = X_train\ny = Y_train\n\n# we have 3 layers: input layer, hidden layer and output layer\n# input layer has 57 nodes (1 for each feature)\n# hidden layer has 4 nodes\n# output layer has 1 node\n\ndim1 = len(X_train[0])\ndim2 = 4\n\n# randomly initialize the weight vectors\nnp.random.seed(1)\nweight0 = 2 * np.random.random((dim1, dim2)) - 1\nweight1 = 2 * np.random.random((dim2, 1)) - 1\n\n# you can change the number of iterations\nfor j in xrange(2500):\n\t# first evaluate the output for each training email\n\tlayer_0 = X_train\n\tlayer_1 = sigmoid(np.dot(layer_0,weight0))\n\tlayer_1 = np.array(layer_1)\n\tlayer_2 = sigmoid(np.dot(layer_1,weight1))\n\n\t# calculate the error\n\tlayer_2_error = Y_train - layer_2\n\n\t# perform back propagation\n\tlayer_2_delta = layer_2_error * derivative(layer_2)\n\tlayer_1_error = 
layer_2_delta.dot(weight1.T)\n\tlayer_1_delta = layer_1_error * derivative(layer_1)\n\n\t# update the weight vectors\n\tweight1 += layer_1.T.dot(layer_2_delta)\n\tweight0 += layer_0.T.dot(layer_1_delta)\n\n# evaluation on the testing data\nlayer_0 = X_test\nlayer_1 = sigmoid(np.dot(layer_0,weight0))\nlayer_2 = sigmoid(np.dot(layer_1,weight1))\n\nfor t in range(1, 10):\n\tthreshold = t / 10.0\n\tcorrect = 0\n\t# if the output is > 0.5, then label as spam else no spam\n\tfor i in xrange(len(layer_2)):\n\t\tcurrent = 0\n\t\tif(layer_2[i][0] > threshold):\n\t\t\tcurrent = 1\n\t\telse:\n\t\t\tcurrent = 0\n\t\tif(current == y_test[i][0]):\n\t\t\tcorrect += 1\n\n\t# printing the output\n\tprint \"threshold = \", threshold\n\tprint \"total = \", len(layer_2)\n\tprint \"correct = \", correct\n\tprint \"accuracy = \", correct * 100.0 / len(layer_2)\n\tprint \"*****************************\"\n", "id": "6652629", "language": "Python", "matching_score": 0.024293672293424606, "max_stars_count": 3, "path": "tmp.py" }, { "content": "__author__ = 'krish'\n\nimport os\nimport wolframalpha\nfrom flask import Flask, request, Response, redirect\n\n\ntry:\n import config\n wol_id = config.wolframalpha['app_id']\nexcept:\n wol_id = os.environ.get('APP_ID')\n\n\nif not wol_id:\n import sys\n print 'No config.py found, exiting...'\n sys.exit(0)\n\n\napp = Flask(__name__)\n\nclient = wolframalpha.Client(wol_id)\n\n\n@app.route('/wa',methods=['post'])\ndef wa():\n '''\n :Example:\n /wa current weather in San Francisco?\n '''\n text = request.values.get('text')\n try:\n res = client.query(text)\n except UnicodeEncodeError:\n return Response(('Sorry I didn\\'t understand. Would you please simplify your query?'\n '%s is not a valid input.' % text),\n content_type='text/plain; charset=utf-8')\n resp_qs = ['Hi! The top answer for \"%s\"\\n' % text]\n resp_qs.extend(next(res.results).text)\n\n return Response(''.join(resp_qs),\n content_type='text/plain; charset=utf-8')\n\n@app.route('/')\ndef hello():\n return redirect('https://github.com/KrishMunot/WA-Slack-Bot/')\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT',5000))\n app.run(host='0.0.0.0', port=port)\n", "id": "12688153", "language": "Python", "matching_score": 2.3417303562164307, "max_stars_count": 1, "path": "app.py" }, { "content": "from slackclient import SlackClient\nimport requests\nimport time\nimport json\n\nslack_token = \"<KEY>\"\n\nsc = SlackClient(slack_token)\n\nclinton = 0\ntrump = 0\ncached_states = {}\nnop = 0\n\nwhile True:\n r = requests.get(\"https://intf.nyt.com/newsgraphics/2016/11-08-election-forecast/president.json\")\n\n electorate = r.json()[\"president\"][\"current\"][\"electoral_votes_counted\"]\n share = r.json()[\"president\"][\"current\"][\"vote_share_counted\"]\n\n states = r.json()[\"president\"][\"races\"]\n\n if clinton != electorate[\"clintonh\"] or trump != electorate[\"trumpd\"]:\n trump = electorate[\"trumpd\"]\n clinton = electorate[\"clintonh\"]\n\n johnvar = (share[\"clintonh\"] + share[\"johnsong\"]) - share[\"trumpd\"]\n print johnvar\n\n sc.api_call(\n \"chat.postMessage\",\n channel=\"#random\",\n text=\"Hillary now has \" + str(clinton) + \" (\" + str(share[\"clintonh\"] * 100) + \"%) electoral votes, and Trump has \" + str(trump) + \" (\" + str(share[\"trumpd\"] * 100) + \"%) electoral votes. Johnson is at \" + str(share[\"johnsong\"] * 100) + \"%.\"\n )\n\n sc.api_call(\n \"chat.postMessage\",\n channel=\"#random\",\n text=\"Hillary would be \" + str(johnvar) + \"% (pop. 
vote) over Trump if Johnson voters voted for her instead.\"\n )\n\n for state in states:\n try:\n stype = state[\"type\"]\n winner = state[\"current\"][\"winner\"]\n statev = state[\"state\"]\n except KeyError:\n print json.dumps(state)\n continue\n\n if stype != \"president\" or not winner:\n continue\n\n if statev in cached_states:\n continue\n\n cached_states[statev] = 1\n\n print statev\n\n if nop == 0:\n continue\n\n sc.api_call(\n \"chat.postMessage\",\n channel=\"#random\",\n text=state[\"current\"][\"winner\"][\"candidate_key\"] + \" probably won \" + state[\"state\"] + \".\"\n )\n nop = 1\n time.sleep(5)\n", "id": "11866785", "language": "Python", "matching_score": 1.323221206665039, "max_stars_count": 2, "path": "Bot.py" }, { "content": "from django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom googleplaces import GooglePlaces\n\nYOUR_API_KEY = '<KEY>'\n\ngoogle_places = GooglePlaces(YOUR_API_KEY)\n\ndef home(request):\n\treturn render_to_response('index.html', context_instance=RequestContext(request))\n\ndef map(request):\n\tngos_name = []\n\tngos_address = []\n\tngos = {}\n\tloc = \"\"\n\tif request.method == 'POST':\n\t\tloc = request.POST.get('location', '')\n\tquery_result = google_places.nearby_search(\n\t\t\tlocation=loc, keyword='ngos and non profit organizations',\n\t\t\tradius=200000)\n\tfor place in query_result.places:\n\t\tplace.get_details()\n\t\tngos[place.name] = [place.formatted_address, place.local_phone_number, place.international_phone_number, place.url, place.website]\n\treturn render_to_response('display_list2.html', {'location': loc, 'ngos':ngos}, context_instance=RequestContext(request))\n", "id": "1142479", "language": "Python", "matching_score": 1.1040546894073486, "max_stars_count": 0, "path": "NGeO/NGeO/views.py" }, { "content": "import os\nfrom bs4 import BeautifulSoup\nimport urllib2\n\ndef main():\n\tcounter = 0\n\tLabels = []\n\tsolutionUrls = []\n# Enter your username below within quotes\t\n\tusername = \"krishmunot\"\n\n# Creating new directory for saving code files\n\tnewpath = os.getcwd() + \"/codechef\" \n\tif not os.path.exists(newpath):\n\t os.makedirs(newpath)\n\turl=\"http://www.codechef.com/users/\" + username\n\n# Opening page && parsing html content. 
\n\tpage = urllib2.urlopen(url)\n\tsoup = BeautifulSoup(page.read(), \"html.parser\")\n\n# getting all links of the code submission \n\tLinks = soup.find('table', {'class' :None }).find_all('a')\n\n# removing first two links as they are irrelevant\n# such as personal and team links\n\tLinks = Links[2:]\n\n# Labels(names) of every question you solved\n\tfor Link in Links:\n\t\tLabels.append(Link.text)\n\n# All urls of questions you solved\n\tfor Link in Links:\n\t\tsolutionUrls.append(Link['href'])\n\n# Language Extension Dictionary\n# you can add more language extension if you need\n\tExtensions = {\t'C' : 'c',\n\t\t\t'C++ 4.3.2' : 'cpp',\n\t\t\t'PYTH' : 'py',\n\t\t\t'C++ 4.9.2' : 'cpp',\n\t\t\t'PYTH 3.4' : 'py',\n\t\t\t'JAVA' : 'java',\n\t\t\t\"C++ 4.8.1\": \"cpp\",\n\t\t\t\"C++14\": \"cpp\",\n\t\t\t\"C++11\": \"cpp\",\n\t\t\t\"C99 strict\": \"c\",\n\t\t\t\"C#\": \"cs\",\n\t\t\t\"F#\": \"fs\",\n\t\t\t\"PYTH 3.1.2\": \"py\",\n\t\t\t\"ASM\": \"asm\",\n\t\t\t\"PHP\": \"php\",\n\t\t\t\"TEXT\": \"txt\",\n\t\t\t\"PERL\": \"pl\",\n\t\t\t\"JS\": \"js\"\n\t\t\t}\n\n# Navigating to Every url of the solution & get the solution code\n\tfor link in solutionUrls:\n\t# generating link of every code submissions\n\t\tlink = \"http://codechef.com\" + link\n\n\t# get page content to get unique code of every submission\n\t\tpage = urllib2.urlopen(link)\n\t\tsoup = BeautifulSoup(page.read(), \"html.parser\")\n\n\t# getting code from this html page\n\t\tgetCode = soup.find('td', {'class' : 'centered', 'width': '75'}).find_all('a')\n\n\t# we need this while saving file\n\t\tFileExtension = soup.find('td', {'class' : 'centered', 'width': '70'}).text\n\n\t# Generating link for the raw code\n\t\ttempString = getCode[0]['href'].replace(\"viewsolution\", \"viewplaintext\")\n\n\t# modifying link to get raw code easily\n\t\tSourceCodeLink = \"http://codechef.com\" + tempString\n\n\t# Opening page and getting html content\n\t\tpage = urllib2.urlopen(SourceCodeLink)\n\t\tsoup = BeautifulSoup(page.read(), \"html.parser\")\n\n\t# saving raw code in 'SourceCode' variable.\n\t\tSourceCode = soup.find('pre', {'class' : None }).text\n\n\t# Getting submission code name for saved labels && extension from dictionary\n\t\tfilename = Labels[counter] + '.' + Extensions[FileExtension]\n\n\t# Opening that file and saving code in it.\n\t\twith open(os.path.join(newpath, filename), 'wb') as temp_file:\n\t\t temp_file.write(SourceCode)\n\t\t\n\t# Yeah, counter is for the labels list.\n\t\tcounter += 1\n\t\t\nif __name__ == '__main__':\n\tmain()\n", "id": "1427444", "language": "Python", "matching_score": 0.6319476366043091, "max_stars_count": 1, "path": "code.py" } ]
1.213638
benberg86
[ { "content": "import sys\nfrom modules import octopy\n\n", "id": "2296973", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/octopy/createEnvironment.py" } ]
0
gridranger
[ { "content": "# gfx\nscreen_width = 1280\nscreen_height = 720\nfps = 60\n", "id": "7966409", "language": "Python", "matching_score": 0.7913757562637329, "max_stars_count": 0, "path": "settings.py" }, { "content": "from logging import debug, info, basicConfig\nfrom random import choice, randrange\n\nbasicConfig(level=\"INFO\")\n\n\ndef get_bitwise_result(n):\n heads = [index for index, number in enumerate(n[::-1]) if number == \"1\"]\n result = 0\n for value in heads:\n result = result ^ value\n debug(f\"Bitwise result of heads {heads} is {result}.\")\n return result\n\n\ndef get_coin_to_flip(board_width: int = 8, coin_layout: str = \"\", where_to_hide: int = -1, draw: bool = True):\n if coin_layout:\n coin_layout = (\"0\" * ((board_width ** 2) - len(coin_layout)) + coin_layout)\n else:\n coin_layout = \"\".join([choice([\"0\", \"1\"]) for i in range(board_width ** 2)])\n key_is_hidden_at = where_to_hide if where_to_hide != -1 else randrange(0, board_width ** 2)\n info(f\"Board size {board_width}×{board_width}; key is hidden at square {key_is_hidden_at}.\")\n debug(f\"Coin layout: {coin_layout}\")\n # First convict\n suggestion = get_bitwise_result(coin_layout)\n coin_to_flip = ((board_width ** 2) - 1) ^ (key_is_hidden_at ^ suggestion)\n info(f\"The coin to flip is the one at square {coin_to_flip}.\")\n new_layout_as_list = list(coin_layout)\n new_layout_as_list[coin_to_flip] = \"1\" if new_layout_as_list[coin_to_flip] == \"0\" else \"0\"\n new_coin_layout = \"\".join(new_layout_as_list)\n debug(f\"New coin layout: {new_coin_layout}\")\n # Second convict\n check_value = get_bitwise_result(new_coin_layout)\n assert check_value == key_is_hidden_at\n info(f\"Successful check, key was found at: {check_value}\")\n if draw:\n draw_result(board_width, coin_layout, key_is_hidden_at, coin_to_flip)\n\n\ndef draw_result(size: int, layout: str, key: int, flip: int):\n result = [\"\\n\\n*: key location; @: coin to flip\"]\n horizontal = \"----\".join((size + 1) * \"+\")\n for row in range(size):\n result.append(horizontal)\n column_content = []\n for column in range(size):\n value = row * size + column\n column_content.append(f\"{layout[value-1]} {value:2}\")\n if flip == value:\n column_content[-1] = column_content[-1][:-2] + \" @\"\n if key == value:\n column_content[-1] = column_content[-1][:-2] + \" *\"\n result.append(f\"|{'|'.join(column_content)}|\")\n result.append(horizontal)\n info(\"\\n\".join(result))\n\n\ndef mass_check():\n passed = failed = 0\n for i in range(1000):\n try:\n get_coin_to_flip(draw=False)\n passed += 1\n except AssertionError:\n failed += 1\n info(f\"PASS: {passed}; FAIL: {failed}\")\n\n\nget_coin_to_flip()\n", "id": "6336801", "language": "Python", "matching_score": 0.6489049196243286, "max_stars_count": 0, "path": "jail.py" }, { "content": "from unittest import TestCase\nfrom unittest.mock import MagicMock, Mock\n\nfrom pygame import K_F4, K_LALT, K_RALT, KEYDOWN\n\nimport main\nfrom main import Main\nfrom ui import TitleScene\n\n\nclass TestMain(TestCase):\n\n def setUp(self):\n def dummy_init(self):\n pass # pragma: no cover\n self.orig_init = Main.__init__\n Main.__init__ = dummy_init\n self.main = Main()\n self.main.active_scene = MagicMock(spec=TitleScene)\n\n def test___init__(self):\n main.init = Mock()\n main.set_mode = Mock()\n main.set_caption = Mock()\n Main.__init__ = self.orig_init\n m = Main()\n main.init.assert_called()\n main.set_mode.assert_called_with((1280, 720))\n self.assertTrue(\" v\" in main.set_caption.call_args[0][0])\n 
self.assertTrue(hasattr(m.clock, \"tick\"))\n self.assertTrue(hasattr(m.active_scene, \"process_input\"))\n\n def test__clean_events_quit_attempt(self):\n main.get_events = Mock(return_value=[Mock(type=256)])\n self.main._clean_events([])\n self.main.active_scene.terminate.assert_called()\n\n def test__clean_events_alt_f4(self):\n main.get_events = Mock(return_value=[Mock(key=K_F4, type=KEYDOWN)])\n self.main._clean_events({K_LALT: True, K_RALT: False})\n self.main.active_scene.terminate.assert_called()\n\n def test__clean_events(self):\n main.get_events = Mock(return_value=[Mock()])\n result = self.main._clean_events([])\n self.assertEqual(main.get_events.return_value, result)\n\n def test_run_game(self):\n self.main.clock = Mock(tick=Mock())\n\n def dummy_flip():\n if self.main.clock.tick.call_count:\n self.main.active_scene = None\n main.flip = dummy_flip\n main.get_pressed = Mock(return_value=[1, 2, 3])\n self.main.screen = Mock()\n scene = self.main.active_scene\n self.main._clean_events = Mock(return_value=[0])\n self.main.run_game()\n main.get_pressed.assert_called()\n self.main._clean_events.assert_called_with([1, 2, 3])\n scene.process_input.assert_called_with([0], [1, 2, 3])\n scene.update.assert_called()\n scene.render.assert_called_with(self.main.screen)\n self.main.clock.tick.assert_called()\n", "id": "4020435", "language": "Python", "matching_score": 4.421316146850586, "max_stars_count": 0, "path": "test/test_main.py" }, { "content": "from typing import List, Sequence\n\nfrom pygame import init, KEYDOWN, K_F4, K_LALT, K_RALT\nfrom pygame.display import flip, set_mode, set_caption\nfrom pygame.event import Event, get as get_events\nfrom pygame.key import get_pressed\nfrom pygame.locals import QUIT\nfrom pygame.time import Clock\n\nfrom settings import fps, screen_height, screen_width\nfrom ui import TitleScene\n\n\nclass Main:\n title = \"Számjáték\"\n version = \"0.1\"\n\n def __init__(self):\n init()\n self.screen = set_mode((screen_width, screen_height))\n set_caption(f\"{Main.title} v{Main.version}\")\n self.clock = Clock()\n self.active_scene = TitleScene()\n\n def _clean_events(self, pressed_keys: Sequence[bool]) -> List[Event]:\n result = []\n for current_event in get_events():\n quit_attempt = False\n if current_event.type == QUIT:\n quit_attempt = True\n elif current_event.type == KEYDOWN:\n alt_pressed = pressed_keys[K_LALT] or pressed_keys[K_RALT]\n if current_event.key == K_F4 and alt_pressed:\n quit_attempt = True\n if quit_attempt:\n self.active_scene.terminate()\n else:\n result.append(current_event)\n return result\n\n def run_game(self):\n while self.active_scene:\n pressed_keys = get_pressed()\n events = self._clean_events(pressed_keys)\n self.active_scene.process_input(events, pressed_keys)\n self.active_scene.update()\n self.active_scene.render(self.screen)\n self.active_scene = self.active_scene.next_scene\n flip()\n self.clock.tick(fps)\n\n\nif __name__ == \"__main__\":\n Main().run_game() # pragma: no cover\n", "id": "9647057", "language": "Python", "matching_score": 2.1893131732940674, "max_stars_count": 0, "path": "main.py" }, { "content": "from abc import abstractmethod\nfrom typing import List, Sequence, Union\n\nfrom pygame.event import Event\nfrom pygame.surface import Surface\n\n\nclass Scene:\n def __init__(self):\n self._next_scene = self\n\n @property\n def next_scene(self) -> \"Scene\":\n return self._next_scene\n\n @abstractmethod\n def process_input(self, events: List[Event], pressed_keys: Sequence[bool]):\n \"\"\"overwrite it\"\"\"\n\n 
@abstractmethod\n def update(self):\n \"\"\"overwrite it\"\"\"\n\n @abstractmethod\n def render(self, screen: Surface):\n \"\"\"overwrite it\"\"\"\n\n def switch_to_scene(self, next_scene: Union[\"Scene\", None]):\n self._next_scene = next_scene\n\n def terminate(self):\n self.switch_to_scene(None)\n", "id": "10845346", "language": "Python", "matching_score": 3.0537145137786865, "max_stars_count": 0, "path": "ui/scenes/scene.py" }, { "content": "from typing import List, Sequence\n\nfrom pygame import K_1, K_2, K_3\nfrom pygame.event import Event\nfrom pygame.surface import Surface\n\nfrom .scene import Scene\n\n\nclass TitleScene(Scene):\n color = (0, 0, 0)\n\n def process_input(self, events: List[Event], pressed_keys: Sequence[bool]):\n if pressed_keys[K_1]:\n self.color = (255, 0, 0)\n elif pressed_keys[K_2]:\n self.color = (0, 255, 0)\n elif pressed_keys[K_3]:\n self.color = (0, 0, 255)\n\n def update(self):\n pass\n\n def render(self, screen: Surface):\n screen.fill(self.color)\n", "id": "6371477", "language": "Python", "matching_score": 0.07734301686286926, "max_stars_count": 0, "path": "ui/scenes/title.py" }, { "content": "# Mount & Blade Music Manager - User interface module\n# Written by <NAME> (2014-2015)\n# License: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)\n# http://creativecommons.org/licenses/by-nc-sa/4.0/\n\nfrom glob import glob\nfrom os import listdir,makedirs,path\nfrom shutil import copy2\nfrom tkinter import Button, Canvas, Checkbutton, Entry, filedialog, Frame, IntVar, Label, Menu, messagebox, Scrollbar, StringVar, Tk\nfrom tkinter.ttk import Combobox\nfrom xml.etree.ElementTree import Element,ElementTree,parse,SubElement\nfrom mbmm import MusicTxt, Track\n\nclass AppWindow(Tk):\n \"\"\"main window\"\"\"\n def __init__(self):\n Tk.__init__(self)\n self.title(\"Mount & Blade Music Manager\")\n self.wm_geometry(\"600x600+100+100\")\n self.protocol(\"WM_DELETE_WINDOW\", self.exit)\n self.languageIsSet = False\n self.conf = Config()\n self.language = self.readLanguages()\n self.musicTxt = MusicTxtPalceHolder()\n self.menubar = AppMenu(self)\n self.config(menu = self.menubar)\n self.musicFileName = \"music.txt\"\n self.musicFolderName = \"Music\"\n self.game_db = self.conf.readTypes()\n self.game_type = None\n self.rowdict = {}\n self.worktop = Frame(self)\n self.changed = False\n self.order = (\"fileName\", False) # other possible values are \"id\" and True\n self.rownum = 0\n self.createCanvas()\n \n def createCanvas(self):\n \"\"\"generating widgets\"\"\"\n self.worktop.destroy()\n self.worktop = Frame(self)\n self.worktop.pack(side=\"bottom\")\n self.buttonline = Frame(self.worktop)\n self.buttonline.pack(side=\"top\", anchor=\"w\")\n self.canvas = Canvas(self.worktop)\n self.mainframe = Frame(self.canvas)\n self.scrollbar = Scrollbar(self.worktop, orient=\"vertical\", command=self.canvas.yview)\n \n def showCanvas(self):\n \"\"\"shows widgets\"\"\"\n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n self.scrollbar.pack(side=\"right\",fill=\"y\")\n self.canvas.pack(side=\"left\")\n self.canvas.create_window((0,0), window = self.mainframe, anchor = \"nw\")\n self.mainframe.bind(\"<Configure>\", self.scrolling)\n \n def openModule(self):\n \"\"\"opens a module\"\"\"\n if not self.confirmColse():\n return\n self.createCanvas()\n d = filedialog.askdirectory() + \"/\"\n if d == \"/\":\n return\n if not path.exists(d + self.musicFileName) or not path.exists(d + self.musicFolderName): # check if txt and folder is in the right place\n 
answer = messagebox.askyesno(self.conf.ui[\"question\"].get(), self.conf.ui[\"noMusicTxt\"].get()) # if no we ask if we shall create them\n if answer: # if he said yes\n if not path.exists(d + self.musicFolderName): # we check if folder is exists\n makedirs(d + self.musicFolderName) # if not we create it\n if not path.exists(d + self.musicFileName): # ans also check if the file is exits\n with open(d + self.musicFileName, \"w\") as file: # and if not we create it too\n file.write(str(1) + MusicTxt.header)\n else:\n return\n # finding game type\n self.game_type = None\n for known_game in self.game_db:\n if known_game in d: # if game title is in the path, we set it as the current game_type\n self.game_type = known_game\n break\n self.musicTxt = MusicTxt(d + \"/music.txt\")\n self.musicTxt.order_collection(\"fileName\")\n # if automatic identifying the game was unsuccesful we compare the file content against the type dicts\n if self.game_type is None:\n counters = {}\n for known_game in self.game_db:\n counters[known_game] = {\"good\": 0, \"bad\": 0, \"rate\": 0.0}\n id_list = []\n for track in self.musicTxt.collection_get():\n id_list.append(track.getId())\n id_list = set(id_list)\n for type_id in id_list:\n for known_game in self.game_db:\n if type_id in self.game_db[known_game][\"by_id\"]:\n counters[known_game][\"good\"] += 1\n else:\n counters[known_game][\"bad\"] += 1\n for known_game in counters:\n counters[known_game][\"rate\"] = counters[known_game][\"good\"] / (counters[known_game][\"good\"] + counters[known_game][\"bad\"])\n games_by_succes_rate = sorted(counters.keys(), key=lambda item: counters[item][\"rate\"], reverse=True)\n self.game_type = games_by_succes_rate[0]\n if counters[self.game_type][\"rate\"] != 1.0:\n messagebox.showinfo(self.conf.ui[\"gametype_title\"].get(), self.conf.ui[\"gametype\"].get())\n self.rownum = 0\n xbutton = Button(self.buttonline, text = \"\\u2620\", relief=\"flat\", command = self.invertSelection) # 274C\n xbutton.pack(side=\"left\", padx = 2)\n self.titleButton = Button(self.buttonline, textvariable = self.conf.ui[\"title_asc\"], width = 34, command = self.sortByTitle)\n self.titleButton.pack(side=\"left\", padx = 2)\n addbutton = Button(self.buttonline, text = \"\\u271A\", command = self.newRow)\n addbutton.pack(side=\"left\")\n self.topicButton = Button(self.buttonline, textvariable = self.conf.ui[\"topic\"], width = 36, command = self.sortByTopic)\n self.topicButton.pack(side=\"left\", padx = 4)\n fileList = self.readMusicFolder()\n registeredFileList = []\n missingFileCounter = 0\n for track in self.musicTxt.collection_get():\n self.rowdict[\"pk%s\" % self.rownum] = Row(self.mainframe, track)\n self.rowdict[\"pk%s\" % self.rownum].pack()\n registeredFileList.append(track.getFn())\n if track.getFn() not in fileList:\n missingFileCounter += 1\n self.rowdict[\"pk%s\" % self.rownum].isToRemove.set(1)\n self.rownum += 1\n for file in fileList:\n if file not in registeredFileList:\n track = self.musicTxt.addTrack(file, '0 0')\n self.rowdict[\"pk%s\" % self.rownum] = Row(self.mainframe, track)\n self.rowdict[\"pk%s\" % self.rownum].pack()\n self.rownum += 1\n self.showCanvas()\n self.menubar.enabler(True)\n if missingFileCounter > 0:\n messagebox.showwarning(self.conf.ui[\"missingFilesTitle\"].get(), self.conf.ui[\"missingFiles\"].get())\n \n def confirmColse(self):\n \"\"\"asks the user if he or she wants to discard the unsaved changes\"\"\"\n answer = True\n if self.changed:\n answer = messagebox.askyesno(self.conf.ui[\"question\"].get(), 
self.conf.ui[\"unsaved\"].get())\n return answer\n \n def readLanguages(self):\n \"\"\"get language files' name from main folder\"\"\"\n languageFileDict = {}\n languageFiles = glob(\"lang-*.xml\")\n for file in languageFiles:\n languageFileDict[file[5:-4]] = file\n return languageFileDict\n \n def getLanguageKeys(self):\n \"\"\"passes the registered languages key sorted\"\"\"\n return sorted(self.language.keys())\n \n def scrolling(self, event):\n self.canvas.configure(scrollregion = self.canvas.bbox(\"all\"), width = 580, height = 600)\n \n def set_changed(self):\n self.changed = True\n \n def save(self):\n self.musicTxt.save()\n self.changed = False\n \n def exit(self):\n if not self.confirmColse():\n return\n self.quit()\n \n def about(self):\n messagebox.showinfo(self.conf.ui[\"businesscard\"].get(), \"Mount & Blade Music Manager v1.0\\n\" + self.conf.ui[\"by\"].get() + \" <NAME>\")\n \n def sortByTitle(self):\n if self.order[0] == \"fileName\" and not self.order[1]:\n self.order = (\"fileName\", True)\n self.titleButton.config(textvariable = self.conf.ui[\"title_desc\"])\n else:\n self.order = (\"fileName\", False)\n self.titleButton.config(textvariable = self.conf.ui[\"title_asc\"])\n self.topicButton.config(textvariable = self.conf.ui[\"topic\"])\n self.sortList()\n \n def sortByTopic(self):\n if self.order[0] == \"id\" and not self.order[1]:\n self.order = (\"id\", True)\n self.topicButton.config(textvariable = self.conf.ui[\"topic_desc\"])\n else:\n self.order = (\"id\", False)\n self.topicButton.config(textvariable = self.conf.ui[\"topic_asc\"])\n self.titleButton.config(textvariable = self.conf.ui[\"title\"])\n self.sortList()\n \n def sortList(self):\n if \"mainframe\" in locals():\n self.mainframe.destroy()\n self.mainframe = Frame(self.canvas)\n self.canvas.create_window((0,0), window=self.mainframe, anchor = \"nw\")\n self.mainframe.bind(\"<Configure>\", self.scrolling)\n self.rownum = 0\n qset = []\n if self.order[0] == \"fileName\" and len(self.musicTxt.collection) > 0:\n self.musicTxt.order_collection(self.order[0], self.order[1])\n qset = self.musicTxt.collection_get()\n elif len(self.musicTxt.collection) > 0:\n qset = sorted(self.musicTxt.collection_get(), key=lambda track: self.game_db[self.game_type][\"by_id\"][track.getId()].getName(), reverse = self.order[1])\n for track in qset:\n self.rowdict[\"pk%s\" % self.rownum] = Row(self.mainframe, track)\n self.rowdict[\"pk%s\" % self.rownum].pack()\n self.rownum += 1\n \n def readMusicFolder(self):\n fileList = []\n path = self.musicTxt.getPath()\n for file in listdir(path[:-9] + self.musicFolderName):\n if file.endswith(\".mp3\") or file.endswith(\".ogg\") or file.endswith(\".wav\"):\n fileList.append(file)\n if \"Modules\" in path:\n for file in listdir(path[:path.find(\"Modules\")] + self.musicFolderName):\n if file.endswith(\".mp3\") or file.endswith(\".ogg\") or file.endswith(\".wav\"):\n fileList.append(file)\n return fileList\n \n def changeLanguage(self, languageCode = \"EN\"):\n if self.languageIsSet:\n self.conf.setLanguageCode(languageCode)\n self.conf.loadLanguage(False)\n self.menubar.loadText()\n self.game_db = self.conf.readTypes()\n self.sortList()\n else:\n self.languageIsSet = True \n \n def invertSelection(self):\n for row in self.rowdict.keys():\n self.rowdict[row].isToRemove.set(not self.rowdict[row].isToRemove.get())\n \n def newRow(self):\n \"\"\"creates a new row and track objects\"\"\"\n track = self.musicTxt.addTrack(\"noFileSelected.yet\", '0 0')\n self.rowdict[\"pk%s\" % self.rownum] = 
Row(self.mainframe, track)\n self.rowdict[\"pk%s\" % self.rownum].browseFile()\n self.rowdict[\"pk%s\" % self.rownum].pack()\n self.rownum += 1\n \nclass AppMenu(Menu):\n \"\"\"menubar\"\"\"\n def __init__(self, master):\n Menu.__init__(self, master = master)\n self.master = master\n languages = master.getLanguageKeys()\n self.filemenu = Menu(self, tearoff = 0)\n self.filemenu.add_command(command = master.openModule)\n self.filemenu.add_command(command = master.save, state=\"disabled\")\n self.filemenu.insert_separator(3)\n self.filemenu.add_command(command = master.exit) \n self.add_cascade(menu = self.filemenu)\n self.language = Menu(self, tearoff = 0)\n for lang in languages:\n self.language.add_radiobutton(label = lang, command = lambda x = lang : master.changeLanguage(x))\n self.language.invoke(languages.index(master.conf.getLanguageCode()))\n self.appmenu = Menu(self, tearoff = 0)\n self.appmenu.add_cascade(menu = self.language)\n self.appmenu.add_command(command = master.about)\n self.add_cascade(menu = self.appmenu)\n self.loadText()\n\n def loadText(self):\n self.filemenu.entryconfig(0, label = self.master.conf.ui[\"open\"].get())\n self.filemenu.entryconfig(1, label = self.master.conf.ui[\"save\"].get())\n self.filemenu.entryconfig(3, label = self.master.conf.ui[\"exit\"].get())\n self.entryconfig(1, label = self.master.conf.ui[\"module\"].get()) \n self.appmenu.entryconfig(0, label = self.master.conf.ui[\"language\"].get())\n self.appmenu.entryconfig(1, label = self.master.conf.ui[\"about\"].get())\n self.entryconfig(2, label = self.master.conf.ui[\"application\"].get())\n \n def enabler(self, enable = True):\n \"\"\"enables or disables menu items\"\"\"\n if enable:\n self.filemenu.entryconfig(1, state=\"normal\")\n else:\n self.filemenu.entryconfig(1, state=\"disabled\")\n \nclass Config():\n \"\"\"settings\"\"\"\n def __init__(self):\n self.cfgName = \"config.xml\"\n self.ui = {}\n self.file = parse(self.cfgName)\n self.root = self.file.getroot()\n self.languageCode = self.root.find(\"language\").text\n self.loadLanguage()\n \n def loadLanguage(self, initialLoad = True):\n \"\"\"loading language resources\"\"\"\n file = parse(\"lang-%s.xml\" % self.languageCode)\n root = file.getroot()\n for item in root.findall(\"item\"):\n id = item.get(\"id\")\n text = item.text\n if initialLoad:\n self.ui[id] = StringVar()\n if id in [\"title\", \"topic\"]:\n self.ui[id + \"_asc\"] = StringVar()\n self.ui[id + \"_desc\"] = StringVar()\n self.ui[id].set(text)\n if id in [\"title\", \"topic\"]:\n self.ui[id + \"_asc\"].set(text + \" \\u25B2\")\n self.ui[id + \"_desc\"].set(text + \" \\u25BC\")\n \n def getLanguageCode(self):\n return self.languageCode\n \n def setLanguageCode(self, newCode):\n self.languageCode = newCode\n self.saveConfig()\n \n def saveConfig(self):\n cfg = Element('config')\n file = ElementTree(cfg)\n lang = SubElement(cfg, 'language')\n lang.text = self.languageCode\n file.write(self.cfgName, xml_declaration = True, encoding = 'utf-8', method = 'xml')\n \n def readTypes(self):\n \"\"\"reads the types dictionary from XML and returns it\"\"\"\n file = parse(\"types-%s.xml\" % self.languageCode)\n root = file.getroot()\n game_db = {}\n for game_type in root.findall(\"types\"):\n game_name = game_type.get(\"game\")\n game_db[game_name] = {\"by_type\": {}, \"by_id\": {}}\n for item in game_type.findall(\"item\"):\n id = item.get(\"id\")\n text = item.text\n newType = Type(text, id)\n game_db[game_name][\"by_type\"][text] = newType\n game_db[game_name][\"by_id\"][id] = 
newType\n return game_db\n\nclass Row(Frame):\n \"\"\"widget that displays a Track object as a row\"\"\" \n def __init__(self, master, track):\n Frame.__init__(self, master = master)\n self.root = master.master.master.master\n self.track = track # putting a track to a var\n self.isToRemove = IntVar()\n self.isToRemove.set(self.track.getIsToRemove())\n self.isToRemove.trace(\"w\", self.checkbuttonOnChange)\n chkbx = Checkbutton(self, variable = self.isToRemove)\n chkbx.pack(side=\"left\")\n # Trackname\n self.titleVar = StringVar() # getting a var for onChange event\n self.titleVar.set(self.track.fileName) # setting default value\n self.titleVar.trace(\"w\", self.titleVar_onchange) # binding to onChange event\n self.title = Entry(self, textvariable = self.titleVar, width = 40) # creating form element\n self.title.pack(side=\"left\")\n # Browse button\n self.threedots = Button(self, text = \"...\", command = self.browseFile)\n self.threedots.pack(side=\"left\", padx = 5)\n # Dropdown for type\n self.typeVar = StringVar()\n if self.track.id not in self.root.game_db[self.root.game_type][\"by_id\"].keys():\n self.track.setId(\"0 0\")\n self.typeVar.set(self.root.game_db[self.root.game_type][\"by_id\"][self.track.id].getName())\n self.typeVar.trace(\"w\", self.typeVar_onchange) # binding to onChange event\n self.type = Combobox(self, textvariable=self.typeVar, width = 40)\n self.type['values'] = sorted(self.root.game_db[self.root.game_type][\"by_type\"].keys())\n if self.isToRemove.get():\n for var in [self.title, self.threedots, self.type]:\n var.config(state = \"disabled\")\n self.type.pack(side=\"left\")\n \n def titleVar_onchange(self, a, b, c):\n \"\"\"onChange event for titleVar\"\"\"\n self.track.setFn(self.titleVar.get())\n self.root.set_changed()\n \n def typeVar_onchange(self, a, b, c):\n \"\"\"onChange event for typeVar\"\"\"\n self.track.setId(self.root.game_db[self.root.game_type][\"by_type\"][self.typeVar.get()].getId())\n self.root.set_changed()\n \n def removeTrack(self):\n \"\"\"removes the current track from the collection\"\"\"\n self.root.musicTxt.removeTrack(self.track)\n \n def checkbuttonOnChange(self, a, b, c):\n \"\"\"enables, disables row\"\"\"\n if self.isToRemove.get():\n self.track.setIsToRemove()\n newState = \"disabled\"\n else:\n newState = \"normal\"\n self.track.setIsToRemove(False)\n for var in [self.title, self.threedots, self.type]:\n var.config(state = newState)\n \n def browseFile(self):\n \"\"\"let users pick additional files from disk\"\"\"\n filepath = filedialog.askopenfilename(parent = self.root, filetypes = [(\"MP3 \" + self.root.conf.ui[\"files\"].get(), \".mp3\"), (\"OGG \" + self.root.conf.ui[\"files\"].get(), \".ogg\"), (\"WAV \" + self.root.conf.ui[\"files\"].get(), \".wav\")])\n if filepath:\n filename = filepath.rsplit('/', 1)[1].replace(\" \",\"_\").replace(\"'\", \"\")\n newfile = copy2(filepath, self.root.musicTxt.getPath()[:-9] + self.root.musicFolderName + '/' + filename)\n self.titleVar.set(filename)\n else:\n return\n\nclass Type():\n \"\"\"a simple Type object\"\"\"\n def __init__(self, name, id):\n self.name = name\n self.id = id\n \n def getName(self):\n return self.name\n \n def getId(self):\n return self.id \n \nclass MusicTxtPalceHolder():\n \"\"\"enables some functions before a real MusicTxt object is opened\"\"\"\n def __init__(self):\n self.collection = []\n\n def save(save):\n pass\n\nif __name__ == \"__main__\":\n a = AppWindow()\n a.mainloop()", "id": "5900177", "language": "Python", "matching_score": 5.145945072174072, 
"max_stars_count": 0, "path": "mbmmui.pyw" }, { "content": "# Mount & Blade Music Manager - Business logic module\n# Written by <NAME> (2014-2015)\n# License: Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)\n# http://creativecommons.org/licenses/by-nc-sa/4.0/\n\nclass MusicTxt():\n \"\"\"class of the main data collection\"\"\" \n header = '\\ncant_find_this.ogg 0 0\\n'\n \n def __init__(self, path = \"\"):\n self.collection = []\n self.path = path\n readSuccess = self.read(self.path) \n \n def read(self, path):\n \"\"\"reading from file\"\"\"\n with open(path, \"r\") as file:\n records = [line.rstrip() for line in file] # reading the lines to a list\n records = list(filter(('').__ne__, records)) # removing empty lines\n first_record = 1\n if \" 0 0\" in records[1]:\n first_record = 2\n for record in records[first_record:]: # looping through the file\n try:\n name, id = tuple(record.split(' ',1)) # analizing data\n except ValueError:\n name = record\n value = \"000\"\n self.addTrack(name, id) # generating Tracks\n \n def addTrack(self, name, id = \"0 0\"):\n \"\"\"adds a Track to the collection, needs a file name and id as argument\"\"\"\n newTrack = Track(name, id)\n self.collection.append(newTrack)\n return newTrack\n \n def removeTrack(self, track):\n \"\"\"removes a Track from the collection, needs a Track object as argument\"\"\"\n self.collection.remove(track)\n \n def order_collection(self, order_by = \"\", reverse = False):\n \"\"\"orders the collection by 'fileName' or 'id'\"\"\"\n if order_by == \"fileName\":\n self.collection.sort(key=lambda item: item.fileName.lower(), reverse=reverse)\n else:\n self.collection.sort(key=lambda item: item.id, reverse=reverse)\n \n def backup(self):\n data = \"\"\n with open(self.path) as original:\n data = original.read()\n with open(self.path + \".original\", \"w\") as backup:\n backup.write(data)\n \n def save(self):\n \"\"\"exports the collection to file\"\"\"\n exportList = []\n for item in self.collection:\n if not item.getIsToRemove() and item.id != \"0 0\":\n exportList.append(item)\n trackList = sorted(exportList, key = lambda item: item.fileName)\n content = str(len(trackList) +1) + self.header # the plus 1 represents the track in the header\n for track in trackList:\n content += \"%s %s\\n\" % (track.fileName, track.id)\n with open(self.path, \"w\") as file:\n file.write(content)\n \n def collection_get(self):\n return self.collection\n \n def setPath(self, newPath):\n self.path = newPath\n \n def getPath(self):\n return self.path\n \nclass Track():\n \"\"\"class to represent a line\"\"\"\n def __init__(self, musicFileName = '', id = '0 0'):\n self.fileName = musicFileName\n self.id = id\n self.isToRemove = False\n \n def save(self):\n \"\"\"returns the whole line as string for saving the MusicTxt object\"\"\"\n value = '%s %s' % (self.fileName, self.id)\n return value\n \n def getFn(self):\n return self.fileName\n \n def setFn(self, newFn = \"\"):\n self.fileName = newFn\n \n def getId(self):\n return self.id\n \n def setId(self, newId):\n self.id = newId\n \n def setIsToRemove(self, value = True):\n self.isToRemove = value\n \n def getIsToRemove(self):\n return self.isToRemove", "id": "1183104", "language": "Python", "matching_score": 0.11891115456819534, "max_stars_count": 0, "path": "mbmm.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom sys import argv\n\n__author__ = '<NAME>'\n\n# Feets\nl = \"-\" # LONG\ns = \"μ\" # SHORT\ndactyl = \"-μμ\"\niamb = \"μ-\"\npyrrich = \"μμ\"\nspondee = \"--\"\ntrochee = 
\"-μ\"\niambic = [iamb, pyrrich, spondee]\n# add new feet above this line\n\n# Lines\nhexameter = [[dactyl, spondee], [dactyl, spondee], [dactyl, spondee], [dactyl, spondee], [dactyl],\n [spondee, trochee]]\npentameter = [[dactyl, spondee], [dactyl, spondee], [l, s], [dactyl], [dactyl], [l, s]]\npentameter_closure = [[dactyl], [dactyl], [l, s]]\niambic_tetrameter = [iambic, iambic, iambic, [iamb], [s, \"\"]]\n# add new line structure before this line\nknown_formats = {\"hexameter\": hexameter,\n \"pentameter\": pentameter,\n \"pentameter closure\": pentameter_closure,\n \"iambic tetrameter\": iambic_tetrameter}\n\n\nclass WrongFeet(RuntimeError):\n pass\n\n\nclass NotEnoughFeet(RuntimeError):\n pass\n\n\nclass MoraFinder(object):\n long_vowels = [\"Á\", \"É\", \"Í\", \"Ó\", \"Ő\", \"Ú\", \"Ű\"]\n vowels = [\"A\", \"Á\", \"E\", \"É\", \"I\", \"Í\", \"O\", \"Ó\", \"Ö\", \"Ő\", \"U\", \"Ú\", \"Ü\", \"Ű\"]\n single_letter_consonants = [\"B\", \"C\", \"D\", \"F\", \"G\", \"H\", \"J\", \"K\", \"L\", \"M\", \"N\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\n multi_character_consonants = [\"DZS\", \"CS\", \"DZ\", \"GY\", \"LY\", \"NY\", \"SZ\", \"TY\", \"ZS\"]\n consonants = single_letter_consonants + multi_character_consonants\n\n def is_vowel(self, letter):\n return True if letter in self.vowels else False\n\n def is_consonant(self, letter):\n return True if letter in self.consonants else False\n\n def check_line(self, line):\n pseudosyllabes = self.split_to_pseudosyllabes(line)\n feet = \"\"\n for pseudosyllabe in pseudosyllabes:\n feet += self.get_length(pseudosyllabe)\n return self.recognize_forms(feet)\n\n def split_to_pseudosyllabes(self, line):\n line = line.upper()\n result = []\n current_pseudosyllabe = \"\"\n for character in line:\n if self.is_vowel(character) and current_pseudosyllabe == \"\":\n current_pseudosyllabe += character\n elif self.is_vowel(character):\n result.append(str(current_pseudosyllabe))\n current_pseudosyllabe = \"\"\n current_pseudosyllabe += character\n elif self.is_consonant(character) and current_pseudosyllabe == \"\":\n continue\n elif self.is_consonant(character):\n current_pseudosyllabe += character\n if current_pseudosyllabe:\n result.append(current_pseudosyllabe)\n return result\n\n def get_length(self, pseudosyllabe):\n current_vowel = pseudosyllabe[0]\n current_consonants = pseudosyllabe[1:]\n if current_vowel in self.long_vowels:\n return l\n for multi_caracter_consonant in self.multi_character_consonants:\n current_consonants = current_consonants.replace(multi_caracter_consonant, \"0\")\n for consonant in self.consonants:\n current_consonants = current_consonants.replace(consonant, \"0\")\n if len(current_consonants) in [0, 1]:\n return s\n else:\n return l\n\n def recognize_forms(self, feet):\n log = []\n for name, current_format in known_formats.items():\n log.append(\"Checking as {}...\".format(name))\n try:\n self.recognize_form(current_format, feet)\n except (NotEnoughFeet, WrongFeet) as e:\n log.append(\" {}\".format(e))\n else:\n print(\"Line identified as {}\".format(name))\n return current_format\n print(\"\\n\".join(log))\n\n @staticmethod\n def recognize_form(expected_form, raw_feet):\n final_form = []\n for feet_alternatives in expected_form:\n if any(raw_feet.startswith(feet_alternative) for feet_alternative in feet_alternatives):\n for feet_alternative in feet_alternatives:\n if raw_feet.startswith(feet_alternative):\n final_form.append(feet_alternative)\n raw_feet = raw_feet.replace(feet_alternative, \"\", 1)\n 
break\n elif len(raw_feet) < min([len(feet_alternative) for feet_alternative in feet_alternatives]):\n alternatives = \" or \".join(feet_alternatives)\n raise NotEnoughFeet(f\"Remaining feet ({raw_feet}) are not enough for {alternatives}.\\n Parsed so far: {final_form}\")\n else:\n raise WrongFeet(\"None of the expected feet ({}) was found in: {}.\\n Parsed so far: {}\".format(feet_alternatives, raw_feet, final_form))\n if raw_feet != \"\":\n raise WrongFeet(\"Feet processed {} but more left {}\".format(final_form, raw_feet))\n\n\nif __name__ == \"__main__\":\n h = MoraFinder()\n try:\n line = argv[1]\n print(line, end=\" - \")\n h.check_line(line)\n except IndexError:\n print(\"Type exit to quit.\")\n line = \"\"\n while line != \"exit\":\n if line:\n print(line, end=\" - \")\n h.check_line(line)\n line = input(\">>> \")\n", "id": "2423944", "language": "Python", "matching_score": 1.8443421125411987, "max_stars_count": 0, "path": "morafinder.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom unittest import TestCase\nfrom morafinder import MoraFinder, s, l, hexameter\n\n__author__ = '<NAME>'\n\n\nclass TestMoraFinder(TestCase):\n def test_split_to_syllabes(self):\n h = MoraFinder()\n result = h.split_to_pseudosyllabes(\"alma a fa alatt\")\n self.assertEqual([\"ALM\", \"A\", \"AF\", \"A\", \"AL\", \"ATT\"], result)\n\n def test_get_length(self):\n h = MoraFinder()\n self.assertEqual(l, h.get_length(\"ÚT\"))\n self.assertEqual(l, h.get_length(\"ITT\"))\n self.assertEqual(l, h.get_length(\"ITSZ\"))\n self.assertEqual(s, h.get_length(\"A\"))\n self.assertEqual(s, h.get_length(\"AZ\"))\n self.assertEqual(s, h.get_length(\"ICS\"))\n self.assertEqual(s, h.get_length(\"IDZ\"))\n self.assertEqual(s, h.get_length(\"IDZS\"))\n\n def test_check_line(self):\n sample = \"Mért legyek én tisztességes? 
Kiterítenek úgyis!\"\n # expected_result = \"lss\" + \"ll\" + \"ll\" + \"lss\" + \"lss\" + \"ls\"\n h = MoraFinder()\n result = h.check_line(sample)\n self.assertEqual(hexameter, result)\n", "id": "8391302", "language": "Python", "matching_score": 1.4314779043197632, "max_stars_count": 0, "path": "test_morafinder.py" }, { "content": "from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom ui.scenes.scene import Scene\n\n\nclass TestScene(TestCase):\n\n @patch(\"ui.scenes.scene.Scene.process_input\", set())\n @patch(\"ui.scenes.scene.Scene.update\", set())\n @patch(\"ui.scenes.scene.Scene.render\", set())\n def setUp(self):\n self.scene = Scene()\n\n def test_next_scene(self):\n self.assertEqual(self.scene.next_scene, self.scene)\n\n def test_switch_to_next_scene(self):\n self.scene._next_scene = None\n self.scene.switch_to_scene(self.scene)\n self.assertEqual(self.scene._next_scene, self.scene)\n\n def test_terminate(self):\n self.scene.terminate()\n self.assertIsNone(self.scene._next_scene)\n", "id": "7893767", "language": "Python", "matching_score": 2.2611100673675537, "max_stars_count": 0, "path": "test/ui/scenes/test_scene.py" }, { "content": "from unittest import expectedFailure, TestCase\n\nfrom ui import TitleScene\n\n\nclass TestTitleScene(TestCase):\n\n def setUp(self):\n self.title_scene = TitleScene()\n\n @expectedFailure\n def test_process_input(self):\n self.fail()\n\n @expectedFailure\n def test_update(self):\n self.fail()\n\n @expectedFailure\n def test_render(self):\n self.fail()\n", "id": "12113482", "language": "Python", "matching_score": 0.9580509662628174, "max_stars_count": 0, "path": "test/ui/scenes/test_title.py" }, { "content": "from .title import TitleScene\n\n__all__ = [\"TitleScene\"]\n", "id": "11376532", "language": "Python", "matching_score": 2.171926736831665, "max_stars_count": 0, "path": "ui/scenes/__init__.py" }, { "content": "from .scenes import TitleScene\n\n__all__ = [\"TitleScene\"]\n", "id": "6375826", "language": "Python", "matching_score": 2.17014741897583, "max_stars_count": 0, "path": "ui/__init__.py" } ]
2.007245
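The record above ships a small Hungarian metrical scanner (morafinder.py) together with its unit tests. As an illustration only, not part of the record, the snippet below shows how the MoraFinder class would typically be driven outside the interactive __main__ loop; it assumes the morafinder.py file from this record is saved on the import path.

from morafinder import MoraFinder, hexameter

finder = MoraFinder()
# The record's own test case expects this line to scan as a hexameter.
form = finder.check_line("Mért legyek én tisztességes? Kiterítenek úgyis!")
print(form == hexameter)  # True; check_line also prints "Line identified as hexameter"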
whipper-team
[ { "content": "import eaclogger\nimport time\nimport hashlib\nfrom morituri.common import common\nfrom morituri.result import result\n\n\nclass EacLogger(result.Logger):\n\n _accuratelyRipped = 0\n _inARDatabase = 0\n _errors = False\n\n # Overrides morituri's common implementation because EAC writes minutes\n # value without zero padding it (EAC: %2d) vs (morituri: %02d)\n def _framesToMSF(self, frames):\n \"\"\"Returns MSF representation of the provided frames value\"\"\"\n\n f = frames % common.FRAMES_PER_SECOND\n frames -= f\n s = (frames / common.FRAMES_PER_SECOND) % 60\n frames -= s * common.FRAMES_PER_SECOND\n m = frames / common.FRAMES_PER_SECOND / 60\n return \"%2d:%02d.%02d\" % (m, s, f)\n\n # Overrides morituri's common implementation because EAC writes hours\n # value without zero padding it (EAC: %2d) vs (morituri: %02d)\n # HMSF is used to represent pre-gaps' duration\n def _framesToHMSF(self, frames):\n \"\"\"Returns HMSF representation of the provided frames value\"\"\"\n\n f = frames % common.FRAMES_PER_SECOND\n frames -= f\n s = (frames / common.FRAMES_PER_SECOND) % 60\n frames -= s * common.FRAMES_PER_SECOND\n m = (frames / common.FRAMES_PER_SECOND / 60) % 60\n frames -= m * common.FRAMES_PER_SECOND * 60\n h = frames / common.FRAMES_PER_SECOND / 60 / 60\n return \"%2d:%02d:%02d.%02d\" % (h, m, s, f)\n\n def log(self, ripResult, epoch=time.time()):\n \"\"\"Returns big str: logfile joined text lines\"\"\"\n\n lines = self.logRip(ripResult, epoch=epoch)\n return \"\\n\".join(lines)\n\n def logRip(self, ripResult, epoch):\n \"\"\"Returns lines list\"\"\"\n\n lines = []\n\n # Ripper version\n # ATM differs from EAC's typical log line\n from morituri.configure import configure\n lines.append(\"morituri version %s (eac logger %s)\" % (\n configure.version, eaclogger.__version__))\n lines.append(\"\")\n\n # Rip date\n # ATM differs from EAC's typical log line\n date = time.strftime(\"%d. 
%B %Y, %R\", time.gmtime(epoch)).strip()\n lines.append(\"morituri extraction logfile from %s\" % date)\n lines.append(\"\")\n\n # Artist / Album\n lines.append(\"%s / %s\" % (ripResult.artist, ripResult.title))\n lines.append(\"\")\n\n # Drive information\n # Missing information about \"Adapter\" (not relevant on *nix?)\n lines.append(\n \"Used drive : %s%s\" % (\n ripResult.vendor, ripResult.model))\n lines.append(\"\")\n\n # Rip settings\n lines.append(\"Read mode : Secure\")\n # Extra line (not included in EAC's logfiles)\n lines.append(\"Use cdparanoia mode : Yes (cdparanoia %s)\" % (\n ripResult.cdparanoiaVersion))\n defeat = \"No\"\n if ripResult.cdparanoiaDefeatsCache:\n defeat = \"Yes\"\n lines.append(\"Defeat audio cache : %s\" % defeat)\n lines.append(\"Make use of C2 pointers : No\")\n lines.append(\"\")\n lines.append(\"Read offset correction : %d\" %\n ripResult.offset)\n # Currently unsupported by unpatched cdparanoia\n lines.append(\"Overread into Lead-In and Lead-Out : No\")\n lines.append(\"Fill up missing offset samples with silence : Yes\")\n lines.append(\"Delete leading and trailing silent blocks : No\")\n lines.append(\"Null samples used in CRC calculations : Yes\")\n # Missing line \"Used interface\" (not relevant on *nix?)\n # Extra line (not included in EAC's logfiles)\n lines.append(\"Gap detection : \"\n \"cdrdao version %s\" % ripResult.cdrdaoVersion)\n lines.append(\"Gap handling : \"\n \"Appended to previous track\")\n lines.append(\"\")\n\n # Missing lines (unneeded?): \"Selected bitrate\", \"Quality\"\n # ATM differs from EAC's typical log line\n lines.append(\"Used output format : %s\" %\n ripResult.profileName)\n # Extra lines (not included in EAC's logfiles)\n lines.append(\"GStreamer pipeline : %s\" %\n ripResult.profilePipeline)\n lines.append(\"GStreamer version : %s\" %\n ripResult.gstreamerVersion)\n lines.append(\"GStreamer Python version : %s\" %\n ripResult.gstPythonVersion)\n lines.append(\"Encoder plugin version : %s\" %\n ripResult.encoderVersion)\n lines.append(\"\")\n lines.append(\"\")\n\n # TOC\n lines.append(\"TOC of the extracted CD\")\n lines.append(\"\")\n lines.append(\n \" Track | Start | Length | Start sector | End sector \")\n lines.append(\n \" ---------------------------------------------------------\")\n table = ripResult.table\n\n htoa = None\n try:\n htoa = table.tracks[0].getIndex(0)\n except KeyError:\n pass\n\n # If true include HTOA line in log's TOC\n if htoa and htoa.path:\n htoastart = htoa.absolute\n htoaend = table.getTrackEnd(0)\n htoalength = table.tracks[0].getIndex(1).absolute - htoastart\n lines.append(\n \" %2d | %s | %s | %6d | %6d \" % (\n 0,\n self._framesToMSF(htoastart),\n self._framesToMSF(htoalength),\n htoastart, htoaend))\n\n # For every track include information in the TOC\n for t in table.tracks:\n start = t.getIndex(1).absolute\n length = table.getTrackLength(t.number)\n end = table.getTrackEnd(t.number)\n lines.append(\n \" %2d | %s | %s | %6d | %6d \" % (\n t.number,\n self._framesToMSF(start),\n self._framesToMSF(length),\n start, end))\n lines.append(\"\")\n lines.append(\"\")\n\n # per-track\n for t in ripResult.tracks:\n if not t.filename:\n continue\n lines.extend(self.trackLog(t))\n lines.append('')\n\n # AccurateRip summary at the end of the logfile\n lines.append(\"\")\n if self._inARDatabase == 0:\n lines.append(\"None of the tracks are present \"\n \"in the AccurateRip database\")\n else:\n nonHTOA = len(ripResult.tracks)\n if ripResult.tracks[0].number == 0:\n nonHTOA -= 1\n if 
self._accuratelyRipped == 0:\n lines.append(\"No tracks could be verified as accurate\")\n lines.append(\n \"You may have a different pressing \"\n \"from the one(s) in the database\")\n elif self._accuratelyRipped < nonHTOA:\n lines.append(\"%d track(s) accurately ripped\" %\n self._accuratelyRipped)\n lines.append(\"%d track(s) could not be verified as accurate\" %\n (nonHTOA - self._accuratelyRipped))\n lines.append(\"\")\n lines.append(\"Some tracks could not be verified as accurate\")\n\n else:\n lines.append(\"All tracks accurately ripped\")\n lines.append(\"\")\n\n # FIXME: ATM this will always pick else\n # When does EAC report errors? (only on abort?)\n if self._errors:\n lines.append(\"There were errors\")\n else:\n lines.append(\"No errors occurred\")\n lines.append(\"\")\n\n # END of ripper status report\n # in EAC this isn't always the second to last line in the log because\n # plugins information are included beneath (but before log checksum)\n lines.append(\"End of status report\")\n lines.append(\"\")\n\n # Log checksum (uppercase hex encoded SHA256 hash of all lines)\n # It isn't compatible with EAC's one: checklog fail\n lines.append(\"\")\n hasher = hashlib.sha256()\n hasher.update(\"\\n\".join(lines).encode(\"utf-8\"))\n lines.append(\"==== Log checksum %s ====\" % hasher.hexdigest().upper())\n lines.append(\"\")\n\n return lines\n\n def trackLog(self, trackResult):\n \"\"\"Returns tracks section lines: data picked from trackResult\"\"\"\n\n lines = []\n\n # Track number (formatting like EAC's one)\n lines.append(\"Track %2d\" % trackResult.number)\n lines.append(\"\")\n\n # Filename (including path) of ripped track\n lines.append(\" Filename %s\" % trackResult.filename)\n lines.append(\"\")\n\n # Pre-gap length\n # EAC always adds 2 seconds to the first track pre-gap\n pregap = trackResult.pregap\n if trackResult.number == 1:\n pregap += 2 * common.FRAMES_PER_SECOND\n if pregap:\n lines.append(\" Pre-gap length %s\" % self._framesToHMSF(\n pregap))\n lines.append(\"\")\n\n # Peak level\n # EAC seems to format peak differently, truncating to the 3rd digit,\n # and also calculating it against a max of 32767\n # MBV - Feed me with your kiss: replaygain 0.809875,\n # EAC's peak level 80.9 % instead of 90.0 %\n peak = trackResult.peak * 32768 / 32767\n lines.append(\" Peak level %.1f %%\" % (\n int(peak * 1000) / 10.0))\n\n # Extraction speed\n if trackResult.copyspeed:\n lines.append(\" Extraction speed %.1f X\" % (\n trackResult.copyspeed))\n\n # Track quality\n if trackResult.quality and trackResult.quality > 0.001:\n lines.append(\" Track quality %.1f %%\" % (\n trackResult.quality * 100.0, ))\n\n # Ripper test CRC\n if trackResult.testcrc is not None:\n lines.append(\" Test CRC %08X\" % trackResult.testcrc)\n\n # Ripper copy CRC\n if trackResult.copycrc is not None:\n lines.append(\" Copy CRC %08X\" % trackResult.copycrc)\n\n # AccurateRip track status\n # No support for AccurateRip v2 in morituri\n if trackResult.accurip:\n self._inARDatabase += 1\n if trackResult.ARCRC == trackResult.ARDBCRC:\n lines.append(\n \" Accurately ripped (confidence %d) [%08X] (AR v1)\" %\n (trackResult.ARDBConfidence, trackResult.ARCRC))\n self._accuratelyRipped += 1\n else:\n lines.append(\n \" Cannot be verified as accurate \"\n \"(confidence %d), [%08X], AccurateRip \"\n \"returned [%08x] (AR v1)\" %\n (trackResult.ARDBConfidence,\n trackResult.ARCRC, trackResult.ARDBCRC))\n else:\n lines.append(\" Track not present in AccurateRip database\")\n\n # EAC emits zero warnings even when a 
CRC mismatch occurs\n if trackResult.testcrc == trackResult.copycrc:\n lines.append(\" Copy OK\")\n return lines\n", "id": "5875118", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "eaclogger/logger/eac.py" }, { "content": "from setuptools import setup\nfrom eaclogger import __version__ as plugin_version\n\nsetup(\n name=\"whipper-plugin-eaclogger\",\n version=plugin_version,\n description=\"A plugin for whipper which provides EAC style log reports\",\n author=\"JoeLametta, supermanvelo\",\n maintainer=\"JoeLametta\",\n license=\"ISC License\",\n url=\"https://github.com/whipper-team/whipper-plugin-eaclogger\",\n packages=[\"eaclogger\", \"eaclogger.logger\"],\n entry_points={\n \"whipper.logger\": [\n \"eac = eaclogger.logger.eac:EacLogger\"\n ]\n }\n)\n", "id": "34029", "language": "Python", "matching_score": 0, "max_stars_count": 9, "path": "setup.py" } ]
0
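The whipper-team record above contains an EAC-style log plugin for morituri/whipper; its setup.py registers the logger under the "whipper.logger" entry-point group. The following sketch, which is not taken from the record itself and assumes the plugin has been installed, shows how such an entry point can be resolved with pkg_resources.

import pkg_resources

# Iterate the "whipper.logger" entry-point group declared in setup.py above.
for entry_point in pkg_resources.iter_entry_points("whipper.logger"):
    logger_cls = entry_point.load()  # the "eac" entry resolves to EacLogger
    print(entry_point.name, logger_cls)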
kersulis
[ { "content": "\"\"\"Testing modules.\"\"\"\n", "id": "7255596", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "\"\"\"Test console module.\"\"\"\n\nfrom unittest.mock import Mock\n\nimport click.testing\nfrom click.testing import CliRunner\nimport pytest\nfrom pytest_mock import MockFixture\nimport requests\n\nfrom python_test import console\n\n\n@pytest.fixture\ndef runner() -> CliRunner:\n \"\"\"Click runner fixture.\"\"\"\n return click.testing.CliRunner()\n\n\n@pytest.fixture\ndef mock_wikipedia_random_page(mocker: MockFixture) -> Mock:\n \"\"\"Use mocker for random_page.\"\"\"\n return mocker.patch(\"python_test.wikipedia.random_page\")\n\n\ndef test_main_succeeds(runner: CliRunner, mock_requests_get: Mock) -> None:\n \"\"\"It exits with a status code of zero.\"\"\"\n result = runner.invoke(console.main)\n assert result.exit_code == 0\n\n\ndef test_main_prints_title(runner: CliRunner, mock_requests_get: Mock) -> None:\n \"\"\"It prints the Page title.\"\"\"\n result = runner.invoke(console.main)\n assert \"Lorem Ipsum\" in result.output\n\n\ndef test_main_invokes_requests_get(runner: CliRunner, mock_requests_get: Mock) -> None:\n \"\"\"It invokes requests.get.\"\"\"\n runner.invoke(console.main)\n assert mock_requests_get.called\n\n\ndef test_main_uses_en_wikipedia(runner: CliRunner, mock_requests_get: Mock) -> None:\n \"\"\"It uses English Wikipedia by default.\"\"\"\n runner.invoke(console.main)\n args, _ = mock_requests_get.call_args\n assert \"en.wikipedia.org\" in args[0]\n\n\ndef test_main_fails_on_request_error(\n runner: CliRunner, mock_requests_get: Mock\n) -> None:\n \"\"\"It exits with 1 when requests.get has an error.\"\"\"\n mock_requests_get.side_effect = Exception(\"error\")\n result = runner.invoke(console.main)\n assert result.exit_code == 1\n\n\ndef test_main_prints_message_on_request_error(\n runner: CliRunner, mock_requests_get: Mock\n) -> None:\n \"\"\"It prints an error message when requests.get errors.\"\"\"\n mock_requests_get.side_effect = requests.RequestException\n result = runner.invoke(console.main)\n assert \"Error\" in result.output\n\n\ndef test_main_uses_specified_language(\n runner: CliRunner, mock_wikipedia_random_page: Mock\n) -> None:\n \"\"\"It uses the language passed with a given language code.\"\"\"\n runner.invoke(console.main, [\"--language=pl\"])\n mock_wikipedia_random_page.assert_called_with(language=\"pl\")\n\n\n@pytest.mark.e2e\ndef test_main_succeeds_in_production_env(runner: CliRunner) -> None:\n \"\"\"It runs when it has access to the live Wikipedia API.\"\"\"\n result = runner.invoke(console.main)\n assert result.exit_code == 0\n", "id": "9829094", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/test_console.py" } ]
0
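The kersulis record holds pytest suites for a Click-based Wikipedia console, but the mock_requests_get fixture those tests rely on lives in a conftest.py that is not part of the record. The sketch below is a hypothetical reconstruction of such a fixture; the patched target, the context-manager return shape, and the "Lorem Ipsum" title are assumptions inferred from the assertions in test_console.py.

from unittest.mock import Mock

import pytest
from pytest_mock import MockFixture


@pytest.fixture
def mock_requests_get(mocker: MockFixture) -> Mock:
    """Patch requests.get so the console tests never hit the network."""
    mock = mocker.patch("requests.get")
    # Assumption: requests.get(...) is used as a context manager whose response
    # exposes .json(); the payload keys mirror what the tests assert on.
    mock.return_value.__enter__.return_value.json.return_value = {
        "title": "Lorem Ipsum",
        "extract": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
    }
    return mock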
nyholmniklas
[ { "content": "import struct\nfrom socket import *\n\nclass Scanner:\n\n def scan(self, ip, port):\n ping_result = self.isPortOpen(ip, port)\n if (ping_result):\n result= \"\"\n result += str(ip)+\":\"+str(port)+\" >>> OPEN <<<\"\n return result\n \n def isPortOpen(self, ip, port):\n connSkt = socket(AF_INET, SOCK_STREAM)\n connSkt.settimeout(1)\n try:\n connSkt.connect((ip, int(port)))\n return True\n except:\n return False\n finally:\n connSkt.close()\n \n def getHostByIp(self, ip):\n return gethostbyaddr(ip)[0]\n \n def getIpAddressesFromRange(self, start, end):\n ipstruct = struct.Struct('>I')\n start, = ipstruct.unpack(inet_aton(start))\n end, = ipstruct.unpack(inet_aton(end))\n return [inet_ntoa(ipstruct.pack(i)) for i in range(start, end+1)]\n", "id": "7620484", "language": "Python", "matching_score": 2.534065008163452, "max_stars_count": 3, "path": "Scanopy/scanner.py" }, { "content": "from Tkinter import *\nimport threading\nimport scanner\nimport time\n\nclass Gui(threading.Thread):\n def __init__(self, scanner):\n threading.Thread.__init__(self)\n self.root = Tk()\n self.root.geometry(\"550x500\")\n self.root.title(\"Scanopy - Port Scanner\")\n self.scanner = scanner\n self.initComponents()\n self.console_rows = 0\n self.max_console_rows = 20\n self.stop = False\n\n def run(self):\n self.root.mainloop()\n\n def output_console(self, new_text):\n self.consoleText.config(state=NORMAL)\n self.consoleText.insert(END, \"\\n\" + new_text)\n self.consoleText.see(END)\n self.consoleText.config(state=DISABLED)\n\n def initComponents(self):\n root = self.root\n\n # Input Frame\n inputFrame = Frame(root, width=\"200\")\n inputFrame.pack(pady=15, padx=15)\n\n # Init Components\n startLabel = Label(inputFrame, text=\"Start:\")\n endLabel = Label(inputFrame, text=\"End:\")\n self.rangeStartEntry = Entry(inputFrame)\n self.rangeStartEntry.insert(0, \"192.168.3.11\")\n self.rangeEndEntry = Entry(inputFrame)\n self.rangeEndEntry.insert(0, \"172.16.31.10\")\n portLabel = Label(inputFrame, text=\"Port:\")\n self.portEntry = Entry(inputFrame)\n self.portEntry.insert(0, \"80\")\n hostButton = Button(inputFrame, text=\"Resolve host\", command=self.output_host)\n stopButton = Button(inputFrame, text=\"Stop\", command=self.stopScan)\n scanButton = Button(inputFrame, text=\"Scan\", command=self.scan)\n\n # Set Component Grid Positions\n startLabel.grid(row=0, column=0, padx=5, pady=5, sticky=W)\n endLabel.grid(row=0, column=2, padx=5, pady=5, sticky=W)\n self.rangeStartEntry.grid(row=0, column=1, padx=5, pady=5)\n self.rangeEndEntry.grid(row=0, column=3, padx=5, pady=5)\n portLabel.grid(row=1, column=2, padx=5, pady=5, sticky=W)\n self.portEntry.grid(row=1, column=3, padx=5, pady=5)\n hostButton.grid(row=1, column=1, padx=5, pady=5)\n scanButton.grid(row=0, column=4, padx=10, pady=5)\n stopButton.grid(row=1, column=4, padx=10, pady=5)\n\n # Console Frame\n self.consoleFrame = Frame(root)\n self.consoleFrame.pack(expand=1, pady=15, padx=15, fill= BOTH)\n self.consoleText = Text(self.consoleFrame, fg=\"green\", bg=\"black\",state=DISABLED)\n self.consoleText.pack(expand=1, fill= BOTH)\n\n def scan(self):\n self.stop = False\n start_ip = self.rangeStartEntry.get()\n end_ip = self.rangeEndEntry.get()\n ip_list = self.scanner.getIpAddressesFromRange(start_ip, end_ip)\n port = self.portEntry.get()\n self.ip_scan_index = 0\n # Kindof recursive function with call to root.after() to keep gui from freezing\n self.output_console(\"\\nScanning ip range \" + start_ip + \" - \" + end_ip+\"...\")\n def scanIp():\n 
result = self.scanner.scan(ip_list[self.ip_scan_index], port)\n self.output_console(result)\n self.ip_scan_index += 1\n if self.ip_scan_index < len(ip_list):\n if self.stop:\n self.stop = False\n return\n self.root.after(1400, scanIp)\n scanIp()\n \n def output_host(self):\n ip = self.rangeStartEntry.get()\n try:\n self.output_console(\"\\nResolved hostname to: \" + self.scanner.getHostByIp(ip))\n except:\n self.output_console(\"\\nCould not resolve hostname for: \"+ ip)\n \n def stopScan(self):\n self.stop = True\n", "id": "4885351", "language": "Python", "matching_score": 1.7112542390823364, "max_stars_count": 3, "path": "Scanopy/gui.py" }, { "content": "from gui import *\nfrom scanner import *\n\nif __name__ == '__main__':\n scanner = Scanner()\n gui_thread = Gui(scanner)\n gui_thread.run()\n", "id": "1340515", "language": "Python", "matching_score": 0.14642640948295593, "max_stars_count": 3, "path": "Scanopy/scanopy.py" } ]
1.711254
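The Scanopy record above couples a simple TCP port scanner with a Tkinter GUI. As an illustration only, not part of the record, the Scanner class can also be driven headlessly; the IP range and port below are placeholder values, and scanner.py from this record is assumed to be importable.

from scanner import Scanner

scanner = Scanner()
for ip in scanner.getIpAddressesFromRange("192.168.0.1", "192.168.0.10"):
    result = scanner.scan(ip, 80)  # returns an "... >>> OPEN <<<" string, or None if closed
    if result:
        print(result)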
mzs0207
[ { "content": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport datetime\nimport time\n\nfrom jd_assistant import Assistant\n\nif __name__ == '__main__':\n \"\"\"\n 重要提示:此处为示例代码之一,请移步下面的链接查看使用教程👇\n https://github.com/tychxn/jd-assistant/wiki/1.-%E4%BA%AC%E4%B8%9C%E6%8A%A2%E8%B4%AD%E5%8A%A9%E6%89%8B%E7%94%A8%E6%B3%95\n \"\"\"\n\n\n def buy():\n today = '2020-03-07'\n sku_ids = '100006394713' # 商品id\n area = '19_1607_3155' # 区域id\n yue_yue_time = '{0} 15:00:01.5'.format(today) # 预约时间\n buy_time = '{0} 20:00:00.5'.format(today)\n asst = Assistant() # 初始化\n asst.login_by_QRcode() # 扫码登陆\n asst.make_reserve_by_time(sku_ids, yue_yue_time) # 执行预约\n # asst.exec_reserve_seckill_by_time(sku_ids, buy_time, 4, 1.5) # 执行抢购\n asst.exec_seckill_by_time(sku_ids, buy_time, 10, 0.5, 1)\n\n\n buy()\n\n # 6个参数:\n # sku_ids: 商品id。可以设置多个商品,也可以带数量,如:'1234' 或 '1234,5678' 或 '1234:2' 或 '1234:2,5678:3'\n # area: 地区id\n # wait_all: 是否等所有商品都有货才一起下单,可选参数,默认False\n # stock_interval: 查询库存时间间隔,可选参数,默认3秒\n # submit_retry: 提交订单失败后重试次数,可选参数,默认3次\n # submit_interval: 提交订单失败后重试时间间隔,可选参数,默认5秒\n", "id": "329128", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "daishu_qianggou.py" }, { "content": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport datetime\nimport time\n\nfrom jd_assistant import Assistant\n\nif __name__ == '__main__':\n \"\"\"\n 重要提示:此处为示例代码之一,请移步下面的链接查看使用教程👇\n https://github.com/tychxn/jd-assistant/wiki/1.-%E4%BA%AC%E4%B8%9C%E6%8A%A2%E8%B4%AD%E5%8A%A9%E6%89%8B%E7%94%A8%E6%B3%95\n \"\"\"\n\n\n def buy():\n\n sku_ids = '100011521400' # 商品id\n area = '19_1607_3155' # 区域id\n today = datetime.datetime.now()\n tomorow = today + datetime.timedelta(days=1)\n yue_yue_time = '{0} 21:00:01.5'.format(today.strftime(\"%Y-%m-%d\")) # 预约时间\n #buy_time = '{0} 10:00:00.1'.format(tomorow.strftime(\"%Y-%m-%d\"))\n buy_time = '2020-03-11 10:00:00.5'\n asst = Assistant() # 初始化\n asst.login_by_QRcode() # 扫码登陆\n #asst.make_reserve_by_time(sku_ids, yue_yue_time) # 执行预约\n asst.exec_seckill_by_time(sku_ids, buy_time, 10, 0.5, 1) # 执行抢购\n\n\n buy()\n\n # 6个参数:\n # sku_ids: 商品id。可以设置多个商品,也可以带数量,如:'1234' 或 '1234,5678' 或 '1234:2' 或 '1234:2,5678:3'\n # area: 地区id\n # wait_all: 是否等所有商品都有货才一起下单,可选参数,默认False\n # stock_interval: 查询库存时间间隔,可选参数,默认3秒\n # submit_retry: 提交订单失败后重试次数,可选参数,默认3次\n # submit_interval: 提交订单失败后重试时间间隔,可选参数,默认5秒\n", "id": "2798342", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "yuyue_qianggou.py" } ]
0
Fibertree-Project
[ { "content": "import sys\nimport random\nnum_nodes = int(sys.argv[1])\nsparsity = int(sys.argv[2]) # (num_nodes / 100) * 3\n\n# frontier\nwith open(sys.argv[3], 'w') as f:\n i = 0\n for i in range(1, int(num_nodes / sparsity)):\n num = random.randrange(1, num_nodes)\n f.write(\"{}\\n\".format(num))\n", "id": "4457695", "language": "Python", "matching_score": 2.153804302215576, "max_stars_count": 2, "path": "fibertree/codec/gen_frontier.py" }, { "content": "import sys\nimport random\nnum_nodes = int(sys.argv[1])\nsparsity = int(sys.argv[2]) \nnnz = num_nodes * num_nodes \n# mtx\nwith open(sys.argv[3], 'w') as f:\n f.write(\"{} {} {}\\n\".format(num_nodes, num_nodes, nnz))\n for i in range(1, num_nodes):\n for j in range(1, num_nodes):\n num = random.random()\n # if num < er_prob:\n if i % sparsity == 0 and j % sparsity == 0:\n f.write(\"{} {} {}\\n\".format(i, j, 1))\n", "id": "2290142", "language": "Python", "matching_score": 0.24635271728038788, "max_stars_count": 2, "path": "fibertree/codec/gen_unif.py" }, { "content": "import subprocess\nimport sys\n\n# given a frontier and graph, run all U_ formats on the configuration\nfrontier = sys.argv[1]\ngraph = sys.argv[2]\ntop_format = \"U\"\nformats = [\"U\", \"C\", \"H\", \"T\", \"B\"]\nfor i in range(0, len(formats)):\n descriptor = \"U\" + formats[i]\n process = subprocess.Popen(['python3', 'codec-nknk.py', descriptor, frontier, graph, '>', 'out'])\n", "id": "7027268", "language": "Python", "matching_score": 1.7169831991195679, "max_stars_count": 2, "path": "fibertree/codec/meta-python.py" }, { "content": "import subprocess\n\ntop_rank = \"U\"\nlower_ranks = ['U', 'C', 'B', 'H', 'T']\nrefs = ['codec-nknk-ref.py', 'codec-knkn-ref.py']\n\nfor ref in refs:\n for rank in lower_ranks:\n descriptor = top_rank + rank\n process = subprocess.Popen(['python3', ref, descriptor])\n", "id": "3405929", "language": "Python", "matching_score": 0.517588198184967, "max_stars_count": 2, "path": "fibertree/codec/run-all-ref.py" }, { "content": "\"\"\" Tests of fiber operators \"\"\"\n\nimport unittest\n\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nclass TestFiberOperators(unittest.TestCase):\n \"\"\" Tests of fiber operators \"\"\"\n\n def test_add_int(self):\n \"\"\"Test __add__ integers\"\"\"\n\n f_in = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n f_ref = Fiber.fromUncompressed([3, 4, 5, 2, 2, 8])\n\n with self.subTest(\"f_in + 2\"):\n f_out = f_in + 2\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"2 + f_in\"):\n f_out = 2 + f_in\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"f_in += 2\"):\n # f_in gets clobbered!\n f_in += 2\n self.assertEqual(f_ref, f_in)\n\n\n def test_add_payload(self):\n \"\"\"Test __add__ payload\"\"\"\n\n f_in = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n f_ref = Fiber.fromUncompressed([3, 4, 5, 2, 2, 8])\n two = Payload(2)\n\n with self.subTest(\"f_in + 2\"):\n f_out = f_in + two\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"2 + f_in\"):\n f_out = two + f_in\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"f_in += 2\"):\n # f_in gets clobbered!\n f_in += two\n self.assertEqual(f_ref, f_in)\n\n\n def test_add_fiber(self):\n \"\"\"Test __add__ fiber\"\"\"\n\n f_in = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n g_in = Fiber([6, 8], [20, 22])\n fg_ref = Fiber([0, 1, 2, 5, 6, 8], [1, 2, 3, 6, 20, 22])\n\n with self.subTest(\"f+g\"):\n fg_out = f_in + g_in\n self.assertEqual(fg_ref, fg_out)\n\n with self.subTest(\"f+=g\"):\n # f_in gets clobbered!\n f_in += g_in\n 
self.assertEqual(fg_ref, f_in)\n\n\n def test_mul_int(self):\n \"\"\"Test __mul__ integers\"\"\"\n\n f_in = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n f_ref = Fiber([0, 1, 2, 5], [2, 4, 6, 12])\n\n with self.subTest(\"f_in * 2\"):\n f_out = f_in * 2\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"2*f_in\"):\n f_out = 2 * f_in\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"f_in *=2\"):\n # f_in gets clobbered!\n f_in *= 2\n self.assertEqual(f_ref, f_in)\n\n def test_mul_payload(self):\n \"\"\"Test __mul__ payload\"\"\"\n\n f_in = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n f_ref = Fiber([0, 1, 2, 5], [2, 4, 6, 12])\n two = Payload(2)\n\n with self.subTest(\"f_in * 2\"):\n f_out = f_in * two\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"2*f_in\"):\n f_out = two * f_in\n self.assertEqual(f_ref, f_out)\n\n with self.subTest(\"f_in *=2\"):\n # f_in gets clobbered!\n f_in *= two\n self.assertEqual(f_ref, f_in)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2200212", "language": "Python", "matching_score": 1.870247483253479, "max_stars_count": 2, "path": "test/test_fiber_operators.py" }, { "content": "import unittest\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\n\nclass TestFiberPrint(unittest.TestCase):\n\n def test_print_1D(self):\n \"\"\"Test str format 1D\"\"\"\n\n c = [2, 4, 6, 8]\n p = [3, 5, 7, 9]\n\n a = Fiber(c, p)\n\n ss = f\"{a:n*}\"\n ss_ref = \"F/[(2 -> <3>) \\n (4 -> <5>) \\n (6 -> <7>) \\n (8 -> <9>) ]\"\n\n self.assertEqual(ss, ss_ref)\n\n sr = f\"{a!r}\"\n sr_ref = \"Fiber([2, 4, 6, 8], [3, 5, 7, 9])\"\n\n self.assertEqual(sr, sr_ref)\n\n\n def test_print_2D_flattened(self):\n \"\"\"Test str format 2D flattened\"\"\"\n\n c = [(2,3), (2,4), (3,1), (8,2)]\n p = [3, 5, 7, 9]\n\n a = Fiber(c, p)\n\n ss = f\"{a:n*}\"\n ss_ref = \"F/[((2, 3) -> <3>) \\n ((2, 4) -> <5>) \\n ((3, 1) -> <7>) \\n ((8, 2) -> <9>) ]\"\n\n self.assertEqual(ss, ss_ref)\n\n sr = f\"{a!r}\"\n sr_ref = \"Fiber([(2, 3), (2, 4), (3, 1), (8, 2)], [3, 5, 7, 9])\"\n self.assertEqual(sr, sr_ref)\n\n\n def test_print_2D(self):\n \"\"\"Test str format 2D\"\"\"\n\n c0 = [2, 4, 6, 8]\n p0 = [3, 5, 7, 9]\n f0 = Fiber(c0, p0)\n\n c1 = [3, 5, 7]\n p1 = [4, 6, 8]\n f1 = Fiber(c1, p1)\n\n c = [2,5]\n\n a = Fiber(c, [f0, f1])\n\n # Plain formating\n\n s = f\"{a}\"\n\n s_ref = \"F/[( 2 -> F/[(2 -> <3>) \" + \\\n \"(4 -> <5>) \" + \\\n \" ... \" + \\\n \" ... ])\" + \\\n \"( 5 -> F/[(3 -> <4>) \" + \\\n \"(5 -> <6>) \" + \\\n \" ... \" + \\\n \" ... ])\"\n\n self.assertEqual(s, s_ref)\n\n # Plain formating, no cutoff\n\n ss = f\"{a:*}\"\n\n ss_ref = \"F/[( 2 -> F/[(2 -> <3>) \" + \\\n \"(4 -> <5>) \" + \\\n \"(6 -> <7>) \" + \\\n \"(8 -> <9>) ])\" + \\\n \"( 5 -> F/[(3 -> <4>) \" + \\\n \"(5 -> <6>) \" + \\\n \"(7 -> <8>) ])\"\n\n self.assertEqual(ss, ss_ref)\n\n\n # Format with newline and cutoff\n\n sn = f\"{a:n}\"\n\n sn_ref = \"F/[( 2 -> F/[(2 -> <3>) \\n\" + \\\n \" (4 -> <5>) \\n\" + \\\n \" ... \\n\" + \\\n \" ... ])\\n\" + \\\n \" ( 5 -> F/[(3 -> <4>) \\n\" + \\\n \" (5 -> <6>) \\n\" + \\\n \" ... \\n\" + \\\n \" ... 
])\"\n\n self.assertEqual(sn, sn_ref)\n \n # Format with newline and no cutoff\n\n sns = f\"{a:n*}\"\n\n sns_ref = \"F/[( 2 -> F/[(2 -> <3>) \\n\" + \\\n \" (4 -> <5>) \\n\" + \\\n \" (6 -> <7>) \\n\" + \\\n \" (8 -> <9>) ])\\n\" + \\\n \" ( 5 -> F/[(3 -> <4>) \\n\" + \\\n \" (5 -> <6>) \\n\" + \\\n \" (7 -> <8>) ])\"\n\n self.assertEqual(sns, sns_ref)\n\n # Format coord and payload and with newline and no cutoff\n\n snscp = f\"{a:(02,03)n*}\"\n\n snscp_ref = \"F/[( 02 -> F/[(02 -> <003>) \\n\" + \\\n \" (04 -> <005>) \\n\" + \\\n \" (06 -> <007>) \\n\" + \\\n \" (08 -> <009>) ])\\n\" + \\\n \" ( 05 -> F/[(03 -> <004>) \\n\" + \\\n \" (05 -> <006>) \\n\" + \\\n \" (07 -> <008>) ])\"\n\n self.assertEqual(snscp, snscp_ref)\n\n sr = f\"{a!r}\"\n\n sr_ref = \"Fiber([2, 5], [Fiber([2, 4, 6, 8], [3, 5, 7, 9]), Fiber([3, 5, 7], [4, 6, 8])])\"\n self.assertEqual(sr, sr_ref)\n\n\n def test_print_3D_flattened(self):\n \"\"\"Test str format 3D flattened\"\"\"\n\n c0 = [2, 4, 6, 8]\n p0 = [3, 5, 7, 9]\n f0 = Fiber(c0, p0)\n\n c1 = [3, 5, 7]\n p1 = [4, 6, 8]\n f1 = Fiber(c1, p1)\n\n c = [(0, 2), (1, 5)]\n\n a = Fiber(c, [f0, f1])\n\n ss = f\"{a:n*}\"\n\n ss_ref = \"F/[( (0, 2) -> F/[(2 -> <3>) \\n\" + \\\n \" (4 -> <5>) \\n\" + \\\n \" (6 -> <7>) \\n\" + \\\n \" (8 -> <9>) ])\\n\" + \\\n \" ( (1, 5) -> F/[(3 -> <4>) \\n\" + \\\n \" (5 -> <6>) \\n\" + \\\n \" (7 -> <8>) ])\"\n\n self.assertEqual(ss, ss_ref)\n\n sr = f\"{a!r}\"\n\n sr_ref = \"Fiber([(0, 2), (1, 5)], [Fiber([2, 4, 6, 8], [3, 5, 7, 9]), Fiber([3, 5, 7], [4, 6, 8])])\"\n self.assertEqual(sr, sr_ref)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "12324259", "language": "Python", "matching_score": 1.473057746887207, "max_stars_count": 2, "path": "test/test_fiber_print.py" }, { "content": "import unittest\n\nfrom fibertree import Fiber, Payload, CoordPayload\n\nclass TestPayloadCoord(unittest.TestCase):\n\n def test_new(self):\n\n cp = CoordPayload(1, 10)\n\n self.assertEqual(cp.coord, 1)\n self.assertEqual(cp.payload, 10)\n\n def test_iter(self):\n\n cp_ref = [1, 10]\n\n cp = CoordPayload(1, 10)\n\n for x, x_ref in zip(cp, cp_ref):\n self.assertEqual(x, x_ref)\n\n def test_getitem_2D(self):\n\n b0 = Fiber([1, 4, 7], [2, 5, 8])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n a0 = Fiber([2, 4], [b0, b1])\n\n self.assertEqual(a0[1][1], 5)\n \n\n def test_setitem_2D(self):\n\n b0 = Fiber([1, 4, 7], [2, 5, 8])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n a0 = Fiber([2, 4], [b0, b1])\n\n a0[1][1] = 10\n\n self.assertEqual(a0[1][1], 10)\n\n\n def test_add(self):\n\n cp1 = CoordPayload(5, 11)\n cp2 = CoordPayload(6, 12)\n\n payload_ref = Payload(23)\n \n self.assertEqual(cp1+12, payload_ref)\n self.assertEqual(cp1+cp2, payload_ref)\n\n self.assertEqual(12+cp1, payload_ref)\n\n cp1 += cp2\n self.assertEqual(cp1, payload_ref)\n\n cp2 += 11\n self.assertEqual(cp2, payload_ref)\n\n def test_sub(self):\n\n cp1 = CoordPayload(5, 12)\n cp2 = CoordPayload(6, 10)\n\n payload_ref = Payload(2)\n \n self.assertEqual(cp1-10, payload_ref)\n self.assertEqual(cp1-cp2, payload_ref)\n\n self.assertEqual(14-cp1, payload_ref)\n\n cp1 -= cp2\n self.assertEqual(cp1, payload_ref)\n\n cp2 -= 8\n self.assertEqual(cp2, payload_ref)\n \n def test_multiply(self):\n\n cp1 = CoordPayload(5, 4)\n cp2 = CoordPayload(6, 5)\n\n payload_ref = Payload(20)\n \n self.assertEqual(cp1*5, payload_ref)\n self.assertEqual(cp1*cp2, payload_ref)\n\n self.assertEqual(5*cp1, payload_ref)\n\n cp1 *= cp2\n self.assertEqual(cp1, payload_ref)\n\n cp2 *= 4\n self.assertEqual(cp2, payload_ref)\n\n#\n# 
Not working\n#\n# def test_div(self):\n#\n# cp1 = CoordPayload(5, 20)\n# cp2 = CoordPayload(6, 4)\n#\n# payload_ref = Payload(5)\n# \n# self.assertEqual(cp1/4.0, payload_ref)\n# self.assertEqual(cp1/cp2, payload_ref)\n#\n# self.assertEqual(20/cp2, payload_ref)\n#\n# cp1 /= cp2\n# self.assertEqual(cp1, payload_ref)\n#\n# cp2 /= 4\n# self.assertEqual(cp2, Payload(1))\n\n def test_comparisons(self):\n\n cp1 = CoordPayload(5, 4)\n cp2 = CoordPayload(6, 5)\n cp1a = CoordPayload(5, 4)\n\n self.assertTrue(cp1 == cp1a)\n self.assertTrue(cp1 == 4)\n\n self.assertFalse(cp1 == cp2)\n self.assertFalse(cp1 == 5)\n \n self.assertTrue(cp1 < cp2)\n self.assertTrue(cp1 < 5)\n\n self.assertFalse(cp2 < cp1)\n self.assertFalse(cp2 < 5)\n\n self.assertTrue(cp1 <= cp1a)\n self.assertTrue(cp1 <= cp2)\n self.assertTrue(cp1 <= 4)\n self.assertTrue(cp1 <= 5) \n\n self.assertTrue(cp2 > cp1)\n self.assertTrue(cp2 > 4)\n\n self.assertTrue(cp1 >= cp1a)\n self.assertTrue(cp2 >= cp1)\n self.assertTrue(cp1 >= 4)\n self.assertTrue(cp1 >= 3) \n\n self.assertTrue(cp1 != cp2)\n self.assertTrue(cp1 != 5)\n\n self.assertFalse(cp1 != cp1)\n self.assertFalse(cp1 != 4)\n", "id": "12734538", "language": "Python", "matching_score": 3.0096678733825684, "max_stars_count": 2, "path": "test/test_payload_coord.py" }, { "content": "import unittest\n\nfrom fibertree import Payload\n\nclass TestPayload(unittest.TestCase):\n\n def test_new(self):\n av = 1\n bv = 1\n\n a = Payload(av)\n self.assertEqual(a.value, av)\n \n b = Payload(bv)\n self.assertEqual(b.value, bv)\n\n def test_plus(self):\n av = 1\n bv = 1\n\n a = Payload(av)\n b = Payload(bv)\n\n a_plus_1 = a + 1\n self.assertEqual(a_plus_1.value, av+1)\n\n a_plus_b = a + b\n self.assertEqual(a_plus_b.value, av+bv)\n\n a += 1\n self.assertEqual(a.value, av+1)\n \n a += b\n self.assertEqual(a.value, av+1+bv)\n\n def test_minus(self):\n av = 1\n bv = 1\n\n a = Payload(av)\n b = Payload(bv)\n\n a_sub_1 = a - 1\n self.assertEqual(a_sub_1.value, av-1)\n \n a_sub_b = a - b\n self.assertEqual(a_sub_b.value, av-bv)\n\n def test_multiply(self):\n av = 1\n bv = 2\n\n a = Payload(av)\n b = Payload(bv)\n\n\n a_mul_2 = a * 2\n self.assertEqual(a_mul_2.value, av*2)\n\n two_mul_a = 2 * a\n self.assertEqual(two_mul_a.value, 2*av)\n\n a_mul_b = a * b\n self.assertEqual(a_mul_b.value, av*bv)\n\n a *= 2\n self.assertEqual(a.value, av*2)\n\n a *= b\n self.assertEqual(a.value, av*2*b)\n\n def test_equality(self):\n cv = 8\n dv = 8\n ev = 1\n \n c = Payload(cv)\n d = Payload(dv)\n e = Payload(ev)\n \n self.assertTrue(c == d)\n self.assertFalse(c == e)\n\n self.assertFalse(c != d)\n self.assertTrue(c != e)\n\n self.assertTrue(c == 8)\n self.assertFalse(c == 1)\n self.assertFalse(c != 8)\n self.assertTrue(c != 1)\n\n self.assertTrue(8 == c)\n self.assertFalse(1 == c)\n self.assertFalse(8 != c)\n self.assertTrue(1 != c)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "7991319", "language": "Python", "matching_score": 0.45693644881248474, "max_stars_count": 2, "path": "test/test_payload.py" }, { "content": "import unittest\n\nfrom copy import deepcopy\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\n\nfrom fibertree import TensorImage\n\nclass TestFiberMutator(unittest.TestCase):\n\n def test_swapRanks_empty(self):\n \"\"\"Test that swapRanks raises an error if the fiber is empty\"\"\"\n z_m = Fiber()\n\n with self.assertRaises(AssertionError):\n z_m.swapRanks()\n\n def test_split_uniform_below(self):\n \"\"\"Test splitUniformBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 
10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n f.splitUniformBelow(10, depth=0)\n\n f0_split = f0.splitUniform(10)\n f1_split = f1.splitUniform(10)\n\n f_ref = Fiber(c, [f0_split, f1_split])\n\n self.assertEqual(f, f_ref)\n\n def test_split_nonuniform_below(self):\n \"\"\"Test splitNonUniformBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n f.splitNonUniformBelow([0, 20,45, 50], depth=0)\n\n f0_split = f0.splitNonUniform([0, 20, 45, 50])\n f1_split = f1.splitNonUniform([0, 20, 45, 50])\n\n f_ref = Fiber(c, [f0_split, f1_split])\n\n self.assertEqual(f, f_ref)\n\n\n def test_split_equal_below(self):\n \"\"\"Test splitEqualBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n f.splitEqualBelow(4, depth=0)\n\n f0_split = f0.splitEqual(4)\n f1_split = f1.splitEqual(4)\n\n f_ref = Fiber(c, [f0_split, f1_split])\n\n self.assertEqual(f, f_ref)\n\n\n def test_split_unequal_below(self):\n \"\"\"Test splitUnEqualBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n f.splitUnEqualBelow([3, 3, 1], depth=0)\n\n f0_split = f0.splitUnEqual([3, 3, 1])\n f1_split = f1.splitUnEqual([3, 3, 1])\n\n f_ref = Fiber(c, [f0_split, f1_split])\n\n self.assertEqual(f, f_ref)\n\n\n def test_flatten_below(self):\n \"\"\"Test {,un}flattenRanksBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n # This just creates another level...\n f.splitUnEqualBelow([3, 3, 1], depth=0)\n f_ref = deepcopy(f)\n\n # Flattening and unflattening should do nothing\n f.flattenRanksBelow()\n f.unflattenRanksBelow()\n\n self.assertEqual(f, f_ref)\n\n\n def test_swap_below(self):\n \"\"\"Test swapRanksBelow\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n # This just creates another level...\n f.splitUnEqualBelow([3, 3, 1], depth=0)\n f_ref = deepcopy(f)\n\n # Swapping twice should do nothing\n f.swapRanksBelow()\n f.swapRanksBelow()\n\n self.assertEqual(f, f_ref)\n\n\n def test_split_equal_below_deep(self):\n \"\"\"Test splitEqualBelow with depth=1\"\"\"\n\n c0 = [0, 1, 9, 10, 12, 31, 41]\n p0 = [ 0, 10, 20, 100, 120, 310, 410 ]\n f0 = Fiber(c0, p0)\n\n c1 = [1, 2, 10, 11, 13, 32, 42]\n p1 = [ 1, 11, 21, 101, 121, 311, 411 ]\n f1 = Fiber(c1, p1)\n\n c = [2, 4]\n f = Fiber(c, [f0, f1])\n\n # This just creates another level...\n f.splitUnEqualBelow([3, 3, 1], depth=0)\n f_ref = deepcopy(f)\n\n f.splitEqualBelow(2, depth=1)\n\n for fc, fp in f_ref:\n fp.splitEqualBelow(2)\n\n self.assertEqual(f, f_ref)\n\n\nif __name__ == 
'__main__':\n unittest.main()\n\n", "id": "6282395", "language": "Python", "matching_score": 3.4658679962158203, "max_stars_count": 2, "path": "test/test_fiber_mutator.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\n\nfrom fibertree import TensorImage\n\nclass TestFiberSplit(unittest.TestCase):\n\n def test_split_uniform_empty(self):\n \"\"\"Test splitUniform on empty fiber\"\"\"\n empty = Fiber()\n split = empty.splitUniform(5)\n\n # After we split, we need to make sure that we have actually added\n # another level to the empty fiber\n self.assertIsInstance(split.getDefault(), Fiber)\n\n def test_split_uniform(self):\n \"\"\"Test splitUniform\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n split_ref_coords = [0, 10, 30, 40 ]\n\n css = [ [ 0, 1, 9 ],\n [ 10, 12 ],\n [ 31 ],\n [ 41 ] ]\n\n pss = [ [ 0, 10, 20 ],\n [ 100, 120 ],\n [ 310 ],\n [ 410 ] ]\n\n split_ref_payloads = []\n\n for (cs, ps) in zip(css, pss):\n split_ref_payloads.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n coords = 10\n split = f.splitUniform(coords)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, split_ref_coords[i])\n self.assertEqual(sp, split_ref_payloads[i])\n\n\n def test_split_uniform_then_flatten(self):\n \"\"\"Test that flattenRanks() can undo splitUniform\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n coords = 10\n split = f.splitUniform(coords)\n\n #\n # Check that flattening after splitting gives us the same answer\n #\n self.assertEqual(split.flattenRanks(style=\"absolute\"), f)\n\n\n def test_split_uniform_relative(self):\n \"\"\"Test splitUniform\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n split_ref_coords = [0, 10, 30, 40 ]\n\n css = [ [ 0, 1, 9 ],\n [ 0, 2 ],\n [ 1 ],\n [ 1 ] ]\n\n pss = [ [ 0, 10, 20 ],\n [ 100, 120 ],\n [ 310 ],\n [ 410 ] ]\n\n split_ref_payloads = []\n\n for (cs, ps) in zip(css, pss):\n split_ref_payloads.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n coords = 10\n split = f.splitUniform(coords, relativeCoords=True)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, split_ref_coords[i])\n self.assertEqual(sp, split_ref_payloads[i])\n\n\n def test_split_uniform_relative_then_flatten(self):\n \"\"\"Test that flattenRanks can undo splitUniform (relative)\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n coords = 10\n split = f.splitUniform(coords, relativeCoords=True)\n\n #\n # Check the split\n #\n self.assertEqual(split.flattenRanks(style=\"relative\"), f)\n\n def test_split_nonuniform_empty(self):\n \"\"\"Test splitNonUniform on empty fiber\"\"\"\n empty = Fiber()\n split = empty.splitNonUniform([1, 5, 17])\n\n # After we split, we need to make sure that we have actually added\n # another level to the empty fiber\n self.assertIsInstance(split.getDefault(), Fiber)\n\n def test_split_nonuniform1(self):\n \"\"\"Test splitNonUniform - starting at coordinate 0\"\"\"\n\n #\n # Create the fiber to be 
split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n css = [ [ 0, 1, 9, 10 ],\n [ 12 ],\n [ 31, 41 ] ]\n\n pss = [ [ 0, 10, 20, 100 ],\n [ 120 ],\n [ 310, 410 ] ]\n\n split_ref = []\n\n for (cs, ps) in zip(css, pss):\n split_ref.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n splits = [0, 12, 31]\n split = f.splitNonUniform(splits)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, splits[i])\n self.assertEqual(sp, split_ref[i])\n\n def test_split_nonuniform2(self):\n \"\"\"Test splitNonUniform - not starting at coordinate 0\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n css = [ [ 9, 10 ],\n [ 12 ],\n [ 31, 41 ] ]\n\n pss = [ [ 20, 100 ],\n [ 120 ],\n [ 310, 410 ] ]\n\n split_ref = []\n\n for (cs, ps) in zip(css, pss):\n split_ref.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n splits = [8, 12, 31]\n split = f.splitNonUniform(splits)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, splits[i])\n self.assertEqual(sp, split_ref[i])\n\n def test_split_nonuniform_then_flatten(self):\n \"\"\"Test that flattenRanks can undo splitNonUniform\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n splits = [0, 12, 31]\n split = f.splitNonUniform(splits)\n\n #\n # Check the split\n #\n self.assertEqual(split.flattenRanks(style=\"absolute\"), f)\n\n def test_split_equal_empty(self):\n \"\"\"Test splitEqual on empty fiber\"\"\"\n empty = Fiber()\n split = empty.splitEqual(3)\n\n # After we split, we need to make sure that we have actually added\n # another level to the empty fiber\n self.assertIsInstance(split.getDefault(), Fiber)\n\n\n def test_split_equal(self):\n \"\"\"Test splitEqual\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n css = [ [0, 1 ],\n [9, 10 ],\n [12, 31 ],\n [41 ] ]\n\n pss = [ [0, 10 ],\n [20, 100 ],\n [120, 310 ],\n [410 ] ]\n\n split_ref = []\n\n for (cs, ps) in zip(css, pss):\n split_ref.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n size = 2\n split = f.splitEqual(size)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, css[i][0])\n self.assertEqual(sp, split_ref[i])\n\n def test_split_equal_then_flatten(self):\n \"\"\"Test that flattenRanks can undo splitEqual\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n size = 2\n split = f.splitEqual(size)\n\n #\n # Check the split\n #\n self.assertEqual(split.flattenRanks(style=\"absolute\"), f)\n\n def test_split_unequal_empty(self):\n \"\"\"Test splitUnEqual on empty fiber\"\"\"\n empty = Fiber()\n split = empty.splitUnEqual([1, 5, 17])\n\n # After we split, we need to make sure that we have actually added\n # another level to the empty fiber\n self.assertIsInstance(split.getDefault(), Fiber)\n\n\n def test_split_unequal(self):\n \"\"\"Test splitUnequal\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 
120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n css = [ [0],\n [1, 9],\n [10, 12, 31, 41] ]\n\n pss = [ [0],\n [10, 20],\n [ 100, 120, 310, 410 ] ]\n\n split_ref = []\n\n for (cs, ps) in zip(css, pss):\n split_ref.append(Fiber(cs, ps))\n\n #\n # Do the split\n #\n sizes = [1, 2, 4]\n split = f.splitUnEqual(sizes)\n\n #\n # Check the split\n #\n for i, (sc, sp) in enumerate(split):\n self.assertEqual(sc, css[i][0])\n self.assertEqual(sp, split_ref[i])\n\n\n def test_split_unequal_then_flatten(self):\n \"\"\"Test that flattenRanks can undo splitUnequal\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n sizes = [1, 2, 4]\n split = f.splitUnEqual(sizes)\n\n #\n # Check the split\n #\n self.assertEqual(split.flattenRanks(style=\"absolute\"), f)\n\n\n def test_split_equal_partioned(self):\n \"\"\"Test splitEqual(2, partitions=2)\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Create list of reference fibers after the split\n #\n a_coords = [0, 12]\n a1 = Fiber([0, 1], [0, 10])\n a2 = Fiber([12, 31], [120, 310])\n a = Fiber(coords=a_coords, payloads=[a1, a2])\n\n b_coords = [9, 41]\n b1 = Fiber([9, 10], [20, 100])\n b2 = Fiber([41], [410])\n b = Fiber(coords=b_coords, payloads=[b1, b2])\n\n split_ref = Fiber(payloads=[a, b])\n\n #\n # Do the split\n #\n size = 2\n split = f.splitEqual(size, partitions=2)\n\n #\n # Check the split\n #\n self.assertEqual(split, split_ref)\n\n @staticmethod\n def _make_fiber_a():\n\n f = Fiber([0, 1, 2, 10, 12, 31, 41], [ 0, 10, 20, 100, 120, 310, 410 ])\n return f\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "10299179", "language": "Python", "matching_score": 2.6242547035217285, "max_stars_count": 2, "path": "test/test_fiber_split.py" }, { "content": "from fibertree import Fiber\n\nf = Fiber([0, 1, 2, 10, 12, 31, 41], [ 0, 10, 20, 100, 120, 310, 410 ])\n\nprint(\"Original fiber\\n\")\nf.print()\n\n#\n# Unform coordiate-based split\n#\ncoords = 10\nprint(\"Uniform coordinate split (groups of %s coordinates)\\n\" % coords)\n\nfibers = f.splitUniform(coords)\n\nfor c,s in fibers:\n s.print()\n\n#\n# Non-unform coordiate-based split\n#\nsplits = [0, 12, 31]\nprint(f\"NonUniform coordinate split (splits at {splits})\\n\")\n\nfibers = f.splitNonUniform(splits)\n\nfor c,s in fibers:\n s.print()\n\n#\n# Equal position-based split\n#\nsize = 2\nprint(f\"Equal position split (groups of {size})\\n\")\n\nfibers = f.splitEqual(size)\n\nfor c,s in fibers:\n s.print()\n\n\nsizes = [1, 2, 4]\nprint(f\"NonEqual position split (splits of sizes {sizes})\\n\")\n\nfibers = f.splitUnEqual(sizes)\n\nfor c,s in fibers:\n s.print()\n\n#\n# Create multiple partitions\n#\n\n#\n# Equal position-based split\n#\nsize = 2\nprint(f\"Two partitions with equal position split (groups of {size})\\n\")\n\nfibers = f.splitEqual(size, partitions=2)\n\nfor c,s in fibers:\n s.print()\n\n", "id": "4019106", "language": "Python", "matching_score": 0.45147833228111267, "max_stars_count": 2, "path": "examples/scripts/methods/split.py" }, { "content": "import yaml\nfrom fibertree import Tensor\nimport sys\nimport time\nfrom fibertree import Codec\n\n# matrix market converter\ndef mm_to_hfa_yaml(infilename, tensor_name, rank_ids, outfilename):\n # read mm into csr\n shape_m = 0\n shape_n = 0\n nnz = 0\n csr = None\n m_coords = 
list()\n with open(infilename, 'r') as infile:\n for line in infile:\n if line.startswith(\"%\"):\n continue\n parts = line.split(\" \")\n elt_1 = int(parts[0])\n elt_2 = int(parts[1])\n elt_3 = int(parts[2])\n if shape_m == 0: # not filled in yet\n shape_m = elt_1\n shape_n = elt_2\n nnz = elt_3\n csr = list()\n for i in range(0, shape_m):\n csr.append([list(), list()])\n else:\n # 0 indexing\n elt_1 -= 1\n elt_2 -= 1\n \n assert elt_1 < shape_m\n # struct of arrays for (coord, payload)\n if len(m_coords) == 0 or m_coords[-1] != elt_1:\n m_coords.append(elt_1)\n \n csr[elt_1][0].append(elt_2)\n csr[elt_1][1].append(elt_3)\n\n print(\"finished reading in CSR\")\n # write to YAML (manually? since fiber is not a unique key)\n twospace = \" \"\n fourspace = twospace * 2\n sixspace = fourspace + twospace\n eightspace = fourspace * 2\n tenspace = eightspace + twospace\n twelvespace = sixspace * 2\n fourteenspace = twelvespace + twospace\n with open(outfilename, 'w') as outfile:\n outfile.write(\"tensor:\\n\")\n outfile.write(twospace + \"name: {}\\n\".format(tensor_name))\n outfile.write(twospace + \"rank_ids: {}\\n\".format(rank_ids))\n outfile.write(twospace + \"shape: {}\\n\".format([shape_m, shape_n]))\n outfile.write(twospace + \"root:\\n\")\n outfile.write(fourspace + \"- fiber:\\n\")\n outfile.write(eightspace + \"coords: {}\\n\".format(m_coords))\n outfile.write(eightspace + \"payloads:\\n\")\n for i in range(0, len(m_coords)):\n outfile.write(tenspace + \"- fiber:\\n\")\n outfile.write(fourteenspace + \"coords: {}\\n\".format(csr[m_coords[i]][0]))\n outfile.write(fourteenspace + \"payloads: {}\\n\".format(csr[m_coords[i]][1]))\n print(\"finished writing out YAML\")\n\ndef preproc_mtx_dsds():\n tensor_name = sys.argv[1]\n infilename = sys.argv[2]\n outfilename = sys.argv[3]\n outdir = sys.argv[4]\n splits = sys.argv[5].split(',') # get tilings\n\n # matrix market to the YAML that HFA reads\n mm_to_hfa_yaml(infilename, tensor_name, ['S', 'D'], outdir + outfilename)\n t0 = time.clock()\n # test reading the yaml into HFA\n a_sd = Tensor.fromYAMLfile(outdir + outfilename)\n t1 = time.clock() - t0\n print(\"time to read into HFA: {}\".format(t1)) # cpu seconds\n a_sd.dump(outdir +\"sd_\" + outfilename)\n \n # swap (S, D) to (D, S)\n t0 = time.clock()\n a_ds = a_sd.swapRanks()\n t1 = time.clock() - t0\n print(\"time to swap S, D in HFA: {}\".format(t1)) # cpu seconds\n a_ds.dump(outdir +\"ds_\" + outfilename)\n\n # split D\n t0 = time.clock()\n a_dds = a_ds.splitUniform(int(splits[0])) # split D\n t1 = time.clock() - t0\n print(\"time to splitUniform DS on D {}\".format(t1)) # cpu seconds\n a_dds.dump(outdir +\"dds_\" + outfilename)\n\n # split S\n t0 = time.clock()\n a_ddss = a_dds.splitUniform(int(splits[1]), depth=2)\n t1 = time.clock() - t0\n print(\"time to splitUniform DS on S: {}\".format(t1)) # cpu seconds \n a_ddss.dump(outdir +\"ddss_\" + outfilename)\n\n # DDSS -> DSDS\n t0 = time.clock()\n a_dsds = a_ddss.swapRanks(depth=1)\n t1 = time.clock() - t0\n print(\"time to swap intermediate D, S in HFA: {}\".format(t1)) # cpu seconds\n a_dsds.dump(outdir +\"dsds_\" + outfilename)\n\ndef preproc_mtx_sdsd():\n tensor_name = sys.argv[1]\n \n # input in matrix market\n infilename = sys.argv[2]\n \n # output file suffix (.yaml)\n outfilename = sys.argv[3]\n outdir = sys.argv[4]\n\n splits = sys.argv[5].split(',') # get tilings\n \n # matrix market to the YAML that HFA reads\n mm_to_hfa_yaml(infilename, tensor_name, ['S', 'D'], outdir + outfilename)\n t0 = time.clock()\n # test reading the 
yaml into HFA\n a_sd = Tensor.fromYAMLfile(outdir + outfilename)\n t1 = time.clock() - t0\n print(\"time to read into HFA: {}\".format(t1)) # cpu seconds\n a_sd.dump(outdir +\"sd_\" + outfilename)\n \n # split S\n t0 = time.clock()\n a_ssd = a_sd.splitUniform(int(splits[0]), relativeCoords=False)\n t1 = time.clock() - t0\n print(\"time to splitUniform SD on S: {}\".format(t1)) # cpu seconds \n a_ssd.dump(outdir +\"ssd_\" + outfilename)\n \n # split D\n t0 = time.clock()\n a_ssdd = a_ssd.splitUniform(int(splits[1]), depth=2, relativeCoords=False) # split D\n t1 = time.clock() - t0\n print(\"time to splitUniform SSD on D {}\".format(t1)) # cpu seconds\n a_ssdd.dump(outdir +\"ssdd_\" + outfilename)\n\n # SSDD -> SDSD\n t0 = time.clock()\n a_sdsd = a_ssdd.swapRanks(depth=1)\n t1 = time.clock() - t0\n print(\"time to swap intermediate D, S in HFA: {}\".format(t1)) # cpu seconds\n a_sdsd.dump(outdir +\"sdsd_\" + outfilename)\n\nif __name__ == \"__main__\":\n preproc_mtx_sdsd()\n preproc_mtx_dsds()\n", "id": "11473381", "language": "Python", "matching_score": 1.2746102809906006, "max_stars_count": 2, "path": "fibertree/codec/tiling_preproc.py" }, { "content": "import os\n\nfrom fibertree import Tensor\nfrom fibertree import Payload\n\nprint(\"----------------------------------------\")\nprint(\" BFS graph traversal\")\nprint(\"----------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\n#a = Tensor.fromYAMLfile(os.path.join(data_dir, \"graph-a.yaml\"))\n\n# Adjacency matrix\na = Tensor.fromUncompressed([ \"S\", \"D\"],\n [ [ 0, 1, 1, 0, 0, 0 ],\n [ 0, 0, 1, 1, 0, 0 ],\n [ 0, 0, 0, 1, 1, 0 ],\n [ 0, 0, 0, 0, 1, 1 ],\n [ 1, 0, 0, 0, 0, 1 ],\n [ 1, 1, 0, 0, 0, 0 ] ])\n\n# Fringe (current and next)\nf0 = Tensor.fromUncompressed([ \"D\" ], [ 1, 0, 0, 0, 0, 0 ])\n\n# Distance\nd = Tensor(rank_ids=[ \"S\" ])\n\n# Get root fibers\na_s = a.getRoot()\nf0_d = f0.getRoot()\nd_d = d.getRoot()\n\nprint(\"BFS\")\n\nlevel = 1\n\n\nwhile (f0_d.countValues() > 0):\n f0_d.print(\"\\nFringe\")\n\n f1 = Tensor(rank_ids=[ \"D\" ]) \n f1_d = f1.getRoot()\n\n for s, (_, a_d) in f0_d & a_s:\n print(f\"Processing source {s}\")\n print(f\"Neighbors:\\n {a_d}\")\n\n# print(f\"\\na_d:\\n{a_d}\")\n# print(f\"\\nd_d:\\n{d_d})\")\n\n# a_less_d = a_d - d_d\n# print(f\"\\na_less_d:\\n{a_less_d})\")\n\n# assignment1 = d_d << a_less_d\n# print(f\"\\nd_d << (a_d - d_d):\\n{assignment1}\")\n\n# assignment2 = f1_d << assignment1\n# print(f\"\\nf1_d << (d_d << (a_d - d_d)):\\n{assignment2}\")\n\n for d, (f1_ref, (d_ref, _)) in f1_d << (d_d << a_d):\n print(f\" Processing destination {d} = {d_ref}\")\n\n if Payload.isEmpty(d_ref):\n print(f\"Adding destination {d}\")\n\n f1_ref += 1\n d_ref += level\n\n level += 1\n f0 = f1\n f0_d = f0.getRoot()\n\nd_d.print(\"\\nDistance Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "12808665", "language": "Python", "matching_score": 1.5708636045455933, "max_stars_count": 2, "path": "examples/scripts/graphs/bfs.py" }, { "content": "from fibertree import Fiber, Tensor, TensorImage\n\nx = [ [ [ 1, 2, 8, 20, 0, 0, 11 ],\n [ 1, 0, 0, 11, 0, 0, 33 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 1, 1, 8, 12, 0, 0, 44 ],\n [ 1, 3, 0, 13, 0, 0, 42 ],\n [ 0, 0, 4, 14, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ] ],\n\n [ [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ] ],\n\n [ [ 1, 2, 8, 20, 0, 0, 11 ],\n [ 1, 0, 0, 
11, 0, 0, 33 ],\n [ 0, 0, 0, 0, 0, 0, 0 ],\n [ 1, 1, 8, 12, 0, 0, 44 ],\n [ 1, 3, 0, 13, 0, 0, 42 ],\n [ 0, 0, 4, 14, 0, 0, 0 ],\n [ 0, 0, 0, 0, 0, 0, 0 ] ] ]\n\n \n\n \n\nf = Fiber.fromUncompressed(x)\nf.dump(\"/tmp/tensor-3d.yaml\")\nf.print(\"Fiber from uncompressed\")\n\nt1 = Tensor.fromFiber([\"X\", \"Y\", \"Z\"], f)\nt1.print(\"Tensor from fiber\")\n\nt2 = Tensor.fromUncompressed([\"X\", \"Y\", \"Z\"], x)\nt2.print(\"Tensor from uncompressed\")\n", "id": "4638974", "language": "Python", "matching_score": 2.921204090118408, "max_stars_count": 2, "path": "examples/scripts/basic/uncompressed2tensor.py" }, { "content": "from fibertree import Tensor, TensorImage\n\n\na = Tensor.fromUncompressed(root=2)\nprint(a)\n\ni = TensorImage(a)\ni.show()\n", "id": "4012879", "language": "Python", "matching_score": 2.0712151527404785, "max_stars_count": 2, "path": "examples/scripts/visualization/draw-tensor-0D.py" }, { "content": "from fibertree import Fiber, Tensor, TensorImage\n\na = Fiber([ 0, 2, 8], [ 5, 6, 7 ])\n\na.print(\"Fiber\")\ni = TensorImage(a)\ni.show()\n", "id": "12593616", "language": "Python", "matching_score": 0.015002531930804253, "max_stars_count": 2, "path": "examples/scripts/visualization/draw-fiber.py" }, { "content": "\"\"\"AAHR Module\"\"\"\n\nimport logging\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.core.aahr')\n\n\nclass AAHR():\n \"\"\"The AAHR class is used to define an \"axis aligned hyper-rectangle\"\n based on the points in the open range between upper left and lower\n right corners of the hyper-rectangle. It is useful for defining a\n range of points for highlighting.\n\n TBD: Allow a highlight point to be an AAHR. A naive way to do that\n would be to expand the AAHR in canonicalizeHighlights()....\n\n \"\"\"\n\n def __init__(self, upper_left, lower_right):\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.aahr')\n\n self.length = len(upper_left)\n\n assert self.length == len(lower_right)\n\n self.upper_left = upper_left\n self.lower_right = lower_right\n\n\n def expand(self):\n \"\"\"Expand AAHR into all its points\n\n \"\"\"\n\n region = [()]\n\n for start, end in zip(self.upper_left, self.lower_right):\n region = self._cross(region, range(start, end))\n\n return region\n\n @staticmethod\n def _cross(a, b):\n\n result = []\n\n for i in a:\n for j in b:\n result.append(i + (j,))\n\n return result\n \n\n def __contains__(self, point):\n\n if not isinstance(point, tuple) or len(point) != self.length:\n return False\n\n for p, u, l in zip(point, self.upper_left, self.lower_right):\n if p < u or p >= l:\n return False\n\n return True\n", "id": "11618994", "language": "Python", "matching_score": 1.0306252241134644, "max_stars_count": 2, "path": "fibertree/graphics/aahr.py" }, { "content": "\"\"\"Highlight Module\"\"\"\n\nimport logging\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.highlights')\n\n\n\nclass HighlightManager():\n \"\"\"HighlightManager \"\"\"\n\n def __init__(self, highlights={}, highlight_subtensor={}, parent=None, level=None):\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.highlights')\n\n self.highlights = highlights\n self.highlight_subtensor = highlight_subtensor\n\n self.parent = parent\n self.level = level\n\n self.current_coord = None\n self.highlight_coords = {}\n\n #\n # The points to highlight for each worker at this level are\n # based on the first coordinate in each point, unless the\n # coordinate is a wildcard 
('?')\n #\n self.active_coords = {}\n \n for worker, points in highlights.items():\n\n active_coords_temp = []\n\n for point in points:\n if point[0] not in ['?']:\n active_coords_temp.append(point[0])\n\n self.active_coords[worker] = set(active_coords_temp)\n\n\n def addFiber(self, c):\n #\n # For each payload that was a fiber we need to recurse, but we\n # also need to figure out what to highlight at the next level\n # So these variables hold the highlight information with one\n # less coordinate (in \"highlights_next\") for each worker, and\n # a dictionary of workers (in \"highlight_subtensor_next\") that\n # are highlighting the remaining levels of the subtensor\n #\n self.current_coord = c\n\n highlights = self.highlights\n highlight_subtensor = self.highlight_subtensor\n active_coords = self.active_coords\n\n highlights_next = {}\n highlight_subtensor_next = {}\n\n for worker, points in highlights.items():\n #\n # Once we start highlighting a fiber, highlight the entire subtensor.\n #\n # TBD: Maybe we should have just copied highlight_subtensor\n #\n if worker in highlight_subtensor:\n highlight_subtensor_next[worker] = True\n\n #\n # Create the tail of the highlight coordinates as the next\n # highlights\n #\n highlights_next[worker] = []\n\n for point in points:\n len_point = len(point)\n\n #\n # If there are more than one coordinate in the point,\n # then add the remaining coordinates to the next\n # highlights\n #\n if len_point > 1 and (point[0] == c or point[0] == '?'):\n highlights_next[worker].append(point[1:])\n\n #\n # If this was the last coordinate\n # maybe start highlighting a subtensor\n #\n if len_point == 1 and point[0] == c and c in active_coords[worker]:\n highlight_subtensor_next[worker] = True\n self.addHighlight(worker)\n\n\n highlight_manager_next = HighlightManager(highlights_next,\n highlight_subtensor_next,\n self,\n self.level-1)\n\n return highlight_manager_next\n\n def addHighlight(self, worker):\n\n if not worker in self.highlight_coords:\n self.highlight_coords[worker] = set([self.current_coord])\n else:\n self.highlight_coords[worker].add(self.current_coord)\n\n parent = self.parent\n if parent is not None:\n parent.addHighlight(worker)\n\n\n def getColorCoord(self, c):\n\n #\n # For level 0, the highlight coords are the active coords and\n # tell the parent which of this child's workers were\n # highlighted\n #\n if self.level <= 0:\n self.highlight_coords = self.active_coords\n\n for worker, coords in self.highlight_coords.items():\n if c in coords:\n # print(f\"highlights[{worker}] = {self.highlights[worker]}\")\n parent = self.parent\n if parent is not None:\n self.parent.addHighlight(worker)\n\n\n color_coord = set([worker for worker, coords in self.highlight_coords.items() if c in coords])\n\n return color_coord\n\n def getColorSubtensor(self):\n\n color_subtensor = set([worker for worker in self.highlight_subtensor.keys()])\n return color_subtensor\n\n\n @staticmethod\n def canonicalizeHighlights(highlights, worker=\"PE\"):\n \"\"\"canonicalizeHighlights\n\n In methods that accept highlights there is considerable\n flexibility in the form that the highlights are provided. This\n method converts any of those forms into the canonical form,\n using keyword \"worker\" to assign a worker if one isn't\n provided in the \"highlights\" argument. 
The canonical form is\n a dictionary of workers and lists of their highlighted points:\n\n\n {worker0: [(point0_coord0, point0_coord1, ...),\n (point1_coord0, point1_coord1, ...),\n ...],\n worker1: [(point0_coord0, point0_coord1, ...),\n (point1_coord0, point1_coord1, ...),\n ...],\n ...,\n }\n\n\n Alternative forms:\n\n 1) Single point per worker\n\n {worker0: (point0_coord0, point0_coord1, ...),\n worker1: (point0_coord0, point0_coord1, ...),\n ...\n }\n\n\n 2) List of points, no worker\n\n [(point0_coord0, point0_coord1, ...),\n (point1_coord0, point1_coord1, ...),\n ...]\n\n\n 3) Single point, no worker\n\n (point1_coord0, point1_coord1, ...)\n\n\n Warning: if a coordinate is a tuple there is ambiguity in forms\n 1 and 3, so they cannot be used.\n\n\n Parameters:\n -----------\n\n highlights: dictionary, list or tuple\n A specification of highlights, maybe not in canonical form\n\n worker: string\n A name to use for the worker, if highlights doesn't include one\n\n Returns:\n --------\n\n highlights: dictionary\n A specification of highlights in canonical form\n\n\n Raises:\n -------\n\n Nothing\n\n\n Bugs:\n -----\n\n A single point with a character as a coordinate is\n misinterpreted as a list of points\n\n \"\"\"\n\n if not isinstance(highlights, dict):\n #\n # Massage highlights into proper form\n #\n highlights = {worker: highlights}\n\n #\n # Wrap highlights specified as a single point into a list\n #\n for pe, pe_highlights in highlights.items():\n #\n # If highlights is a single point convert to list\n #\n if len(pe_highlights):\n try:\n temp = pe_highlights[0][0]\n\n except Exception:\n temp = pe_highlights\n pe_highlights = []\n pe_highlights.append(temp)\n highlights[pe] = pe_highlights\n\n return highlights\n\n", "id": "8973074", "language": "Python", "matching_score": 2.04799485206604, "max_stars_count": 2, "path": "fibertree/graphics/highlights.py" }, { "content": "\"\"\"CoordPayload\n\nA class used to represent an **element** of a fiber, i.e., a\ncoordinate/payload tuple.\n\n\"\"\"\nimport logging\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.core.coord_payload')\n\n\nclass CoordPayload():\n \"\"\"An element of a fiber.\n\n Instances of this class are returned by some `Fiber` methods, most\n significantly, iteration (see `Fiber.__iter__()`).\n\n In many instances one just wants to operate on the `payload` part\n of the element. Therefore, this class provides a variety of\n overloaded operations that do just that. These include index\n operations ([]) and many operators, including:\n\n - Addition: (+, +-)\n - Subtraction: (-, -=)\n - Multiplication: (*, *=)\n - Division: (/, /=)\n - Integer division: (//)\n - Left shift: (<<)\n - Boolean and: (&)\n - Boolean or: (|)\n - Equal: (=)\n - Not equal: (!=)\n - Less than: (<)\n - Great than: (&gt;)\n - Less than or equal: (<=)\n - Greater than or equal: (&gt;=)\n\n In addition to the above operators one needs to be able to\n conveniently assign a new value to the payload of a\n `CoordPayload`. 
Since the normal Python assignment operator (=)\n will replace a pointer to a class rather than update a value in\n the class (i.e., the **boxed** value) we overload the operator\n \"<<=\" to assign a new **boxed** value to the payload (see\n `CoordPayload.__ilshift__()`)\n\n Attributes\n ----------\n\n coord: a hashable value\n A value used as a coordinate\n\n payload: a legal payload\n A legal \"payload\" value.\n\n\n Note - these attributes are just left public in the class, so\n given an instance of this class named `element` one can access the\n attributes as `element.coord` and `element.payload`.\n\n Constructor\n -----------\n\n The `CoordPayload` constructor creates an element of a fiber with\n a given `coord` and `payload`.\n\n Parameters\n ----------\n\n coord: a hashable value\n A value used as a coordinate\n\n payload: a legal payload\n A legal \"payload\" value.\n\n\n Notes\n -----\n\n Construction of a element of this class relies on the `payload`\n argument already being a legal payload. Frequently, that will be\n will a instance of a `Payload` (see `fibertree.core.payload`). But\n because it is already a legal payload `__init__()` does not try to\n invoke `Payload.maybe_box()`.\n\n Iteration through an instance of this class results in the\n \"coordinate\" followed by the \"payload\".\n\n \"\"\"\n def __init__(self, coord, payload):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.coord_payload')\n\n\n self.coord = coord\n self.payload = payload\n\n\n def __iter__(self):\n \"\"\"__iter__\"\"\"\n\n yield self.coord\n yield self.payload\n\n #\n # Position based methods\n #\n def __getitem__(self, keys):\n \"\"\"Index into the payload\n\n Do a `__getitem__()` on the payload of `self`. Generally this\n will only be meaningful if the payload is a fiber. So see\n `Fiber.__getitem__()` for more information.\n\n Parameters\n ----------\n keys: single integer/slice or tuple of integers/slices\n The positions or slices in an n-D fiber\n\n Returns\n -------\n tuple or Fiber\n A tuple of a coordinate and payload or a Fiber of the slice\n\n Raises\n ------\n\n IndexError\n Index out of range\n\n TypeError\n Invalid key type\n\n \"\"\"\n return self.payload.__getitem__(keys)\n\n\n def __setitem__(self, key, newvalue):\n \"\"\"Index into a payload and update the value\n\n Do a `__setitem__()` on the payload of `self`. Generally this\n will only be meaningful if the payload is a fiber. So see\n `Fiber.__setitem__()` for more information.\n\n Parameters\n ----------\n key: single integer\n The position in the fiber to be set\n\n newvalue: a CoordPayload or a payload value\n The coordinate/payload or just payload to assign\n\n Returns\n -------\n Nothing\n\n Raises\n ------\n\n IndexError\n Index out of range\n\n TypeError\n Invalid key type\n\n CoordinateError\n Invalid coordinate\n\n \"\"\"\n\n self.payload.__setitem__(key, newvalue)\n\n #\n # Assignment operator\n #\n def __ilshift__(self, other):\n \"\"\"Assign a new **boxed** value.\n\n Assigns a new value to a `Payload`. 
Since the normal\n Python assignment operator (=) will replace a pointer to a class\n rather than update a value in the class (i.e., the **boxed**\n value) we overload the \"<<=\" operator to assign a new **boxed**\n value to a `Payload`\n\n Parameters\n ----------\n other: Payload or scalar\n A value to assign as the new **boxed** value.\n\n Returns\n -------\n Nothing\n\n\n Examples\n --------\n\n ```\n >>> a = CoordPayload(1, 4)\n >>> print(a)\n CoordPayload(1, 4)\n >>> b = a\n >>> b <<= 6\n >>> print(a)\n CoordPayload(1, 6)\n >>> b = 8\n >>> print(a)\n CoordPayload(1, 6)\n ```\n\n Notes\n -----\n\n There is an analogous assignment operator for the `Payload`\n and `Fiber` classes, so one can \"assign\" a new value to a\n \"payload\" irrespective of whether the \"payload\" is a\n `Payload`, a `CoordPayload` or a `Fiber`.\n\n \"\"\"\n if isinstance(other, CoordPayload):\n self.payload <<= other.payload\n else:\n self.payload <<= self.payload + other\n\n\n #\n # Arithmetic operations\n #\n def __add__(self, other):\n \"\"\"__add__\"\"\"\n\n if isinstance(other, CoordPayload):\n ans = self.payload + other.payload\n else:\n ans = self.payload + other\n\n return ans\n\n def __radd__(self, other):\n \"\"\"__radd__\"\"\"\n\n return other + self.payload\n\n def __iadd__(self, other):\n \"\"\"__iadd__\"\"\"\n\n if isinstance(other, CoordPayload):\n self.payload += other.payload\n else:\n self.payload += other\n\n return self\n\n def __sub__(self, other):\n \"\"\"__sub__\"\"\"\n\n if isinstance(other, CoordPayload):\n ans = self.payload - other.payload\n else:\n ans = self.payload - other\n\n return ans\n\n def __rsub__(self, other):\n \"\"\"__rsub__\"\"\"\n\n return other - self.payload\n\n def __isub__(self, other):\n \"\"\"__isub__\"\"\"\n\n if isinstance(other, CoordPayload):\n self.payload -= other.payload\n else:\n self.payload -= other\n\n return self\n\n def __mul__(self, other):\n \"\"\"__mul__\"\"\"\n\n if isinstance(other, CoordPayload):\n ans = self.payload * other.payload\n else:\n ans = self.payload * other\n\n return ans\n\n def __rmul__(self, other):\n \"\"\"__rmul__\"\"\"\n\n return other * self.payload\n\n def __imul__(self, other):\n \"\"\"__imul__\"\"\"\n\n if isinstance(other, CoordPayload):\n self.payload *= other.payload\n else:\n self.payload *= other\n\n return self\n\n def __div__(self, other):\n \"\"\"__div__\"\"\"\n\n if isinstance(other, CoordPayload):\n ans = self.payload / other.payload\n else:\n ans = self.payload / other\n\n return ans\n\n def __rdiv__(self, other):\n \"\"\"__rdiv__\"\"\"\n\n return other / self.payload\n\n def __idiv__(self, other):\n \"\"\"__idiv__\"\"\"\n\n if isinstance(other, CoordPayload):\n self.payload /= other.payload\n else:\n self.payload /= other\n\n return self\n\n\n#\n# Comparison operations\n#\n\n def __eq__(self, other):\n \"\"\"__eq__\"\"\"\n\n if isinstance(other, CoordPayload):\n return self.payload == other.payload\n\n return self.payload == other\n\n def __lt__(self, other):\n \"\"\"__lt__\"\"\"\n\n if isinstance(other, CoordPayload):\n return self.payload < other.payload\n\n return self.payload < other\n\n def __le__(self, other):\n \"\"\"__le__\"\"\"\n\n if isinstance(other, CoordPayload):\n return self.payload <= other.payload\n\n return self.payload <= other\n\n def __gt__(self, other):\n \"\"\"__gt__\"\"\"\n\n if isinstance(other, CoordPayload):\n return self.payload > other.payload\n\n return self.payload > other\n\n def __ge__(self, other):\n \"\"\"__ge__\"\"\"\n\n if isinstance(other, CoordPayload):\n return 
self.payload >= other.payload\n\n return self.payload >= other\n\n def __ne__(self, other):\n \"\"\"__ne__\"\"\"\n\n if isinstance(other, CoordPayload):\n return self.payload != other.payload\n\n return self.payload != other\n\n #\n # Printing\n #\n def __repr__(self):\n \"\"\"__repr__\"\"\"\n\n return str(f\"CoordPayload(coord={self.coord}, payload={self.payload})\")\n\n#\n# Pdoc stuff\n#\n__pdoc__ = {'CoordPayload.__getitem__': True,\n 'CoordPayload.__setitem__': True,\n 'CoordPayload.__ilshift__': True,\n }\n\n##############################################\n\nif __name__ == \"__main__\":\n\n from fibertree import Payload\n\n print(\"\")\n print(\"Start test\")\n print(\"\")\n\n a = CoordPayload(5, 4)\n print(f\"a = CoordPayload(5, 4) -> {a}\")\n\n x, y = a\n print(x, y)\n\n print(\"\")\n\n b = CoordPayload(coord=6, payload=2)\n print(f\"b = CoordPayload(6, 2) -> {b}\")\n\n print(\"\")\n\n z = a + b\n print(f\"a+b -> {z}\")\n\n z = a + 1\n print(f\"a+1 -> {z}\")\n\n z = 1 + a\n print(f\"1+a -> {z}\")\n\n print(\"\")\n\n p = Payload(4)\n print(f\"p = Payload(4) -> {p}\")\n\n z = a + p\n print(f\"a+p -> {z}\")\n\n z = p + a\n print(f\"p+a -> {z}\")\n\n print(\"\")\n\n a += b\n print(f\"a+=b -> {a}\")\n a = CoordPayload(5, 4)\n\n a += 2\n print(f\"a+=2 -> {a}\")\n a = CoordPayload(5, 4)\n\n a += p\n print(f\"a+=p -> {a}\")\n a = CoordPayload(5, 4)\n\n print(\"\")\n\n c = CoordPayload(coord=7, payload=Payload(8))\n print(f\"c = CoordPayload(7, Payload(8)) -> {c}\")\n\n print(\"\")\n\n z = a + c\n print(f\"a+c -> {z}\")\n\n z = c + a\n print(f\"c+a -> {z}\")\n\n z = c + 1\n print(f\"c+1 -> {z}\")\n\n z = 1 + c\n print(f\"1+c -> {z}\")\n\n print(\"\")\n\n c += b\n print(f\"c+=b -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n\n c += 2\n print(f\"c+=2 -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n\n c += p\n print(f\"c+=p -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n\n print(\"\")\n\n a = CoordPayload(5, 4)\n print(f\"a = CoordPayload(5, 4) -> {a}\")\n\n x, y = a\n print(x, y)\n\n print(\"\")\n\n b = CoordPayload(coord=6, payload=2)\n print(f\"b = CoordPayload(6, 2) -> {b}\")\n\n print(\"\")\n\n z = a * b\n print(f\"a*b -> {z}\")\n\n z = a * 1\n print(f\"a*1 -> {z}\")\n\n z = 1 * a\n print(f\"1*a -> {z}\")\n\n print(\"\")\n\n p = Payload(4)\n print(f\"p = Payload(4) -> {p}\")\n\n z = a * p\n print(f\"a*p -> {z}\")\n\n z = p * a\n print(f\"p*a -> {z}\")\n\n print(\"\")\n\n a *= b\n print(f\"a*=b -> {a}\")\n a = CoordPayload(5, 4)\n\n a *= 2\n print(f\"a*=2 -> {a}\")\n a = CoordPayload(5, 4)\n\n a *= p\n print(f\"a*=p -> {a}\")\n a = CoordPayload(5, 4)\n\n print(\"\")\n\n c = CoordPayload(coord=7, payload=Payload(8))\n print(f\"c = CoordPayload(7, Payload(8)) -> {c}\")\n\n print(\"\")\n\n z = a * c\n print(f\"a*c -> {z}\")\n\n z = c * a\n print(f\"c*a -> {z}\")\n\n z = c * 1\n print(f\"c*1 -> {z}\")\n\n z = 1 * c\n print(f\"1*c -> {z}\")\n\n print(\"\")\n\n c *= b\n print(f\"c*=b -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n\n c *= 2\n print(f\"c*=2 -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n\n c *= p\n print(f\"c*=p -> {c}\")\n c = CoordPayload(coord=7, payload=Payload(8))\n", "id": "6796732", "language": "Python", "matching_score": 5.449694633483887, "max_stars_count": 2, "path": "fibertree/core/coord_payload.py" }, { "content": "\"\"\"Payload\n\nA class implementing a **boxed** value to use as a payload\nof an element of a fiber.\n\n\"\"\"\nimport logging\n\n#\n# Set up logging\n#\nmodule_logger = 
logging.getLogger('fibertree.core.payload')\n\n\nclass Payload:\n \"\"\"A class to hold the payload of an element of a fiber.\n\n This class supports to ability for a program operating on the\n payload of an element of a fiber to have a reference to that\n payload separate from the element containing it. Since, one needs\n to be able update that payload and see the update reflected in the\n fiber this class has the effect of **boxing** the payload for\n immutable types.\n\n Frequently, a `Payload` will appear as the payload of an element\n of a fiber as part of an instance of a `CoordPayload` (see\n `fibertree.core.coord_payload`).\n\n A substantial set of infix operators are provided that operate on\n a `Payload` and another `Payload` or a scalar value. These\n include:\n\n - Addition: (+, +-)\n - Subtraction: (-, -=)\n - Multiplication: (*, *=)\n - Division: (/, /=)\n - Integer division: (//)\n - Left shift: (<<)\n - Boolean and: (&)\n - Boolean or: (|)\n - Equal: (=)\n - Not equal: (!=)\n - Less than: (<)\n - Great than: (&gt;)\n - Less than or equal: (<=)\n - Greater than or equal: (&gt;=)\n\n In addition to the above operators one needs to be able to\n conveniently assign a new value to a `Payload`. Since the normal\n Python assignment operator (=) will replace a pointer to a class\n rather than update a value in the class (i.e., the **boxed**\n value) we overload the operator \"<<=\" to assign a new **boxed**\n value to a `Payload` (see `Payload.__ilshift__()`)\n\n Constructor\n -----------\n\n The `Payload` constructor creates an payload with an optionally\n given `value`.\n\n Parameters\n ----------\n value: immutable value, default=None\n The value to **box**.\n\n\n Notes\n -----\n\n Currently, a variety of immutable types, e.g., int, str and\n tuples, are **boxed** by the `Payload` class, while the `Fiber`\n class, which is the other common payload, is not **boxed** by this\n class. The `Payload.maybe_box()` method can be used to selectively\n wrap a fiber element's payload in the `Payload` class.\n\n Furthermore, to make it more convenient for a program to operate\n on an arbitrary type of fiber element payload, this class provides\n a variety of **static** methods that selectively peek inside the\n **box** to do their job. 
These include:\n\n - `Payload.isEmpty()`\n - `Payload.is_payload()`\n - `Payload.contains()`\n - `Payload.get()`\n\n \"\"\"\n\n def __new__(cls, value=None):\n\n from .fiber import Fiber\n\n #\n # Since we do not wrap Fibers in a Payload, we check if we\n # just want to just return the Fiber.\n #\n if isinstance(value, Fiber):\n return value\n\n #\n # Just handle regular Payload creation\n #\n self = super(Payload, cls).__new__(cls)\n self.__init__(value=value)\n return self\n\n\n def __init__(self, value=None):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.payload')\n\n self.value = value\n\n def v(self):\n \"\"\"Return the **boxed** value\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value: some immutable type\n The **boxed** value\n\n \"\"\"\n\n return self.value\n\n def __setattr__(self, name, value):\n \"\"\"Set an attribute of the Payload.\n\n Allow users to set the **boxed** value as an attribute (.v) of\n an instance of the class.\n\n Examples\n --------\n\n ```\n >>> payload = Payload()\n >>> payload.v = 5\n ```\n\n Notes\n -----\n\n The `Payload.__ilshift__()` payload assignment operator (<<=)\n should be used in preference to this method.\n\n \"\"\"\n\n if name == \"v\":\n name = \"value\"\n\n # If value is a Payload copy in its value\n if isinstance(value, Payload):\n value = value.v()\n\n self.__dict__[name] = value\n\n def __iter__(self):\n \"\"\"__iter__\"\"\"\n\n for v in self.value:\n yield v\n\n def __reversed__(self):\n \"\"\"__reversed__\"\"\"\n\n return reversed(self.value)\n\n\n def __bool__(self):\n \"\"\"__bool__\"\"\"\n\n return bool(self.value)\n\n def __int__(self):\n \"\"\"__int__\"\"\"\n\n return int(self.value)\n\n#\n# Static methods\n#\n @staticmethod\n def isEmpty(p):\n \"\"\"Check if a fiber element's payload is empty.\n\n Selectively look into the given argument (`p`) to see if it is\n empty. In essence, if the given argument is a `Payload` check\n if its **boxed** value is the **empty** value. 
If the given\n argument is a `Fiber`, then check if the fiber is empty.\n\n Parameters\n ----------\n p: a payload\n The payload of an element of a fiber.\n\n\n Returns\n -------\n is_empty: Boolean\n Whether `p` was **empty**.\n\n\n Todo\n ----\n\n This needs to work when **empty** is something other than zero.\n\n \"\"\"\n\n from .fiber import Fiber\n\n if isinstance(p, Fiber):\n return p.isEmpty()\n\n if isinstance(p, tuple):\n assert isinstance(p, tuple)\n\n if (p == 0):\n return True\n\n return False\n\n#\n#\n# Transition methods\n#\n# Note: The following methods are used as part of a future transition\n# from Fibers holding a raw Fiber as a payload to that Fiber\n# being embedded in in a Payload object.\n#\n @staticmethod\n def maybe_box(value):\n \"\"\"Selectively **box** a value.\n\n Selectively wrap certain values in a `Payload` wrapper.\n Currently only certain immutable types are **boxed**, and\n notably `Fibers` are not boxed nor are values that are already\n a `Payload`.\n\n Parameters\n ----------\n value: any type\n A value to possibly be **boxed** as a `Payload`\n\n\n Returns\n -------\n maybe_boxed: updatable type\n A reference to a value that can be updated\n\n Todo\n ----\n\n For consistency maybe this should be maybeBox().\n\n \"\"\"\n\n if isinstance(value, (bool, float, int, str, tuple, frozenset)):\n return Payload(value)\n\n return value\n\n\n @staticmethod\n def is_payload(payload):\n \"\"\"Check if argument is a payload.\n\n Check if the given argument (`payload`) is potentially the\n payload of an element of a fiber. In essence, check if the\n given argument is a `Payload` or a `Fiber`.\n\n Parameters\n ----------\n payload: a payload\n The potential payload of an element of a fiber.\n\n\n Returns\n -------\n is_payload: Boolean\n Whether `payload` was a `Payload` or `Fiber`\n\n Todo\n ----\n\n For consistency maybe this should be isPayload().\n\n \"\"\"\n\n from .fiber import Fiber\n\n return isinstance(payload, (Payload, Fiber))\n\n\n @staticmethod\n def contains(payload, type):\n \"\"\"Return whether `payload` is of type `type`.\n\n Selectively look into the given argument (`payload`) to see if\n it is of the requested type. In essence, if the given argument\n is a `Payload` check if its **boxed** value is of type\n `type`. If the given argument is a `Fiber`, the result is True\n if the caller is checking for a `Fiber`.\n\n Parameters\n ----------\n payload: a payload\n The payload of an element of a fiber.\n\n Returns\n -------\n contains: Boolean\n Whether `payload` was of type `type`.\n\n \"\"\"\n\n assert type != Payload, \"Cannot check for Payload type\"\n\n if not isinstance(payload, Payload):\n return isinstance(payload, type)\n\n return isinstance(payload.value, type)\n\n\n @staticmethod\n def get(payload):\n \"\"\"Return value of `payload`.\n\n Selectively look into the given argument (`payload`) and\n return its value. In essence, if the given argument is a\n `Payload` return its **boxed** value. 
If the given argument\n is a `Fiber`, then return it.\n\n Parameters\n ----------\n payload: a payload\n The payload of an element of a fiber.\n\n Returns\n -------\n value: any type\n The **boxed** value of a `Payload` or a `Fiber`.\n\n \"\"\"\n\n if not isinstance(payload, Payload):\n return payload\n\n return payload.value\n\n#\n# Srtring operations\n#\n\n def print(self, title=None):\n \"\"\"print\"\"\"\n\n return self.value.print(title)\n\n\n def __format__(self, spec=\"\"):\n \"\"\"__format__\"\"\"\n\n if len(spec) > 0:\n return f\"<{self.value:{spec}}>\"\n else:\n return f\"<{self.value}>\"\n\n\n def __str__(self):\n \"\"\"__str__\"\"\"\n\n return f\"<{self.value.__str__()}>\"\n\n\n def __repr__(self):\n \"\"\"__repr__\"\"\"\n\n return f\"Payload({self.value.__repr__()})\"\n#\n# Arithmetic operations\n#\n def __add__(self, other):\n \"\"\"__add__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value + other.value\n else:\n ans = self.value + other\n\n return Payload(ans)\n\n def __radd__(self, other):\n \"\"\"__radd__\"\"\"\n\n assert not isinstance(other, Payload)\n\n return Payload(other + self.value)\n\n def __iadd__(self, other):\n \"\"\"__iadd__\"\"\"\n\n if isinstance(other, Payload):\n self.value = self.value + other.value\n else:\n self.value = self.value + other\n return self\n\n # Note: we use <<= in place of base '=' so this is a pure overwrite\n def __ilshift__(self, other):\n \"\"\"Assign a new **boxed** value.\n\n Assigns a new value to a `Payload`. Since the normal\n Python assignment operator (=) will replace a pointer to a class\n rather than update a value in the class (i.e., the **boxed**\n value) we overload the \"<<=\" operator to assign a new **boxed**\n value to a `Payload`\n\n Parameters\n ----------\n other: Payload or scalar\n A value to assign as the new **boxed** value.\n\n Returns\n -------\n Nothing\n\n\n Examples\n --------\n\n ```\n >>> a = Payload(4)\n >>> print(a)\n 4\n >>> b = a\n >>> b <<= 6\n >>> print(a)\n 6\n >>> b = 8\n >>> print(a)\n 6\n ```\n\n Notes\n -----\n\n There is an analogous assignment operator for the `Fiber` and\n `CoordPayload` classes, so one can \"assign\" a new value to a\n \"payload\" irrespective of whether the \"payload\" is a\n `Payload`, `CoordPayload` or a `Fiber`.\n\n \"\"\"\n\n if isinstance(other, Payload):\n self.value = other.value\n else:\n self.value = other\n return self\n\n\n def __sub__(self, other):\n \"\"\"__sub__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value - other.value\n else:\n ans = self.value - other\n\n return Payload(ans)\n\n def __rsub__(self, other):\n \"\"\"__rsub__\"\"\"\n\n assert not isinstance(other, Payload)\n return Payload(other - self.value)\n\n\n def __isub__(self, other):\n \"\"\"__isub__\"\"\"\n\n if isinstance(other, Payload):\n self.value = self.value - other.value\n else:\n self.value = self.value - other\n return self\n\n\n def __mul__(self, other):\n \"\"\"__mul__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value * other.value\n else:\n ans = self.value * other\n\n return Payload(ans)\n\n def __truediv__(self, other):\n \"\"\"__truediv__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value / other.value\n else:\n ans = self.value / other\n\n return Payload(ans)\n\n def __rmul__(self, other):\n \"\"\"__rmul__\"\"\"\n\n assert not isinstance(other, Payload)\n\n return Payload(other * self.value)\n\n\n def __imul__(self, other):\n \"\"\"__imul__\"\"\"\n\n if isinstance(other, Payload):\n self.value = self.value * other.value\n else:\n self.value = 
self.value * other\n return self\n\n\n#\n# Comparison operations\n#\n def __eq__(self, other):\n \"\"\"__eq__\"\"\"\n\n if isinstance(other, Payload):\n return self.value == other.value\n\n return self.value == other\n\n def __lt__(self, other):\n \"\"\"__lt__\"\"\"\n\n if isinstance(other, Payload):\n return self.value < other.value\n\n return self.value < other\n\n def __le__(self, other):\n \"\"\"__le__\"\"\"\n\n if isinstance(other, Payload):\n return self.value <= other.value\n\n return self.value <= other\n\n def __gt__(self, other):\n \"\"\"__gt__\"\"\"\n\n if isinstance(other, Payload):\n return self.value > other.value\n\n return self.value > other\n\n def __ge__(self, other):\n \"\"\"__ge__\"\"\"\n\n if isinstance(other, Payload):\n return self.value >= other.value\n\n return self.value >= other\n\n def __ne__(self, other):\n \"\"\"__ne__\"\"\"\n\n if isinstance(other, Payload):\n return self.value != other.value\n\n return self.value != other\n\n#\n# Logical operatons\n# Note: primarily used by fiber iterators\n#\n\n def __and__(self, other):\n \"\"\"__and__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value & other.value\n else:\n ans = self.value & other\n\n return Payload(ans)\n\n\n def __or__(self, other):\n \"\"\"__or__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value | other.value\n else:\n ans = self.value | other\n\n return Payload(ans)\n\n\n def __lshift__(self, other):\n \"\"\"__lshift__\"\"\"\n\n if isinstance(other, Payload):\n ans = self.value << other.value\n else:\n ans = self.value << other\n\n return Payload(ans)\n\n#\n# Conversion methods - to/from dictionaries\n#\n\n @staticmethod\n def payload2dict(payload):\n \"\"\"Return payload converted to dictionry or simple value\"\"\"\n\n from .fiber import Fiber\n\n if isinstance(payload, Fiber):\n # Note: this leg is deprecated and should be removed\n return payload.fiber2dict()\n elif isinstance(payload, Payload):\n if Payload.contains(payload, Fiber):\n return payload.value.fiber2dict()\n else:\n return payload.value\n else:\n return payload\n\n\n#\n# Pdoc stuff\n#\n__pdoc__ = { 'Payload.payload2dict': False,\n 'Payload.__setattr__': True,\n 'Payload.__ilshift__': True,\n }\n\n\nif __name__ == \"__main__\":\n\n a = Payload(1)\n print(\"A = %s\" % a)\n print(\"---\")\n", "id": "3770610", "language": "Python", "matching_score": 1.1744768619537354, "max_stars_count": 2, "path": "fibertree/core/payload.py" }, { "content": "\nDEFAULT_TRACE_LEVEL = 3\n# \n# NoTransmit\n#\n# An alternative to None that indicates no transmission on a field in a struct.\n\nclass NoTransmit:\n pass\n\n# \n# Marker\n#\n# An explicit Marker indicating which level of marker it is.\nclass Marker:\n def __init__(self, level=1):\n self.level = level\n\n def offset(self, delta):\n return Marker(self.level + delta)\n \n def __str__(self):\n return f\"Marker_{self.level}\"\n\n\n#\n# SwoopTensor\n#\n# This is a format-independent abstraction for a SwoopTensor that contains abstract\n# \"Rank\"s that can be later set to have a concrete format and/or data.\n#\n\n\nclass SwoopTensor:\n def __init__(self, name, rank_ids):\n assert len(rank_ids) >= 0\n # All tensors have a root rank.\n my_rank_ids = rank_ids[:]\n my_rank_ids.insert(0, \"root\")\n self.ranks = {}\n for (n, r) in enumerate(my_rank_ids):\n self.ranks[r] = Rank(r, self, n)\n self.name = name\n self.rank_ids = my_rank_ids\n \n def __getitem__(self, rank_id):\n return self.ranks[rank_id]\n\n def setImplementations(self, rank_id, imps):\n 
self.ranks[rank_id].setImplementations(imps)\n \n def getRootHandle(self):\n return Stream0(FiberHandle(self.ranks[\"root\"], 0), instance_name=self.name + \"_root\")\n \n def getStartHandle(self):\n r_1 = self.getRankByIndex(1)\n return Stream0(FiberHandle(r_1, 0), instance_name=self.name + \"_\" + r_1.name)\n \n def getRankByIndex(self, idx):\n return self.ranks[self.rank_ids[idx]]\n\n#\n# Rank\n#\n# This is a format-independent abstraction for a Rank that dispatches\n# method calls to a format-specific version. The reason this class\n# exists is that it lets us define swoop programs first, then fill in\n# the formats as a later step (using setImplementation()).\n#\n\n\nclass Rank:\n def __init__(self, name, tensor, rank_index):\n self.implementations = []\n self.name = name\n self.tensor = tensor\n self.rank_index = rank_index\n\n def setImplementations(self, imps):\n self.implementations = imps\n\n def setupSlice(self, fiber_idx, base_coord = 0, bound_coord = None, max_num = None):\n # print(\"{}_{}:: implementations {}, fiber idx {}\".format(self.tensor.name, self.name, self.implementations, fiber_idx))\n self.implementations[fiber_idx].setupSlice(base_coord, bound_coord, max_num)\n return fiber_idx\n\n def nextInSlice(self, fiber_idx):\n return self.implementations[fiber_idx].nextInSlice()\n \n def handleToCoord(self, fiber_idx, handle):\n return self.implementations[fiber_idx].handleToCoord(handle)\n \n def handleToPayload(self, fiber_idx, handle):\n return self.implementations[fiber_idx].handleToPayload(handle)\n \n def payloadToFiberHandle(self, fiber_idx, payload):\n next_rank_index = self.tensor.getRankByIndex(self.rank_index + 1)\n return FiberHandle(next_rank_index, self.implementations[fiber_idx].payloadToFiberHandle(payload))\n\n def payloadToValue(self, fiber_idx, payload):\n return self.implementations[fiber_idx].payloadToValue(payload)\n \n def coordToHandle(self, fiber_idx, coord):\n return self.implementations[fiber_idx].coordToHandle(coord)\n \n def insertElement(self, fiber_idx, coord):\n return self.implementations[fiber_idx].insertElement(coord)\n \n def updatePayload(self, fiber_idx, handle, payload):\n return self.implementations[fiber_idx].updatePayload(handle, payload)\n \n def getUpdatedFiberHandle(self, fiber_idx):\n return self.implementations[fiber_idx].getUpdatedFiberHandle()\n\n def fiberHandleToPayload(self, fiber_idx):\n return self.implementations[fiber_idx].fiberHandleToPayload()\n\n def valueToPayload(self, fiber_idx):\n return self.implementations[fiber_idx].valueToPayload()\n\n def dumpStats(self, stats_dict):\n for impl in self.implementations:\n impl.dumpStats(stats_dict)\n \n def __str__(self):\n return self.tensor.name + \"_\" + self.name\n\n#\n# FiberHandle\n#\n# A convenience class for a fiber index that also has a reference to the rank\n# that the fiber comes from. 
Just dispatches calls into the rank.\n\nclass FiberHandle:\n def __init__(self, rank, pos):\n self.rank = rank\n self.position = pos\n \n def __str__(self):\n return str(self.rank) + \"[\" + str(self.position) + \"]\"\n \n def setupSlice(self, base_coord = 0, bound_coord = None, max_num = None):\n return self.rank.setupSlice(self.position, base_coord, bound_coord, max_num)\n\n def nextInSlice(self):\n return self.rank.nextInSlice(self.position)\n \n def handleToCoord(self, handle):\n return self.rank.handleToCoord(self.position, handle)\n \n def handleToPayload(self, handle):\n return self.rank.handleToPayload(self.position, handle)\n \n def payloadToFiberHandle(self, payload):\n return self.rank.payloadToFiberHandle(self.position, payload)\n\n def payloadToValue(self, payload):\n return self.rank.payloadToValue(self.position, payload)\n \n def coordToHandle(self, coord):\n return self.rank.coordToHandle(self.position, coord)\n \n def insertElement(self, coord):\n return self.rank.insertElement(self.position, coord)\n \n def updatePayload(self, handle, payload):\n return self.rank.updatePayload(self.position, handle, payload)\n \n def getUpdatedFiberHandle(self):\n return self.rank.getUpdatedFiberHandle(self.position)\n\n def fiberHandleToPayload(self, fiber_handle):\n return self.rank.fiberHandleToPayload(self.position, fiber_handle)\n\n def valueToPayload(self, value):\n return self.rank.valueToPayload(self.position, value)\n\n def dumpStats(self, stats_dict):\n return self.rank.dumpStats(stats_dict)\n\n#\n# AST\n#\n# Base Class for all AST nodes\n# Most have a reference to a stream of Fiber Handles. \n# Additionally, track the \"fanout\" number which is the number of receivers. \n# If this remains 0, this Node is unconnected \n# and can be eliminated as dead code.\n#\nclass AST:\n\n def __init__(self, class_name, fiber_handles = None, num_fields = 1):\n self.class_name = class_name\n self.num_fields = num_fields\n self.cur_results = [] # 1 dic per field, 1 dic-entry per fanout\n for f in range(num_fields):\n self.cur_results.append({})\n self.producers = []\n self.initialized = False\n self.finalized = False\n self.trace_level = DEFAULT_TRACE_LEVEL\n self.current_fiber = None\n if fiber_handles is not None:\n self.fiber_handles = fiber_handles\n fiber_handles.connect(self)\n \n def _addProducer(self, other):\n self.producers.append(other)\n \n def connect(self, other, field=0):\n other._addProducer(self)\n self.cur_results[field][other] = []\n\n def initialize(self):\n self.initialized = True\n for prod in self.producers:\n if not prod.initialized:\n prod.initialize()\n \n def evaluate(self):\n self.trace(\"Unimplemented Evaluate\")\n assert False\n \n def finalize(self, stats_dict):\n self.dumpStats(stats_dict)\n self.finalized = True\n for prod in self.producers:\n # print(\"producer {}, finalized {}\".format(prod, prod.finalized))\n # if not prod.finalized:\n prod.finalize(stats_dict)\n \n def setupCurrentFiber(self, offset=1):\n assert self.fiber_handles is not None\n # See if we need a new fiber handle\n if self.current_fiber is None:\n next_fh = self.fiber_handles.nextValue(self)\n # If it was a marker, just pass it through\n if isinstance(next_fh, Marker):\n # Check for consistency in all other producers\n for producer in self.producers:\n if producer != self.fiber_handles:\n # They should be giving us markers, and of the same level.\n marker = producer.nextValue(self)\n assert isinstance(marker, Marker)\n assert marker.level == next_fh.level + offset\n # The caller should return 
this marker (possibly with an offset)\n return next_fh.offset(offset)\n else:\n # Just a normal new fiber handle.\n self.trace(3, \"New fiber handle:{}\".format(next_fh))\n self.current_fiber = next_fh\n return None\n \n def nextValue(self, other, field=0):\n if other is not None:\n res_q = self.cur_results[field][other]\n # If we have queue'd up a value because of fanout, just use it.\n if len(res_q) > 0:\n self.trace(4, f\"Fanout: {other} => {res_q[0]}\")\n return res_q.pop(0) # Important: We need 0 here, otherwise it will skip None\n\n # Proceed with normal evaluation.\n # Call evaluate, but only once and fan out the result to later callers.\n self.trace(4, f\"Eval {other}\")\n res = self.evaluate()\n for n in range(self.num_fields):\n for (caller, q) in self.cur_results[n].items():\n if caller != other or n != field:\n if self.num_fields == 1:\n q.append(res)\n else:\n q.append(res[n])\n self.trace(4, f\"Eval: {other} => {res}\")\n if self.num_fields == 1:\n return res\n else:\n return res[field]\n\n def trace(self, level, args):\n if (level > self.trace_level):\n return\n print(self.getName() + \":\", args)\n\n def setTraceLevel(self, level):\n self.trace_level = level\n\n def getName(self):\n if (hasattr(self, \"fiber_handles\")):\n return self.class_name + \":\" + str(self.current_fiber)\n else:\n return self.class_name\n\n # These are for Nodes that want to return a tuple from evaluate.\n def __iter__(self):\n self.cur_field = 0\n return self\n \n def __next__(self):\n if self.cur_field > (self.num_fields - 1):\n raise StopIteration\n sp = Splitter(self, self.cur_field)\n self.cur_field += 1\n return sp\n \n def __getitem__(self, n):\n assert self.num_fields != 0\n assert n < self.num_fields\n sp = Splitter(self, n)\n return sp\n \n def dumpStats(self, stats_dict):\n # print(\"dumpStats2 {}\".format(self.class_name))\n if (hasattr(self, \"accesses\")): # and self.current_fiber is None):\n stats_dict[self.class_name] = self.accesses\n # pass\n\n#\n# Slice\n#\n# Given a fiber handle, and a slice spec returns a rank-1 stream of all handles\n# to elements in that slice.\n# \n\nclass Slice (AST):\n def __init__(self, fiber_handle, base = 0, bound = None, max_num = None):\n super().__init__(\"Slice\", rank)\n self.fiber_handle = fiber_handle\n self.base = base\n self.bound = bound\n self.max_num = max_num\n \n \n def initialize(self):\n self.trace(3, f\"SetupSlice: {self.base}, {self.bound}, {self.max_num}\")\n self.fiber_handle.setupSlice(self.base, self.bound, self.max_num)\n super().initialize()\n \n def evaluate(self):\n res = fiber_handle.nextInSlice()\n self.trace(2, f\"NextInSlice: {res}\")\n if res is None:\n return Marker()\n return res\n \n#\n# Scan\n#\n# Given a N-stream of fiber_handles,\n# returns a N+1-stream of all handles to elements in those fibers.\n# \n\nclass Scan (AST):\n def __init__(self, fiber_handles):\n super().__init__(\"Scan\", fiber_handles)\n \n def evaluate(self):\n \n # Make sure we have a fiber handle to work with.\n starting_new_fiber = self.current_fiber is None\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n \n # If we started a new fiber, setup the Slice.\n if starting_new_fiber: \n self.current_fiber.setupSlice(0, 0, 0)\n \n # At this point, we should always have a current fiber.\n res = self.current_fiber.nextInSlice()\n if res is None:\n self.trace(3, \"Fiber Done.\")\n self.current_fiber = None\n return Marker()\n self.trace(2, f\"Next: {res}\")\n return res\n\n\n#\n# 
InsertionScan\n#\n# Given an N-stream of fiber_handles, and a N+1-stream of \n# coords, returns a N+1-stream of handles to elements of those coords in \n# those fibers. Also returns a N-stream of updated fiber handles.\n# \n\nclass InsertionScan (AST):\n def __init__(self, fiber_handles, coords):\n super().__init__(\"InsertionScan\", fiber_handles, num_fields=2)\n self.coords = coords\n coords.connect(self)\n self.active = False\n \n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through (different offset per field)\n if marker is not None:\n return (marker, marker.offset(-1))\n \n coord = self.coords.nextValue(self)\n if isinstance(coord, Marker):\n assert coord.level == 1\n new_handle = self.current_fiber.getUpdatedFiberHandle()\n self.current_fiber = None\n self.trace(3, f\"Fiber Done. New Handle: {new_handle}\")\n return (coord, new_handle)\n handle = self.current_fiber.insertElement(coord)\n self.trace(2, f\"{coord} => {handle}\")\n return (handle, NoTransmit)\n\n\n\n#\n# Iterate\n#\n# Simple convenience alias for iterating over an entire fiber\n#\n\ndef Iterate(rank):\n return Slice(rank)\n\n#\n# HandlesToCoords\n#\n# Given a reference to a N-1 stream of fiber handles, and an AST Node that \n# that produces a N-stream of handles, produces a N-stream of coordinates\n#\nclass HandlesToCoords (AST):\n\n def __init__(self, fiber_handles, handles):\n super().__init__(\"HandlesToCoords\", fiber_handles)\n self.handles = handles\n handles.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n\n handle = self.handles.nextValue(self)\n if isinstance(handle, Marker):\n self.trace(3, f\"{handle}\")\n assert handle.level == 1\n self.current_fiber = None\n return handle\n coord = self.current_fiber.handleToCoord(handle)\n self.trace(2, f\"{handle} => {coord}\")\n return coord\n \n#\n# HandlesToPayloads\n#\n# Given a reference to an AST Rank, and an AST Node that \n# that produces a N-stream of handles, produces a N-stream of payloads\n#\nclass HandlesToPayloads (AST):\n\n def __init__(self, fiber_handles, handles):\n super().__init__(\"HandlesToPayloads\", fiber_handles)\n self.handles = handles\n handles.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n\n handle = self.handles.nextValue(self)\n if isinstance(handle, Marker):\n self.trace(3, f\"{handle}\")\n assert handle.level == 1\n self.current_fiber = None\n return handle\n payload = self.current_fiber.handleToPayload(handle)\n self.trace(2, f\"{handle} => {payload}\")\n return payload\n\n\n#\n# PayloadsToFiberHandles\n#\n# Given a reference to an N-1 stream of fiber handles, and an AST Node that \n# that produces a N-stream of payloads, produces a N-stream of Fiber Handles\n#\nclass PayloadsToFiberHandles (AST):\n\n def __init__(self, fiber_handles, payloads):\n super().__init__(\"PayloadsToFiberHandles\", fiber_handles)\n self.payloads = payloads\n payloads.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n \n payload = self.payloads.nextValue(self)\n if isinstance(payload, Marker):\n self.trace(3, 
f\"{payload}\")\n assert payload.level == 1\n self.current_fiber = None\n return payload\n fiber_handle = self.current_fiber.payloadToFiberHandle(payload)\n self.trace(2, f\"{payload} => {fiber_handle}\")\n return fiber_handle\n\n#\n# PayloadsToValues\n#\n# Given a reference to an N-1 stream of fiber handles, and an AST Node that \n# that produces a N-stream of payloads, produces a N-stream of Values\n#\nclass PayloadsToValues (AST):\n\n def __init__(self, fiber_handles, payloads):\n super().__init__(\"PayloadsToValues\", fiber_handles)\n self.payloads = payloads\n payloads.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n \n payload = self.payloads.nextValue(self)\n if isinstance(payload, Marker):\n self.trace(3, f\"{payload}\")\n assert payload.level == 1\n self.current_fiber = None\n return payload\n value = self.current_fiber.payloadToValue(payload)\n self.trace(2, f\"{payload} => {value}\")\n return value\n\n\n\n#\n# CoordsToHandles\n#\n# Given a reference to an N-1 stream of fiber handles, and an AST Node that \n# that produces a N-stream of coords, produces a N-stream of handles\n# (NOTE: EXPENSIVE FOR MOST FORMATS)\n# TODO: Add starting position.\n#\nclass CoordsToHandles (AST):\n\n def __init__(self, fiber_handles, coords):\n super().__init__(\"CoordsToHandles\", fiber_handles)\n self.coords = coords\n coords.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n \n coord = self.coords.nextValue(self)\n if isinstance(coord, Marker):\n self.trace(3, f\"{coord}\")\n assert coord.level == 1\n self.current_fiber = None\n return coord\n handle = self.current_fiber.coordToHandle(coord)\n self.trace(2, f\"{coord} => {handle}\")\n return handle\n\n#\n# InsertElements\n#\n# Given a reference to an N-1 stream of fiber handles, and an AST Node that \n# that produces a N-stream of coords, produces a N-stream of handles\n# after creating that (coord, payload) element and initializing coord\n# (NOTE: EXPENSIVE FOR MOST FORMATS)\n# TODO: Add starting position.\n#\n\nclass InsertElements (AST):\n\n def __init__(self, fiber_handles, coords):\n super().__init__(\"InsertElements\", fiber_handles, num_fields=2)\n self.coords = coords\n coords.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return (marker, marker.offset(-1))\n \n coord = self.coords.nextValue(self)\n if isinstance(coord, Marker):\n new_handle = self.current_fiber.getUpdatedFiberHandle()\n self.trace(3, f\"Fiber done. 
New Handle: {new_handle}\")\n assert coord.level == 1\n self.current_fiber = None\n return (coord, new_handle)\n handle = self.current_fiber.insertElement(coord)\n self.trace(2, f\"{coord} => {handle}\")\n return (handle, NoTransmit)\n\n#\n# UpdatePayloads\n#\n# Given a reference to an N-1 stream of fiber handles, and an AST Node that produces\n# a N-stream of handles, and an AST Node that produces a N-stream\n# of payloads, updates each element (coord, payload) to the new payload.\n#\n\nclass UpdatePayloads (AST):\n\n def __init__(self, fiber_handles, handles, payloads):\n super().__init__(\"UpdatePayloads\", fiber_handles)\n self.handles = handles\n handles.connect(self)\n self.payloads = payloads\n payloads.connect(self)\n\n def evaluate(self):\n # Make sure we have a fiber handle to work with.\n marker = self.setupCurrentFiber()\n # If it's a marker, pass it through\n if marker is not None:\n return marker\n \n handle = self.handles.nextValue(self)\n payload = self.payloads.nextValue(self)\n if isinstance(handle, Marker) or isinstance(payload, Marker):\n assert isinstance(handle, Marker) and isinstance(payload, Marker)\n self.trace(2, f\"{handle}, {payload}\")\n assert handle.level == 1\n assert payload.level == 1\n self.current_fiber = None\n return handle\n self.trace(2, f\"{handle} => {payload}\")\n return self.current_fiber.updatePayload(handle, payload)\n\n\n# \n# Intersect\n# \n#\n\nclass Intersect (AST):\n def __init__(self, a_coords, a_handles, b_coords, b_handles, instance_name=None):\n # Note: we return a 3-tuple, so tell the super-class that.\n name = \"Intersect\"\n if instance_name is not None:\n name = name + \"_\" + instance_name\n super().__init__(name, num_fields=3)\n self.a_coords = a_coords\n a_coords.connect(self)\n self.a_handles = a_handles\n a_handles.connect(self)\n self.b_coords = b_coords\n b_coords.connect(self)\n self.b_handles = b_handles\n b_handles.connect(self)\n\n def evaluate(self):\n a_coord = -2\n b_coord = -1\n a_handle = Marker()\n b_handle = Marker()\n while not isinstance(a_coord, Marker) and not isinstance(b_coord, Marker):\n if a_coord == b_coord:\n self.trace(2, f\"Intersection found at: {a_coord}: ({a_handle}, {b_handle})\")\n return (a_coord, a_handle, b_handle)\n while not isinstance(a_coord, Marker) and not isinstance(b_coord, Marker) and a_coord < b_coord:\n a_coord = self.a_coords.nextValue(self)\n a_handle = self.a_handles.nextValue(self)\n self.trace(3, f\"Advancing A: {a_coord}, {b_coord} ({a_handle}, {b_handle})\") \n while not isinstance(b_coord, Marker) and not isinstance(a_coord, Marker) and b_coord < a_coord:\n b_coord = self.b_coords.nextValue(self)\n b_handle = self.b_handles.nextValue(self)\n self.trace(3, f\"Advancing B: {a_coord}, {b_coord} ({a_handle}, {b_handle})\")\n # If one ended, drain the other\n if isinstance(a_coord, Marker):\n while not isinstance(b_coord, Marker):\n b_coord = self.b_coords.nextValue(self)\n b_handle = self.b_handles.nextValue(self)\n self.trace(3, f\"Draining B: {b_coord} ({b_handle})\")\n self.trace(3, \"Done.\")\n return (b_coord, a_handle, b_handle)\n elif isinstance(b_coord, Marker):\n while not isinstance(a_coord, Marker):\n a_coord = self.a_coords.nextValue(self)\n a_handle = self.a_handles.nextValue(self)\n self.trace(3, f\"Draining A: {a_coord} ({a_handle})\")\n self.trace(3, \"Done.\")\n return (a_coord, a_handle, b_handle)\n\n self.trace(3, \"Done.\")\n return (a_coord, a_handle, b_handle)\n \n\n#\n# Splitter\n#\n# Helper module for splitting tuple streams.\n# Note: explicitly over-rides 
default fanout behavior.\n#\n\nclass Splitter (AST):\n def __init__(self, stream, num):\n super().__init__(\"Splitter(\" + stream.class_name + \")[\" + str(num) + \"]\")\n self.stream = stream\n stream.connect(self, num)\n self.num = num\n\n def evaluate(self):\n res = NoTransmit\n while res is NoTransmit:\n res = self.stream.nextValue(self, self.num)\n self.trace(3, f\"{self.num} => {res}\")\n return res\n\n#\n# Compute\n#\n# Given an N-argument function and a list of N AST nodes that produce\n# N-streams of values, apply the function to the values to produce an N-stream\n# of outputs\n#\n\nclass Compute (AST):\n def __init__(self, function, *streams, instance_name=None):\n name = \"Compute\"\n if instance_name is not None:\n name += \"_\" + instance_name\n super().__init__(name)\n self.streams = streams\n for stream in streams:\n stream.connect(self)\n self.function = function\n\n def evaluate(self):\n args = [None] * len(self.streams)\n for x, stream in enumerate(self.streams):\n args[x] = stream.nextValue(self)\n # If one arg is a Marker, they all should be Markers (in which case, skip the func)\n any_is_marker = False\n all_are_markers = True\n marker_level = None\n self.trace(1, f\"{args}\")\n for arg in args:\n assert arg is not NoTransmit\n is_marker = isinstance(arg, Marker)\n if is_marker:\n assert marker_level is None or marker_level == arg.level, f\"Compute: Inconsistent markers: {marker_level}, {arg.level}\"\n marker_level = arg.level\n any_is_marker |= is_marker\n all_are_markers &= is_marker\n\n if (any_is_marker and not all_are_markers):\n self.trace(0, f\"Inconsistent Markers: {args}\")\n assert not any_is_marker or all_are_markers\n \n if all_are_markers:\n self.trace(3, f\"{args[0]}\")\n return args[0]\n\n result = self.function(*args)\n self.trace(1, f\"{args} => {result}\")\n return result\n\n#\n# Amplify\n#\n# Given an AST node that produces an N-stream, and a Node that produces an\n# N+1-stream, replicate each element from the N-stream, so that the output\n# is an N+1-stream.\n#\n\nclass Amplify (AST):\n def __init__(self, smaller, bigger, instance_name = None):\n name = \"Amplify\"\n if instance_name is not None:\n name += \"_\" + instance_name\n super().__init__(name)\n self.smaller = smaller\n smaller.connect(self)\n self.bigger = bigger\n bigger.connect(self)\n self.current_value = None\n self.accesses = 0\n\n def evaluate(self):\n\n # Make sure we have a value to amplify.\n if self.current_value is None:\n next_val = self.smaller.nextValue(self)\n # If it was a marker, just pass it through\n if isinstance(next_val, Marker):\n # Check for consistency in bigger\n marker = self.bigger.nextValue(self)\n assert isinstance(marker, Marker)\n # It should be giving us markers, offset by 1\n assert marker.level == next_val.level + 1\n # Use the offset marker to take into account the markers below.\n return marker\n else:\n # Just a normal new value.\n self.trace(3, f\"{self.smaller.class_name}: New value: {next_val}\")\n self.current_value = next_val\n\n next = self.bigger.nextValue(self)\n # See if we are done with current amplification\n if isinstance(next, Marker):\n assert next.level == 1\n self.current_value = None\n self.trace(2, f\"{self.smaller.class_name}: Done.\")\n return next\n # increment stat for buffer access to smaller\n self.accesses += 1\n self.trace(2, f\"{self.smaller.class_name}: {next} => {self.current_value}\")\n return self.current_value\n \n def dumpStats(self, stats_dict):\n #print(\"dumpStats {}\".format(self.class_name))\n 
stats_dict[self.class_name] = self.accesses\n\n#\n# Reduce\n#\n# Given an AST node that produces an N-stream, and a Node that produces an\n# N-1-stream, reduce each element from the N-stream, so that the output\n# is an N-1-stream.\n#\n\nclass Reduce (AST):\n def __init__(self, bigger, smaller = None, instance_name = None):\n name = \"Reduce\"\n if instance_name is not None:\n name += \"_\" + instance_name\n super().__init__(name)\n self.smaller = smaller\n if smaller is not None:\n smaller.connect(self)\n self.bigger = bigger\n bigger.connect(self)\n self.accesses = 0\n\n def evaluate(self):\n # If we are connected to an initial value stream, use it.\n if self.smaller is not None:\n current_value = self.smaller.nextValue(self)\n if isinstance(current_value, Marker):\n next = self.bigger.nextValue(self)\n if not isinstance(next, Marker):\n self.trace(0, f\"Inconsitent Marker: {current_value} => {next}\")\n assert False\n assert current_value.level + 1 == next.level\n self.trace(3, f\"Passthrough: {current_value}\")\n return current_value\n self.trace(3, f\"Init: {current_value}\")\n else:\n current_value = 0\n\n next = self.bigger.nextValue(self)\n while not isinstance(next, Marker):\n self.trace(2, f\"{current_value} + {next} => {current_value + next}\")\n current_value += next\n # increment a stat for smaller (thing being reduced into)\n self.accesses += 1\n next = self.bigger.nextValue(self)\n if next.level == 1:\n self.trace(3, f\"Output: {current_value}\")\n return current_value\n else:\n assert self.smaller == None\n self.trace(3, f\"Passthrough: {next.offset(-1)}\")\n return next.offset(-1)\n\n def dumpStats(self, stats_dict):\n #print(\"dumpStats {}\".format(self.class_name))\n stats_dict[self.class_name] = self.accesses\n\n\n#\n# Stream0\n#\n# Turn a scalar into a 0-stream that transmits exactly that scalar.\n#\n\n\nclass Stream0 (AST):\n def __init__(self, val, instance_name=None):\n name = \"Stream0\"\n if instance_name is not None:\n name += \"_\" + instance_name\n super().__init__(name)\n self.val = val\n self.done = False\n \n \n def evaluate(self):\n #assert(not self.done)\n if self.done:\n self.trace(3, \"Done\")\n return Marker(0)\n self.done = True\n self.trace(3, f\"{self.val}\")\n return self.val\n\n#\n# Distribute\n#\n# Route a N-stream down one of M routes based on a distribution choice N-stream.\n#\n\nclass Distribute (AST):\n def __init__(self, N, distribution_choices, stream, instance_name=None):\n name = \"Distribute\"\n if instance_name is not None:\n name += \"_\" + instance_name\n super().__init__(name, num_fields=N)\n self.N = N\n self.distribution_choices = distribution_choices\n distribution_choices.connect(self)\n self.stream = stream\n stream.connect(self)\n \n def evaluate(self):\n choice = self.distribution_choices.nextValue(self)\n if isinstance(choice, Marker):\n self.trace(3, f\"{choice}\")\n marker = self.stream.nextValue(self)\n assert isinstance(marker, Marker)\n assert marker.level == choice.level\n return [choice] * self.N\n assert choice < self.N\n res = [NoTransmit] * self.N\n val = self.stream.nextValue(self)\n res[choice] = val\n self.trace(3, f\"{val} => {choice}\")\n return res\n\n#\n# Collect\n#\n# Route one of M N-streams together into an N-Stream based on a \n# distribution choice N-stream. 
Usually used to undo a Distribute by\n# passing the same distribution_choice stream to both.\n#\n\nclass Collect (AST):\n def __init__(self, N, distribution_choices, stream_array):\n super().__init__(\"Collect\")\n self.N = N\n self.distribution_choices = distribution_choices\n distribution_choices.connect(self)\n self.stream_array = stream_array\n for n in range(self.N):\n stream_array[n].connect(self)\n \n def evaluate(self):\n choice = self.distribution_choices.nextValue(self)\n if isinstance(choice, Marker):\n self.trace(3, f\"{choice}\")\n for stream in self.stream_array:\n marker = stream.nextValue(self)\n assert isinstance(marker, Marker)\n assert marker.level == choice.level\n return choice\n assert choice < self.N\n val = self.stream_array[choice].nextValue(self)\n assert not isinstance(val, Marker)\n self.trace(3, f\"{choice} => {val}\")\n return val\n\n#\n# BasicIntermediateRankImplementation\n#\n# Rank implementation JUST to test out the program below.\n\nclass BasicIntermediateRankImplementation:\n def __init__(self, shape, shape_of_next_rank, pos=0):\n self.shape = shape\n self.shape_of_next_rank = shape_of_next_rank\n self.pos = pos\n \n def setupSlice(self, base, bound, max_num):\n # ignore base/bound/max num because this class is BASIC.\n self.max_num = self.shape\n self.cur_num = 0\n \n def nextInSlice(self):\n if self.cur_num >= self.max_num:\n return None\n num = self.cur_num\n self.cur_num += 1\n return num\n \n def handleToCoord(self, handle):\n return handle\n \n def handleToPayload(self, handle):\n assert handle < self.shape_of_next_rank\n return (self.pos * self.shape) + handle\n \n def payloadToFiberHandle(self, payload):\n return payload\n \n def payloadToValue(self, payload):\n assert False\n \n def coordToHandle(self, coord):\n return coord\n \n def insertElement(self, coord):\n return coord\n \n def updatePayload(self, handle, payload):\n return handle\n \n def getUpdatedFiberHandle(self):\n return self.shape\n\n def fiberHandleToPayload(self, fiber_handle):\n return fiber_handle\n\n def valueToPayload(self, value):\n assert False\n\n\n def dumpStats(self, stats_dict):\n pass\n\n#\n# BasicFiberImplementation\n#\n# Rank implementation JUST to test out the programs below.\n\nclass BasicFiberImplementation:\n def __init__(self, vals):\n self.vals = vals\n \n def setupSlice(self, base, bound, max_num):\n # ignore base/bound/max num because this class is BASIC.\n self.max_num = len(self.vals)\n self.cur_num = 0\n \n def nextInSlice(self):\n if self.cur_num >= self.max_num:\n return None\n num = self.cur_num\n self.cur_num += 1\n return num\n \n def handleToCoord(self, handle):\n return handle\n \n def handleToPayload(self, handle):\n return self.vals[handle]\n \n def payloadToFiberHandle(self, payload):\n assert False\n \n def payloadToValue(self, payload):\n return payload\n \n def coordToHandle(self, coord):\n return coord\n \n def insertElement(self, coord):\n if coord >= len(self.vals):\n self.vals.append(0)\n return coord\n \n def updatePayload(self, handle, payload):\n assert handle is not None\n self.vals[handle] = payload\n return handle\n \n def getUpdatedFiberHandle(self):\n return len(self.vals)\n\n def fiberHandleToPayload(self, fiber_handle):\n assert False\n\n def valueToPayload(self, value):\n return value\n\n\n def dumpStats(self, stats_dict):\n pass\n\n#\n# evaluate\n#\n# Run the given node (and all nodes connected to it) until it returns a Marker\n# N times in a row.\n#\n\n\ndef evaluate(node, n = 1, stats_dict = {}):\n assert n >= 0\n 
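# Initialize the whole connected graph, then keep pulling values from 'node' until the loop below has seen enough consecutive Markers (controlled by n).\n 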
node.initialize()\n consecutive_markers = -1\n while (consecutive_markers != n):\n res = node.nextValue(None)\n print(f\"+++++++++\")\n print(f\"Evaluate: {res}\")\n print(f\"+++++++++\")\n if isinstance(res, Marker):\n consecutive_markers += 1\n else:\n consecutive_markers = 0\n node.finalize(stats_dict)\n\n\nif __name__ == \"__main__\":\n\n\n ## Test program: Element-wise multiplication\n #\n #\n # Z_k = A_k * B_k\n #\n #\n # a_k = A.getRoot()\n # b_k = B.getRoot()\n # z_k = Z.getRoot()\n #\n # for k, (z, (a, b)) in z_k << (a_k & b_k):\n # z <<= a * b\n\n # Define the tensors\n a = SwoopTensor(name = \"A\", rank_ids = [\"K\"])\n b = SwoopTensor(name = \"B\", rank_ids = [\"K\"])\n z = SwoopTensor(name = \"Z\", rank_ids = [\"K\"])\n\n # Get handles to the tree start.\n a_k = a.getStartHandle() # GetStartingFiber(a)\n b_k = b.getStartHandle() # GetStartingFiber(b)\n z_root = z.getRootHandle()\n z_k = z.getStartHandle() # GetStartingFiber(z)\n\n # Iterate the K rank and get handles to contents\n a_handles = Scan(a_k)\n b_handles = Scan(b_k)\n # Convert handles to coordinates\n a_coords = HandlesToCoords(a_k, a_handles)\n b_coords = HandlesToCoords(b_k, b_handles)\n # Intersect the K rank\n (ab_coords, ab_a_handles, ab_b_handles) = Intersect(a_coords, a_handles, b_coords, b_handles)\n # Only insert elements that survive intersection\n (z_handles, z_k_new_fiber_handle) = InsertionScan(z_k, ab_coords)\n # Only retrieve the values that survive intersection\n a_payloads = HandlesToPayloads(a_k, ab_a_handles)\n b_payloads = HandlesToPayloads(b_k, ab_b_handles)\n a_values = PayloadsToValues(a_k, a_payloads)\n b_values = PayloadsToValues(b_k, b_payloads)\n # Calculate the loop body\n results = Compute(lambda a, b: a*b, a_values, b_values)\n # Final writeback\n z_k_update_acks = UpdatePayloads(z_k, z_handles, results)\n \n # Update final occupancies.\n z_root_update_acks = UpdatePayloads(z_root, Stream0(0), z_k_new_fiber_handle)\n\n # Create some example implmentations\n my_a_root = BasicIntermediateRankImplementation(1, 1)\n my_a_K = BasicFiberImplementation([1, 2, 3])\n my_b_root = BasicIntermediateRankImplementation(1, 1)\n my_b_K = BasicFiberImplementation([4, 5, 6])\n my_z_root = BasicIntermediateRankImplementation(1, 1)\n my_z_K = BasicFiberImplementation([])\n\n # Use those implementations in practice\n a.setImplementations(\"root\", [my_a_root])\n a.setImplementations(\"K\", [my_a_K])\n b.setImplementations(\"root\", [my_b_root])\n b.setImplementations(\"K\", [my_b_K])\n z.setImplementations(\"root\", [my_z_root])\n z.setImplementations(\"K\", [my_z_K])\n\n # Run the program and check and print the result\n evaluate(z_k_update_acks)\n evaluate(z_root_update_acks, 0)\n print(\"===========================\")\n print(f\"Final element-wise result: {my_z_K.vals}\")\n print(\"===========================\")\n assert my_z_K.vals == [4, 10, 18]\n\n\n\n ## Test program: A-Stationary vector-matrix multiplication\n #\n # Z_n = A_k * B_kn\n #\n #\n # for k, (a, b_n) in a_k & b_k:\n # for n, (z, b) in z_n << b_n:\n # z += a * b\n\n a = SwoopTensor(name=\"A\", rank_ids=[\"K\"])\n b = SwoopTensor(name=\"B\", rank_ids=[\"K\", \"N\"])\n z = SwoopTensor(name=\"Z\", rank_ids=[\"N\"])\n\n # Get handles to the tree start.\n a_k = a.getStartHandle()\n b_k = b.getStartHandle()\n z_root = z.getRootHandle()\n z_n = z.getStartHandle()\n\n # a_k & b_k\n a_k_handles = Scan(a_k)\n b_k_handles = Scan(b_k)\n a_k_coords = HandlesToCoords(a_k, a_k_handles)\n b_k_coords = HandlesToCoords(b_k, b_k_handles)\n (ab_k_coords, 
ab_a_k_handles, ab_b_k_handles) = Intersect(a_k_coords, a_k_handles, b_k_coords, b_k_handles)\n ab_a_k_payloads = HandlesToPayloads(a_k, ab_a_k_handles)\n ab_b_k_payloads = HandlesToPayloads(b_k, ab_b_k_handles)\n b_ns = PayloadsToFiberHandles(b_k, ab_b_k_payloads)\n\n # z_n << b_n\n b_n_handless = Scan(b_ns)\n b_n_coordss = HandlesToCoords(b_ns, b_n_handless)\n b_n_payloadss = HandlesToPayloads(b_ns, b_n_handless)\n z_ns = Amplify(z_n, ab_k_coords)\n (z_n_handless, z_n_updated_fiber_handles) = InsertionScan(z_ns, b_n_coordss)\n z_n_payloadss = HandlesToPayloads(z_ns, z_n_handless)\n\n # z_ref += a_val * b_val\n a_values = PayloadsToValues(a_k, ab_a_k_payloads)\n b_valuess = PayloadsToValues(b_ns, b_n_payloadss)\n z_valuess = PayloadsToValues(z_ns, z_n_payloadss)\n # We need to repeat A value across every Z\n a_valuess = Amplify(a_values, b_n_coordss)\n # Actual computation\n body_func = lambda a_val, b_val, z_ref: z_ref + a_val * b_val\n z_new_valuess = Compute(body_func, a_valuess, b_valuess, z_valuess)\n # Final write-back\n z_n_update_ackss = UpdatePayloads(z_ns, z_n_handless, z_new_valuess)\n\n # Record occupancy\n z_root_handles = Amplify(Stream0(0), ab_k_coords)\n z_root_acks = UpdatePayloads(z_root, z_root_handles, z_n_updated_fiber_handles)\n\n K=3\n N=3\n my_a_root = BasicIntermediateRankImplementation(1, 1)\n my_a_k = BasicFiberImplementation([1, 2, 3])\n my_b_root = BasicIntermediateRankImplementation(1, 1)\n my_b_k = BasicIntermediateRankImplementation(K, N)\n my_b_n = [\n BasicFiberImplementation([4, 5, 6]), \n BasicFiberImplementation([5, 6, 7]), \n BasicFiberImplementation([6, 7, 8])\n ]\n my_z_root = BasicIntermediateRankImplementation(1, 1)\n my_z_n = BasicFiberImplementation([])\n\n a.setImplementations(\"root\", [my_a_root])\n a.setImplementations(\"K\", [my_a_k])\n b.setImplementations(\"root\", [my_b_root])\n b.setImplementations(\"K\", [my_b_k])\n b.setImplementations(\"N\", my_b_n)\n z.setImplementations(\"root\", [my_z_root])\n z.setImplementations(\"N\", [my_z_n])\n\n evaluate(z_n_update_ackss, 2)\n evaluate(z_root_acks, 1)\n print(\"==========================\")\n print(f\"Final A-Stationary result: {my_z_n.vals}\")\n print(\"==========================\")\n assert my_z_n.vals == [32, 38, 44]\n \n\n\n\n ## Test program: Z-Stationary vector-matrix multiplication\n #\n # Z_n = A_k * B_kn\n #\n #for n, (z, b_k) in z_n << b_n:\n # for k, (a, b) in a_k & b_k:\n # z += a * b\n\n a = SwoopTensor(name=\"A\", rank_ids=[\"K\"])\n b = SwoopTensor(name=\"B\", rank_ids=[\"N\", \"K\"])\n z = SwoopTensor(name=\"Z\", rank_ids=[\"N\"])\n\n a_k = a.getStartHandle()\n b_n = b.getStartHandle()\n z_root = z.getRootHandle()\n z_n = z.getStartHandle()\n\n # z_n << b_n\n b_n_handles = Scan(b_n)\n b_n_coords = HandlesToCoords(b_n, b_n_handles)\n b_n_payloads = HandlesToPayloads(b_n, b_n_handles)\n (z_n_handles, z_n_new_fiber_handle) = InsertionScan(z_n, b_n_coords)\n z_n_payloads = HandlesToPayloads(z_n, z_n_handles)\n b_ks = PayloadsToFiberHandles(b_n, b_n_payloads)\n\n\n # a_k & b_k\n b_k_handless = Scan(b_ks)\n # Repeat a_k iteration for each b_k\n a_ks = Amplify(a_k, b_ks)\n a_k_handless = Scan(a_ks) \n a_k_coordss = HandlesToCoords(a_ks, a_k_handless)\n b_k_coordss = HandlesToCoords(b_ks, b_k_handless)\n (ab_k_coordss, ab_a_k_handless, ab_b_k_handless) = Intersect(a_k_coordss, a_k_handless, b_k_coordss, b_k_handless)\n ab_a_k_payloadss = HandlesToPayloads(a_ks, ab_a_k_handless)\n ab_b_k_payloadss = HandlesToPayloads(b_ks, ab_b_k_handless)\n\n\n # z_ref += a_val * b_val\n a_valuess = 
PayloadsToValues(a_ks, ab_a_k_payloadss)\n b_valuess = PayloadsToValues(b_ks, ab_b_k_payloadss)\n # NOTE: MUL and ADD broken out for efficiency\n body_func = lambda a_val, b_val: a_val * b_val\n partial_productss = Compute(body_func, a_valuess, b_valuess)\n z_values = PayloadsToValues(z_n, z_n_payloads)\n # Reduce into the same value until end of rank\n z_new_values = Reduce(partial_productss, z_values)\n z_n_update_acks = UpdatePayloads(z_n, z_n_handles, z_new_values)\n \n # Update occupancy\n z_root_update_ack = UpdatePayloads(z_root, Stream0(0), z_n_new_fiber_handle)\n\n my_a_root = BasicIntermediateRankImplementation(1, 1)\n my_a_k = BasicFiberImplementation([1, 2, 3])\n my_b_root = BasicIntermediateRankImplementation(1, 1)\n my_b_n = BasicIntermediateRankImplementation(N, K)\n my_b_k = [BasicFiberImplementation([4, 5, 6]), \n BasicFiberImplementation([5, 6, 7]), \n BasicFiberImplementation([6, 7, 8])]\n my_z_root = BasicIntermediateRankImplementation(1, 1)\n my_z_n = BasicFiberImplementation([])\n\n\n a.setImplementations(\"root\", [my_a_root])\n a.setImplementations(\"K\", [my_a_k])\n b.setImplementations(\"root\", [my_b_root])\n b.setImplementations(\"N\", [my_b_n])\n b.setImplementations(\"K\", my_b_k)\n z.setImplementations(\"root\", [my_z_root])\n z.setImplementations(\"N\", [my_z_n])\n\n evaluate(z_n_update_acks)\n evaluate(z_root_update_ack, 0)\n print(\"==========================\")\n print(f\"Final Z-Stationary result: {my_z_n.vals}\")\n print(\"==========================\")\n assert my_z_n.vals == [32, 38, 44]\n", "id": "2455568", "language": "Python", "matching_score": 4.176944255828857, "max_stars_count": 2, "path": "fibertree/codec/swoop.py" }, { "content": "from swoop import *\n\n\n## Test program: Parallelized dot product\n#\n# Z = Sum(k). A_k * B_k\n# Tiled:\n# Z = Sum(k1).Sum(K0). 
A_k1k0 * Bk1k0 # k0 is parallel\n#\n# Option 1: \n# A_KK = A_K.splitUniform(K0)\n# B_KK = B_K.splitUniform(K0)\n# \n# Option 2: \n# A_KK = A_K.splitEqual(K0)\n# B_KK = B_K.splitNonUniform(A_KK.getRoot().getCoords()).swapRanks()\n#\n# for k1, (a_k0, b_k0) in a_k1 & b_k1:\n# parallel for k0, (a_val, b_val) in a_k0 & b_k0:\n# z_ref <<= a_val * b_val\n#\n\n\na = SwoopTensor(name=\"A\", rank_ids=[\"K1\", \"K0\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"K1\", \"K0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[])\n\na_k1 = a.getStartHandle()\nb_k1 = b.getStartHandle()\nz_root = z.getRootHandle()\n\n\n# k1 & operator:\na_k1_handles = Scan(a_k1)\nb_k1_handles = Scan(b_k1)\na_k1_coords = HandlesToCoords(a_k1, a_k1_handles)\nb_k1_coords = HandlesToCoords(b_k1, b_k1_handles)\n# Intersect the K1 rank\n(ab_k1_coords, ab_k1_a_handles, ab_k1_b_handles) = Intersect(a_k1_coords, a_k1_handles, b_k1_coords, b_k1_handles)\n# Only retrieve the fibers that survive intersection\na_k1_payloads = HandlesToPayloads(a_k1, ab_k1_a_handles)\nb_k1_payloads = HandlesToPayloads(b_k1, ab_k1_b_handles)\na_k0s = PayloadsToFiberHandles(a_k1, a_k1_payloads)\nb_k0s = PayloadsToFiberHandles(b_k1, b_k1_payloads)\n\n\n# k0 & operator:\na_k0_handless = Scan(a_k0s)\nb_k0_handless = Scan(b_k0s)\na_k0_coordss = HandlesToCoords(a_k0s, a_k0_handless)\nb_k0_coordss = HandlesToCoords(b_k0s, b_k0_handless)\n# Intersect the K0 rank\n(ab_k0_coordss, ab_k0_a_handless, ab_k0_b_handless) = Intersect(a_k0_coordss, a_k0_handless, b_k0_coordss, b_k0_handless)\n# Only retrieve the values that survive intersection\na_k0_payloadss = HandlesToPayloads(a_k0s, ab_k0_a_handless)\nb_k0_payloadss = HandlesToPayloads(b_k0s, ab_k0_b_handless)\na_valuess = PayloadsToValues(a_k0s, a_k0_payloadss)\nb_valuess = PayloadsToValues(b_k0s, b_k0_payloadss)\n\n# Compute result and reduce\n\n# Original sequential code\n#partial_productss = Compute(lambda a, b: a * b, a_valuess, b_valuess)\n\n# BEGIN PARALLEL_FOR\nNUM_PES = 4\ndist_func = lambda n: n % NUM_PES \nk0_distribution_choicess = Compute(dist_func, ab_k0_coordss)\na_valuess_distributed = Distribute(4, k0_distribution_choicess, a_valuess)\nb_valuess_distributed = Distribute(4, k0_distribution_choicess, b_valuess)\n\nbody_func = lambda a, b: a * b\nresultss = []\nfor pe in range(NUM_PES):\n resultss.append(Compute(body_func, a_valuess_distributed[pe], b_valuess_distributed[pe], instance_name=str(pe)))\n \npartial_productss = Collect(NUM_PES, k0_distribution_choicess, resultss)\n# END PARALLEL FOR\n\npartial_sums = Reduce(partial_productss)\nz_root_new_value = Reduce(partial_sums)\nz_root_handle = Stream0(0) # XXX Improve this\nz_root_ack = UpdatePayloads(z_root, z_root_handle, z_root_new_value)\n\n\nK1 = 2\nK0 = 3\nmy_a_root = BasicIntermediateRankImplementation(1, 1)\nmy_a_k1 = BasicIntermediateRankImplementation(K1, K0)\nmy_a_k0 = [BasicFiberImplementation([1, 2, 3]), BasicFiberImplementation([2, 4, 6])]\nmy_b_root = BasicIntermediateRankImplementation(1, 1)\nmy_b_k1 = BasicIntermediateRankImplementation(K1, K0)\nmy_b_k0 = [BasicFiberImplementation([4, 5, 6]), BasicFiberImplementation([8, 10, 12])]\nmy_z_root = BasicFiberImplementation([0])\n\na.setImplementations(\"root\", [my_a_root])\na.setImplementations(\"K1\", [my_a_k1])\na.setImplementations(\"K0\", my_a_k0)\nb.setImplementations(\"root\", [my_b_root])\nb.setImplementations(\"K1\", [my_b_k1])\nb.setImplementations(\"K0\", my_b_k0)\nz.setImplementations(\"root\", [my_z_root])\n\nevaluate(z_root_ack, 0)\n\nexpected_val = 
160\n\nprint(\"==========================\")\nprint(f\"Final K1-K0 result:\")\nprint(my_z_root.vals)\nassert(my_z_root.vals == [expected_val])\nprint(\"==========================\")\n", "id": "8639704", "language": "Python", "matching_score": 5.477778911590576, "max_stars_count": 2, "path": "fibertree/codec/dotproduct.py" }, { "content": "from swoop import *\n\n\n## Test program: Parallelized Cartesian product\n#\n# Z_mn = A_m * B_n\n# Tiled:\n# Z_n1mn0 = Am * Bn1n0 # n0 is parallel\n#\n# Option 1: \n# B_NN = B_N.splitUniform(N0)\n# Z_NMN = Z_MN.splitUniform(N0).swapRanks()\n# \n# Option 2: \n# B_NN = B_N.splitEqual(N0)\n# Z_NMN = Z_MN.splitNonUniform(B_NN.getRoot().getCoords()).swapRanks()\n#\n# for n1, (z_m, b_n0) in z_n1 << b_n1:\n# for m, (z_n0, a_val) in z_m << a_m:\n# parallel for n0, (z_ref, b_val) in z_n0 << b_n0:\n# z_ref <<= a_val * b_val\n#\n\n\na = SwoopTensor(name=\"A\", rank_ids=[\"M\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"N1\", \"N0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[\"N1\", \"M\", \"N0\"])\n\na_m = a.getStartHandle()\nb_n1 = b.getStartHandle()\nz_root = z.getRootHandle()\nz_n1 = z.getStartHandle()\n\n# n1 << operator, RHS:\nb_n1_handles = Scan(b_n1)\nb_n1_coords = HandlesToCoords(b_n1, b_n1_handles)\nb_n1_payloads = HandlesToPayloads(b_n1, b_n1_handles)\nb_n0s = PayloadsToFiberHandles(b_n1, b_n1_payloads)\n# n1 << operator:\n(z_n1_handles, z_n1_updated_fiber_handle) = InsertionScan(z_n1, b_n1_coords)\nz_n1_payloads = HandlesToPayloads(z_n1, z_n1_handles)\nz_ms = PayloadsToFiberHandles(z_n1, z_n1_payloads)\n\n# m << operator, RHS, repeated b_n1 more times:\na_ms = Amplify(a_m, b_n1_handles)\na_m_handless = Scan(a_ms)\na_m_coordss = HandlesToCoords(a_ms, a_m_handless)\na_m_payloadss = HandlesToPayloads(a_ms, a_m_handless)\na_valuess = PayloadsToValues(a_ms, a_m_payloadss)\n# m << operator:\n(z_m_handless, z_m_updated_fiber_handles) = InsertionScan(z_ms, a_m_coordss)\nz_m_payloadss = HandlesToPayloads(z_ms, z_m_handless)\nz_n0ss = PayloadsToFiberHandles(z_ms, z_m_payloadss)\n\n# n0 << operator, RHS, repeated a_m more times:\nb_n0ss = Amplify(b_n0s, a_m_handless, instance_name=\"B_N0\")\nb_n0_handlesss = Scan(b_n0ss)\nb_n0_coordsss = HandlesToCoords(b_n0ss, b_n0_handlesss)\nb_n0_payloadsss = HandlesToPayloads(b_n0ss, b_n0_handlesss)\na_valuesss = Amplify(a_valuess, b_n0_handlesss, instance_name=\"A_N0\")\nb_valuesss = PayloadsToValues(b_n0ss, b_n0_payloadsss)\n# n0 << operator:\n(z_n0_handlesss, z_n0_updated_fiber_handless) = InsertionScan(z_n0ss, b_n0_coordsss)\n# z_values not referenced in loop body, so don't retrieve it\n\n# z_ref <<= a_val * b_val\n# Original sequential code\nresultsss = Compute(lambda a, b: a * b, a_valuesss, b_valuesss)\n\n# BEGIN PARALLEL_FOR\n#NUM_PES = 4\n#dist_func = lambda n: n % NUM_PES \n#n0_distribution_choices = Compute(dist_func, b_n0_coords)\n#b_values_distributed = Distribute(4, n0_distribution_choices, b_values)\n#body_func = lambda a_val, b_val: a_val * b_val\n#results = []\n#for n0 in range(NUM_PES):\n# results.append(Compute(body_func, a_values, b_values_distributed[n0], instance_name=str(n0)))\n \n#resultsss = Collect(NUM_PES, n0_distribution_choices, results)\n# END PARALLEL FOR\n\n# n0 << operator, LHS:\nz_n0_acksss = UpdatePayloads(z_n0ss, z_n0_handlesss, resultsss)\n\n# m << operator, LHS:\nz_m_ackss = UpdatePayloads(z_ms, z_m_handless, z_n0_updated_fiber_handless)\n\n# n1 << operator, LHS:\nz_n1_acks = UpdatePayloads(z_n1, z_n1_handles, z_m_updated_fiber_handles)\nz_root_ack = UpdatePayloads(z_root, Stream0(0), 
z_n1_updated_fiber_handle)\n\nM = 3\nN1 = 1\nN0 = 3\nmy_a_root = BasicIntermediateRankImplementation(1, 1)\nmy_a_m = BasicFiberImplementation([1, 2, 3])\nmy_b_root = BasicIntermediateRankImplementation(1, 1)\nmy_b_n1 = BasicIntermediateRankImplementation(N1, N0)\nmy_b_n0 = [BasicFiberImplementation([4, 5, 6])]\nmy_z_root = BasicIntermediateRankImplementation(1, 1)\nmy_z_n1 = BasicIntermediateRankImplementation(N1, M)\nmy_z_m = BasicIntermediateRankImplementation(M, N0)\nmy_z_n0 = []\nfor m in range(M):\n for n1 in range(N1):\n my_z_n0.append(BasicFiberImplementation([0] * N0))\n\n\na.setImplementations(\"root\", [my_a_root])\na.setImplementations(\"M\", [my_a_m])\nb.setImplementations(\"root\", [my_b_root])\nb.setImplementations(\"N1\", [my_b_n1])\nb.setImplementations(\"N0\", my_b_n0)\nz.setImplementations(\"root\", [my_z_root])\nz.setImplementations(\"N1\", [my_z_n1])\nz.setImplementations(\"M\", [my_z_m])\nz.setImplementations(\"N0\", my_z_n0)\n\n\nevaluate(z_n0_acksss, 3)\nevaluate(z_m_ackss, 2)\nevaluate(z_n1_acks, 1)\nevaluate(z_root_ack, 0)\n\nexpected_vals = [[4, 5, 6], [8, 10, 12], [12, 15, 18]]\n\nprint(\"==========================\")\nprint(f\"Final Z-Stationary result:\")\nfor n1 in range(N1):\n for m in range(M):\n print(my_z_n0[n1 * M + m].vals)\nfor n1 in range(N1):\n for m in range(M):\n assert(my_z_n0[n1 * M + m].vals == expected_vals[n1 * M + m])\nprint(\"==========================\")\n", "id": "11814556", "language": "Python", "matching_score": 5.341973304748535, "max_stars_count": 2, "path": "fibertree/codec/cartesian.py" }, { "content": "from swoop import *\n\n\n\n## Test program: Tiled K-Stationary vector-matrix multiplication\n#\n# Z_n = A_k * B_kn\n# Tiled:\n# Z_n1n0 = A_k1k0 * B_k1n1k0n0\n#\n#for k1, (a_k0, b_n1) in a_k1 & b_k1:\n# for n1, (z_n0, b_n0) in z_n1 << b_n1:\n# for k0, (a, b_n0) in a_k0 & b_k0:\n# for n0, (z, b) in z_n0 << b_n0:\n# z += a * b\n\na = SwoopTensor(name=\"A\", rank_ids=[\"K1\", \"K0\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"K1\", \"N1\", \"K0\", \"N0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[\"N1\", \"N0\"])\n\n\na_k1 = a.getStartHandle()\nb_k1 = b.getStartHandle()\nz_n1 = z.getStartHandle()\nz_root = z.getRootHandle()\n\n# a_k1 & b_k1\na_k1_handles = Scan(a_k1)\nb_k1_handles = Scan(b_k1)\na_k1_coords = HandlesToCoords(a_k1, a_k1_handles)\nb_k1_coords = HandlesToCoords(b_k1, b_k1_handles)\n(ab_k1_coords, ab_a_k1_handles, ab_b_k1_handles) = Intersect(a_k1_coords, a_k1_handles, b_k1_coords, b_k1_handles, instance_name=\"K1\")\nab_a_k1_payloads = HandlesToPayloads(a_k1, ab_a_k1_handles)\nab_b_k1_payloads = HandlesToPayloads(b_k1, ab_b_k1_handles)\na_k0s = PayloadsToFiberHandles(a_k1, ab_a_k1_payloads)\nb_n1s = PayloadsToFiberHandles(b_k1, ab_b_k1_payloads)\n\n\n# z_n1 << b_n1\nb_n1_handless = Scan(b_n1s)\nb_n1_coordss = HandlesToCoords(b_n1s, b_n1_handless)\nb_n1_payloadss = HandlesToPayloads(b_n1s, b_n1_handless)\n# Repeat z_n1 iteration for each b_n1\nz_n1s = Amplify(z_n1, b_n1s)\n(z_n1_handless, z_n1_new_fiber_handles) = InsertionScan(z_n1s, b_n1_coordss)\nz_n1_payloadss = HandlesToPayloads(z_n1s, z_n1_handless)\nb_k0ss = PayloadsToFiberHandles(b_n1s, b_n1_payloadss)\nz_n0ss = PayloadsToFiberHandles(z_n1s, z_n1_payloadss)\n\n\n# a_k0 & b_k0\nb_k0_handlesss = Scan(b_k0ss)\n# Repeat a_k0 iteration for each b_k0\na_k0ss = Amplify(a_k0s, b_k0ss, instance_name=\"K0\")\na_k0_handlesss = Scan(a_k0ss)\na_k0_coordsss = HandlesToCoords(a_k0ss, a_k0_handlesss)\nb_k0_coordsss = HandlesToCoords(b_k0ss, b_k0_handlesss)\n(ab_k0_coordsss, 
ab_a_k0_handlesss, ab_b_k0_handlesss) = Intersect(a_k0_coordsss, a_k0_handlesss, b_k0_coordsss, b_k0_handlesss, instance_name=\"K0\")\nab_a_k0_payloadsss = HandlesToPayloads(a_k0ss, ab_a_k0_handlesss)\nab_b_k0_payloadsss = HandlesToPayloads(b_k0ss, ab_b_k0_handlesss)\na_valuesss = PayloadsToValues(a_k0ss, ab_a_k0_payloadsss)\nb_n0sss = PayloadsToFiberHandles(b_k0ss, ab_b_k0_payloadsss)\n\n\n# z_n0 << b_n0\nb_n0_handlessss = Scan(b_n0sss)\nb_n0_coordssss = HandlesToCoords(b_n0sss, b_n0_handlessss)\nb_n0_payloadssss = HandlesToPayloads(b_n0sss, b_n0_handlessss)\n# Repeat z_n0 iteration for each b_n0\nz_n0sss = Amplify(z_n0ss, b_n0sss, instance_name=\"N0\")\n(z_n0_handlessss, z_n0_new_fiber_handlesss) = InsertionScan(z_n0sss, b_n0_coordssss)\nz_n0_payloadssss = HandlesToPayloads(z_n0sss, z_n0_handlessss)\na_valuessss = Amplify(a_valuesss, b_n0_handlessss)\nb_valuessss = PayloadsToValues(b_n0sss, b_n0_payloadssss)\nz_valuessss = PayloadsToValues(z_n0sss, z_n0_payloadssss)\n\n# z_ref += a_val * b_val\n# NOTE: MUL and ADD broken out for efficiency\nbody_func = lambda a_val, b_val, z_val: z_val + a_val * b_val\nresultssss = Compute(body_func, a_valuessss, b_valuessss, z_valuessss)\n# Reduce into the same value until end of rank\nz_n0_update_ackssss = UpdatePayloads(z_n0sss, z_n0_handlessss, resultssss)\n\n# Update N0 occupancy. (Should we be reducing here?)\n\nz_n1s = Amplify(z_n1, b_n1s)\nz_n1ss = Amplify(z_n1s, b_k0ss)\nz_n1_handlesss = Amplify(z_n1_handless, b_n0sss)\nz_n1_update_ackss = UpdatePayloads(z_n1ss, z_n1_handlesss, z_n0_new_fiber_handlesss)\n\n# Update root occupancy\nz_root_handles = Amplify(Stream0(0), z_n1_new_fiber_handles)\nz_root_update_acks = UpdatePayloads(z_root, z_root_handles, z_n1_new_fiber_handles)\n\n\nN1 = 2\nN0 = 3\n\nK1 = 2\nK0 = 3\n\nmy_a_root = BasicIntermediateRankImplementation(1, 1)\nmy_a_k1 = BasicIntermediateRankImplementation(K1, K0)\nmy_a_k0 = [BasicFiberImplementation([1, 2, 3]), BasicFiberImplementation([2, 4, 6])]\nmy_b_root = BasicIntermediateRankImplementation(1, 1)\nmy_b_k1 = BasicIntermediateRankImplementation(K1, N1)\nmy_b_n1 = [BasicIntermediateRankImplementation(N1, K0), BasicIntermediateRankImplementation(N1, K0, 1)]\nmy_b_k0 = [BasicIntermediateRankImplementation(K0, N0), BasicIntermediateRankImplementation(K0, N0, 1), BasicIntermediateRankImplementation(K0, N0, 2), BasicIntermediateRankImplementation(K0, N0, 3)]\nmy_b_n0 = [BasicFiberImplementation([4, 5, 6]), \n BasicFiberImplementation([5, 6, 7]), \n BasicFiberImplementation([6, 7, 8]),\n BasicFiberImplementation([12, 15, 18]), \n BasicFiberImplementation([15, 18, 21]), \n BasicFiberImplementation([18, 21, 24]),\n BasicFiberImplementation([8, 10, 12]), \n BasicFiberImplementation([10, 12, 14]), \n BasicFiberImplementation([12, 14, 16]),\n BasicFiberImplementation([16, 20, 24]), \n BasicFiberImplementation([20, 24, 28]), \n BasicFiberImplementation([24, 28, 32])]\n\nmy_z_root = BasicIntermediateRankImplementation(1, 1)\nmy_z_n1 = BasicIntermediateRankImplementation(N1, N0)\nmy_z_n0 = []\nfor n1 in range(N1):\n my_z_n0.append(BasicFiberImplementation([0] * N0))\n\n\na.setImplementations(\"root\", [my_a_root])\na.setImplementations(\"K1\", [my_a_k1])\na.setImplementations(\"K0\", my_a_k0)\nb.setImplementations(\"root\", [my_b_root])\nb.setImplementations(\"K1\", [my_b_k1])\nb.setImplementations(\"N1\", my_b_n1)\nb.setImplementations(\"K0\", my_b_k0)\nb.setImplementations(\"N0\", my_b_n0)\nz.setImplementations(\"root\", [my_z_root])\nz.setImplementations(\"N1\", 
[my_z_n1])\nz.setImplementations(\"N0\", my_z_n0)\n\nevaluate(z_n0_update_ackssss, 4)\nevaluate(z_n1_update_ackss, 2)\nevaluate(z_root_update_acks, 1)\n\nexpected_vals = [[160, 190, 220], [352, 418, 484]]\n\nprint(f\"Final K-Stationary result:\")\nfor n1 in range(N1):\n print(my_z_n0[n1].vals)\nfor n1 in range(N1):\n assert(my_z_n0[n1].vals == expected_vals[n1])\nprint(\"==========================\")\n", "id": "4932836", "language": "Python", "matching_score": 7.524872779846191, "max_stars_count": 2, "path": "fibertree/codec/matrix-vector-knkn.py" }, { "content": "from swoop import *\nfrom swoop_util import *\nfrom fibertree import Tensor\nimport sys\nimport yaml\n\n## Test program: Tiled Z-Stationary vector-matrix multiplication\n#\n# Z_n = A_k * B_kn\n# Tiled:\n# Z_n1n0 = A_k1k0 * B_k1n1k0n0\n#\n#for n1, (z_n1, b_k1) in z_n1 << b_n1:\n# for k1, (a_k0, b_k0) in a_k1 & b_k1:\n# for n0, (z, b_k0) in z_n0 << b_n0:\n# for k0, (a, b) in a_k0 & b_k0:\n# z += a * b\n\na = SwoopTensor(name=\"A\", rank_ids=[\"K1\", \"K0\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"N1\", \"K1\", \"N0\", \"K0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[\"N1\", \"N0\"])\n\na_k1 = a.getStartHandle()\nb_n1 = b.getStartHandle()\nz_root = z.getRootHandle()\nz_n1 = z.getStartHandle()\n\n# z_n1 << b_n1\nb_n1_handles = Scan(b_n1)\nb_n1_coords = HandlesToCoords(b_n1, b_n1_handles)\nb_n1_payloads = HandlesToPayloads(b_n1, b_n1_handles)\n(z_n1_handles, z_n1_new_fiber_handle) = InsertionScan(z_n1, b_n1_coords)\nz_n1_payloads = HandlesToPayloads(z_n1, z_n1_handles)\nb_k1s = PayloadsToFiberHandles(b_n1, b_n1_payloads)\nz_n0s = PayloadsToFiberHandles(z_n1, z_n1_payloads)\n\n\n# a_k1 & b_k1\nb_k1_handless = Scan(b_k1s)\n# Repeat a_k1 iteration for each b_k1\na_k1s = Amplify(a_k1, b_k1s, instance_name=\"K1\")\na_k1_handless = Scan(a_k1s) \na_k1_coordss = HandlesToCoords(a_k1s, a_k1_handless)\nb_k1_coordss = HandlesToCoords(b_k1s, b_k1_handless)\n(ab_k1_coordss, ab_a_k1_handless, ab_b_k1_handless) = Intersect(a_k1_coordss, a_k1_handless, b_k1_coordss, b_k1_handless, instance_name=\"K1\")\nab_a_k1_payloadss = HandlesToPayloads(a_k1s, ab_a_k1_handless)\nab_b_k1_payloadss = HandlesToPayloads(b_k1s, ab_b_k1_handless)\na_k0ss = PayloadsToFiberHandles(a_k1s, ab_a_k1_payloadss)\nb_n0ss = PayloadsToFiberHandles(b_k1s, ab_b_k1_payloadss)\n\n# z_n0 << b_n0\nb_n0_handlesss = Scan(b_n0ss)\nb_n0_coordsss = HandlesToCoords(b_n0ss, b_n0_handlesss)\nb_n0_payloadsss = HandlesToPayloads(b_n0ss, b_n0_handlesss)\n# Repeat z_n0 iteration for each b_n0\nz_n0ss = Amplify(z_n0s, b_n0ss, instance_name=\"N0\")\n(z_n0_handlesss, z_n0_new_fiber_handless) = InsertionScan(z_n0ss, b_n0_coordsss)\nz_n0_payloadsss = HandlesToPayloads(z_n0ss, z_n0_handlesss)\nb_k0sss = PayloadsToFiberHandles(b_n0ss, b_n0_payloadsss)\nz_valuesss = PayloadsToValues(z_n0ss, z_n0_payloadsss)\n\n# a_k0 & b_k0\nb_k0_handlessss = Scan(b_k0sss)\n# Repeat a_k0 iteration for each b_k0\na_k0sss = Amplify(a_k0ss, b_k0sss, instance_name=\"K0\")\na_k0_handlessss = Scan(a_k0sss)\na_k0_coordssss = HandlesToCoords(a_k0sss, a_k0_handlessss)\nb_k0_coordssss = HandlesToCoords(b_k0sss, b_k0_handlessss)\n(ab_k0_coordssss, ab_a_k0_handlessss, ab_b_k0_handlessss) = Intersect(a_k0_coordssss, a_k0_handlessss, b_k0_coordssss, b_k0_handlessss, instance_name=\"K0\")\nab_a_k0_payloadssss = HandlesToPayloads(a_k0sss, ab_a_k0_handlessss)\nab_b_k0_payloadssss = HandlesToPayloads(b_k0sss, ab_b_k0_handlessss)\na_valuessss = PayloadsToValues(a_k0sss, ab_a_k0_payloadssss)\nb_valuessss = PayloadsToValues(b_k0sss, 
ab_b_k0_payloadssss)\n\n\n# z_ref += a_val * b_val\n# NOTE: MUL and ADD broken out for efficiency\nbody_func = lambda a_val, b_val: a_val * b_val\npartial_productssss = Compute(body_func, a_valuessss, b_valuessss)\n# Reduce into the same value until end of rank\nz_new_valuesss = Reduce(partial_productssss, z_valuesss, instance_name=\"K0\")\n#z_new_valuesss = Reduce(partial_productssss, instance_name=\"K0\")\nz_n0_update_acksss = UpdatePayloads(z_n0ss, z_n0_handlesss, z_new_valuesss)\n\n# Update N0 occupancy. (Should we be reducing here somehow?)\nz_n1_handless = Amplify(z_n1_handles, b_n0ss, instance_name=\"N1_Upd\")\nz_n1s = Amplify(z_n1, ab_k1_coordss)\nz_n1_update_acks = UpdatePayloads(z_n1s, z_n1_handless, z_n0_new_fiber_handless)\n\n# Update root occupancy\nz_root_update_ack = UpdatePayloads(z_root, Stream0(0), z_n1_new_fiber_handle)\n\n\nN1 = 2\nN0 = 3\n\nK1 = 2\nK0 = 3\n\nA_data = [[1, 2, 3], [2, 4, 6]]\nB_data = [\n[[[4, 5, 6],\n[5, 6, 7],\n[6, 7, 8]],\n[[8, 10, 12],\n[10, 12, 14],\n[12, 14, 16]]],\n[[[12, 15, 18],\n[15, 18, 21],\n[18, 21, 24]],\n[[16, 20, 24],\n[20, 24, 28],\n[24, 28, 32]]]\n]\n\nZ_data = [[0, 0, 0], [0, 0, 0]]\n\nA_HFA = Tensor.fromUncompressed([\"K1\", \"K0\"], A_data, name = \"A\")\nB_HFA = Tensor.fromUncompressed([\"N1\", \"K1\", \"N0\", \"K0\"], B_data, name = \"B\")\nZ_HFA = Tensor.fromUncompressed([\"N1\", \"N0\"], Z_data, shape=[N1, N0], name = \"Z\")\n\nstr_desc = sys.argv[1]\nfrontier_descriptor = [str_desc[0], str_desc[1]]\n\nmyA = encodeSwoopTensorInFormat(A_HFA, frontier_descriptor)\nprint(\"encoded A\")\nmyB = encodeSwoopTensorInFormat(B_HFA, [\"U\", \"U\", \"U\", \"C\"])\nprint(\"encoded B\")\nmyZ = encodeSwoopTensorInFormat(Z_HFA, frontier_descriptor)\nprint(\"done encoding\\n\")\n\na.setImplementations(\"root\", myA[0])\na.setImplementations(\"K1\", myA[1])\na.setImplementations(\"K0\", myA[2])\nb.setImplementations(\"root\", myB[0])\nb.setImplementations(\"N1\", myB[1])\nb.setImplementations(\"K1\", myB[2])\nb.setImplementations(\"N0\", myB[3])\nb.setImplementations(\"K0\", myB[4])\nz.setImplementations(\"root\", myZ[0])\nz.setImplementations(\"N1\", myZ[1])\nz.setImplementations(\"N0\", myZ[2])\n\n\"\"\"\nmy_a_root = BasicIntermediateRankImplementation(1, 1)\nmy_a_k1 = BasicIntermediateRankImplementation(K1, K0)\nmy_a_k0 = [BasicFiberImplementation([1, 2, 3]), BasicFiberImplementation([2, 4, 6])]\nmy_b_root = BasicIntermediateRankImplementation(1, 1)\nmy_b_n1 = BasicIntermediateRankImplementation(N1, K1)\nmy_b_k1 = [BasicIntermediateRankImplementation(K1, N0), BasicIntermediateRankImplementation(K1, N0, 1)]\nmy_b_n0 = [BasicIntermediateRankImplementation(N0, K0), BasicIntermediateRankImplementation(N0, K0, 1), BasicIntermediateRankImplementation(N0, K0, 2), BasicIntermediateRankImplementation(N0, K0, 3)]\nmy_b_k0 = [BasicFiberImplementation([4, 5, 6]), \n BasicFiberImplementation([5, 6, 7]), \n BasicFiberImplementation([6, 7, 8]),\n BasicFiberImplementation([8, 10, 12]), \n BasicFiberImplementation([10, 12, 14]), \n BasicFiberImplementation([12, 14, 16]),\n BasicFiberImplementation([12, 15, 18]), \n BasicFiberImplementation([15, 18, 21]), \n BasicFiberImplementation([18, 21, 24]),\n BasicFiberImplementation([16, 20, 24]), \n BasicFiberImplementation([20, 24, 28]), \n BasicFiberImplementation([24, 28, 32])]\n\nmy_z_root = BasicIntermediateRankImplementation(1, 1)\nmy_z_n1 = BasicIntermediateRankImplementation(N1, N0)\nmy_z_n0 = []\nfor n1 in range(N1):\n my_z_n0.append(BasicFiberImplementation([0] * N0))\n\n\na.setImplementations(\"root\", 
[my_a_root])\na.setImplementations(\"K1\", [my_a_k1])\na.setImplementations(\"K0\", my_a_k0)\nb.setImplementations(\"root\", [my_b_root])\nb.setImplementations(\"N1\", [my_b_n1])\nb.setImplementations(\"K1\", my_b_k1)\nb.setImplementations(\"N0\", my_b_n0)\nb.setImplementations(\"K0\", my_b_k0)\nz.setImplementations(\"root\", [my_z_root])\nz.setImplementations(\"N1\", [my_z_n1])\nz.setImplementations(\"N0\", my_z_n0)\n\"\"\"\n#evaluate(b_n0ss, 2) # 0, 1, x, 2, 3, x, x\n#evaluate(b_n0_handlesss, 3) # 0, 1, 2, x, 0, 1, 2, x, x, 0, 1, 2, x, 0, 1, 2, x, x, x\n#evaluate(b_n0_payloadsss, 3) # 0, 1, 2, x, 0, 1, 2, x, x, \n\n#evaluate(b_k0sss, 3) # 0, 1, 2, x, 3, 4, 5, x, x, 6, 7, 8, x, 9, 10, 11, x, x, x\n#evaluate(b_k0_handlessss, 4) # 0, 1, 2, x, 0, 1, 2, x, 0, 1, 2, x, x, 0, 1, 2, x, 0, 1, 2, x, 0, 1, 2, x, x, x\n#evaluate(b_k0_coordssss, 4) # 0, 1, 2, x, 0, 1, 2, x, 0, 1, 2, x, x, 0, 1, 2, x, 0, 1, 2, x\n\n#evaluate(b_valuessss, 4)\n#exit(0)\n\nevaluate(z_n0_update_acksss, 3)\nevaluate(z_n1_update_acks, 1)\nevaluate(z_root_update_ack, 0)\n\nexpected_vals = [[160, 190, 220], [352, 418, 484]]\n# print(\"Z: {}\".format(myZ))\n# myZ[1][0].printFiber()\n# myZ[1][0].payloads[0].printFiber()\n# myZ[1][0].payloads[1].printFiber()\n\noutput_for_check = [myZ[2][0].getPayloads(), myZ[2][1].getPayloads()]\nprint(output_for_check)\nassert(output_for_check == expected_vals)\n\"\"\"\nstats_dict = dict()\ndumpAllStatsFromTensor(myA, stats_dict)\ndumpAllStatsFromTensor(myB, stats_dict)\ndumpAllStatsFromTensor(myZ, stats_dict)\n# print(\"\\nZ-stationary vector-matrix: A = <T>, B = <T, T>, Z = <T>\")\nprint(yaml.dump(stats_dict))\n\nprint(f\"Final Z-Stationary result:\")\nfor n1 in range(N1):\n print(my_z_n0[n1].vals)\nfor n1 in range(N1):\n assert(my_z_n0[n1].vals == expected_vals[n1])\nprint(\"==========================\")\n\"\"\"\n", "id": "4390272", "language": "Python", "matching_score": 6.975650787353516, "max_stars_count": 2, "path": "fibertree/codec/codec-nknk-ref.py" }, { "content": "from swoop import *\nfrom swoop_util import *\nfrom fibertree import Tensor\nimport sys\nimport time\n## Test program: Tiled K-Stationary vector-matrix multiplication\n#\n# Z_n = A_k * B_kn\n# Tiled:\n# Z_n1n0 = A_k1k0 * B_k1n1k0n0\n#\n#for k1, (a_k0, b_n1) in a_k1 & b_k1:\n# for n1, (z_n0, b_n0) in z_n1 << b_n1:\n# for k0, (a, b_n0) in a_k0 & b_k0:\n# for n0, (z, b) in z_n0 << b_n0:\n# z += a * b\n\na = SwoopTensor(name=\"A\", rank_ids=[\"K1\", \"K0\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"K1\", \"N1\", \"K0\", \"N0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[\"N1\", \"N0\"])\n\n\na_k1 = a.getStartHandle()\nb_k1 = b.getStartHandle()\nz_n1 = z.getStartHandle()\nz_root = z.getRootHandle()\n\n# a_k1 & b_k1\na_k1_handles = Scan(a_k1)\nb_k1_handles = Scan(b_k1)\na_k1_coords = HandlesToCoords(a_k1, a_k1_handles)\nb_k1_coords = HandlesToCoords(b_k1, b_k1_handles)\n(ab_k1_coords, ab_a_k1_handles, ab_b_k1_handles) = Intersect(a_k1_coords, a_k1_handles, b_k1_coords, b_k1_handles, instance_name=\"K1\")\nab_a_k1_payloads = HandlesToPayloads(a_k1, ab_a_k1_handles)\nab_b_k1_payloads = HandlesToPayloads(b_k1, ab_b_k1_handles)\na_k0s = PayloadsToFiberHandles(a_k1, ab_a_k1_payloads)\nb_n1s = PayloadsToFiberHandles(b_k1, ab_b_k1_payloads)\n\n\n# z_n1 << b_n1\nb_n1_handless = Scan(b_n1s)\nb_n1_coordss = HandlesToCoords(b_n1s, b_n1_handless)\nb_n1_payloadss = HandlesToPayloads(b_n1s, b_n1_handless)\n# Repeat z_n1 iteration for each b_n1\nz_n1s = Amplify(z_n1, b_n1s)\n(z_n1_handless, z_n1_new_fiber_handles) = InsertionScan(z_n1s, 
b_n1_coordss)\nz_n1_payloadss = HandlesToPayloads(z_n1s, z_n1_handless)\nb_k0ss = PayloadsToFiberHandles(b_n1s, b_n1_payloadss)\nz_n0ss = PayloadsToFiberHandles(z_n1s, z_n1_payloadss)\n\n\n# a_k0 & b_k0\nb_k0_handlesss = Scan(b_k0ss)\n# Repeat a_k0 iteration for each b_k0\na_k0ss = Amplify(a_k0s, b_k0ss, instance_name=\"K0\")\na_k0_handlesss = Scan(a_k0ss)\na_k0_coordsss = HandlesToCoords(a_k0ss, a_k0_handlesss)\nb_k0_coordsss = HandlesToCoords(b_k0ss, b_k0_handlesss)\n(ab_k0_coordsss, ab_a_k0_handlesss, ab_b_k0_handlesss) = Intersect(a_k0_coordsss, a_k0_handlesss, b_k0_coordsss, b_k0_handlesss, instance_name=\"K0\")\nab_a_k0_payloadsss = HandlesToPayloads(a_k0ss, ab_a_k0_handlesss)\nab_b_k0_payloadsss = HandlesToPayloads(b_k0ss, ab_b_k0_handlesss)\na_valuesss = PayloadsToValues(a_k0ss, ab_a_k0_payloadsss)\nb_n0sss = PayloadsToFiberHandles(b_k0ss, ab_b_k0_payloadsss)\n\n\n# z_n0 << b_n0\nb_n0_handlessss = Scan(b_n0sss)\nb_n0_coordssss = HandlesToCoords(b_n0sss, b_n0_handlessss)\nb_n0_payloadssss = HandlesToPayloads(b_n0sss, b_n0_handlessss)\n# Repeat z_n0 iteration for each b_n0\nz_n0sss = Amplify(z_n0ss, b_n0sss, instance_name=\"N0\")\n(z_n0_handlessss, z_n0_new_fiber_handlesss) = InsertionScan(z_n0sss, b_n0_coordssss)\nz_n0_payloadssss = HandlesToPayloads(z_n0sss, z_n0_handlessss)\na_valuessss = Amplify(a_valuesss, b_n0_handlessss)\nb_valuessss = PayloadsToValues(b_n0sss, b_n0_payloadssss)\nz_valuessss = PayloadsToValues(z_n0sss, z_n0_payloadssss)\n\n# z_ref += a_val * b_val\n# NOTE: MUL and ADD broken out for efficiency\nbody_func = lambda a_val, b_val, z_val: z_val + a_val * b_val\nresultssss = Compute(body_func, a_valuessss, b_valuessss, z_valuessss)\n# Reduce into the same value until end of rank\nz_n0_update_ackssss = UpdatePayloads(z_n0sss, z_n0_handlessss, resultssss)\n\n# Update N0 occupancy. 
(Should we be reducing here?)\n\nz_n1s = Amplify(z_n1, b_n1s)\nz_n1ss = Amplify(z_n1s, b_k0ss)\nz_n1_handlesss = Amplify(z_n1_handless, b_n0sss)\nzeros = Amplify(Stream0(0), z_n1s)\nzeross = Amplify(zeros, z_n1_handless)\n# final_resultsss = Reduce(resultssss, zerosss)\nz_n0_new_fiber_handless = Reduce(z_n0_new_fiber_handlesss, zeross)\n# z_n1_update_acksss = UpdatePayloads(z_n1ss, z_n1_handlesss, z_n0_new_fiber_handlesss)\nz_n1_update_ackss = UpdatePayloads(z_n1s, z_n1_handless, z_n0_new_fiber_handless)\n# Update root occupancy\nz_root_handles = Amplify(Stream0(0), z_n1_new_fiber_handles)\nz_root_update_acks = UpdatePayloads(z_root, z_root_handles, z_n1_new_fiber_handles)\n\n# read in inputs\n# jhu_len = 5157\nshape = 500 # TODO: take this as input or get it from yaml somehow\n# generate input frontier and tile it\nA_data = [0] * shape\n\n# read in frontier\nwith open(sys.argv[2], 'r') as f:\n for line in f:\n elt = int(line)\n A_data[elt] = 1\n\nA_untiled = Tensor.fromUncompressed([\"S\"], A_data, name = \"A\")\nA_HFA = A_untiled.splitUniform(32, relativeCoords=False) # split S\nprint(\"A untiled shape {}, tiled shape {}\".format(A_untiled.getShape(), A_HFA.getShape()))\n# A_HFA.dump(\"tiled_frontier.yaml\")\n\n\nprint(\"reading tiled mtx from yaml\")\nt0 = time.clock()\nB_HFA = Tensor.fromYAMLfile(sys.argv[3])\nt1 = time.clock() - t0\nprint(\"read B from yaml in {} s\".format(t1))\n\n# output\nZ_data = [[0], [0]]\nZ_HFA = Tensor.fromUncompressed([\"D1\", \"D0\"], Z_data, shape=[B_HFA.getShape()[1], B_HFA.getShape()[3]], name = \"Z\")\n# Z_HFA = Z_untiled.splitUniform(256) # split D\n# print(\"A shape {}, Z shape {}\".format(A_HFA.getShape(), Z_HFA.getShape()))\nprint(\"A shape {}, B shape {}, Z shape {}\".format(A_HFA.getShape(), B_HFA.getShape(), Z_HFA.getShape()))\n\n# exit(0)\nstr_desc = sys.argv[1]\n# output_desc = sys.argv[2]\nfrontier_descriptor = [str_desc[0], str_desc[1]]\noutput_descriptor = frontier_descriptor\n# output_descriptor = [output_desc[0], output_desc[1]]\n\nmyA = encodeSwoopTensorInFormat(A_HFA, frontier_descriptor)\nt0 = time.clock()\nmyB = encodeSwoopTensorInFormat(B_HFA, [\"U\", \"U\", \"U\", \"C\"])\nt1 = time.clock() - t0\nprint(\"encoded B in {} s\".format(t1))\n\nmyZ = encodeSwoopTensorInFormat(Z_HFA, output_descriptor)\n\na.setImplementations(\"root\", myA[0])\na.setImplementations(\"K1\", myA[1])\na.setImplementations(\"K0\", myA[2])\nb.setImplementations(\"root\", myB[0])\nb.setImplementations(\"K1\", myB[1])\nb.setImplementations(\"N1\", myB[2])\nb.setImplementations(\"K0\", myB[3])\nb.setImplementations(\"N0\", myB[4])\nz.setImplementations(\"root\", myZ[0])\nz.setImplementations(\"N1\", myZ[1])\nz.setImplementations(\"N0\", myZ[2])\n\n# print(\"Z[1]: {}\".format(myZ[1]))\n# print(\"Z[2]: {}\".format(myZ[2]))\n\n# print(\"len B[0] {}, B[1] {}, B[2] {}, B[3] {}, B[4] {}\".format(len(myB[0]), len(myB[1]), len(myB[2]), len(myB[3]), len(myB[4])))\n#for i in range(0, len(myB[3])):\n# myB[3][i].printFiber()\n\n# exit(0)\n\nevaluate(z_n0_update_ackssss, 4)\nevaluate(z_n1_update_ackss, 2)\nevaluate(z_root_update_acks, 1)\n\n# do verification\na_k1 = A_HFA.getRoot()\nz_n1 = Z_HFA.getRoot()\nb_k1 = B_HFA.getRoot()\nfor k1, (a_k0, b_n1) in a_k1 & b_k1:\n for n1, (z_n0, b_k0) in z_n1 << b_n1:\n for k0, (a, b_n0) in a_k0 & b_k0:\n for n0, (z, b) in z_n0 << b_n0:\n z += a * b\nZ_HFA.print()\n\noutput_lin = []\nmyZ[1][0].printFiber()\nfor i in range(0, len(myZ[2])):\n myZ[2][i].printFiber()\n output_lin.append(myZ[2][i].getPayloads())\n\nz_n1 = 
Z_HFA.getRoot()\noutput_ref = []\nfor (z, z_n0) in z_n1:\n output_ref.append(z_n0.getPayloads())\n\nassert(output_lin == output_ref)\n", "id": "4352251", "language": "Python", "matching_score": 8.53461742401123, "max_stars_count": 2, "path": "fibertree/codec/codec-knkn.py" }, { "content": "from swoop import *\nfrom swoop_util import *\nfrom fibertree import Tensor\nimport sys\n\n## Test program: Tiled K-Stationary vector-matrix multiplication\n#\n# Z_n = A_k * B_kn\n# Tiled:\n# Z_n1n0 = A_k1k0 * B_k1n1k0n0\n#\n#for k1, (a_k0, b_n1) in a_k1 & b_k1:\n# for n1, (z_n0, b_n0) in z_n1 << b_n1:\n# for k0, (a, b_n0) in a_k0 & b_k0:\n# for n0, (z, b) in z_n0 << b_n0:\n# z += a * b\n\na = SwoopTensor(name=\"A\", rank_ids=[\"K1\", \"K0\"])\nb = SwoopTensor(name=\"B\", rank_ids=[\"K1\", \"N1\", \"K0\", \"N0\"])\nz = SwoopTensor(name=\"Z\", rank_ids=[\"N1\", \"N0\"])\n\n\na_k1 = a.getStartHandle()\nb_k1 = b.getStartHandle()\nz_n1 = z.getStartHandle()\nz_root = z.getRootHandle()\n\n# a_k1 & b_k1\na_k1_handles = Scan(a_k1)\nb_k1_handles = Scan(b_k1)\na_k1_coords = HandlesToCoords(a_k1, a_k1_handles)\nb_k1_coords = HandlesToCoords(b_k1, b_k1_handles)\n(ab_k1_coords, ab_a_k1_handles, ab_b_k1_handles) = Intersect(a_k1_coords, a_k1_handles, b_k1_coords, b_k1_handles, instance_name=\"K1\")\nab_a_k1_payloads = HandlesToPayloads(a_k1, ab_a_k1_handles)\nab_b_k1_payloads = HandlesToPayloads(b_k1, ab_b_k1_handles)\na_k0s = PayloadsToFiberHandles(a_k1, ab_a_k1_payloads)\nb_n1s = PayloadsToFiberHandles(b_k1, ab_b_k1_payloads)\n\n\n# z_n1 << b_n1\nb_n1_handless = Scan(b_n1s)\nb_n1_coordss = HandlesToCoords(b_n1s, b_n1_handless)\nb_n1_payloadss = HandlesToPayloads(b_n1s, b_n1_handless)\n# Repeat z_n1 iteration for each b_n1\nz_n1s = Amplify(z_n1, b_n1s)\n(z_n1_handless, z_n1_new_fiber_handles) = InsertionScan(z_n1s, b_n1_coordss)\nz_n1_payloadss = HandlesToPayloads(z_n1s, z_n1_handless)\nb_k0ss = PayloadsToFiberHandles(b_n1s, b_n1_payloadss)\nz_n0ss = PayloadsToFiberHandles(z_n1s, z_n1_payloadss)\n\n\n# a_k0 & b_k0\nb_k0_handlesss = Scan(b_k0ss)\n# Repeat a_k0 iteration for each b_k0\na_k0ss = Amplify(a_k0s, b_k0ss, instance_name=\"K0\")\na_k0_handlesss = Scan(a_k0ss)\na_k0_coordsss = HandlesToCoords(a_k0ss, a_k0_handlesss)\nb_k0_coordsss = HandlesToCoords(b_k0ss, b_k0_handlesss)\n(ab_k0_coordsss, ab_a_k0_handlesss, ab_b_k0_handlesss) = Intersect(a_k0_coordsss, a_k0_handlesss, b_k0_coordsss, b_k0_handlesss, instance_name=\"K0\")\nab_a_k0_payloadsss = HandlesToPayloads(a_k0ss, ab_a_k0_handlesss)\nab_b_k0_payloadsss = HandlesToPayloads(b_k0ss, ab_b_k0_handlesss)\na_valuesss = PayloadsToValues(a_k0ss, ab_a_k0_payloadsss)\nb_n0sss = PayloadsToFiberHandles(b_k0ss, ab_b_k0_payloadsss)\n\n\n# z_n0 << b_n0\nb_n0_handlessss = Scan(b_n0sss)\nb_n0_coordssss = HandlesToCoords(b_n0sss, b_n0_handlessss)\nb_n0_payloadssss = HandlesToPayloads(b_n0sss, b_n0_handlessss)\n# Repeat z_n0 iteration for each b_n0\nz_n0sss = Amplify(z_n0ss, b_n0sss, instance_name=\"N0\")\n(z_n0_handlessss, z_n0_new_fiber_handlesss) = InsertionScan(z_n0sss, b_n0_coordssss)\nz_n0_payloadssss = HandlesToPayloads(z_n0sss, z_n0_handlessss)\na_valuessss = Amplify(a_valuesss, b_n0_handlessss)\nb_valuessss = PayloadsToValues(b_n0sss, b_n0_payloadssss)\nz_valuessss = PayloadsToValues(z_n0sss, z_n0_payloadssss)\n\n# z_ref += a_val * b_val\n# NOTE: MUL and ADD broken out for efficiency\nbody_func = lambda a_val, b_val, z_val: z_val + a_val * b_val\nresultssss = Compute(body_func, a_valuessss, b_valuessss, z_valuessss)\n# Reduce into the same value until end of 
rank\nz_n0_update_ackssss = UpdatePayloads(z_n0sss, z_n0_handlessss, resultssss)\n\n# Update N0 occupancy. (Should we be reducing here?)\n\nz_n1s = Amplify(z_n1, b_n1s)\nz_n1ss = Amplify(z_n1s, b_k0ss)\nz_n1_handlesss = Amplify(z_n1_handless, b_n0sss)\n# was here before\n# z_n1_update_ackss = UpdatePayloads(z_n1ss, z_n1_handlesss, z_n0_new_fiber_handlesss)\n\n# below here is new test\nzeros = Amplify(Stream0(0), z_n1s)\nzeross = Amplify(zeros, z_n1_handless)\nz_n0_new_fiber_handless = Reduce(z_n0_new_fiber_handlesss, zeross)\nz_n1_update_ackss = UpdatePayloads(z_n1s, z_n1_handless, z_n0_new_fiber_handless)\n# Update root occupancy\nz_root_handles = Amplify(Stream0(0), z_n1_new_fiber_handles)\nz_root_update_acks = UpdatePayloads(z_root, z_root_handles, z_n1_new_fiber_handles)\n\n\nN1 = 2\nN0 = 3\n\nK1 = 2\nK0 = 3\n\nA_data = [[1, 2, 3], [2, 4, 6]]\nB_data = [\n [[[4, 5, 6],\n [5, 6, 7],\n [6, 7, 8]],\n [[12, 15, 18],\n [15, 18, 21],\n [18, 21, 24]]],\n [[[8, 10, 12],\n [10, 12, 14],\n [12, 14, 16]],\n [[16, 20, 24],\n [20, 24, 28],\n [24, 28, 32]]]\n ]\n\nZ_data = [[0, 0, 0], [0, 0, 0]]\n\nA_HFA = Tensor.fromUncompressed([\"K1\", \"K0\"], A_data, name = \"A\")\nB_HFA = Tensor.fromUncompressed([\"K1\", \"N1\", \"K0\", \"N0\"], B_data, name = \"B\")\nZ_HFA = Tensor.fromUncompressed([\"N1\", \"N0\"], Z_data, name = \"Z\")\n\nstr_desc = sys.argv[1]\n# output_desc = sys.argv[2]\nfrontier_descriptor = [str_desc[0], str_desc[1]]\noutput_descriptor = frontier_descriptor\n# output_descriptor = [output_desc[0], output_desc[1]]\n\nmyA = encodeSwoopTensorInFormat(A_HFA, frontier_descriptor)\nmyB = encodeSwoopTensorInFormat(B_HFA, [\"U\", \"U\", \"U\", \"C\"])\nmyZ = encodeSwoopTensorInFormat(Z_HFA, output_descriptor)\nprint(myA)\nprint(myB)\nprint(myZ)\n\na.setImplementations(\"root\", myA[0])\na.setImplementations(\"K1\", myA[1])\na.setImplementations(\"K0\", myA[2])\nb.setImplementations(\"root\", myB[0])\nb.setImplementations(\"K1\", myB[1])\nb.setImplementations(\"N1\", myB[2])\nb.setImplementations(\"K0\", myB[3])\nb.setImplementations(\"N0\", myB[4])\nz.setImplementations(\"root\", myZ[0])\nz.setImplementations(\"N1\", myZ[1])\nz.setImplementations(\"N0\", myZ[2])\n\nevaluate(z_n0_update_ackssss, 4)\nevaluate(z_n1_update_ackss, 2)\nevaluate(z_root_update_acks, 1)\n\nexpected_vals = [[160, 190, 220], [352, 418, 484]]\n\nprint(\"Z: {}\".format(myZ))\noutput_for_check = [myZ[2][0].getPayloads(), myZ[2][1].getPayloads()]\nprint(output_for_check)\nassert(output_for_check == expected_vals)\n\"\"\"\nprint(f\"Final K-Stationary result:\")\nfor n1 in range(N1):\n print(my_z_n0[n1].vals)\nfor n1 in range(N1):\n assert(my_z_n0[n1].vals == expected_vals[n1])\nprint(\"==========================\")\n\"\"\"\n", "id": "2025995", "language": "Python", "matching_score": 2.248260259628296, "max_stars_count": 2, "path": "fibertree/codec/codec-knkn-ref.py" }, { "content": "from fibertree import Codec\nfrom boltons.cacheutils import LRU\nfrom fibertree import Tensor\nimport time\nimport os\n# take an HFA tensor, convert it to compressed representation in python\ndef encodeSwoopTensorInFormat(tensor, descriptor, tensor_shape=None, cache_size=32):\n codec = Codec(tuple(descriptor), [True]*len(descriptor))\n\n # get output dict based on rank names\n rank_names = tensor.getRankIds()\n # print(\"encode tensor: rank names {}, descriptor {}\".format(rank_names, descriptor))\n # TODO: move output dict generation into codec\n output = codec.get_output_dict(rank_names)\n # print(\"output dict {}\".format(output))\n 
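# One list per rank in the descriptor, plus one extra slot for the root.\n 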
output_tensor = []\n for i in range(0, len(descriptor)+1):\n output_tensor.append(list())\n\n # print(\"encode, output {}\".format(output_tensor))\n codec.encode(-1, tensor.getRoot(), tensor.getRankIds(), output, output_tensor, shape=tensor_shape)\n\n # name the fibers in order from left to right per-rank\n rank_idx = 0\n rank_names = [\"root\"] + tensor.getRankIds()\n # tensor_cache = dict()\n\n tensor_cache = LRU(max_size = cache_size)\n for rank in output_tensor:\n fiber_idx = 0\n for fiber in rank:\n fiber_name = \"_\".join([tensor.getName(), rank_names[rank_idx], str(fiber_idx)])\n fiber.setName(fiber_name)\n # fiber.printFiber()\n fiber.cache = tensor_cache\n fiber_idx += 1\n rank_idx += 1\n return output_tensor\n\n# tensor is a 2d linearized tensor (one list per rank)\n# dump all stats into output dict\ndef dumpAllStatsFromTensor(tensor, output, cache_output, name):\n for rank in tensor:\n for fiber in rank:\n fiber.dumpStats(output)\n cache_output[name + '_buffer_access'] = tensor[0][0].cache.hit_count\n cache_output[name + '_DRAM_access'] = tensor[0][0].cache.miss_count\n\n# HFA reading in utils\ndef get_A_HFA(a_file):\n # read in inputs\n # jhu_len = 5157\n shape = 500 # TODO: take this as input\n # generate input frontier and tile it\n A_data = [0] * shape\n\n # read in frontier\n count = 1\n A_HFA = None\n if not a_file.endswith('.yaml'):\n with open(a_file, 'r') as f:\n for line in f:\n elt = int(line)\n A_data[elt] = count\n count += 1\n A_untiled = Tensor.fromUncompressed([\"S\"], A_data, name =\"A\")\n A_HFA = A_untiled.splitUniform(32, relativeCoords=True) # split S\n print(\"A untiled shape {}, tiled shape {}\".format(A_untiled.getShape(), A_HFA.getShape()\n ))\n\n else: # already in pretiled yaml\n A_HFA = Tensor.fromYAMLfile(a_file)\n A_HFA.setName(\"A\")\n return A_HFA\n\ndef get_B_HFA(b_file):\n print(\"reading tiled mtx from yaml\")\n t0 = time.time()\n B_HFA = Tensor.fromYAMLfile(b_file)\n t1 = time.time() - t0\n print(\"read B from yaml in {} s\".format(t1))\n # B_HFA.print()\n return B_HFA\n\ndef get_Z_HFA(B_shape):\n Z_data = [[0], [0]]\n Z_HFA = Tensor.fromUncompressed([\"D1\", \"D0\"], Z_data, shape=[B_shape[0], B_shape[2]],\n name=\"Z\")\n return Z_HFA\n\ndef get_stats_dir(a_file, b_file):\n # experiment in dir stats/<frontier>_<graph>\n b_file = b_file.split('/')[-1]\n b_file = b_file.split('.')[-2]\n a_file = a_file.split('/')[-1]\n a_file = a_file.split('.')[-2]\n outpath = 'stats/'+a_file+'_'+b_file+'/'\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n return outpath\n\n# linearize payloads in Z and get rid of zeroes\ndef compress_HFA_payloads(Z_HFA):\n z_n1 = Z_HFA.getRoot()\n output_ref = []\n\n # compress payloads in Z HFA\n for (z, z_n0) in z_n1:\n temp = []\n for (z_coord, z_val) in z_n0:\n if z_val.value != 0:\n temp.append(z_val)\n output_ref.append(temp)\n return output_ref\n\ndef get_lin_codec(myZ):\n output_lin = []\n for i in range(0, len(myZ[2])):\n output_lin.append(myZ[2][i].getPayloads())\n\n # compressing payloads in codec\n output_lin_2 = []\n for i in range(0, len(output_lin)):\n temp = []\n # add only nonzero payloads\n for j in range(0, len(output_lin[i])):\n if output_lin[i][j] != 0:\n temp.append(output_lin[i][j])\n output_lin_2.append(temp)\n return output_lin_2\n", "id": "10394499", "language": "Python", "matching_score": 2.8768908977508545, "max_stars_count": 2, "path": "fibertree/codec/swoop_util.py" }, { "content": "import yaml\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import 
Tensor\nfrom .formats.uncompressed import Uncompressed\n#import compression groupings\nfrom .compression_types import descriptor_to_fmt\n\nclass Codec:\n # format descriptor should be a tuple of valid formats\n # order descriptor specified SoA or AoS at each rank (currently unused)\n # AoS / SoA doesn't apply to some formats (e.g. U) -> C (SoA, default should be here) / Ca (AoS)\n def __init__(self, format_descriptor, cumulative_payloads):\n # take a list of compression formats\n # TODO: check that they are valid\n self.format_descriptor = format_descriptor\n self.cumulative_payloads = cumulative_payloads\n # print(cumulative_payloads)\n assert len(cumulative_payloads) == len(format_descriptor)\n # convert descriptor to list of formats\n self.fmts = list()\n for fmt in self.format_descriptor:\n self.fmts.append(descriptor_to_fmt[fmt])\n \n # assumes pre-flattened for now\n self.num_ranks = len(format_descriptor)\n\n def add_payload(self, depth, output, cumulative, noncumulative):\n if self.cumulative_payloads[depth]:\n output.append(cumulative)\n else:\n output.append(noncumulative)\n\n def get_format_descriptor(self):\n return self.format_descriptor\n\n \n def get_start_occ(self, depth):\n return self.fmts[depth].startOccupancy()\n\n @staticmethod\n def get_num_ranks(self):\n return self.num_ranks\n\n # return a list of occupancies per-rank \n @staticmethod\n def get_occupancies(self, depth, a, num_ranks, output):\n if depth >= num_ranks:\n return \n count = 0\n for ind, (val) in a:\n self.get_occupancies(depth + 1, val, num_ranks, output)\n count = count + 1\n output[depth] = output[depth] + count\n\n # return coords_{rank}, payloads_{rank}\n @staticmethod\n def get_keys(ranks, depth):\n assert depth < len(ranks)\n return \"coords_{}\".format(ranks[depth].lower()), \"payloads_{}\".format(ranks[depth].lower()),\n\n # encode\n def encode(self, depth, a, ranks, output, output_tensor, shape=None):\n if depth >= len(ranks):\n return -1\n # keys are in the form payloads_{rank name}, coords_{rank name}\n # deal with the root separately\n coords_key, payloads_key = self.get_keys(ranks, depth)\n\n if depth == -1: \n # recurse one level down without adding to output yet\n root, size = self.encode(depth + 1, a, ranks, output, output_tensor, shape=shape)\n HFA_root = Uncompressed()\n HFA_root.shape = 1\n # print(\"HFA root, next fmt {}\".format(self.fmts[0]))\n if self.fmts[0].encodeUpperPayload():\n # store at most one payload at the root (size of first rank)\n payloads_key = \"payloads_root\"\n output[payloads_key].append(size)\n HFA_root.occupancies = [0]\n HFA_root.count_payload_reads = True\n\n HFA_root.payloads = [root]\n output_tensor[0] = [HFA_root]\n return root, size\n\n # otherwise, we are in the fibertree\n fmt = self.fmts[depth]\n fiber = fmt()\n dim_len = a.getShape()[0]\n # print(\"shape arg {}\".format(shape))\n if shape != None:\n dim_len = shape[depth]\n # print(\"depth {}, HFA shape {}, real shape {}\".format(depth, a.getShape()[0], dim_len))\n assert dim_len >= a.getShape()[0]\n stats_key = ranks[depth] + \"_\" + str(len(output_tensor[depth+1]))\n fiber.setName(stats_key)\n fiber_occupancy = fiber.encodeFiber(a, dim_len, self, depth, ranks, output, output_tensor, shape=shape)\n fiber.nnz = fiber_occupancy\n fiber.idx_in_rank = len(output_tensor[depth + 1])\n if len(output_tensor[depth + 1]) == 0:\n fiber.occupancy_so_far = 0\n else:\n # exclusive prefix for indexing\n if isinstance(fiber_occupancy, int):\n # fiber.occupancy_so_far = fiber_occupancy + output_tensor[depth + 
1][-1].occupancy_so_far\n fiber.occupancy_so_far = output_tensor[depth + 1][-1].occupancy_so_far + output_tensor[depth + 1][-1].nnz\n # print(\"in encode, name {}, occupancy so far {}\".format(fiber.name, fiber.occupancy_so_far))\n else:\n print(fiber_occupancy)\n assert isinstance(fiber_occupancy, list)\n fiber.occupancy_so_far = fiber_occupancy[0] + output_tensor[depth + 1][-1].occupancy_so_far\n\n output_tensor[depth+1].append(fiber)\n \n # print(\"\\tencode at depth {}: {}\".format(depth+1, output_tensor[depth+1]))\n return fiber, fiber_occupancy\n \n # encode\n # static functions\n # rank output dict based on rank names\n # @staticmethod\n # def get_output_dict(rank_names, format_descriptor):\n def get_output_dict(self, rank_names):\n output = dict()\n output[\"payloads_root\"] = []\n\n # print(\"in output dict {}\".format(self))\n for i in range(0, len(rank_names)):\n coords_key, payloads_key = Codec.get_keys(rank_names, i)\n\n output[coords_key] = []\n output[payloads_key] = [] \n if self.format_descriptor[i] == \"H\":\n ptrs_key = \"ptrs_{}\".format(rank_names[i].lower())\n ht_key = \"ht_{}\".format(rank_names[i].lower())\n\n output[ptrs_key] = []\n output[ht_key] = []\n return output\n\n # given a tensor, descriptor, and dict of tensor encoded in that format\n # print and write out yaml in that format\n # TODO: change the output file name (currently just writes it to [descriptor string].yaml)\n # @staticmethod\n def write_yaml(self, tensor, rank_names, descriptor, tensor_in_format):\n # header\n header = dict()\n header[\"name\"] = \"tensor-a\" # TODO: take this as input later\n header[\"rank_ids\"] = tensor.getRankIds()\n header[\"shapes\"] = tensor.getShape()\n header[\"formats\"] = descriptor\n occupancies = [0]*len(rank_names)\n self.get_occupancies(0, tensor.getRoot(), len(rank_names), occupancies)\n\n header[\"occupancies\"] = occupancies\n # print(tensor_in_format)\n # hierarchical yaml according to ranks\n scratchpads = dict()\n if len(tensor_in_format[\"payloads_root\"]) > 0:\n scratchpads[\"rank_0\"] = { \"payloads\" : tensor_in_format[\"payloads_root\"] }\n\n # write one rank at a time\n for i in range(0, len(rank_names)):\n rank_name = rank_names[i].lower()\n coords_key = \"coords_{}\".format(rank_name)\n payloads_key = \"payloads_{}\".format(rank_name)\n ptrs_key = \"ptrs_{}\".format(rank_name)\n ht_key = \"ht_{}\".format(rank_name)\n key = \"rank_\" + str(i+1)\n rank_dict = dict()\n \n # only write if scratchpad is nonempty\n if len(tensor_in_format[coords_key]) > 0:\n rank_dict[\"coords\"] = tensor_in_format[coords_key]\n if len(tensor_in_format[payloads_key]) > 0:\n if descriptor[i] == \"U\" and i < len(rank_names) - 1:\n rank_dict[\"offsets\"] = tensor_in_format[payloads_key]\n elif descriptor[i] == \"H\" and i < len(rank_names) - 1:\n rank_dict[\"offsets\"] = tensor_in_format[payloads_key]\n else:\n rank_dict[\"payloads\"] = tensor_in_format[payloads_key]\n if descriptor[i] == \"H\":\n rank_dict[\"ptrs\"] = tensor_in_format[ptrs_key]\n rank_dict[\"bin_heads\"] = tensor_in_format[ht_key]\n if len(rank_dict) > 0:\n scratchpads[key] = rank_dict\n \n header[\"scratchpads\"] = scratchpads\n \n data = dict()\n data[\"tensor\"] = header\n\n print(yaml.dump(data, default_flow_style=None, sort_keys=False))\n\n # outfilename = ''.join(descriptor) + '.yaml'\n # with open(outfilename, \"w\") as f:\n # print(yaml.dump(data, default_flow_style=None, sort_keys=False))\n # yaml.dump(data, f)\n\n", "id": "11245927", "language": "Python", "matching_score": 2.728214740753174, 
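# Illustrative sketch, not part of the codec API: the occupancy_so_far
# bookkeeping in Codec.encode above amounts to an exclusive prefix sum of
# per-fiber nonzero counts, i.e. CSR-style offsets of each fiber's first
# element within the concatenated coords/payloads arrays of a rank.
def exclusive_prefix_sum(nnz_per_fiber):
    """Offset of each fiber's first element in the flattened rank."""
    offsets, running = [], 0
    for nnz in nnz_per_fiber:
        offsets.append(running)   # this fiber's occupancy_so_far
        running += nnz
    return offsets

# Three fibers with 2, 0 and 3 nonzeros start at offsets 0, 2 and 2.
assert exclusive_prefix_sum([2, 0, 3]) == [0, 2, 2]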
"max_stars_count": 2, "path": "fibertree/codec/tensor_codec.py" }, { "content": "from .compression_format import CompressionFormat\nimport sys\nimport math\n\n# coordinate-payload list format (C)\nclass CoordinateList(CompressionFormat):\n def __init__(self):\n self.name = \"C\"\n CompressionFormat.__init__(self)\n # self.depth = None\n self.is_leaf = False\n # self.next_fmt = None\n # list of sizes of fibers so far in this rank\n self.occupancy_so_far = None\n\n # cache line locality\n self.elts_per_line = 4\n # encode fiber into C format\n def encodeFiber(self, a, dim_len, codec, depth, ranks, output, output_tensor, shape=None):\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n self.depth = depth\n\n # init vars\n fiber_occupancy = 0\n\n # TODO: HT to one payload\n cumulative_occupancy = 0\n if depth < len(ranks) - 1 and codec.format_descriptor[depth + 1] == \"Hf\":\n \t cumulative_occupancy = [0, 0] \n\n prev_nz = 0\n occ_list = list()\n if depth < len(ranks) - 1:\n self.next_fmt = codec.fmts[depth + 1]\n else:\n self.is_leaf = True\n for ind, (val) in a:\n # store coordinate explicitly\n coords = CoordinateList.encodeCoord(prev_nz, ind)\n\n # TODO: make the fiber rep an intermediate to YAML\n output[coords_key].extend(coords)\n self.coords.extend(coords)\n\n # keep track of nnz in this fiber\n fiber_occupancy = fiber_occupancy + 1\n\n # if at leaves, store payloads directly\n if depth == len(ranks) - 1:\n output[payloads_key].append(val.value)\n self.payloads.append(val.value)\n else:\n # print(\"{}::set next fmt to {}\".format(self.name, self.next_fmt))\n fiber, child_occupancy = codec.encode(depth + 1, val, ranks, output, output_tensor,shape=shape)\n # keep track of actual occupancy (nnz in this fiber)\n \n # print(\"ind {}, depth{}, child {}, cumulative {}\".format(ind, depth, child_occupancy, cumulative_occupancy))\n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n # add cumulative or non-cumulative depending on settings\n codec.add_payload(depth, occ_list, cumulative_occupancy, child_occupancy)\n \n assert depth < len(ranks) - 1\n if codec.fmts[depth+1].encodeUpperPayload():\n # TODO: make the choice for this to be cumulative\n output[payloads_key].append(cumulative_occupancy)\n self.occupancies.append(cumulative_occupancy)\n self.payloads.append(fiber)\n\n prev_nz = ind + 1\n # print(\"{}:: coords {}, payloads {}\".format(self.name, self.coords, self.payloads))\n self.fiber_occupancy = fiber_occupancy \n return fiber_occupancy\n \n #### fiber functions for AST\n\n # max length of slice\n def getSliceMaxLength(self):\n return len(self.coords)\n\n # return handle to existing coord that is at least coord\n def coordToHandle(self, coord):\n # print(\"\\t{} coordToHandle for coord {}\".format(self.name, coord))\n # if out of range, return None\n if len(self.coords) == 0:\n return None\n \n elif coord > self.coords[-1]: # short path to end\n # print(\"\\tcoord searched off the end\")\n key = self.name + \"_handleToCoord_\" + str(len(self.coords) - 1)\n print(key)\n print(self.cache)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[-1]\n if self.name.startswith(\"Z\"):\n print(\"{} coordToHandle coord {}, misses {}\".format(self.name, coord, self.cache.miss_count))\n\n self.stats[self.coords_read_key] += 1; # add to num accesses in binary search\n return 
None\n elif coord <= self.coords[0]: # short path to beginning\n # print(\"\\tcoord searched off the beginning\")\n key = self.name + \"_handleToCoord_0\"\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[0]\n if self.name.startswith(\"Z\"):\n print(\"{} coordToHandle coord {}, misses {}\".format(self.name, coord, self.cache.miss_count))\n self.stats[self.coords_read_key] += 1; # add to num accesses in binary search\n return 0\n\n # do a binary search if in range\n lo = 0\n hi = len(self.coords) - 1\n mid = 0\n # print(\"\\t{} access before binary search {}\".format(self.name, self.num_accesses))\n while lo <= hi:\n # cache along the way in the binary search\n self.stats[self.coords_read_key] += 1; # add to num accesses in binary search\n\n # print(\"\\t coordToHandle: target {}, lo {}, mid {}, hi {}, reads {}\".format(coord, lo, mid, hi, self.stats[self.coords_read_key]))\n mid = math.ceil((hi + lo) / 2)\n coord_key = self.name + \"_handleToCoord_\" + str(mid)\n coord_at_mid = self.cache.get(coord_key)\n self.cache[coord_key] = self.coords[mid]\n # print(\"target {}, lo: {}, hi: {}, mid {}, coord {}\".format(coord, lo, hi, mid, self.coords[mid]))\n if self.coords[mid] == coord:\n return mid\n elif self.coords[mid] < coord:\n lo = mid + 1\n else: # self.coords[mid] > coord:\n hi = mid - 1\n if (coord > self.coords[mid]):\n mid += 1\n # self.prevCoordSearched = coord\n # self.prevHandleAtCoordSearched = mid\n # print(\"\\taccess after binary search {}\".format(self.num_accesses))\n return mid\n\n # make space in coords and payloads for elt\n # return the handle\n def insertElement(self, coord):\n if coord == None:\n return None\n print(\"{} insertElt: coord {}, coords currently {}, misses before {}\".format(self.name, coord, self.coords, self.cache.miss_count))\n\n handle_to_add = self.coordToHandle(coord)\n \n print(\"{} insertElt: coord {}, handle_to_add {}, misses before {}\".format(self.name, coord, handle_to_add, self.cache.miss_count))\n # if went off the end \n if handle_to_add == None:\n self.coords = self.coords + [coord]\n if self.is_leaf:\n self.payloads = self.payloads + [0]\n else:\n self.payloads = self.payloads + [self.next_fmt()]\n self.stats[self.coords_write_key] += 1\n handle = len(self.coords) - 1\n coords_key = self.name + \"_handleToCoord_\" + str(handle)\n payloads_key = self.name + \"_handleToPayload_\" + str(handle)\n\n print(self.cache)\n self.cache.get(coords_key)\n \n print(\"{} insertElt: coord {}, handle_to_add {}, misses after {}\".format(self.name, coord, handle_to_add, self.cache.miss_count))\n self.cache.get(payloads_key)\n \n print(\"{} insertElt: coord {}, handle_to_add {}, misses after {}\".format(self.name, coord, handle_to_add, self.cache.miss_count))\n self.cache[coords_key] = coord\n self.cache[payloads_key] = self.payloads[handle]\n print(self.cache)\n\n # fill out cache to end of line\n assert(len(self.payloads) == len(self.coords))\n end_of_line = self.round_up(handle, self.words_in_line)\n for i in range(handle, end_of_line):\n coords_key = self.name + \"_handleToCoord_\" + str(i)\n payloads_key = self.name + \"_handleToPayload_\" + str(i)\n if i < len(self.coords):\n self.cache[coords_key] = self.coords[i]\n self.cache[payloads_key] = self.payloads[i]\n else:\n self.cache[coords_key] = 0\n self.cache[payloads_key] = 0\n print(self.cache)\n return len(self.coords) - 1\n\n # if adding a new coord, make room for it\n if self.coords[handle_to_add] != coord:\n # add coord to coord list\n self.coords = self.coords[:handle_to_add] + 
[coord] + self.coords[handle_to_add:]\n\n # move payloads to make space\n if self.is_leaf:\n self.payloads = self.payloads[:handle_to_add] + [0] + self.payloads[handle_to_add:]\n else:\n self.payloads = self.payloads[:handle_to_add] + [self.next_fmt()] + self.payloads[handle_to_add:]\n\n # fill out cache to end of line\n assert(len(self.payloads) == len(self.coords))\n for i in range(handle_to_add, len(self.coords)):\n coords_key = self.name + \"_handleToCoord_\" + str(i)\n payloads_key = self.name + \"_handleToPayload_\" + str(i)\n cached_coord = self.cache.get(coords_key)\n\n self.cache[coords_key] = self.coords[i]\n self.cache[payloads_key] = self.payloads[i]\n if cached_coord == None: # DRAM miss\n # bring the rest of the line in\n end_of_line = self.round_up(i, self.words_in_line)\n for j in range(i, end_of_line): \n coords_key = self.name + \"_handleToCoord_\" + str(j)\n payloads_key = self.name + \"_handleToPayload_\" + str(j)\n if j < len(self.coords):\n self.cache[coords_key] = self.coords[j]\n self.cache[payloads_key] = self.payloads[j]\n else:\n self.cache[coords_key] = 0\n self.cache[payloads_key] = 0\n \n self.stats[self.coords_write_key] += len(self.coords) - handle_to_add\n # print(\"\\t{} inserted coord {}\".format(self.name, coord))\n return handle_to_add\n\n # API Methods\n def handleToPayload(self, handle):\n # if next level has implicit payloads above (e.g. U), payload is implicit\n if self.next_fmt != None and not self.next_fmt.encodeUpperPayload():\n print(\"\\t\\tnext level not encoded, ret {}\".format(self.occupancy_so_far))\n \n return self.occupancy_so_far # self.idx_in_rank + handle\n return handle\n\n # API Methods\n def payloadToFiberHandle(self, payload):\n # if next level has implicit payloads above (e.g. U), payload is implicit\n if not self.next_fmt.encodeUpperPayload():\n print(\"\\t{} next level not encoded, payload {} ret {}\".format(self.name, payload, payload))\n return payload # self.idx_in_rank # + payload\n \n # print(\"\\t{} payloadToFiberHandle:: ret {}\".format(self.name, payload))\n return payload\n\n\n # return handle for termination\n def updatePayload(self, handle, payload):\n # print(\"\\t{} updatePayload, handle = {}, payload = {}\".format(self.name, handle, payload))\n if handle == None:\n return None\n \n if handle >= 0 and handle < len(self.payloads):\n # print(self.payloads)\n # print(\"setting payload at {} to {}\".format(handle, payload))\n self.stats[self.payloads_write_key] += 1\n self.payloads[handle] = payload\n \n key = self.name + \"_handleToPayload_\" + str(handle)\n print(\"{} handleToPayload key: {}, miss count before {}\".format(self.name, key, self.cache.miss_count))\n \n cached_val = self.cache.get(key)\n assert cached_val != None\n self.cache[key] = payload\n print(self.cache)\n print(\"{} handleToPayload key: {}, miss count after {}\".format(self.name, key, self.cache.miss_count))\n\n return handle\n\n def getUpdatedFiberHandle(self):\n return len(self.coords)\n \n # print this fiber representation in C\n def printFiber(self):\n print(\"{} :: coords: {}, occupancies: {}, payloads: {}\".format(self.name, self.coords, self.occupancies, self.payloads))\n \n # get size of representation\n def getSize(self): \n # self.printFiber()\n if self.next_fmt != None and self.next_fmt.encodeUpperPayload():\n assert(len(self.payloads) > 0)\n\n size = len(self.coords) + len(self.occupancies)\n # Don't need to store occupancies if lower level is U\n # if not isinstance(self.payloads[0], CompressionFormat):\n size += len(self.payloads) \n 
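# Standalone sketch with the cache modelling stripped out: the search loop in
# CoordinateList.coordToHandle above is a lower-bound binary search over the
# explicit coordinate list, returning the handle of the smallest stored
# coordinate >= the target, or None when the target lies past the end.
import bisect

def coord_to_handle(coords, coord):
    if not coords or coord > coords[-1]:
        return None                          # searched off the end
    return bisect.bisect_left(coords, coord)

assert coord_to_handle([2, 5, 9], 5) == 1    # exact match
assert coord_to_handle([2, 5, 9], 6) == 2    # next larger coordinate
assert coord_to_handle([2, 5, 9], 10) is None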
return size\n\n #### static methods\n\n # encode coord explicitly\n @staticmethod\n def encodeCoord(prev_ind, ind):\n return [ind]\n\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n return [payload]\n\n # explicit coords\n @staticmethod\n def encodeCoords():\n return True\n\n # explicit prev payloads\n @staticmethod\n def encodeUpperPayload():\n return True\n", "id": "7027758", "language": "Python", "matching_score": 6.400378704071045, "max_stars_count": 2, "path": "fibertree/codec/formats/coord_list.py" }, { "content": "from .compression_format import CompressionFormat\n\nclass Uncompressed(CompressionFormat):\n # constructor\n def __init__(self):\n self.name = \"U\"\n CompressionFormat.__init__(self)\n self.occupancies = list()\n self.count_payload_reads = False\n\n # instantiate this fiber in the format\n def encodeFiber(self, a, dim_len, codec, depth, ranks, output, output_tensor, shape=None):\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n \n # init vars\n fiber_occupancy = 0\n \n occ_list = list()\n\n # keep track of shape during encoding\n self.shape = dim_len\n \n if depth < len(ranks) - 1:\n cumulative_occupancy = codec.get_start_occ(depth + 1)\n self.next_fmt = codec.fmts[depth + 1]\n if codec.fmts[depth + 1].encodeUpperPayload():\n self.count_payload_reads = True\n else: # leaf level is always read payloads\n self.count_payload_reads = True\n for i in range(0, dim_len):\n # internal levels\n if depth < len(ranks) - 1:\n fiber, child_occupancy = codec.encode(depth + 1, a.getPayload(i), ranks, output, output_tensor, shape=shape)\n self.payloads.append(fiber)\n # keep track of occupancy (cumulative requires ordering)\n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n codec.add_payload(depth, occ_list, cumulative_occupancy, child_occupancy)\n\n # store occupancy\n if codec.fmts[depth+1].encodeUpperPayload():\n output[payloads_key].append(cumulative_occupancy)\n self.occupancies.append(cumulative_occupancy)\n else: # leaf level\n if a.getPayload(i) == 0:\n output[payloads_key].append(0)\n self.payloads.append(0)\n else:\n output[payloads_key].append(a.getPayload(i).value)\n self.payloads.append(a.getPayload(i).value)\n \n return len(output_tensor[depth])\n\n ## SWOOP API functions \n def handleToCoord(self, handle):\n return handle\n\n # TODO: stop passing around the ptr? 
maybe payload could just be the offset \n def payloadToFiberHandle(self, payload):\n assert payload < self.shape\n to_ret = self.idx_in_rank * self.shape + payload\n # print(\"{} payloadToFiberHandle: idx in rank {}, shape {}, payload {}, ret {}\".format(self.name, self.idx_in_rank, self.shape, payload, to_ret))\n if self.next_fmt is not None and self.next_fmt.encodeUpperPayload():\n key = self.name + \"_fiberHandle_\" + str(payload)\n if self.name.startswith(\"Z\"):\n print(\"{} payloadToFiberHandle, payload {}, to ret {}, misses before {}\".format(self.name, payload, to_ret, self.cache.miss_count))\n self.cache.get(key)\n self.cache[key] = to_ret\n\n # fill in cache line\n end_of_line = self.round_up(payload, self.words_in_line)\n for i in range(payload, end_of_line):\n key = self.name + \"_fiberHandle_\" + str(i)\n self.cache[key] = self.idx_in_rank * self.shape + i\n if self.name.startswith(\"Z\"):\n print(\"{} payloadToFiberHandle, payload {}, to ret {}, misses after {}\".format(self.name, payload, to_ret, self.cache.miss_count))\n return to_ret\n \n # max number of elements in a slice is proportional to the shape\n def getSliceMaxLength(self):\n return self.shape\n\n def coordToHandle(self, coord):\n # print(\"{} coordToHandle {}, shape {}\".format(self.name, coord, self.shape))\n if coord < 0 or coord >= self.shape:\n return None\n return coord\n \n def insertElement(self, coord):\n assert coord < self.shape\n return coord\n\n def updatePayload(self, handle, payload):\n assert handle is not None and handle < self.shape\n # testing adding to the cache\n key = self.name + \"_handleToPayload_\" + str(handle)\n if self.next_fmt is not None:\n key = self.name + \"_fiberHandle_\" + str(handle)\n if self.next_fmt.encodeUpperPayload():\n self.cache.get(key) # try to access it\n self.cache[key] = payload # put it in the cache\n \n # print(\"{} updatePayload: handle {}, payload {}, misses so far {}\".format(self.name, handle, payload, self.cache.miss_count))\n # if the payloads from lower level are explicit \n if self.count_payload_reads:\n self.stats[self.payloads_write_key] += 1\n # print(\"\\tupdate {}, handle {}, payload {}\".format(self.name, handle, payload))\n \n if isinstance(payload, tuple):\n self.occupancies[handle] = payload[0]\n self.payloads[handle] = payload[1]\n else:\n self.payloads[handle] = payload\n print(\"updatePayload {}, handle {}, payload {}, payloads {}\".format(self.name, handle, payload, self.payloads))\n return handle\n\n def getPayloads(self):\n return self.payloads\n\n def printFiber(self):\n print(\"{} :: occupancies {}, payloads {}\".format(self.name, self.occupancies, self.payloads))\n \n def getSize(self):\n assert(len(self.payloads) > 0)\n assert(len(self.coords) == 0)\n size = len(self.occupancies)\n if not isinstance(self.payloads[0], CompressionFormat):\n size += len(self.payloads)\n \n # print(\"size of {} = {}. 
coords {}, occupancies {}, payloads {}\".format(self.name, size, self.coords, self.occupancies, self.payloads))\n return size\n\n ### static methods\n @staticmethod\n def encodeCoord(prev_ind, ind):\n return []\n\n # default implementation is like in C\n # overwrite if this is changed\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n output = list()\n for i in range(prev_ind, ind):\n output.append(0)\n output.append(payload)\n return output\n\n @staticmethod\n def endPayloads(num_to_pad):\n return [0] * num_to_pad\n\n # implicit coords\n @staticmethod\n def encodeCoords():\n return False\n\n # implicit prev payloads\n @staticmethod\n def encodeUpperPayload():\n return False\n", "id": "4210571", "language": "Python", "matching_score": 0.4915049076080322, "max_stars_count": 2, "path": "fibertree/codec/formats/uncompressed.py" }, { "content": "\"\"\"Uncompressed Image Module\"\"\"\n\nimport logging\n\nfrom PIL import Image, ImageDraw\n\nfrom fibertree import Tensor\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nfrom fibertree import ImageUtils\nfrom fibertree import HighlightManager\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.uncompressed_image')\n\n\nclass UncompressedImage():\n \"\"\"UncompressedImage\n\n This class is used to draw an uncompressed representation of a tensor\n\n Constructor\n -----------\n\n Parameters\n ----------\n\n object: tensor or fiber\n A tensor or fiber object to draw\n\n highlights: dictionary\n A dictionary of \"workers\" each with list of points to highlight\n\n extent: tuple\n Maximum row/col to use for image\n \"\"\"\n\n def __init__(self, object, highlights={}, extent=(100, 200), row_map=None):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.uncompressed_image')\n\n #\n # Record paramters\n #\n # Note: We conditionally unwrap Payload objects\n #\n self.object = Payload.get(object)\n self.row_extent = extent[0]\n self.col_extent = extent[1]\n self.row_map = row_map\n\n level = self.object.getDepth()-1\n self.highlight_manager = HighlightManager(highlights, level=level)\n\n #\n # Cache worker colors\n #\n worker_color = {}\n\n for n, worker in enumerate(highlights.keys()):\n worker_color[worker] = ImageUtils.getColor(worker)\n\n self.worker_color = worker_color\n\n #\n # Draw the tensor\n #\n self._create_uncompressed()\n\n\n def _create_uncompressed(self):\n \"\"\"Create uncompressed image\n\n Create an image of a tensor or fiber tree\n\n Notes\n ------\n\n The drawing is made in a coordinate space where the X\n and Y are the positions in the tensor.\n Translation to pixels happens in the draw_*() methods.\n\n \"\"\"\n\n object = self.object\n\n #\n # Create the objects for the image\n #\n self._image_setup()\n\n #\n # Display either the root of a tensor or a raw fiber\n #\n if isinstance(object, Tensor):\n #\n # Handle a tensor\n #\n root = object.getRoot()\n self._color = object.getColor()\n #\n # Print tensor name\n #\n name = object.getName()\n if not name:\n name = \"unknown\"\n\n ranks = \", \".join([str(r) for r in object.getRankIds()])\n\n self._draw_label(0, 0, f\"Tensor: {name}[{ranks}]\")\n\n elif isinstance(object, Fiber):\n #\n # Handle a fiber\n #\n root = object\n self._color = \"red\"\n else:\n #\n # Handle a scalar\n #\n root = None\n self._color = \"red\"\n\n #\n # Process appropriately if root has 0 dimensions or more\n #\n if not Payload.contains(root, Fiber):\n # Draw a 0-D tensor, i.e., a value\n\n # TBD\n region_size 
= [1, 1]\n\n else:\n # Draw a non-0-D tensor or a fiber, i.e., the fiber tree\n region_size = self._traverse(root)\n\n #\n # Crop the image\n #\n if region_size[0] > self.row_extent or region_size[1] > self.col_extent:\n msg = f\"Uncompressed image too large [ {region_size[0]}, {region_size[1]}\"\n self.logger.info(msg)\n return\n\n right = 200+self._col2x(region_size[1])\n lower = 20+self._row2y(region_size[0])\n\n self.logger.debug(f\"right: {region_size[1]}/{right}, lower: {region_size[0]}/{lower}\")\n self.im = self.im.crop((0, 0, right, lower))\n\n\n def show(self):\n \"\"\"Show the fibertree image\"\"\"\n\n self.im.show()\n\n\n#\n# Method to traverse (and draw) all the cells in the tensor\n#\n def _traverse(self, fiber):\n \"\"\"traverse\"\"\"\n\n #\n # Assume this is a rank-3 or less tensor\n #\n if not Payload.contains(fiber, Fiber):\n #\n # Draw a 0-D tensor, i.e., a value (NOT a fiber)\n #\n\n # TBD\n region_size = [1, 1]\n else:\n #\n # Recursively draw the fibers of a non-0-D tensor\n #\n shape = fiber.getShape(all_ranks=True)\n dimensions = len(shape)\n\n hl_manager = self.highlight_manager\n\n if dimensions == 4:\n region_size = self._traverse_hypercube(shape, fiber, highlight_manager=hl_manager)\n elif dimensions == 3:\n region_size = self._traverse_cube(shape, fiber, highlight_manager=hl_manager)\n elif dimensions == 2:\n region_size = self._traverse_matrix(shape, fiber, highlight_manager=hl_manager)\n elif dimensions == 1:\n region_size = self._traverse_vector(shape, fiber, highlight_manager=hl_manager)\n else:\n self.logger.info(f\"Unsupported number of ranks for uncompressed image ({dimensions})\")\n region_size = [1, 1]\n\n return region_size\n\n\n def _traverse_hypercube(self, shape, fiber, row_origin=1, col_origin=0, highlight_manager=None):\n \"\"\" traverse_hypercube - unimplemented \"\"\"\n\n self.logger.debug(\"Display a hypercube\")\n\n #\n # Print out the rank information (if available)\n #\n self._draw_label(row_origin, col_origin, \"Rank: \"+self._getId(fiber))\n self._draw_label(row_origin+1, col_origin, \"|\")\n self._draw_label(row_origin+2, col_origin, \"V\")\n\n row_cur = row_origin + 3\n row_max = row_origin + 3\n col_cur = col_origin\n\n #\n # Just show the nonEmpty cubes\n #\n for cube_c, cube_p in fiber:\n\n self._draw_label(row_cur, col_origin, f\"{cube_c}\")\n row_cur += 2\n row_max = row_cur\n\n highlight_manager_next = highlight_manager.addFiber(cube_c)\n\n self.logger.debug(f\"Coord: {cube_c} - draw as [{row_cur}, {col_origin}]\")\n\n rc_range = self._traverse_cube(shape[1:],\n cube_p,\n row_origin=row_cur,\n col_origin=col_origin,\n highlight_manager=highlight_manager_next)\n\n self.logger.debug(f\"Coord: {cube_c} - rc_range: {rc_range}\")\n\n # row_cur does not change\n row_cur = rc_range[0] + 2\n row_max = row_cur\n\n # col_cur does not change\n col_max = max(row_max, rc_range[1])\n\n return [row_max, col_max]\n\n\n def _traverse_cube(self, shape, fiber, row_origin=1, col_origin=0, highlight_manager=None):\n \"\"\" traverse_cube \"\"\"\n\n self.logger.debug(f\"Drawing cube at [{row_origin}, {col_origin}]\")\n #\n # Print out the rank information (if available)\n #\n self._draw_label(row_origin, col_origin, \"Rank: \"+self._getId(fiber)+\" ----->\")\n\n row_cur = row_origin + 1\n row_max = row_origin + 1\n col_cur = col_origin\n col_max = col_origin\n\n #\n # Just show the nonEmpty matrices\n #\n for matrix_c, matrix_p in fiber:\n\n self._draw_label(row_origin, col_cur+5, f\"{matrix_c}\")\n\n highlight_manager_next = 
highlight_manager.addFiber(matrix_c)\n\n rc_range = self._traverse_matrix(shape[1:],\n matrix_p,\n row_origin=row_cur,\n col_origin=col_cur,\n highlight_manager=highlight_manager_next)\n\n # row_cur does not change\n row_max = max(row_max, rc_range[0])\n\n col_cur = rc_range[1] + 2\n col_max = col_cur\n\n if col_cur > self.col_extent: break\n\n return [row_max, col_max]\n\n\n\n def _traverse_matrix(self, shape, fiber, row_origin=1, col_origin=0, highlight_manager=None):\n \"\"\" traverse_matrix \"\"\"\n\n #\n # Print out the rank information (if available)\n #\n label = \"Rank: \"+self._getId(fiber)\n self._draw_label(row_origin+2, col_origin, label)\n\n #\n # Set up variables to track rows and columns (note offset for rank label)\n #\n row_cur = row_origin\n row_max = row_cur\n\n col_cur = col_origin + (len(label)+2)//3\n col_max = col_cur\n\n #\n # Set up for loop\n #\n row_p = Fiber([], [])\n row_first = True\n\n #\n # For integer coordinates traverse all the coordinates in the shape\n # otherwise traverse all the non-empty coordinates\n #\n coords = range(shape[0])\n\n if isinstance(fiber, Fiber):\n if len(fiber) > 0 and not isinstance(fiber.coords[0], int):\n coords = fiber.coords\n\n\n for row_c in coords:\n\n if self.row_map:\n coord_label = str(self.row_map[row_c])\n else:\n coord_label = row_c\n\n if fiber is not None:\n row_p = fiber.getPayload(row_c)\n\n highlight_manager_next = highlight_manager.addFiber(row_c)\n\n rc_range = self._traverse_vector(shape[1:],\n row_p,\n row_origin=row_cur,\n col_origin=col_cur,\n highlight_manager=highlight_manager_next,\n rank_label=row_first,\n coord_label=coord_label)\n\n row_max = max(row_max, rc_range[0])\n row_cur = row_max\n row_first = False\n\n # col_cur does not change\n col_max = max(col_max, rc_range[1])\n\n if row_cur > self.row_extent: break\n\n\n return [row_max, col_max]\n\n\n def _traverse_vector(self,\n shape,\n fiber,\n row_origin=1,\n col_origin=0,\n highlight_manager=None,\n rank_label=True,\n coord_label=None):\n\n #\n # Print out the rank information (if available)\n #\n # TBD: Align column more inteligently\n #\n if coord_label is not None:\n col_hack = 3\n else:\n col_hack = 0\n\n if rank_label:\n self._draw_label(row_origin, col_origin+col_hack, \"Rank: \"+self._getId(fiber))\n\n for c in range(fiber.getShape(all_ranks=False)):\n self._draw_label(row_origin+1, col_origin+col_hack+c, f\"{c:^3}\")\n\n rank_label_offset = 2\n else:\n rank_label_offset = 0\n\n #\n # Handle spans of empty rows\n #\n if len(fiber) != 0 or rank_label:\n #\n # On non-empty (or first) row reset empty row counter\n #\n self._empty_count = 0\n else:\n #\n # After first row, check for empty rows\n #\n self._empty_count += 1\n\n if self._empty_count == 2:\n self._draw_label(row_origin, col_origin+col_hack, \"...\")\n return [ row_origin+1, col_origin]\n\n if self._empty_count > 2:\n return [ row_origin, col_origin]\n\n #\n # Print out coordinate information (if available)\n #\n if coord_label is not None:\n try:\n label = f\"{coord_label:>9}\"\n except Exception:\n label = f\"{str(coord_label):>9}\"\n\n self._draw_label(row_origin+rank_label_offset, col_origin, label)\n coord_label_offset = col_hack\n else:\n coord_label_offset = 0\n\n\n #\n # Set up variables to track rows and columns\n #\n # Note: offsets for rank and coordinate labels\n #\n row_cur = row_origin + rank_label_offset\n row_max = row_origin + rank_label_offset\n\n col_cur = col_origin + coord_label_offset\n col_max = col_origin + coord_label_offset\n\n #\n # Determine if coordinates 
are integers\n #\n if len(fiber) > 0 and isinstance(fiber.coords[0], int):\n coord_is_int = True\n else:\n coord_is_int = False\n\n #\n # Process each coordinate in the shape\n #\n for coord in range(shape[0]):\n #\n # Get highlighting information from highlight manager\n #\n color_coord = highlight_manager.getColorCoord(coord)\n color_subtensor = highlight_manager.getColorSubtensor()\n color_coord_or_subtensor = color_coord | color_subtensor\n\n if isinstance(fiber, Fiber):\n #\n # For printing a non-empty row\n #\n if coord_is_int:\n payload = fiber.getPayload(coord)\n else:\n #\n # Just show non-integer coordinates in order\n #\n try:\n payload = fiber.payloads[coord]\n except:\n payload = 0\n\n assert not isinstance(payload, Fiber)\n else:\n #\n # For printing a empty row\n #\n payload = 0\n\n row_count = self._draw_value(row_cur, col_cur, payload, color_coord_or_subtensor)\n\n # row_cur does not change\n row_max = max(row_max, row_cur+row_count)\n\n col_cur += 1\n col_max = col_cur\n\n if col_max > self.col_extent: break\n\n\n return [row_max, col_max]\n\n#\n# Utility methods\n#\n\n def _getId(self, fiber):\n \"\"\" _getId - get fiber's rank id \"\"\"\n\n if fiber.getOwner() is None:\n return \"\"\n\n return str(fiber.getOwner().getId())\n\n#\n# Image methods\n#\n def _image_setup(self):\n\n # Constrain image size (overage matches crop above)\n\n x_pixels = self._col2x(self.col_extent+1) + 200 # was 8192\n y_pixels = self._row2y(self.row_extent+1) + 20 # was 1024\n\n # Create an image at least this tall (in pixels)\n self.max_y = 100\n\n\n # Do image related setup\n self.im = Image.new(\"RGB\", (x_pixels, y_pixels), \"wheat\")\n self.fnt = ImageUtils.getFont()\n self.draw = ImageDraw.Draw(self.im)\n\n#\n#\n#\n def _anti_alias(self, fill_color):\n r, g, b = fill_color\n return (r//4, g//4, b//4)\n \n\n#\n# Methods to draw objects on the drawing canvas\n#\n# Note: Input arguments place the objects at a position specified by:\n# - row\n# - column\n#\n def _draw_label(self, row, column, label):\n \"\"\"draw_label\"\"\"\n\n x1 = self._col2x(column) + 20\n y1 = self._row2y(row) - 10\n\n # Hack: drawing text twice looks better in PIL\n self.draw.text((x1+10,y1+10), label, font=self.fnt, fill=\"black\")\n self.draw.text((x1+10,y1+10), label, font=self.fnt, fill=\"black\")\n\n\n\n def _draw_value(self, row, column, value, highlight=[]):\n \"\"\"draw_value\"\"\"\n\n #\n # Check if we're outside the box\n #\n if row >= self.row_extent or column >= self.col_extent:\n if row == self.row_extent or column == self.col_extent:\n self._draw_label(row, column, \"...\")\n return 2\n\n return 0\n\n\n if isinstance(value, Payload):\n value = value.value\n\n if not isinstance(value, tuple):\n value = ( value, )\n\n row_count = len(value)\n\n font_y = 30\n\n x1 = self._col2x(column) + 20\n y1 = self._row2y(row) - 10\n\n x2 = x1 + 40\n y2 = y1 + row_count*(font_y+10)\n \n if y2 > self.max_y:\n self.max_y = y2\n\n\n if len(highlight) == 0:\n fill_color = self._color if value != (0, ) else 0\n self.draw.rectangle(((x1,y1), (x2,y2)), fill_color, 1)\n else:\n step = (y2-y1) // len(highlight)\n y1c = y1\n for worker in highlight:\n y2c = y1c + step\n fill_color = self.worker_color[worker]\n self.draw.rectangle(((x1,y1c), (x2,y2c)), fill_color, 1)\n y1c = y2c\n\n for i, v in enumerate(value):\n if isinstance(v, Payload):\n v = v.value\n\n x_text = x1+15\n y_text = y1+10+(i*font_y)\n if (isinstance(v, int)):\n abs_v = abs(v)\n\n if v < 0:\n x_text = x_text - 7\n if abs_v >= 10:\n x_text = x_text - 7\n if abs_v >= 
100:\n x_text = x_text - 7\n if abs_v >= 1000:\n x_text = x_text - 7\n elif (isinstance(v, float)):\n v = round(v, 2)\n\n\n # Hack: drawing text twice looks better in PIL\n self.draw.text((x_text, y_text),\n str(v),\n font=self.fnt,\n fill=\"white\")\n self.draw.text((x_text, y_text),\n str(v),\n font=self.fnt,\n fill=\"white\")\n\n return row_count\n#\n#\n# Methods to convert positions specified in col/row space into pixels\n#\n def _col2x(self, col):\n return 200 + 40*col\n \n def _row2y(self, row):\n return 40 + 40*row\n\n\nif __name__ == \"__main__\":\n \n print(\"a - multiple highlights\")\n a = Tensor.fromYAMLfile(\"../../examples/data/sparse-matrix-a.yaml\")\n a.setColor(\"blue\")\n i = UncompressedImage(a, highlights={\"PE\": [(0, 1), (1, 2), (3,)]})\n i.show()\n\n #\n print(\"a - single highlights\")\n i = UncompressedImage(a, {\"PE\": [(1, 2)]})\n i.show()\n\n #\n print(\"b\")\n b = Tensor.fromUncompressed([\"X\"], [1, 2, 0, 0, 4])\n i = UncompressedImage(b, {\"PE\": [(1,), (4,)]})\n i.show()\n\n #\n print(\"c\")\n a_root = a.getRoot()\n c = Tensor.fromFiber([\"X\", \"Y\", \"Z\"], Fiber([0, 1, 2], [a_root, Fiber([],[]), a_root]))\n i = UncompressedImage(c)\n i.show()\n\n #\n print(\"d\")\n d = c.getRoot()\n print(\"Original\")\n i = UncompressedImage(d)\n i.show()\n\n #\n# print(\"e\")\n# d_flattened = d.flattenRanks()\n# print(\"Flattened\")\n# i = UncompressedImage(d_flattened)\n# i.show()\n", "id": "258004", "language": "Python", "matching_score": 5.644737720489502, "max_stars_count": 2, "path": "fibertree/graphics/uncompressed_image.py" }, { "content": "\"\"\"Tree Image Module\"\"\"\n\nimport logging\n\nfrom PIL import Image, ImageDraw\n\nfrom fibertree import Tensor\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nfrom fibertree import ImageUtils\nfrom fibertree import HighlightManager\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.tree_image')\n\n\nclass TreeImage():\n \"\"\"TreeImage\n\n Class to create a fibertree image of a tensor\n\n Parameters\n ----------\n object: tensor or fiber\n A tensor or fiber object to draw\n\n highlights: dictionary\n A dictionary of \"workers\" each with list of points to highlight\n\n extent: tuple\n Maximum row/col to use for image\n\n \"\"\"\n\n def __init__(self, object, highlights={}, extent=(30, 200)):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.tree_image')\n\n #\n # Record parameters\n #\n # Note: We conditionally unwrap Payload objects\n #\n self.object = Payload.get(object)\n self.row_extent = extent[0]\n self.col_extent = extent[1]\n\n level = self.object.getDepth()-1\n self.highlight_manager = HighlightManager(highlights, level=level)\n\n #\n # Cache worker colors\n #\n worker_color = {}\n\n for n, worker in enumerate(highlights.keys()):\n worker_color[worker] = ImageUtils.getColor(worker)\n\n self.worker_color = worker_color\n\n #\n # Create the tree image\n #\n self._create_tree()\n\n\n def _create_tree(self):\n \"\"\"create_tree\n\n Create an image of a tensor or fiber tree\n\n Notes\n ------\n\n The drawing is made in a coordinate space where the X\n dimension is measured in the number of non-empty fiber\n coordinates being displayed and the Y dimension is measured in\n layers of the tree. 
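# Sketch only: the grid-to-pixel mapping in UncompressedImage above is a fixed
# affine transform; the constants below are copied from _col2x/_row2y and
# _draw_value (40-pixel cells, 200/40 pixel origins, a +20/-10 pixel pad for
# the value box), so this reproduces where a single-value cell is drawn.
def cell_box(row, col, cell=40, x0=200, y0=40):
    x1 = x0 + cell * col + 20
    y1 = y0 + cell * row - 10
    return (x1, y1, x1 + cell, y1 + cell)

# The single value at row 2, column 3 is drawn in this pixel rectangle:
assert cell_box(2, 3) == (340, 110, 380, 150)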
Translation to pixels happens in the\n draw_*() methods.\n\n \"\"\"\n\n object = self.object\n\n #\n # Create the objects for the image\n #\n self._image_setup()\n\n #\n # Display either the root of a tensor or a raw fiber\n #\n if isinstance(object, Tensor):\n #\n # Displaying a tensor\n #\n root = object.getRoot()\n #\n # Get tensor's name\n #\n name = object.getName()\n #\n # Get tensor's color\n #\n self._color = object.getColor()\n #\n # Create rank_id string\n #\n # Note: if rank_id is a list, convert to a string\n #\n ranks = \", \".join([str(r) for r in object.getRankIds()])\n\n if name:\n self._draw_rank(0, f\"Tensor: {name}[{ranks}]\")\n else:\n self._draw_rank(0, f\"File: {object.yamlfile}\")\n elif isinstance(object, Fiber):\n #\n # Displaying a fiber\n #\n root = object\n self._color = \"red\"\n else:\n #\n # Displaying nothing?\n #\n root = None\n self._color = \"red\"\n\n #\n # Process appropriately if root has 0 dimensions or more\n #\n if not Payload.contains(root, Fiber):\n #\n # Draw a 0-D tensor, i.e., a value\n #\n hlm = self.highlight_manager\n hl = hlm.getColorCoord(0)\n\n self._draw_coord(0, 0, \"R\")\n self._draw_line(0, 1/2, 1, 1/2)\n self._draw_value(1,\n 0,\n Payload.get(root),\n hl)\n region_end = 1\n else:\n #\n # Draw a non-0-D tensor or a fiber, i.e., the fiber tree\n #\n region_end = self._traverse(root,\n highlight_manager=self.highlight_manager)\n\n #\n # Crop the image\n #\n right = 200+self._offset2x(region_end)\n lower = 20+self.max_y\n\n self.im = self.im.crop((0, 0, right, lower))\n\n\n\n def show(self):\n \"\"\"Show the fibertree image\"\"\"\n\n self.im.show()\n\n\n#\n# Method to traverse (and draw) all the levels of the tree\n#\n def _traverse(self,\n fiber,\n level=0,\n offset=0,\n highlight_manager=None):\n \"\"\"traverse\"\"\"\n\n\n #\n # Check if this is level0, which may just be a payload\n #\n if level == 0:\n region_start = 0\n\n if not Payload.contains(fiber, Fiber):\n #\n # Draw a 0-D tensor, i.e., a value (NOT a fiber)\n #\n self._draw_coord(0, 0, \"R\")\n self._draw_line(0, 1/2, 1, 1/2)\n self._draw_value(1, 0, Payload.get(fiber))\n region_end = 1\n else:\n #\n # Recursively traverse and draw the fibers of a non-0-D tensor\n #\n region_end = self._traverse(fiber,\n level=1,\n offset=offset,\n highlight_manager=highlight_manager)\n\n region_size = region_end - region_start\n #\n # Draw root of tree\n #\n fiber_size = 1\n fiber_start = region_start + (region_size - fiber_size)/2\n self._draw_coord(0, fiber_start, \"R\")\n self._draw_line(0, region_size/2, 1, region_size/2)\n\n return region_end\n\n #\n # Process the fibers of the tree (level > 0)\n #\n\n #\n # Print out the rank information (if available)\n #\n if offset == 0 and not fiber.getOwner() is None:\n self._draw_rank(level, \"Rank: %s \" % fiber.getOwner().getId())\n\n #\n # Initialize drawing region information\n #\n region_start = offset\n region_end = offset\n\n #\n # Figure out space of region below this fiber\n #\n targets = []\n coordinate_start = region_start\n\n #\n # Traverse the fiber at this level\n #\n for n, (c, p) in enumerate(fiber):\n #\n # TBD: Truncate fibers with too many elements\n #\n # if n > 10: break\n\n if Payload.contains(p, Fiber):\n #\n # Configure highlights for this fiber\n #\n next_highlight_manager = highlight_manager.addFiber(c)\n\n #\n # Draw the object below this coordinate (in \"c\")\n #\n region_end = self._traverse(Payload.get(p),\n level=level+1,\n offset=region_end,\n highlight_manager=next_highlight_manager)\n\n else:\n region_end += 1\n\n 
#\n # Record (in \"targets\") the middle of the object below\n # this coordinate to draw a line to it later, and\n # calculate where the next object starts (\"coordinate_start\")\n #\n targets.append(coordinate_start+(region_end-coordinate_start)/2)\n coordinate_start = region_end\n\n #\n # If the fiber was empty we still occupy a single space\n #\n if len(fiber) == 0:\n region_end += 1\n\n region_size = region_end - region_start\n\n #\n # Set up the highlighting for this level\n #\n highlight_subtensor = highlight_manager.highlight_subtensor\n\n #\n # Display fiber for this level\n #\n fiber_size = len(fiber)\n fiber_start = region_start + (region_size - fiber_size)/2\n\n self._draw_fiber(level,\n fiber_start,\n fiber_start+fiber_size,\n highlight_subtensor)\n\n pos = fiber_start\n\n for c, p in fiber:\n #\n # Gets sets of workers to be colored\n #\n color_coord = highlight_manager.getColorCoord(c)\n color_subtensor = highlight_manager.getColorSubtensor()\n color_coord_or_subtensor = color_coord | color_subtensor\n\n #\n # Draw the coordinates, lines and maybe values\n #\n self._draw_coord(level, pos, c, color_coord_or_subtensor)\n\n if len(color_coord - color_subtensor):\n self._draw_intra_line(level,\n fiber_start + fiber_size / 2,\n pos+0.5,\n True)\n\n #\n # Draw the line if the next level will actually draw something.\n #\n if not Payload.contains(p, Fiber) or len(p.coords) > 0:\n self._draw_line(level,\n pos+0.5,\n level+1,\n targets.pop(0),\n len(color_coord_or_subtensor) > 0)\n else:\n #\n # Nothing to connect a line to so pop target\n #\n targets.pop(0)\n\n\n if not Payload.contains(p, Fiber):\n #\n # How could this not be the leaf ---\n # \"and rest_of_highlighting == []\"\n #\n self._draw_value(level+1,\n pos,\n Payload.get(p),\n color_coord_or_subtensor)\n\n pos += 1\n\n return region_end\n\n#\n# Image methods\n#\n def _image_setup(self):\n\n # Constrain image size (overage matches crop above)\n\n #\n # Size used to be (8192, 1024)\n #\n x_pixels = self._offset2x(self.col_extent+1) + 200\n y_pixels = self._level2y(self.row_extent+1) + 20\n\n #\n # Create an image at least this tall (in pixels)\n #\n self.max_y = 100\n\n #\n # Do image related setup\n #\n self.im = Image.new(\"RGB\", (x_pixels, y_pixels), \"wheat\")\n self.fnt = ImageUtils.getFont()\n self.draw = ImageDraw.Draw(self.im)\n\n#\n#\n#\n def _anti_alias(self, fill_color):\n r, g, b = fill_color\n return (r//4, g//4, b//4)\n\n\n#\n# Methods to draw objects on the drawing canvas\n#\n# Note: Input arguments place the objects at a position specified by:\n# - level: layer in the tree (Y)\n# - offset: number of drawn fiber coordinates (X)\n#\n def _draw_rank(self, level, rank):\n \"\"\"draw_rank\"\"\"\n\n x1 = 0\n y1 = self._level2y(level)\n\n # Hack: drawing text twice looks better in PIL\n self.draw.text((x1+10, y1+10), rank, font=self.fnt, fill=\"black\")\n self.draw.text((x1+10, y1+10), rank, font=self.fnt, fill=\"black\")\n\n\n def _draw_fiber(self, level, start_offset, end_offset, highlight=False):\n \"\"\"draw_fiber\"\"\"\n\n height = 60\n gap = 5\n\n x1 = self._offset2x(start_offset) + gap\n y1 = self._level2y(level) - 10\n x2 = self._offset2x(end_offset) - gap\n y2 = y1 + height\n fill_color = (128,128,128) if not highlight else (233,198,109)\n\n self.draw.ellipse(((x1, y1), (x2, y2)), fill_color, (0, 0, 0))\n\n\n def _draw_coord(self, level, offset, coord, highlight=[]):\n \"\"\"draw_coord\"\"\"\n\n x1 = self._offset2x(offset) + 20\n y1 = self._level2y(level)\n x2 = x1 + 40\n y2 = y1 + 40\n\n color = 
\"goldenrod\" if len(highlight) > 0 else \"saddlebrown\"\n\n x_text = x1+15\n\n if coord != \"R\" and isinstance(coord, int):\n if int(coord) >= 10:\n x_text = x_text - 7\n if int(coord) >= 100:\n x_text = x_text - 7\n if int(coord) >= 1000:\n x_text = x_text - 7\n\n if coord != \"R\":\n self.draw.ellipse(((x1,y1), (x2,y2)), color, 1)\n else:\n self._draw_diamond(x1, y1, x2, y2, \"black\")\n\n #\n # Hack: drawing text twice looks better in PIL\n #\n for n in range(2):\n self.draw.text((x_text, y1+10),\n str(coord),\n font=self.fnt,\n fill=\"white\")\n\n\n def _draw_value(self, level, offset, value, highlight=[]):\n \"\"\"draw_value\"\"\"\n\n if isinstance(value, Payload):\n value = value.value\n\n if not isinstance(value, tuple):\n value = ( value, )\n\n value = self._flatten_value(value)\n\n font_y = 30\n\n #\n # Calculate location of rectangle around value\n #\n x1 = self._offset2x(offset) + 20\n y1 = self._level2y(level) - 10\n\n x2 = x1 + 40\n y2 = y1 + 10 + len(value)*(font_y)\n\n if y2 > self.max_y:\n self.max_y = y2\n\n #\n # Draw rectangle (with or without highlights)\n #\n if len(highlight) == 0:\n fill_color = self._color\n self.draw.rectangle(((x1, y1), (x2, y2)), fill_color, 1)\n else:\n step = (y2-y1) // len(highlight)\n y1c = y1\n for worker in highlight:\n y2c = y1c + step\n fill_color = self.worker_color[worker]\n self.draw.rectangle(((x1, y1c), (x2, y2c)), fill_color, 1)\n y1c = y2c\n\n #\n # Draw vertical stack of values from tuple\n #\n for i, v in enumerate(value):\n if isinstance(v, Payload):\n v = v.value\n\n #\n # Calculate location of text\n #\n x_text = x1+15\n y_text = y1+10+(i*font_y)\n\n if (isinstance(v, int)):\n if v >= 10:\n x_text = x_text - 7\n if v >= 100:\n x_text = x_text - 7\n if v >= 1000:\n x_text = x_text - 7\n elif (isinstance(v, float)):\n v = round(v, 2)\n\n #\n # Hack: drawing text twice looks better in PIL\n #\n for n in range(2):\n self.draw.text((x_text, y_text),\n str(v),\n font=self.fnt,\n fill=\"white\")\n\n\n def _flatten_value(self, value, first=True):\n\n if isinstance(value, Payload):\n value = value.value\n\n if isinstance(value, tuple):\n result = []\n\n if not first:\n result.append(\"(\")\n\n for i in value:\n result.extend(self._flatten_value(i, first=False))\n\n if not first:\n result.append(\")\")\n else:\n result = [ value ]\n\n return result\n\n\n def _draw_diamond(self, x1, y1, x2, y2, fill_color):\n mid_x = x1+(x2-x1)/2\n mid_y = y1+(y2-y1)/2\n\n self.draw.polygon([(mid_x, y1), (x1, mid_y), (mid_x, y2), (x2, mid_y)], fill_color, 1)\n\n\n def _draw_line(self, level1, offset1, level2, offset2, highlight=False):\n\n # Bottom of source is 40 below level2y result (see draw_coord)\n # Top of target is 10 above level2y results (see draw_fiber)\n\n x1 = self._offset2x(offset1)\n y1 = self._level2y(level1) + 40\n x2 = self._offset2x(offset2)\n y2 = self._level2y(level2) - 10\n\n fill_color = \"goldenrod\" if highlight else \"black\"\n\n self.draw.line([(x1, y1), (x2, y2)], width=3, fill=fill_color)\n\n\n def _draw_intra_line(self, level, fiber_offset, coord_offset, highlight=False):\n\n # Bottom of source is 10 above level2y results (see draw_line)\n # Top of target is level2y result (see draw_coord)\n\n x1 = self._offset2x(fiber_offset)\n y1 = self._level2y(level) - 10\n x2 = self._offset2x(coord_offset)\n y2 = self._level2y(level)\n\n fill_color = \"goldenrod\" if highlight else \"black\"\n\n self.draw.line([(x1, y1), (x2, y2)], width=3, fill=fill_color)\n\n#\n#\n# Methods to convert positions specified in offset/level space into 
pixels\n#\n def _offset2x(self, offset):\n return 200 + 80*offset\n\n\n def _level2y(self, level):\n return 40 + 80*level\n\n\nif __name__ == \"__main__\":\n\n a = Tensor.fromYAMLfile(\"../../examples/data/draw-a.yaml\")\n a.print()\n i = TreeImage(a)\n i.show()\n", "id": "11215753", "language": "Python", "matching_score": 2.024472713470459, "max_stars_count": 2, "path": "fibertree/graphics/tree_image.py" }, { "content": "\"\"\"Tensor Canvas Module\"\"\"\n\nimport logging\n\nimport copy\nfrom collections import namedtuple\n\nfrom fibertree import Tensor\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nfrom .image_utils import ImageUtils\nfrom .highlights import HighlightManager\n\nfrom .movie_canvas import MovieCanvas\nfrom .spacetime_canvas import SpacetimeCanvas\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.tensor_canvas')\n\n\nclass TensorCanvas():\n \"\"\"TensorCanvas\n\n The TensorCanvas class is a frontend for the\n {Movie,SpaceTime}Canvas classes. It creates a canvas of the\n requested animation type and shadows of the tracked tensors and\n passes method calls on to the created class.\n\n It manges the shadows of the tracked tensors in an addActivity()\n method. That method is used to support incremental addition of\n highlights at a specfic time and at a particular worker, which is\n is specified by the \"spacetime\" keyword. In more detail,\n addActivity() uses the shadows of the tracked tensors, logs\n highlights and logs changes to mutable tensors. Then when a frame\n is to be displayed it collects the appropriate highlights and\n replays the changes into the appropriate shadow tensor and passes\n highlights to the addFrame() method in a {Movie,SpaceTime}Canvas,\n which displays those highlights on the current state of the shadow\n tensors.\n\n The addFrame() method can be called exclictly to output the\n information from the oldest frame, but it is best to just wait\n until the information for all frames has been recorded, and all\n frames will be output..\n\n This class also provides primitive support for having an activity\n \"wait\" for a coordinate in another tensor to be updated. To do\n this it tracks the update time of each coordinate that changes\n a value. 
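# Standalone sketch of the layout arithmetic in TreeImage._traverse above: a
# fiber's coordinate boxes are centred horizontally over the region (measured
# in drawn-coordinate slots) occupied by its subtree.
def centre_fiber(region_start, region_end, fiber_size):
    region_size = region_end - region_start
    fiber_start = region_start + (region_size - fiber_size) / 2
    return fiber_start, fiber_start + fiber_size

# A 2-coordinate fiber whose children span offsets [4, 10) is drawn at [6, 8).
assert centre_fiber(4, 10, 2) == (6.0, 8.0)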
This capability is enabled with the \"enable_wait\" keyword.\n\n Constructor\n -----------\n\n Create an animation canvas of the requested type for for the given\n tensors.\n\n Parameters\n ----------\n tensors: list\n A list of tensors or fiber objects to track\n\n animation: string\n Type of animation ('none', 'movie', 'spacetime')\n\n style: string\n Display style for movies ('tree', 'uncompressed', 'tree+uncompressed')\n\n enable_wait: Boolean\n Enable tracking update times to allow waiting for an update\n\n \"\"\"\n\n def __init__(self, *tensors, animation='movie', style='tree', enable_wait=False):\n \"\"\"__init__\n\n \"\"\"\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.tensor_canvas')\n\n #\n # Places to collect information about the frames\n #\n num_tensors = len(tensors)\n\n self.num_tensors = num_tensors\n self.orig_tensors = []\n self.shadow_tensors = []\n\n self.using_spacetimestamp = None\n\n self.update_times = [] if enable_wait else None\n self.waitname_map = {}\n\n for t in tensors:\n #\n # Build list of orignal tensors,\n # but convert Fibers into a Tensor with the Fiber as its root\n #\n if isinstance(t, Fiber):\n # TBD: We do not really know if the fiber is mutable...\n t = Tensor.fromFiber(fiber=t)\n\n self.orig_tensors.append(t)\n #\n # Create a tensor to hold a shadow tensor that tracks\n # updates to the tracked tensors at the right time\n #\n if t.isMutable():\n self.shadow_tensors.append(copy.deepcopy(t))\n else:\n self.shadow_tensors.append(t)\n\n #\n # Create a tensor to hold last update time\n # of each element of a tensor\n #\n if enable_wait:\n self.update_times.append(Tensor(rank_ids=t.getRankIds()))\n\n #\n # Create a list to hold a record of activity at each timestamp\n #\n self.log = []\n\n #\n # Flag to help addFrame() know if it is adding activity\n # to an existing frame\n #\n self.inframe = False\n\n #\n # Global cycle tracker\n #\n self.cycle = 0\n\n #\n # Reset image highlighting\n #\n ImageUtils.resetColors()\n\n #\n # Create desired canvas\n #\n # Note: We create the canvas with the shadow tensors, so that\n # the visualized activity happens in the desired order\n #\n if animation == 'movie':\n self.canvas = MovieCanvas(*self.shadow_tensors, style=style)\n elif animation == 'spacetime':\n self.canvas = SpacetimeCanvas(*self.shadow_tensors)\n elif animation == 'none':\n self.canvas = NoneCanvas(*self.shadow_tensors, style=style)\n else:\n self.logger.warning(\"TensorCanvas: No animation type: %s\", animation)\n\n\n def addActivity(self, *highlights, spacetime=None, worker=\"anon\", skew=0, wait=None, end_frame=False):\n \"\"\"addActivity\n\n Add an activity to be displayed in an animation.\n\n Parameters\n ----------\n highlights: list\n A list of highlight specifications for each tensor being animated\n See highlights.py for formats for highlights.\n\n spacetime: tuple\n A tuple containing the \"worker\" performing the activity and a \"timestamp\"\n specifying the time the activity occurs. Timestamps are tuples of integers\n\n worker: string\n Name of the worker performing the action (mutually exclusive with `spacetime`)\n\n skew: integer\n Time the activity occurs relative to current time (mutually exclusive with `spacetime`)\n\n wait: list of tensors\n Specify a list of tensors that must be updated before this activiy can occur. After\n the dependency is satisfied add the skew.\n\n end_frame: Boolean\n If true call addFrame() after adding activity. 
Deprecated.\n\n\n Notes\n -----\n\n For mutable tensors the `highlights` parameter must\n authoritatively indicate the **points** that have been changed\n for the first time.\n\n \"\"\"\n #\n # Don't need to do anything for `NoneCanvas`\n #\n if isinstance(self.canvas, NoneCanvas):\n return\n\n #\n # Rename spacetime to spacetimestamp to avoid confusion\n # with the spacetime style\n #\n spacetimestamp = spacetime\n\n #\n # Set that we are in a frame\n #\n self.inframe = True\n\n if spacetimestamp is not None:\n #\n # Spacetimestamp mode\n #\n assert self.using_spacetimestamp in [None, True], \"One cannot mix spacetimestamp and skew\"\n\n self.using_spacetimestamp = True\n\n worker = spacetimestamp[0]\n timestamp = spacetimestamp[1]\n else:\n #\n # Skew mode - user must invoke AddFrame()\n #\n assert self.using_spacetimestamp in [None, False], \"One cannot mix spacetimestamp and skew\"\n\n self.using_spacetimestamp = False\n\n #\n # Convert skew into a global time\n #\n timestamp = self.cycle + skew\n\n #\n # Canonicalize highlights\n #\n # Note 1: The highlights parameter is a list of points or list\n # of lists of points one for each tracked tensor They will be\n # turned into an actual highlight data strcture here.\n #\n # Note 2: The highlight specification must not contain a worker,\n # because that will override the default worker, but is not checked.\n #\n highlights_list = []\n\n for hl in highlights:\n highlights_list.append(HighlightManager.canonicalizeHighlights(hl, worker=worker))\n\n #\n # If wait is a list it is a list of input tensors that this\n # activity depended on and the skew is delayed by the latest\n # time among those inputs\n #\n if wait is not None:\n assert False, \"Wait is currently broken\"\n assert self.update_times is not None, \"Keyword 'enable_wait' not set\"\n\n delay = -1\n\n #\n # Look at each input and see which is the latest\n #\n # TBD: We wait for all the highlighted points in an input,\n # maybe it should be selective\n #\n for tname, xmit_time in wait.items():\n\n tnum = self.waitname_map.get(tname, self._insertWaitname(tname))\n\n for hl in highlights_list[tnum][worker]:\n update_time = self.update_times[tnum].getPayload(*hl)\n update_delay = update_time.value - self.cycle + xmit_time\n if update_delay > delay:\n delay = update_delay\n\n assert delay >= 0, \"Tensor never updated for wait\"\n\n skew = max(skew, delay)\n\n #\n # Tell the canvas to remember the current tensor states\n #\n log_idx = self._logChanges(*highlights_list, timestamp=timestamp)\n\n #\n # Collect the highlights for this frame accounting for global time\n #\n # TBD: There must be a better way to combine the highlights.\n # Using this code exactly one addActivity() must have all\n # the activity for a worker\n #\n active_highlights = self.log[log_idx].highlights\n\n for n, highlight in enumerate([highlights[worker] for highlights in highlights_list]):\n\n if worker not in active_highlights[n]:\n active_highlights[n][worker] = highlight\n self.logger.debug(\"New highlight %s\", highlight)\n self.logger.debug(\"After: %s\", active_highlights[n][worker])\n else:\n self.logger.debug(\"Before: %s\", active_highlights[n][worker])\n self.logger.debug(\"Appending highlight %s\", highlight)\n active_highlights[n][worker].extend(highlight)\n self.logger.debug(\"After: %s\", active_highlights[n][worker])\n\n #\n # Sometimes addActivity should end the frame\n #\n if end_frame or worker == \"anon\":\n self.addFrame()\n\n\n def _insertWaitname(self, tname):\n\n if isinstance(tname, int):\n 
self.waitname_map[tname] = tname\n return tname\n\n for tnum, t in enumerate(self.orig_tensors):\n if t.getName() == tname:\n self.waitname_map[tname] = tnum\n return tnum\n\n #\n # Didn't find the tensor, so\n # return None to cause an error in the caller\n #\n return None\n\n\n def addFrame(self, *highlights):\n \"\"\"Add a step to the movie or spacetime diagram\n\n A step (or cycle) to the animation. For movies this\n corresponds to a frame in the movie.\n\n Parameters\n ----------\n\n highlighted_coords_per_tensor: list of highlights\n Highlights to add to the registered tensors\n\n \"\"\"\n #\n # Don't need to do anything for `NoneCanvas`\n #\n if isinstance(self.canvas, NoneCanvas):\n return\n\n\n #\n # For situations where caller did not use addActivity()\n # call it one time for them\n #\n # Note: highlights is a list of highlights objects\n #\n if highlights:\n self.addActivity(*highlights, worker=\"PE\")\n\n self.inframe = False\n\n #\n # Highlights were collected by addActivity\n #\n highlights = self.log[0].highlights if len(self.log) else {}\n\n #\n # Populate shadow tensors with values for this frame\n #\n # Note: The log gets popped, so we needed to get the\n # highlights out before this call\n #\n self._replayChanges()\n\n #\n # Add the frame\n #\n self.canvas.addFrame(*highlights)\n\n\n def getLastFrame(self, message=None):\n \"\"\"Finalize the movie/spacetime diagram\n\n Finalize the animation by adding all the pending cycles to the\n animation. Get an image of the final frame.\n\n Parameters\n ---------\n\n message: string, default=None\n A message to add to the image\n\n Returns\n -------\n final_frame: image\n An image of the final frame\n\n \"\"\"\n\n #\n # Push out any remaining logged activity\n #\n for n in range(len(self.log)):\n self.addFrame()\n\n return self.canvas.getLastFrame(message=message)\n\n\n def saveMovie(self, filename=None):\n \"\"\"Save the animation to a file\n\n If the animation can be saved to a file, this method will do\n that.\n\n Parameters\n ----------\n\n filename: string, default=None\n Name of a file to save the movie\n\n \"\"\"\n\n #\n # Push out any remaining logged activity\n #\n self.getLastFrame()\n\n return self.canvas.saveMovie(filename=filename)\n\n#\n# Utility function to log and replay a series of changes to the\n# tensors being tracked\n#\n\n def _logChanges(self, *highlights, timestamp=None):\n \"\"\"logChanges\n\n Log current values (at the highlighted points) to the mutable\n tensors for later replay into the shadow tensors at time\n \"timestamp\".\n\n Parameters\n ----------\n\n highlights: a highlights dictionary\n A per PE list of highlighted points for each tracked tensor\n\n timestamp: tuple of integers\n The time at which these values are to be replayed\n\n \"\"\"\n\n assert timestamp is not None, \"Timestamp error\"\n\n tensors = self.orig_tensors\n update_times = self.update_times\n\n #\n # Find the log entry for \"timestamp\" or create one\n #\n log_idx_list = [ idx for idx, element in enumerate(self.log) if element.timestamp == timestamp]\n if len(log_idx_list) >= 1:\n log_idx = log_idx_list[0]\n self.logger.debug(\"Found existing timestamp at %s\", log_idx)\n else:\n log_idx = self._createChanges(timestamp)\n\n #\n # Get references to the lists of points and values updated at timestamp\n #\n points = self.log[log_idx].points\n values = self.log[log_idx].values\n\n for tnum, highlight in enumerate(highlights):\n #\n # Skip immutable tensors\n #\n if not tensors[tnum].isMutable():\n continue\n\n #\n # Log the points 
being highlighted\n #\n for worker, highlight_list in highlight.items():\n for point in highlight_list:\n if not isinstance(point, tuple):\n point = (point,)\n\n points[tnum].append(point)\n\n payload = tensors[tnum].getPayload(*point)\n values[tnum].append(copy.deepcopy(payload))\n\n if update_times is not None:\n updatetime_ref = update_times[tnum].getPayloadRef(*point)\n updatetime_ref <<= timestamp\n\n return log_idx\n\n\n def _replayChanges(self):\n \"\"\"replayChanges \"\"\"\n\n if len(self.log) == 0:\n return\n\n points = self.log[0].points\n values = self.log[0].values\n\n for shadow, point_list, value_list in zip(self.shadow_tensors,\n points,\n values):\n\n if shadow.isMutable():\n for point, value in zip(point_list, value_list):\n\n if Payload.isEmpty(value):\n continue\n\n ref = shadow.getPayloadRef(*point)\n ref <<= value\n\n del self.log[0]\n\n #\n # Increment cycle\n #\n if not self.using_spacetimestamp:\n self.cycle += 1\n\n\n def _createChanges(self, timestamp):\n \"\"\" _createChanges \"\"\"\n\n FrameLog = namedtuple('FrameLog', ['timestamp', 'points', 'values', 'highlights'])\n\n num_tensors = self.num_tensors\n\n new_points = [[] for n in range(num_tensors)]\n new_values = [[] for n in range(num_tensors)]\n new_highlights = [{} for n in range(num_tensors)]\n\n framelog = FrameLog(timestamp, new_points, new_values, new_highlights)\n\n if len(self.log) == 0:\n self.log.append(framelog)\n return 0\n\n #\n # Insert new changes at proper place in the log\n # TBD: Do a more sophisticated insertion\n #\n\n for i in range(len(self.log)):\n if self.log[i].timestamp > timestamp:\n log_idx = i\n self.log.insert(log_idx, framelog)\n return log_idx\n\n self.log.append(framelog)\n\n return len(self.log)-1\n\n\n#\n# Utility class to manage cycles\n#\nclass CycleManager():\n \"\"\"CycleManager\n\n A class to allow a program to manage the current cycle, for using\n in canvas displays\n\n TBD: Allow nested parallel regions\n\n \"\"\"\n\n def __init__(self):\n \"\"\"__init__\n\n Initialize some variables\n\n \"\"\"\n\n self.cycle = 0\n self.parallel = 0\n self.worker_max = 0\n\n\n def __call__(self):\n \"\"\"__call__\n\n Call the class to return the current cycle and move to the\n next cycle\n\n \"\"\"\n cycle = self.cycle\n self.cycle += 1\n\n return cycle\n\n\n def startParallel(self):\n \"\"\"startParallel\n\n Start a parallel region by remembering the current cycle\n\n \"\"\"\n\n self.parallel = self.cycle\n\n\n def startWorker(self):\n \"\"\"startWorker\n\n Reset the cycle for a worker\n\n \"\"\"\n\n self.cycle = self.parallel\n\n\n def finishWorker(self):\n \"\"\"finishWorker\n\n Remember the maximum cycle (actually the cycle after any\n activity in that worker) arrived at by any worker in the\n parallel region.\n\n \"\"\"\n\n self.worker_max = max(self.worker_max, self.cycle)\n\n\n def finishParallel(self):\n \"\"\"finishParallel\n\n Finish the parallel region and set the current cycle to the\n cycle after the longest running worker\n\n \"\"\"\n\n self.cycle = self.worker_max\n\n\nclass NoneCanvas():\n \"\"\"NoneCanvas - does nothing\"\"\"\n\n def __init__(self, *tensors, animation='movie', style='tree', **kwargs):\n \"\"\"__init__\"\"\"\n\n # For 'none' we create a movie but don't add any frames\n self.canvas = TensorCanvas(*tensors, animation='movie', style=style, **kwargs)\n\n return\n\n def addFrame(self, *highlighted_coords_per_tensor):\n \"\"\"addFrame - should never get called\"\"\"\n\n return\n\n def getLastFrame(self, message=None):\n \"\"\"getLastFrame\"\"\"\n\n im = 
self.canvas.getLastFrame(message=message)\n\n return im\n\n\n def saveMovie(self, filename=None):\n \"\"\"saveMovie\"\"\"\n\n self.logger.info(\"NoneCanvas: saveMovie - unimplemented\")\n return None\n", "id": "4306508", "language": "Python", "matching_score": 4.320976734161377, "max_stars_count": 2, "path": "fibertree/graphics/tensor_canvas.py" }, { "content": "\"\"\"Spacetime Canvas Module\"\"\"\n\nimport logging\nimport copy\n\nfrom fibertree import Tensor\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nfrom fibertree import TensorImage\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.spacetime_canvas')\n\n\n\nclass SpacetimeCanvas():\n \"\"\"SpaceTimeCanvas\n\n A class to create a spacetime diagram of activity in a set of\n tensors. This class is used by the `TensorCanvas` class as one of\n the ways it can display activity.\n\n Constructor\n -----------\n\n Parameters\n ----------\n tensors: list\n A list of tensors or fibers objects to track\n\n \"\"\"\n\n def __init__(self, *tensors):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.spacetime_canvas')\n\n #\n # Structures to hold infomation about each tracked tensor\n #\n self.tensors = []\n self.spacetime = []\n self.highlights = []\n\n for tensor in tensors:\n #\n # Append each tensor being tracked, conditionally\n # unwraping it if it is a Payload object\n #\n self.tensors.append(Payload.get(tensor))\n\n #\n # Create a \"spacetime\" tensor to hold the spacetime\n # information for this tracked tensor\n #\n if isinstance(tensor, Tensor):\n assert tensor.getShape() != [], \"No support for 0-D tensors\"\n\n spacetime = Tensor(rank_ids=[\"T\"] + tensor.getRankIds())\n spacetime.setName(tensor.getName())\n spacetime.setColor(tensor.getColor())\n else:\n assert tensor.getDepth() == 1, \"Only 1-D fibers are supported\"\n\n spacetime = Tensor(rank_ids=[\"T\", \"S\"])\n\n #\n # Append the \"spacetime\" tensor to hold this tracked\n # tensor's spacetime information\n #\n self.spacetime.append(spacetime)\n #\n # Append an empty highlight object to hold the highlighting\n # information for this tracked tensor\n #\n self.highlights.append({})\n\n self.frame_num = 0\n\n\n def addFrame(self, *highlighted_coords_per_tensor):\n \"\"\"Add a timestep to the spacetime diagram\n\n Parameters\n ----------\n\n highlighted_coords_per_tensor: list of highlights\n Highlights to add to the registered tensors\n\n \"\"\"\n\n #\n # Handle the case where nothing should be highlighted anywhere.\n #\n if not highlighted_coords_per_tensor:\n final_coords = [{} for n in range(len(self.tensors))]\n else:\n final_coords = highlighted_coords_per_tensor\n\n #\n # For each tracked tensor collect the information for the new frame\n #\n for tensor, spacetime, highlights, hl_info in zip(self.tensors,\n self.spacetime,\n self.highlights,\n final_coords):\n\n #\n # Get fiber holding current state\n #\n # TBD: Should fiber append get the root,\n # if you try to append a tensor\n #\n if isinstance(tensor, Tensor):\n timestep = tensor.getRoot()\n else:\n timestep = tensor\n\n #\n # Append current tracked tensor state to spacetime tensor\n # with a coordinate coresponding the the frame number\n #\n spacetime.getRoot().append(self.frame_num, copy.deepcopy(timestep))\n\n #\n # Delicate sequence to add highlight into\n # spacetime tensor's highlight object\n #\n for worker, hl_list in hl_info.items():\n hl_list_new = []\n for point in hl_list:\n if len(point) == 1:\n point = 
point[0]\n\n hl_list_new.append((point, self.frame_num))\n\n if worker not in highlights:\n highlights[worker] = hl_list_new\n else:\n highlights[worker] = highlights[worker] + hl_list_new\n\n self.frame_num += 1\n\n\n def getLastFrame(self, message=None):\n \"\"\"Get the final frame\n\n Create a image of the final spacetime diagram.\n\n Parameters\n ---------\n\n message: string, default=None\n A message to add to the image\n\n Returns\n -------\n final_frame: image\n An image of the spacetime diagram\n\n \"\"\"\n\n images = []\n\n for spacetime, highlights in zip(self.spacetime, self.highlights):\n #\n # Get spacetime tensor name & ranks\n #\n #\n spacetime_name = spacetime.getName()\n spacetime_ranks = spacetime.getDepth()\n\n if spacetime_ranks > 2:\n #\n # Original tensor was a matrix or bigger, so flatten it\n #\n # Note: points in the tensor look like (time, coord0,\n # coord1, ..) so we need to skip over the first\n # rank before flattening\n #\n spacetime = spacetime.flattenRanks(depth=1,\n levels=spacetime_ranks-2)\n\n #\n # Swap the space and time ranks\n #\n spacetime_swapped = spacetime.swapRanks()\n spacetime_swapped.setName(spacetime_name)\n\n #\n # Create spacetime image for this tensor and append to\n # full image\n #\n image = TensorImage(spacetime_swapped,\n style='uncompressed',\n highlights=highlights).im\n\n images.append(image)\n\n return images\n\n\n def saveMovie(self):\n \"\"\"saveMovie\n\n Does nothing for spacetime diagrams.\n\n \"\"\"\n\n print(\"SpaceTimeCanvas: saveMovie - unimplemented\")\n return None\n\n\nif __name__ == \"__main__\":\n\n #\n # This is broken...\n #\n a = Tensor.fromYAMLfile(\"../examples/data/draw-a.yaml\")\n b = Tensor.fromYAMLfile(\"../examples/data/draw-b.yaml\")\n canvas = TensorCanvas(a, b)\n canvas.addFrame()\n canvas.addFrame([10], [4])\n canvas.addFrame([10, 40], [4, 1])\n canvas.addFrame([10, 40, 1], [4, 1, 0])\n canvas.addFrame()\n canvas.saveMovie(\"tmp.mp4\")\n", "id": "3271409", "language": "Python", "matching_score": 1.6815900802612305, "max_stars_count": 2, "path": "fibertree/graphics/spacetime_canvas.py" }, { "content": "import os\n\nfrom fibertree import Fiber, Tensor\n\nprint(\"---------------\")\nprint(\"YAML I/O Tests\")\nprint(\"---------------\")\n\ndata_dir = \"../../data\"\ntmp_dir = \"/tmp\"\n\n# Read in a Tensor\n\ndraw_a_file = os.path.join(data_dir, \"draw-a.yaml\")\nt1 = Tensor.fromYAMLfile(draw_a_file)\n\n# Dump the Tensor to /tmp\"\n\ndraw_a_file_tmp = os.path.join(tmp_dir, \"draw-a.yaml\")\nt1.dump(draw_a_file_tmp)\n\n# Read in the Tensor from /tmp\"\n\nt2 = Tensor.fromYAMLfile(draw_a_file_tmp)\n\nprint(f\"Tensor read/write test: {(t1.getRoot() == t2.getRoot())}\")\n\n# Read in a Fiber\n\ndraw_fiber_a_file = os.path.join(data_dir, \"draw-fiber-a.yaml\")\nf1 = Fiber.fromYAMLfile(draw_fiber_a_file)\n\nprint(f\"Fiber read test: {(t1.getRoot() == f1)}\")\n\n# Dump the Fiber to /tmp\n\ndraw_fiber_a_file_tmp = os.path.join(tmp_dir, \"draw-fiber-a.yaml\")\nf1.dump(draw_fiber_a_file_tmp)\n\n# Read in the Fiber from /tmp\n\nf2 = Fiber.fromYAMLfile(draw_fiber_a_file_tmp)\n\nprint(f\"Fiber read/write test: {(f1 == f2)}\")\n\n", "id": "7176911", "language": "Python", "matching_score": 1.5482462644577026, "max_stars_count": 2, "path": "examples/scripts/methods/yaml-io.py" }, { "content": "#!/usr/bin/python3\n\nimport os\nimport argparse\n\nfrom fibertree import Tensor, TensorImage\n\ndata_dir = \"../examples/data\"\nexample_file = os.path.join(data_dir, \"draw-a.yaml\")\n\nparser = 
argparse.ArgumentParser(description='Display a tensor')\nparser.add_argument(\"tensorfile\", nargs=\"?\", default=example_file)\nargs = parser.parse_args()\n\nfilename = args.tensorfile\n\na = Tensor(filename)\na.print(filename)\n\ni = TensorImage(a)\ni.show()\n", "id": "10835622", "language": "Python", "matching_score": 2.520625114440918, "max_stars_count": 2, "path": "scripts/draw.py" }, { "content": "import os\n\nfrom fibertree import Tensor, TensorImage\n\ndata_dir = \"../../data\"\nfilename = os.path.join(data_dir, \"draw-a.yaml\")\n\na = Tensor(filename)\na.print(filename)\n\ni = TensorImage(a)\ni.show()\n", "id": "2927661", "language": "Python", "matching_score": 0.5800923705101013, "max_stars_count": 2, "path": "examples/scripts/visualization/draw-tensor-3D.py" }, { "content": "import unittest\n\nfrom fibertree import Tensor, Fiber, Payload\n\n\nclass TestCalculations(unittest.TestCase):\n\n def test_traverse(self):\n \"\"\"Traverse a tensor\"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n \n a_m = a.getRoot()\n\n sum = 0\n\n for m, (a_k) in a_m:\n for k, (a_val) in a_k:\n sum += a_val\n\n self.assertEqual(sum, 2713)\n\n\n def test_copy(self):\n \"\"\"Copy a tensor\"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n z = Tensor(rank_ids=[\"M\", \"K\"])\n\n a_m = a.getRoot()\n z_m = z.getRoot()\n\n for m, (z_k, a_k) in z_m << a_m:\n for k, (z_ref, a_val) in z_k << a_k:\n z_ref += a_val\n\n self.assertEqual(a, z)\n\n def test_sum(self):\n \"Test sum\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_sum_a.yaml\")\n b = Tensor.fromYAMLfile(\"./data/tensor_sum_b.yaml\")\n z = Tensor(rank_ids=[\"M\"])\n\n a_m = a.getRoot()\n b_m = b.getRoot()\n z_m = z.getRoot()\n\n for m_coord, (z_ref, (op, a_k, b_k)) in z_m << (a_m | b_m):\n for k_coord, (op, a_val, b_val) in a_k | b_k:\n z_ref += a_val + b_val\n\n\n z_correct = Tensor.fromYAMLfile(\"./data/tensor_sum_z.yaml\")\n\n self.assertEqual(z, z_correct)\n\n def test_dot(self):\n \"Test dot product\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_sum_a.yaml\")\n b = Tensor.fromYAMLfile(\"./data/tensor_sum_b.yaml\")\n z = Tensor(rank_ids=[\"M\"])\n\n a_m = a.getRoot()\n b_m = b.getRoot()\n z_m = z.getRoot()\n\n for m_coord, (z_ref, (a_k, b_k)) in z_m << (a_m & b_m):\n for k_coord, (a_val, b_val) in a_k & b_k:\n z_ref += a_val * b_val\n\n z_correct = Tensor.fromYAMLfile(\"./data/tensor_dot_z.yaml\")\n\n self.assertEqual(z, z_correct)\n\n def test_0D(self):\n \"Test sum to rank 0 tensor\"\n\n a = Tensor.fromYAMLfile(\"./data/conv-activations-a.yaml\")\n z = Tensor(rank_ids=[])\n\n a_m = a.getRoot()\n z_ref = z.getRoot()\n\n for m_coord, (a_val) in a_m:\n z_ref += a_val\n\n self.assertEqual(z_ref, 12)\n\n def test_conv1d_ws(self):\n \"\"\"Convolution 1d ws\"\"\"\n\n w = Tensor.fromYAMLfile(\"./data/conv-weights-a.yaml\")\n i = Tensor.fromYAMLfile(\"./data/conv-activations-a.yaml\")\n o = Tensor(rank_ids=[\"Q\"])\n\n w_r = w.getRoot()\n i_h = i.getRoot()\n o_q = o.getRoot()\n\n W = w_r.maxCoord() + 1\n I = i_h.maxCoord() + 1\n Q = I - W + 1\n\n for r, (w_val) in w_r:\n for q, (o_q_ref, i_val) in o_q << i_h.project(lambda h: h-r, (0, Q)):\n o_q_ref += w_val * i_val\n\n o_ref = Tensor.fromYAMLfile(\"./data/conv-output-a.yaml\")\n \n self.assertEqual(o, o_ref)\n\n def test_conv1d_is(self):\n \"\"\"Convolution 1d is\"\"\"\n\n w = Tensor.fromYAMLfile(\"./data/conv-weights-a.yaml\")\n i = Tensor.fromYAMLfile(\"./data/conv-activations-a.yaml\")\n o = Tensor(rank_ids=[\"Q\"])\n\n w_r = w.getRoot()\n i_h = i.getRoot()\n o_q = 
o.getRoot()\n\n W = w_r.maxCoord() + 1\n I = i_h.maxCoord() + 1\n Q = I - W + 1\n\n for h, (i_val) in i_h:\n for q, (o_q_ref, w_val) in o_q << w_r.project(lambda r: h-r, (0, Q)):\n o_q_ref += w_val * i_val\n\n o_ref = Tensor.fromYAMLfile(\"./data/conv-output-a.yaml\")\n \n self.assertEqual(o, o_ref)\n\n def test_conv1d_os(self):\n \"\"\"Convolution 1d os\"\"\"\n\n w = Tensor.fromYAMLfile(\"./data/conv-weights-a.yaml\")\n i = Tensor.fromYAMLfile(\"./data/conv-activations-a.yaml\")\n o = Tensor(rank_ids=[\"Q\"])\n\n w_r = w.getRoot()\n i_h = i.getRoot()\n o_q = o.getRoot()\n\n W = w_r.maxCoord() + 1\n I = i_h.maxCoord() + 1\n Q = I - W + 1\n\n output_shape = Fiber(coords=range(Q), initial=1)\n\n for q, (o_q_ref, _) in o_q << output_shape:\n for r, (w_val, i_val) in w_r.project(lambda r: q+r) & i_h:\n o_q_ref += w_val * i_val\n\n o_ref = Tensor.fromYAMLfile(\"./data/conv-output-os-a.yaml\")\n \n self.assertEqual(o, o_ref)\n \n \nif __name__ == '__main__':\n unittest.main()\n\n", "id": "8125907", "language": "Python", "matching_score": 4.372045516967773, "max_stars_count": 2, "path": "test/test_calculations.py" }, { "content": "import os\n\nfrom fibertree import Tensor, Fiber\n\n\nprint(\"----------------------------------------\")\nprint(\" Convolution 1-D Output Stationary\")\nprint(\"----------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\nw = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-weights-a.yaml\"))\ni = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-activations-a.yaml\"))\no = Tensor(rank_ids=[\"Q\"])\n\nw.print(\"W Tensor\")\ni.print(\"I Tensor\")\no.print(\"O Tensor\")\n\nw_r = w.getRoot()\ni_h = i.getRoot()\no_q = o.getRoot()\n\nW = w_r.maxCoord() + 1\nI = i_h.maxCoord() + 1\nQ = I - W + 1\n\nw_r.print(f\"W Tensor - R rank - size={W}\")\ni_h.print(f\"I Tensor - H rank - size={I}\")\no_q.print(f\"O Tensor - Q rank - size={Q}\")\n\nprint(\"Convolution\")\n\noutput_shape = Fiber(coords=range(Q), initial=1)\n\nfor q, (o_q_ref, _) in o_q << output_shape:\n print(f\"Processing output: ({q}, ({o_q_ref}))\")\n for r, (w_val, i_val) in w_r.project(lambda r: q+r) & i_h:\n print(f\" Processing weights and activations ({r}, ({w_val}, {i_val})\")\n o_q_ref += w_val * i_val\n\no.print(\"\\nOutput Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "593112", "language": "Python", "matching_score": 4.555087089538574, "max_stars_count": 2, "path": "examples/scripts/conv-1d/conv-1d-os.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"----------------------------------------\")\nprint(\" Convolution 1-D Weight Stationary\")\nprint(\"----------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\nw = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-weights-a.yaml\"))\ni = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-activations-a.yaml\"))\no = Tensor(rank_ids=[\"Q\"])\n\nw.print(\"W Tensor\")\ni.print(\"I Tensor\")\no.print(\"O Tensor\")\n\nw_r = w.getRoot()\ni_h = i.getRoot()\no_q = o.getRoot()\n\nW = w_r.maxCoord() + 1\nI = i_h.maxCoord() + 1\nQ = I - W + 1\n\nw_r.print(f\"W Tensor - R rank - size={W}\")\ni_h.print(f\"I Tensor - H rank - size={I}\")\no_q.print(f\"O Tensor - Q rank - size={Q}\")\n\nprint(\"Convolution\")\n\nfor r, (w_val) in w_r:\n print(f\"Processing weight: ({r}, ({w_val}))\")\n for q, (o_q_ref, i_val) in o_q << i_h.project(lambda h: h-r, (0, Q)):\n print(f\" Processing output ({q}, ({o_q_ref}, {i_val})\")\n o_q_ref += w_val * 
i_val\n\no.print(\"\\nOutput Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "7926414", "language": "Python", "matching_score": 4.694914817810059, "max_stars_count": 2, "path": "examples/scripts/conv-1d/conv-1d-ws.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"----------------------------------------\")\nprint(\" Convolution 1-D Input Stationary\")\nprint(\"----------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\nw = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-weights-a.yaml\"))\ni = Tensor.fromYAMLfile(os.path.join(data_dir, \"conv-activations-a.yaml\"))\no = Tensor(rank_ids=[\"Q\"])\n\nw.print(\"W Tensor\")\ni.print(\"I Tensor\")\no.print(\"O Tensor\")\n\nw_r = w.getRoot()\ni_h = i.getRoot()\no_q = o.getRoot()\n\nW = w_r.maxCoord() + 1\nI = i_h.maxCoord() + 1\nQ = I - W + 1\n\nw_r.print(\"W Tensor - R rank - size=%s\" % W)\ni_h.print(\"I Tensor - H rank - size=%s\" % I)\no_q.print(\"O Tensor - Q rank - size=%s\" % I)\n\nprint(\"Convolution\")\n\nfor h, (i_val) in i_h:\n print(f\"Processing input: ({h}, ({i_val}))\")\n for q, (o_q_ref, w_val) in o_q << w_r.project(lambda r: h-r, (0, Q)):\n print(f\" Processing output ({q}, ({o_q_ref}, {w_val})\")\n o_q_ref += w_val * i_val\n\no.print(\"\\nOutput Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "2752136", "language": "Python", "matching_score": 2.2244958877563477, "max_stars_count": 2, "path": "examples/scripts/conv-1d/conv-1d-is.py" }, { "content": "\nimport os\n\nfrom fibertree import Tensor\n\n#\n# Merge-based matrix-vector multiply\n#\n\nprint(\"--------------------------------------\")\nprint(\" Merge-based spMspV\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-a-t.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-b.yaml\"))\n\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_k = a.getRoot()\nb_k = b.getRoot()\nz_m = z.getRoot()\n\na_k.print(\"A Tensor - Rank K\")\nb_k.print(\"B Tensor - Rank K\")\nz_m.print(\"Z Tensor - Rank M\")\n\nab = a_k & b_k\nab_m = ab.swapRanks()\n\nfor m_coord, (z_ref, ab_k) in z_m << ab_m:\n for k_coord, (a_val, b_val) in ab_k:\n z_ref += a_val * b_val # reducing a scalar\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n", "id": "12551638", "language": "Python", "matching_score": 4.776478290557861, "max_stars_count": 2, "path": "examples/scripts/spMspV/merge-based.py" }, { "content": "\nimport os\n\nfrom fibertree import Tensor\n\n#\n# B-stationary matrix-vector multiply\n# i.e., outer-product style\n# or A-stationary column-major style\n\nprint(\"--------------------------------------\")\nprint(\" B-stationary spMspV\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-a-t.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-b.yaml\"))\n\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_k = a.getRoot()\nb_k = b.getRoot()\nz_m = z.getRoot()\n\na_k.print(\"A Tensor - Rank K\")\nb_k.print(\"B Tensor - Rank K\")\nz_m.print(\"Z Tensor - Rank M\")\n\nfor k_coord, (a_m, b_val) in (a_k & b_k):\n for m_coord, (z_ref, a_val) in (z_m << a_m):\n z_ref 
+= a_val * b_val # reducing a vector\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n", "id": "9807070", "language": "Python", "matching_score": 5.151057243347168, "max_stars_count": 2, "path": "examples/scripts/spMspV/b-stationary.py" }, { "content": "\nimport os\n\nfrom fibertree import Tensor\n\n#\n# C-stationary matrix-vector multiply\n# i.e., output-stationary or inner-product style\n# i.e., or A-stationary row major\n\nprint(\"--------------------------------------\")\nprint(\" C-stationary spMspV\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-a.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"spMspV-b.yaml\"))\n\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nb_k = b.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - Rank M\")\nb_k.print(\"B Tensor - Rank K\")\nz_m.print(\"Z Tensor - Rank M\")\n\nfor m_coord, (z_ref, a_k) in (z_m << a_m):\n for k_coord, (a_val, b_val) in (a_k & b_k):\n z_ref += a_val * b_val # reducing a scalar\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n", "id": "11000757", "language": "Python", "matching_score": 3.5798845291137695, "max_stars_count": 2, "path": "examples/scripts/spMspV/c-stationary.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\n#\n# To do a dot-product we need a \"row\" for an output.\n# So we represent the vectors as 2-D tensors\n#\n\nprint(\"--------------------------------------\")\nprint(\"Dot product on two single row matrices\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"dot-product-a.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"dot-product-b.yaml\"))\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nb_m = b.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - Rank M\")\nb_m.print(\"B Tensor - Rank M\")\nz_m.print(\"Z Tensor - Rank M\")\n\nfor m_coord, (z_ref, (a_k, b_k)) in z_m << (a_m & b_m):\n for k_coord, (a_val, b_val) in a_k & b_k:\n print(f\"Processing: [{k_coord} -> ( {z_ref}, ({a_val}, {b_val})]\")\n\n z_ref += a_val * b_val\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "8571490", "language": "Python", "matching_score": 5.018126010894775, "max_stars_count": 2, "path": "examples/scripts/basic/dot-product.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\n#\n# Do a sum of sums of the rows of two matrices\n#\n\nprint(\"--------------------------------------\")\nprint(\" Sum of sums of matrix rows\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"dot-product-a.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"dot-product-b.yaml\"))\n\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nb_m = b.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - Rank M\")\nb_m.print(\"B Tensor - Rank M\")\nz_m.print(\"Z Tensor - Rank M\")\n\nfor m_coord, (z_ref, (op, a_k, b_k)) in z_m << (a_m | b_m):\n for k_coord, (op, a_val, b_val) in a_k | b_k:\n print(f\"Processing: 
[{k_coord} -> ( {z_ref}, ({op}, {a_val}, {b_val}]\")\n\n z_ref += a_val + b_val\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "4511278", "language": "Python", "matching_score": 3.688189744949341, "max_stars_count": 2, "path": "examples/scripts/basic/matrix-sum.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"--------------------------------------\")\nprint(\" Elementwise multiply\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"elementwise-a.yaml\"))\nb = Tensor.fromYAMLfile(os.path.join(data_dir, \"elementwise-b.yaml\"))\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nb.print(\"B Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nb_m = b.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - M rank\")\nb_m.print(\"B Tensor - M rank\")\nz_m.print(\"Z Tensor - M rank\")\n\nprint(\"Z < A Fiber\")\n\nfor m_coord, (z_ref, (a_val, b_val)) in z_m << (a_m & b_m):\n print(f\"Processing: ({m_coord}, ({z_ref}, ({a_val}, {b_val})))\")\n\n\n z_ref += a_val * b_val\n\nz.print(\"\\nZ Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "8072419", "language": "Python", "matching_score": 3.487504482269287, "max_stars_count": 2, "path": "examples/scripts/basic/elementwise-multiply.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"--------------------------------------\")\nprint(\" Elementwise copy\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"elementwise-a.yaml\"))\nz = Tensor(rank_ids=[\"M\"])\n\na.print(\"A Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - Rank M\")\nz_m.print(\"Z Fiber - Rank M\")\n\nprint(\"Z < A Fiber\")\n\nfor m, (z_ref, a_val) in z_m << a_m:\n print(f\"Processing: ({m}, ({z_ref}, {a_val}))\")\n\n z_ref += a_val\n\nz.print(\"\\nZ Fiber - Rank M\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "9353178", "language": "Python", "matching_score": 3.5437376499176025, "max_stars_count": 2, "path": "examples/scripts/basic/elementwise-copy.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"--------------------------------------\")\nprint(\" Matrix copy\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = \"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"matrix-a.yaml\"))\nz = Tensor(rank_ids=[\"M\", \"K\"])\n\na.print(\"A Tensor\")\nz.print(\"Z Tensor\")\n\na_m = a.getRoot()\nz_m = z.getRoot()\n\na_m.print(\"A Tensor - M rank\")\nz_m.print(\"Z Tensor - M rank\")\n\nfor m, (z_k, a_k) in z_m << a_m:\n print(f\"Processing: Coord: {m}\")\n print(f\" z_k: {z_k}\")\n print(f\" a_k: {a_k}\")\n \n for k, (z_ref, a_val) in z_k << a_k:\n z_ref += a_val\n\nprint(\"\")\na.print(\"A Tensor\")\nz.print(\"Z Tensor\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "2883227", "language": "Python", "matching_score": 3.103578567504883, "max_stars_count": 2, "path": "examples/scripts/basic/matrix-copy.py" }, { "content": "import os\n\nfrom fibertree import Tensor\n\nprint(\"--------------------------------------\")\nprint(\" Matrix Traverse\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\ndata_dir = 
\"../../data\"\n\na = Tensor.fromYAMLfile(os.path.join(data_dir, \"matrix-a.yaml\"))\n\na.print(\"Matrix\")\n\na_m = a.getRoot()\n\na_m.print(\"Matrix - M Fiber\")\n\nfor m, (a_k) in a_m:\n print(f\"({m}, {a_k})\")\n for k, (a_val) in a_k:\n print(f\"Processing: ({k}, {a_val})\")\n\nprint(\"\")\nprint(\"--------------------------------------\")\nprint(\"\")\n\n", "id": "5929958", "language": "Python", "matching_score": 0.2227470725774765, "max_stars_count": 2, "path": "examples/scripts/basic/matrix-traverse.py" }, { "content": "#!/usr/bin/python\n\nclass NilNode(object):\n \"\"\"\n The nil class is specifically for balancing a tree by giving all traditional leaf noes tw children that are null\n and waiting to be filled\n \"\"\"\n def __init__(self):\n self.red = False\n self.size = 0\n self.data = None\n\nNIL = NilNode() # Nil is the sentinel value for nodes\n\n\nclass RBNode(object):\n \"\"\"\n Class for implementing the nodes that the tree will use\n For self.red:\n red == True\n black == False\n If the node is a leaf it will either\n \"\"\"\n def __init__(self,data):\n self.red = True\n self.parent = None\n self.data = data\n self.left = NIL\n self.right = NIL\n self.size = 1\n\n # TODO: current leaving out the bit that says whether you are red or black\n def getSize(self):\n base = 3 # parent, left, right\n data_size = len(self.data) - 1 \n return base + data_size\nclass RedBlackTree(object):\n \"\"\"\n Class for implementing a standard red-black tree\n \"\"\"\n def __init__(self):\n self.root = None\n self.size = 0\n self.lastNodeAccessed = None\n\n # add a new node with data \n # also return number of reads, writes\n def add(self,data,cache=None,name=None,curr = None):\n \"\"\"\n :param data: an int, float, or any other comparable value\n :param curr:\n \"\"\"\n # print(\"\\t add to RB, {}\".format(data))\n self.size += 1\n new_node = RBNode(data)\n # Base Case - Nothing in the tree\n if self.root == None:\n new_node.red = True\n self.root = new_node\n return 1, 1, new_node\n \n # Search to find the node's correct place\n currentNode = self.root\n num_reads = 0\n num_writes = 0\n current_dram = None\n if cache is not None:\n current_dram = cache.miss_count\n while currentNode != NIL:\n # increment size along root-to-leaf path\n currentNode.size += 1\n # print(\"current node data {}, size {}\".format(currentNode.data, currentNode.size))\n potentialParent = currentNode\n num_reads += 1\n if cache is not None:\n key = name + '_coordToHandle_' + str(currentNode.data[0])\n res = cache.get(key) # check if its in the cache during the search\n print(\"\\tin add, key {}, res {}\".format(key, res))\n if new_node.data[0] == currentNode.data[0]:\n # go back up the tree and fix sizes\n temp = currentNode\n while temp is not None and temp != NIL:\n temp.size -= 1\n temp = temp.parent\n print(\"\\t\\tfound {}\".format(new_node.data[0]))\n return num_reads, 0, currentNode\n if new_node.data[0] < currentNode.data[0]:\n currentNode = currentNode.left\n else:\n currentNode = currentNode.right\n\n # Assign parents and siblings to the new node\n new_node.parent = potentialParent\n if cache is not None:\n key = name + '_coordToHandle_' + str(new_node.parent.data[0]) \n res = cache.get(key) # check if its in the cache during the search\n print(\"\\tin add, key {}, res {}\".format(key, res))\n\n if new_node.data[0] < new_node.parent.data[0]:\n new_node.parent.left = new_node\n else:\n new_node.parent.right = new_node\n if cache is not None:\n key = name + '_coordToHandle_' + str(data)\n res = 
cache.get(key)\n cache[key] = new_node\n assert(res == None)\n \n # TODO: get num writes from fix tree after add\n num_writes = self.fix_tree_after_add(new_node,cache,name)\n num_writes += 1\n # print(\"\\tinsert {}, reads {}, writes {}\".format(data, num_reads, num_writes))\n assert(self.root.red == False)\n if cache is not None:\n assert cache.miss_count != current_dram\n return num_reads, num_writes, new_node # return handle to this node that was just added\n\n # search on coord and return the node that contains the elt\n # TODO: make sure that this is ok if coord is not present\n def contains(self,data, curr=None):\n \"\"\"\n :return:\n \"\"\"\n if curr == None:\n curr = self.root\n prev_parent = None\n while curr != NIL and data != curr.data[0]:\n # print(\"searching for {}, curr {}\".format(data, curr.data[0]))\n prev_parent = curr\n if data < curr.data[0]:\n\n curr = curr.left\n else:\n curr = curr.right\n if isinstance(curr, NilNode):\n curr = prev_parent\n return curr\n\n def getRank(self,data):\n \"\"\"\n\n :return:\n \"\"\"\n curr = self.root\n result = 0\n prev_added = 0\n print(\"\\tin getRank of {}, curr {}\".format(data,curr.data))\n while curr != NIL and data != curr.data[0]:\n printf(\"find rank of {}, curr {}, curr size {}\".format(data, curr.data[0], curr.size))\n if data < curr.data[0]:\n result -= curr.size\n curr = curr.left\n else:\n result += curr.size\n prev_added = curr.size\n curr = curr.right\n\n print(\"\\trank {}\".format(result))\n return result\n\n\n def fix_tree_after_add(self,new_node,cache=None,name=None):\n \"\"\"\n This method is meant to check and rebalnce a tree back to satisfying all of the red-black properties\n :return: num additional reads / writes (assume we have new_node and new_node.parent)\n modifies tree\n \"\"\"\n # print(\"new_node parent {}\".format(new_node.parent.red))\n # print(\"new node data {}, parent {}, root data {}\".format(new_node.data, new_node.parent.data, self.root.data))\n # print(\"\\tin fix tree after add\")\n num_writes = 0\n while new_node != self.root and new_node.parent.red == True and new_node.parent.parent is not None:\n if cache is not None:\n key1 = name + \"_coordToHandle_\" + str(new_node.parent.data[0])\n cache.get(key1)\n key2 = name + \"_coordToHandle_\" + str(new_node.parent.parent.data[0])\n cache.get(key2)\n\n # if you are in the left subtree\n if new_node.parent == new_node.parent.parent.left:\n uncle = new_node.parent.parent.right\n if cache is not None:\n key = name + \"_coordToHandle_\" + str(uncle.data[0])\n cache.get(key)\n if uncle.red:\n # This is Case 1\n new_node.parent.red = False\n uncle.red = False\n new_node.parent.parent.red = True\n new_node = new_node.parent.parent\n num_writes += 4\n print(\"\\t\\tcase 1\")\n else:\n if new_node == new_node.parent.right:\n # This is Case 2\n new_node = new_node.parent\n self.left_rotate(new_node)\n # print(\"\\t\\tcase 2\")\n # This is Case 3\n new_node.parent.red = False\n new_node.parent.parent.red = True\n self.right_rotate(new_node.parent.parent)\n num_writes += 3\n # print(\"\\t\\tcase 3\")\n else:\n uncle = new_node.parent.parent.left\n if cache is not None and uncle.data is not None:\n key = name + \"_coordToHandle_\" + str(uncle.data[0])\n cache.get(key)\n if uncle.red:\n # Case 1\n new_node.parent.red = False\n uncle.red = False\n new_node.parent.parent.red = True\n new_node = new_node.parent.parent\n num_writes += 4\n # print(\"\\t\\tcase 1b\")\n else:\n if new_node == new_node.parent.left:\n # Case 2\n new_node = new_node.parent\n # 
print(\"second right rotate\")\n self.right_rotate(new_node)\n # print(\"\\t\\tcase 2b\")\n # Case 3\n new_node.parent.red = False\n new_node.parent.parent.red = True\n # left rotate writes to input and one other node\n self.left_rotate(new_node.parent.parent)\n num_writes += 3\n \n # print(\"\\t\\tcase 3b\")\n # print(\"new node {}\".format(new_node.data))\n self.root.red = False\n return num_writes\n\n def delete(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n def left_rotate(self,new_node):\n \"\"\"\n\n :return:\n \"\"\"\n # print(\"Rotating left!\")\n sibling = new_node.right\n new_node.right = sibling.left\n\n # print(\"new node {}, sibling {}\".format(new_node.data, sibling.data))\n # Turn sibling's left subtree into node's right subtree\n if sibling.left is not None:\n sibling.left.parent = new_node\n sibling.parent = new_node.parent\n if new_node.parent == None:\n self.root = sibling\n else:\n if new_node == new_node.parent.left:\n new_node.parent.left = sibling\n else:\n new_node.parent.right = sibling\n # from clrs\n sibling.size = new_node.size\n new_node.size = new_node.left.size + new_node.right.size + 1\n\n sibling.left = new_node\n new_node.parent = sibling\n\n\n def right_rotate(self,new_node):\n \"\"\"\n\n :return:\n \"\"\"\n # print(\"Rotating right!\")\n sibling = new_node.left\n new_node.left = sibling.right\n # print(\"new_node data {}, sibling {}\".format(new_node.data, sibling.data))\n self.inorder(new_node.parent)\n print(\"\\n\")\n # Turn sibling's left subtree into node's right subtree\n if sibling.right is not None:\n sibling.right.parent = new_node\n sibling.parent = new_node.parent\n if new_node.parent == None:\n self.root = sibling\n else:\n if new_node == new_node.parent.right:\n new_node.parent.right = sibling\n else:\n new_node.parent.left = sibling\n sibling.right = new_node\n new_node.parent = sibling\n self.inorder(new_node.parent)\n # from clrs\n new_node.size = sibling.size\n sibling.size = sibling.left.size + sibling.right.size + 1\n\n def inorder(self, root):\n # Base Case - Nothing in the tree\n if root == None or root == NIL:\n print(\"NIL\")\n return\n \n print(\"data {}, size {}, color {}\".format(root.data, root.size, root.red))\n self.inorder(root.left)\n self.inorder(root.right)\n return\n\n def get_all_nodes(self):\n \"\"\"\n\n :return:\n \"\"\"\n pass\n def is_red(self):\n \"\"\"\n This is the class that usually decides that a node is wither red or black, some implementations take the ecurrtra\n bit and will implement an is_black method for additional clarity.\n Generally, True == Red and False == Black\n\n :return:\n \"\"\"\n return self.root is not None and self.root.red == 1;\n def is_black(self):\n \"\"\"\n Note that this method is not necessary as some implementations only check is the is_red class method is True or False\n :return:\n True if the node is black or is a leaf\n \"\"\"\n return self.root is not None and self.root.black == 1;\n\n # min-val is down the left spine if it exists\n def min_val(self, root, cache, name):\n p = root\n if p == NIL:\n return p\n node = root.left\n num_reads = 0\n\n key = name + \"_coordToHandle_\" + str(p.data[0])\n res = cache.get(key)\n cache[key] = p\n while node != NIL:\n key = name + \"_coordToHandle_\" + str(node.data[0])\n res = cache.get(key)\n cache[key] = node\n\n p = node\n node = node.left\n num_reads += 1\n # print(\"\\tnum reads in min_val {}, current node {}\".format(num_reads, node.left))\n return num_reads, p\n\n # given a node, find its successor \n def get_successor(self, root, 
cache, name):\n # if successor is in the right subtree\n if root == None or root == NIL:\n return 0, None\n # if there is a right subtree, find the min value in it\n if root.right is not None and root.right != NIL:\n # print(\"\\t\\t going right\")\n return self.min_val(root.right, cache, name)\n \n # else successor is higher up in the tree\n p = root.parent\n num_reads = 0\n while p is not None and p != NIL:\n if root != p.right:\n break\n root = p\n p = p.parent\n if p is not None and p != NIL:\n # look for the node in the cache\n key = name + \"_coordToHandle_\" + str(p.data[0])\n res = cache.get(key)\n cache[key] = p\n num_reads += 1\n # print(\"\\tget_successor: num reads going up tree {}, node {}\".format(num_reads, p))\n return num_reads, p\n\nif __name__ == \"__main__\":\n tree = RedBlackTree()\n tree.add(1)\n # print(tree.root)\n tree.inorder(tree.root)\n tree.add(3)\n tree.inorder(tree.root)\n tree.add(4)\n tree.inorder(tree.root)\n tree.add(8)\n tree.inorder(tree.root)\n tree.add(5)\n # tree.add(6)\n # tree.add(2)\n tree.inorder(tree.root)\n", "id": "6259910", "language": "Python", "matching_score": 3.1989874839782715, "max_stars_count": 2, "path": "fibertree/codec/formats/redBlack.py" }, { "content": "from .compression_format import CompressionFormat\nfrom .redBlack import *\n\nclass RBTree(CompressionFormat):\n def __init__(self):\n CompressionFormat.__init__(self)\n\n self.curHandle = None\n \n # given a node as input, compute the height of that node\n @staticmethod \n def getHeight(root):\n if root == None or root == NIL: return 0\n left_height = RBTree.getHeight(root.left)\n right_height = RBTree.getHeight(root.right)\n if left_height > right_height:\n return left_height + 1\n else: return right_height + 1\n\n # TODO: full binary tree serialization\n # preorder serializiation\n @staticmethod\n def serializeTree(root, output, depth, ind, empty, height):\n if depth == height:\n # if root == None: \n return\n # otherwise, depth < height\n if root == NIL:\n output.append(empty)\n RBTree.serializeTree(NIL, output, depth + 1, ind, empty, height)\n RBTree.serializeTree(NIL, output, depth + 1, ind, empty, height)\n return\n \n # write data at node into a string\n strout = ''\n if isinstance(root.data, int):\n # strout = str(root.data)\n output.append(root.data)\n else:\n # strout = str(root.data[ind])\n # strout = ','.join(str(v) for v in root.data)\n if isinstance(root.data[ind], tuple):\n strout = \"({})\".format(','.join(str(v) for v in root.data[ind]))\n output.append(strout)\n else:\n output.append(root.data[ind])\n \n RBTree.serializeTree(root.left, output, depth + 1, ind, empty, height)\n RBTree.serializeTree(root.right, output, depth + 1, ind, empty, height)\n\n\n # TODO: merge with getPayloads?\n # preorder serializiation\n @staticmethod\n def treeToString(root, depth, height, strout):\n # otherwise, depth < height\n if root == NIL:\n return\n # return\n # write data at node into a string\n \n RBTree.treeToString(root.left, depth + 1, height, strout)\n strout.append(root.data)\n RBTree.treeToString(root.right, depth + 1, height, strout)\n return\n\n # populate output with payloads\n def getPayloadsHelper(self, node, depth, height, output):\n if node == None or node == NIL:\n return\n self.getPayloadsHelper(node.left, depth +1, height, output)\n output.append(node.data[1])\n self.getPayloadsHelper(node.right, depth + 1, height, output)\n return\n\n # inorder traversal of the tree to serialize payloads\n def getPayloads(self):\n output = list()\n\n height = 
RBTree.getHeight(self.tree.root)\n self.getPayloadsHelper(self.tree.root, 0, height, output)\n return output\n\n # encode fiber into T format\n def encodeFiber(self, a, dim_len, codec, depth, ranks, output, output_tensor, shape=None):\n # print(\"encode fiber for T\")\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n\n self.tree = RedBlackTree()\n\n # init vars\n fiber_occupancy = 0\n\n cumulative_occupancy = codec.get_start_occ(depth)\n occ_list = list()\n occ_list.append(cumulative_occupancy)\n prev_nz = 0\n \n # for each nonzero in fiber\n for ind, (val) in a:\n # internal levels encode explicit coords and corresponding offset / fiber ptr\n if depth < len(ranks) - 1:\n # keep track of actual occupancy (nnz in this fiber)\n fiber, child_occupancy = codec.encode(depth + 1, val, ranks, output, output_tensor, shape)\n \n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n # encode (coord, payload)\n if codec.fmts[depth + 1].encodeUpperPayload():\n if codec.cumulative_payloads[depth]:\n self.tree.add((ind, cumulative_occupancy, ind))\n else:\n self.tree.add((ind, child_occupancy, ind)) \n else: # if a leaf, encode (coord, value)\n self.tree.add(ind, ind)\n else:\n self.tree.add((ind, val.value))\n \n # search for it in the tree for verification\n # assert ind == self.tree.contains(ind).data[0]\n\n fiber_occupancy = fiber_occupancy + 1\n \n # serialize tree\n # null value for empty nodes\n empty = -1\n\n tree = self.tree\n result = list()\n height = RBTree.getHeight(tree.root)\n size_of_tree = 2**height - 1\n \n # serialize only coords\n if tree.root == None or isinstance(tree.root.data, int):\n RBTree.serializeTree(tree.root, result, 0, 0, empty, height)\n assert len(result) == size_of_tree\n # add to coords list\n output[coords_key].extend(result)\n else: # struct of arrays in yaml, write two serializations\n RBTree.serializeTree(tree.root, result, 0, 0, empty, height)\n # add to coords list\n output[coords_key].extend(result)\n\n # payloads\n result = list()\n\n RBTree.serializeTree(tree.root, result, 0, 1, empty, height)\n assert len(result) == size_of_tree\n \n # add to coords list\n output[payloads_key].extend(result)\n # explicit payloads for next level\n # return size of (serialized) tree representation\n return len(result)\n\n # get handle to the corresponding node\n def coordToHandle(self, coord):\n if self.tree.root == None:\n return None\n return self.tree.contains(coord)\n\n # slice on coordinates\n def setupSlice(self, base = 0, bound = None, max_num = None):\n self.num_ret_so_far = 0\n self.num_to_ret = max_num\n self.base = base\n self.bound = bound\n res = self.coordToHandle(base)\n if res != None and not isinstance(res, NilNode):\n key = self.name + \"_coordToHandle_\" + str(res.data[0])\n # map coord to node\n self.cache.get(key)\n self.cache[key] = res\n self.curHandle = res.data[0]\n \n # iterator\n def nextInSlice(self):\n # self.printFiber()\n if self.num_to_ret != None and self.num_to_ret < self.num_ret_so_far:\n return None\n \n to_ret = self.curHandle # keep track of current handle\n self.num_ret_so_far += 1\n \n if to_ret == None or isinstance(to_ret, NilNode):\n return None\n\n key = self.name + \"_coordToHandle_\" + str(self.curHandle)\n print(key)\n node_at_cur_handle = self.cache.get(key)\n assert(node_at_cur_handle != None)\n # if you know where you 
are in the tree, you know where the successor is \n # without having to read if you have to look right\n num_reads, node_at_next_handle = self.tree.get_successor(node_at_cur_handle, self.cache, self.name)\n\n if node_at_next_handle == None:\n self.curHandle = None\n \n elif not isinstance(node_at_next_handle, NilNode):\n self.curHandle = node_at_next_handle.data[0]\n key = self.name + \"_coordToHandle_\" + str(self.curHandle)\n self.cache.get(key)\n self.cache[key] = node_at_next_handle\n print(\"\\t{} nextInSlice, current handle {}, to ret {}\".format(self.name, self.curHandle, to_ret))\n if self.curHandle != None and to_ret != None:\n assert self.curHandle != to_ret # make sure you advance\n # self.printFiber()\n\n return to_ret\n \n # handle to coord takes in a handle which is a node\n def handleToCoord(self, handle):\n if handle == None:\n return None\n print(\"\\t\\tin tree {} handleToCoord: handle {}, curHandle {}\".format(self.name, handle, self.curHandle))\n\n self.stats[self.coords_read_key] += 1\n return handle\n\n # given a handle (tree node ptr), update the payload there\n def handleToPayload(self, handle):\n if handle == None:\n return None\n if self.count_payload_reads:\n self.stats[self.payloads_read_key] += 1\n key = self.name + \"_coordToHandle_\" + str(handle)\n self.cache.get(key)\n \n node = self.cache[key]\n assert(node != None)\n # print(\"{} handleToPayload: node {}, handle {}\".format(self.name, node, handle))\n return node.data[-1]\n \n def payloadToValue(self, payload):\n # print(\"\\t{}: payloadToValue in T, payloads {}, payload {}\".format(self.name, self.getPayloads(), payload))\n return payload\n\n def payloadToFiberHandle(self, handle):\n # print(\"\\tpayload to fiber handle in T, ret {}\".format(handle))\n return handle\n\n # return handle to inserted elt\n # make the handle the coord\n def insertElement(self, coord):\n if coord == None:\n return None\n # print(\"{} insertElement {}\".format(self.name, coord))\n assert self.cache is not None\n num_reads, num_writes, handle = self.tree.add([coord, 0], cache=self.cache, name=self.name)\n \n # handle must be something that can index into a list, we want the i-th\n assert isinstance(handle, RBNode)\n self.stats[self.coords_read_key] += num_reads\n self.stats[self.coords_write_key] += num_writes\n\n # handle needs to be indexable\n key = self.name + \"_coordToHandle_\" + str(coord)\n self.cache.get(key)\n self.cache[key] = handle\n print(\"{} tree insertElt {}, misses {}\".format(self.name, coord, self.cache.miss_count))\n print(self.cache)\n return coord # self.curHandle\n \n # return a handle to the updated payload\n def updatePayload(self, handle, payload):\n print(\"{} updatePayload: handle {}, payload {}\".format(self.name, handle, payload))\n if handle == None or handle == NIL:\n return None\n # print(\"update payload:: handle {}, payload {}\".format(handle, payload))\n # assert handle is self.curHandle\n key = self.name + \"_coordToHandle_\" + str(handle)\n node_at_handle = self.cache.get(key)\n if node_at_handle != None:\n assert node_at_handle.data[0] == handle\n node_at_handle.data[1] = payload\n \n self.stats[self.payloads_write_key] += 1\n return handle\n\n # updated fiber handle returns (size of tree, internal fiber object)\n def getUpdatedFiberHandle(self):\n return self.getSize()\n\n def printFiber(self):\n output = list()\n RBTree.treeToString(self.tree.root, 0, RBTree.getHeight(self.tree.root), output)\n print(\"{} :: {}\".format(self.name, output))\n\n # get size of the binary tree 
representation\n def getSize(self):\n height = RBTree.getHeight(self.tree.root)\n num_nodes = 2**height -1 \n node_size = 0\n if self.tree.root != None:\n self.tree.root.getSize()\n \n # print(\"tree get size, height {}, num nodes {}, node size {}\".format(height, num_nodes, node_size))\n return num_nodes * node_size\n # encode coord explicitly\n @staticmethod\n def encodeCoord(prev_ind, ind):\n return [ind]\n\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n return [payload]\n\n # explicit coords\n @staticmethod\n def encodeCoords():\n return True\n\n # explicit prev payloads\n @staticmethod\n def encodeUpperPayload():\n return True\n", "id": "11567114", "language": "Python", "matching_score": 5.544440746307373, "max_stars_count": 2, "path": "fibertree/codec/formats/balanced_tree.py" }, { "content": "\"\"\"\nCompressionFormat class - can be instantiated to represent a fiber\nmostly just here to be inherited\n\"\"\"\nimport sys\nclass CompressionFormat:\n def __init__(self, name = None):\n self.coords = list()\n self.payloads = list()\n self.occupancies = list()\n self.cur_handle = -1\n self.idx_in_rank = None\n self.shape = None\n self.words_in_line = 4\n\n # stats \n self.stats = dict()\n self.coords_write_key = \"num_coords_writes\"\n self.stats[self.coords_write_key] = 0\n self.payloads_write_key = \"num_payloads_writes\"\n self.stats[self.payloads_write_key] = 0\n self.coords_read_key = \"num_coords_reads\"\n self.stats[self.coords_read_key] = 0\n self.payloads_read_key = \"num_payloads_reads\"\n self.stats[self.payloads_read_key] = 0\n self.count_payload_reads = True\n self.count_payload_writes = True \n\n self.cache = None\n self.next_fmt = None \n \n # API Methods\n def payloadToFiberHandle(self, payload):\n print(\"\\t{} payloadToFiberHandle:: ret {}\".format(self.name, payload))\n return payload\n\n # default payload to value\n def payloadToValue(self, payload):\n print(\"\\t{}: payloadToValue, payload {}, len payloads {}\".format(self.name, payload, len(self.payloads)))\n # self.printFiber()\n if payload >= len(self.payloads):\n return None\n self.stats[self.payloads_read_key] += 1\n \n print(\"DRAM {} payloadToValue {}, miss count before {}\".format(self.name, payload, self.cache.miss_count))\n # TODO: cache line here\n # key = self.name + \"_payloadToValue_\" + str(payload)\n key = self.name + \"_handleToPayload_\" + str(payload)\n cached_val = self.cache.get(key) # try to access it\n self.cache[key] = self.payloads[payload] # put it in the cache \n print(\"DRAM {} payloadToValue {}, miss count after {}\".format(self.name, payload, self.cache.miss_count))\n print(self.cache)\n\n # read in the cache line\n end_of_range = self.round_up(max(1, payload), self.words_in_line)\n # end_of_range = min(end_of_line, len(self.payloads)) \n for i in range(payload, end_of_range):\n key = self.name + \"_handleToPayload_\" + str(i)\n if i < len(self.payloads):\n self.cache[key] = self.payloads[i]\n else:\n self.cache[key] = 0 # end of cache line, so read it as empty\n return self.payloads[payload]\n # helpers\n # have to overwrite this in subclasses, depends on the format\n def getSliceMaxLength(self):\n return None\n\n def setName(self, name):\n self.name = name\n\n def round_up(self, n, multiple):\n if n % multiple == 0:\n n += 1\n return ((n + multiple - 1) // multiple) * multiple\n # main functions\n # given a handle, return a coord at that handle\n # if handle is out of range, return None\n def handleToCoord(self, handle):\n # print(\"\\t{} handleToCoord: handle {}, coords 
{}\".format(self.name, handle, self.coords))\n if handle == None or handle >= len(self.coords):\n return None\n\t\n key = self.name + \"_handleToCoord_\" + str(handle)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[handle]\n # read in a line\n end_of_line = self.round_up(handle, self.words_in_line)\n print(\"\\thandle {}, end of line {}\".format(handle, end_of_line))\n end_of_range = min(end_of_line, len(self.coords))\n for i in range(handle, end_of_range):\n key = self.name + \"_handleToCoord_\" + str(i)\n self.cache[key] = self.coords[i]\n print(\"\\t\\t{}, misses {}\".format(key, self.cache.miss_count)) \n print(self.cache)\n # coords read charge\n self.stats[self.coords_read_key] += 1\n \n return self.coords[handle]\n\n # given a handle, return payload there if in range, otherwise None\n def handleToPayload(self, handle):\n if handle == None or handle >= len(self.payloads):\n return None\n # do stats counting in handleToPayload because it later can go to\n # -> payloadToValue\n # -> payloadToFiberHandle\n if self.count_payload_reads:\n self.stats[self.payloads_read_key] += 1\n print(\"\\t{} handleToPayload {}\".format(self.name, handle))\n return handle # switch to just passing around the ptr\n\n # slice on coordinates\n def setupSlice(self, base = 0, bound = None, max_num = None):\n self.num_ret_so_far = 0\n self.num_to_ret = max_num\n self.base = base\n self.bound = bound\n # print(\"setupSlice for {}, base = {}, bound = {}, max_num = {}\".format(self.name, base, bound, max_num))\n self.coords_handle = self.coordToHandle(base)\n # self.printFiber()\n \n # get next handle during iteration through slice\n def nextInSlice(self):\n # print(\"\\t{} in next: handle {}, slice max {}, num to ret {}, ret so far {}\".format(self.name, self.coords_handle, self.getSliceMaxLength(), self.num_to_ret, self.num_ret_so_far))\n if self.coords_handle == None or self.coords_handle >= self.getSliceMaxLength():\n return None\n if self.num_to_ret != None and self.num_to_ret < self.num_ret_so_far:\n return None\n # for formats that don't need to touch memory to get next\n to_ret = self.coords_handle\n self.num_ret_so_far += 1\n self.coords_handle += 1\n # print(\"\\t\\thandle to ret: {}\".format(to_ret))\n # don't need to increment accesses for moving the handle forward\n return to_ret\n\n # these need to be filled in in subclasses\n # TODO: python syntax to require that you have to fill this in or assert(false)\n def coordToHandle(self, coord):\n assert(False)\n\n def insertElement(self, coord):\n assert(False)\n\n def updatePayload(self, handle, payload):\n return handle\n\n def getUpdatedFiberHandle(self):\n return 0 # TODO: make this an actual indexable fiber handle to you\n # return self\n\n def getPayloads(self):\n return self.payloads\n\n # get size of the representation in words\n # needs to be implemented by subclasses\n def getSize(self):\n assert(False)\n\n # at the end of execution, dump stats in YAML\n # add to the stats dict\n def dumpStats(self, stats_dict):\n self.stats[\"size\"] = self.getSize() \n # print(\"dump stats {}\".format(self.name))\n stats_dict[self.name] = self.stats\n\n def getSize(self):\n assert(False)\n #### class methods\n # e.g. 
U, C\n @staticmethod \n def getName(self):\n return self.name\n\n @staticmethod \n # current_fiber = HFA fiber\n def encodeCoord(prev_ind, ind):\n return []\n\n # coord\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n return None\n\n # pad end of coordinates if necessary\n @staticmethod\n def endCoords(num_to_pad):\n return [] \n\n # pad end of payloads if necessary\n @staticmethod\n def endPayloads(num_to_pad):\n return []\n\n @staticmethod\n def startOccupancy():\n return 0\n\n # todo: maybe eventually combine the encode and decode like serialization\n", "id": "5505050", "language": "Python", "matching_score": 3.62304949760437, "max_stars_count": 2, "path": "fibertree/codec/formats/compression_format.py" }, { "content": "from .compression_format import CompressionFormat\nimport sys \n# hash table per fiber\n\nclass HashTable(CompressionFormat):\n def __init__(self):\n self.name = \"Hf\"\n # if the hashtable length is fixed, don't need to write it as a payload\n CompressionFormat.__init__(self)\n self.hashtable_len = 8\n self.max_density = .8\n self.ht = [None] * self.hashtable_len\n self.ptrs = list()\n self.coords = list()\n self.payloads = list()\n \n self.ht_read_key = \"num_ht_reads\"\n self.ht_write_key = \"num_ht_writes\"\n self.ptrs_read_key = \"num_ptrs_reads\"\n self.ptrs_write_key = \"num_ptrs_writes\"\n self.stats[self.ht_read_key] = 0 \n self.stats[self.ht_write_key] = 0\n self.stats[self.ptrs_read_key] = 0\n self.stats[self.ptrs_write_key] = 0\n\n @staticmethod\n def helper_add(output, key, to_add):\n if key in output:\n output[key].extend(to_add)\n else:\n output[key] = to_add\n\n # encode fiber in H format\n def encodeFiber(self, a, dim_len, codec, depth, ranks, output, output_tensor, shape=None):\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth) \n \n # init vars\n fiber_occupancy = 0\n cumulative_occupancy = 0\n if depth < len(ranks) - 1 and codec.format_descriptor[depth + 1] == \"Hf\":\n cumulative_occupancy = (0, 0)\n occ_list = list()\n num_coords = len(a.getCoords())\n\n # init scratchpads\n # TODO: doubling\n # encode nonzeroes\n for ind, (val) in a:\n payload_to_add = None\n # add to payloads\n # if at the leaves, add the actual payloads\n if depth == len(ranks) - 1:\n payload_to_add = val.value\n # if in internal levels, also get the fiber\n else: \n fiber, child_occupancy = codec.encode(depth + 1, val, ranks, output, output_tensor)\n \n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n \n if codec.fmts[depth + 1].encodeUpperPayload():\n payload_to_add = cumulative_occupancy\n else:\n payload_to_add = fiber_occupancy\n\n # add to HT\n self.insertElement(ind, payload=payload_to_add, count_stats=False)\n\n fiber_occupancy = fiber_occupancy + 1\n\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n\n output[coords_key].extend(self.coords)\n output[payloads_key].extend(self.payloads)\n\n # linearize output dict\n # coords in the format of two lists: \n # 1. like the segment table in CSR, that points to the start of what was in that bucket\n # 2. 
linearization of buckets in contiguous order\n\n total_size = self.hashtable_len + len(self.ptrs) + len(self.coords) + len(self.payloads)\n return [fiber_occupancy, self.hashtable_len]\n\n # TODO: fillin\n def getSize(self):\n return self.hashtable_len + len(self.ptrs) + len(self.coords) + len(self.payloads)\n\n def printFiber(self):\n print(\"{} :: ht: {}, ptrs {}, coords {}, payloads {}\".format(self.name, self.ht, self.ptrs, self.coords, self.payloads))\n\n # swoop API functions\n # given a coord, give a handle (same for coords, payloads) to it\n # TODO: if coord doesn't exist, return None? return next?\n def coordToHandle(self, coord):\n # encode coord\n hash_key = self.get_hash_key(coord)\n bin_head = self.ht[hash_key]\n\n # assert bin_head != None\n # look for cached\n key = self.name + '_HT_' + str(hash_key)\n cached_val = self.cache.get(key)\n self.cache[key] = bin_head\n self.stats[self.ht_read_key] += 1\n print(\"\\t{} coordToHandle: coord {}, hash_key {}\".format(self.name, coord, hash_key))\n # search this bucket\n while bin_head != None:\n self.stats[self.coords_read_key] += 1 \n # print(\"\\tbin head {}\".format(bin_head))\n key = self.name + '_IdxToCoords_' + str(bin_head)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[bin_head]\n\n # if found coord, return the pointer to it\n if self.coords[bin_head] == coord:\n return bin_head\n # advance pointer in bucket\n\n key = self.name + '_IdxToPtrs_' + str(bin_head)\n cached_val = self.cache.get(key)\n self.cache[key] = self.ptrs[bin_head]\n self.stats[self.ptrs_read_key] += 1\n\n bin_head = self.ptrs[bin_head]\n return None # not found\n \n # swoop API functions\n # given a coord, give a handle (same for coords, payloads) to it\n # TODO: if coord doesn't exist, return None? 
return next?\n def coordToHandleNoStats(self, coord):\n # encode coord\n hash_key = self.get_hash_key(coord)\n bin_head = self.ht[hash_key]\n\n assert bin_head != None\n # look for cached\n print(\"\\t{} coordToHandle: coord {}, hash_key {}\".format(self.name, coord, hash_key))\n # search this bucket\n while bin_head != None:\n if self.coords[bin_head] == coord:\n return bin_head\n # advance pointer in bucket\n bin_head = self.ptrs[bin_head]\n return None # not found\n \n # must return elts in sorted order on coord\n def setupSlice(self, base = 0, bound = None, max_num = None):\n super().setupSlice(base, bound, max_num)\n self.cur_handle = self.coordToHandle(base)\n \n if self.cur_handle == None: # not found\n val_at_min_handle = sys.maxsize\n min_handle = None\n\n # do a search through the coords to find the min greater than base\n for i in range(0, len(self.coords)):\n # look in the cache for it\n key = self.name + '_IdxToCoords_' + str(i)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[i]\n\n self.stats[self.coords_read_key] += 1\n print(\"\\tsearching coords: ind {}, coord {}, min_val {}\".format(i, self.coords[i], val_at_min_handle))\n if min_handle == None:\n if self.coords[i] > base:\n min_handle = i\n val_at_min_handle = self.coords[min_handle]\n else: \n assert min_handle != None\n if self.coords[i] > base and self.coords[i] < val_at_min_handle:\n min_handle = i\n val_at_min_handle = self.coords[min_handle]\n\n self.cur_handle = min_handle \n\n # print(\"\\t{} setupSlice: curHandle = {}\".format(self.name, self.cur_handle))\n\n # get hashtable key mod by table length\n def get_hash_key(self, val):\n return hash(str(val)) % self.hashtable_len\n \n # get next in iteration\n def nextInSlice(self):\n if self.cur_handle == None:\n return None\n if self.num_to_ret != None and self.num_to_ret < self.num_ret_so_far:\n return None\n if self.num_ret_so_far >= len(self.coords):\n return None\n cur_coord = self.coords[self.cur_handle]\n to_ret = self.cur_handle\n\n # look in the cache for it\n key = self.name + '_IdxToCoords_' + str(self.cur_handle)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[self.cur_handle]\n self.stats[self.coords_read_key] += 1\n \n next_handle = None\n # need to do a linear pass to find the next coord in sorted order\n for i in range(0, len(self.coords)):\n key = self.name + '_IdxToCoords_' + str(i)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[i]\n \n self.stats[self.coords_read_key] += 1\n if self.coords[i] > cur_coord:\n if next_handle == None or (self.coords[next_handle] > self.coords[i] and self.coords[i] > cur_coord):\n next_handle = i\n self.cur_handle = next_handle\n return to_ret\n \n def double_table(self, count_stats):\n print(\"\\t table doubling\")\n # reset HT and ptrs\n self.hashtable_len = self.hashtable_len * 2\n self.ptrs = list()\n self.ht = [None] * self.hashtable_len\n for i in range(0, len(self.coords)):\n self.insertElement(self.coords[i], add_coord=False, count_stats=count_stats)\n assert(len(self.ptrs) == len(self.coords))\n # search for them all\n for i in range(0, len(self.coords)):\n assert self.coordToHandleNoStats(self.coords[i]) != None\n\n # modify coords, need to append 1 to payloads\n def insertElement(self, coord, payload=0, count_stats=True, add_coord=True):\n if coord == None:\n return None\n \n # encode coord\n hash_key = self.get_hash_key(coord)\n print(\"\\tcoord: {}, hash key {}\".format(coord, hash_key))\n bin_head = self.ht[hash_key]\n if count_stats:\n 
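# stat and cache bookkeeping below is skipped when count_stats is False (e.g. the initial encodeFiber pass inserts with count_stats=False)\n 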
self.stats[self.ht_read_key] += 1\n key = self.name + '_HT_' + str(hash_key)\n cached_val = self.cache.get(key)\n self.cache[key] = bin_head\n\n # traverse this bucket\n while bin_head != None:\n # print(\"\\tbin head {}\".format(bin_head))\n if count_stats:\n key = self.name + '_IdxToCoords_' + str(bin_head)\n cached_val = self.cache.get(key)\n self.cache[key] = self.coords[bin_head]\n\n if self.coords[bin_head] == coord:\n # update payload or return because found\n return bin_head \n bin_head = self.ptrs[bin_head]\n assert bin_head == None\n\n # make room for elt\n self.ptrs.append(self.ht[hash_key])\n self.ht[hash_key] = len(self.ptrs) - 1 \n # don't need to readd during doubling\n if add_coord:\n self.coords.append(coord)\n self.payloads.append(payload)\n self.stats[self.coords_write_key] += 1\n\n if count_stats:\n # add to stats\n self.stats[self.ht_write_key] += 1\n self.stats[self.ptrs_write_key] += 1\n\n # add payloads access to cache\n key = self.name + '_IdxToPayloads_' + str(len(self.coords) - 1)\n cached_val = self.cache.get(key)\n self.cache[key] = payload\n\n density = float(len(self.coords)) / self.hashtable_len\n if density >= self.max_density:\n self.double_table(count_stats)\n assert(len(self.coords) == len(self.payloads))\n return len(self.coords) - 1 # handle to coord == at the end\n\n def updatePayload(self, handle, payload):\n if handle == None:\n return None\n key = self.name + '_IdxToPayloads_' + str(handle)\n cached_val = self.cache.get(key)\n self.cache[key] = payload\n\n # update payload\n self.stats[self.payloads_write_key] += 1\n self.payloads[handle] = payload\n return handle\n\n # fiber handles must be reducible with int\n def getUpdatedFiberHandle(self):\n return len(self.payloads) + len(self.ht) \n # return ((len(self.ht), len(self.payloads)), self)\n\n # default implementation == like in C\n # overwrite if this is changed\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n output = list()\n for i in range(prev_ind, ind):\n output.append(0)\n output.append(payload)\n return output\n\n @staticmethod\n def endPayloads(num_to_pad):\n return []\n\n # implicit coords\n @staticmethod\n def encodeCoords():\n return True\n\n # explicit prev payloads\n @staticmethod\n def encodeUpperPayload():\n return True\n \n @staticmethod \n def startOccupancy():\n return [0, 0]\n", "id": "4194736", "language": "Python", "matching_score": 5.026723384857178, "max_stars_count": 2, "path": "fibertree/codec/formats/hashtable.py" }, { "content": "from .compression_format import CompressionFormat\nimport math\n\n\nclass TwoHandle():\n def __init__(self, coords_handle = None, payloads_handle = None):\n self.coords_handle = coords_handle\n self.payloads_handle = payloads_handle\n\nclass Bitvector(CompressionFormat):\n # untruncated bitvector\n def __init__(self):\n CompressionFormat.__init__(self)\n self.occupancies = list()\n self.bits_per_word = 32\n self.bits_per_line = self.bits_per_word * self.words_in_line\n self.iter_handle = TwoHandle()\n\n # instantiate current fiber in B format\n def encodeFiber(self, a, dim_len, codec, depth, ranks, output, output_tensor, shape=None):\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n \n # init vars\n fiber_occupancy = 0\n cumulative_occupancy = 0\n \n if depth < len(ranks) - 1:\n self.next_fmt = codec.fmts[depth + 1] \n \n if depth < len(ranks) - 1:\n if codec.format_descriptor[depth + 1] == \"Hf\" or codec.format_descriptor[depth + 1] == \"T\":\n cumulative_occupancy = 
[0, 0]\n occ_list = list()\n occ_list.append(cumulative_occupancy)\n prev_nz = 0\n # TODO: get dim_len from shape if it is there\n self.coords = [0]*dim_len\n \n for ind, (val) in a:\n if depth < len(ranks) - 1:\n fiber, child_occupancy = codec.encode(depth + 1, val, ranks, output, output_tensor)\n # store coordinate explicitly\n self.payloads.append(fiber)\n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n codec.add_payload(depth, occ_list, cumulative_occupancy, child_occupancy)\n\n\n if codec.fmts[depth+1].encodeUpperPayload():\n output[payloads_key].append(cumulative_occupancy)\n self.occupancies.append(cumulative_occupancy)\n\n # TODO: make this store more than one bit per entry\n # set coord bit\n self.coords[ind] = 1\n\n fiber_occupancy += 1\n # fiber_occupancy = fiber_occupancy + len(coords) \n \t # encode payload if necessary\n if depth == len(ranks) - 1:\n output[payloads_key].append(val.value)\n self.payloads.append(val.value)\n prev_nz = ind + 1\n\n # pad end if necessary\n output[coords_key].extend(self.coords)\n # print(\"encode fiber: coords {}, payloads {}\".format(self.coords, self.payloads))\n return fiber_occupancy\n\n def getWordStart(self, index):\n return math.floor(float(index) / self.bits_per_line) * self.bits_per_line\n\n # TODO: fix at 0\n def getWordEnd(self, index):\n return math.ceil(float(index) / self.bits_per_line) * self.bits_per_line\n \n def countCoordsCache(self, handle):\n handle_start = self.getWordStart(handle)\n cache_key = self.name + \"_handleToCoords_\" + str(handle_start)\n found = self.cache.get(cache_key) # look for it if there\n range_end = min(handle_start + self.bits_per_line, len(self.coords))\n self.cache[cache_key] = self.coords[handle_start:range_end]\n # if found is None:\n self.stats[self.coords_read_key] += 1\n\n \n \"\"\"\n # handle = coord_handle\n def countCoordsRead(self, handle):\n handle_start = self.getWordStart(handle)\n cache_key = self.name + \"_handleToCoordsRead_\" + str(handle_start)\n found = self.cache.get(cache_key) # look for it if there\n range_end = min(handle_start + self.bits_per_line, len(self.coords))\n self.cache[cache_key] = self.coords[handle_start:range_end]\n \n if found == None:\n self.stats[self.coords_read_key] += 1\n\n # handle = coord_handle\n def countCoordsWrite(self, handle):\n handle_start = self.getWordStart(handle)\n cache_key = self.name + \"_handleToCoordsWrite_\" + str(handle_start)\n found = self.cache.get(cache_key) # look for it if there\n range_end = min(handle_start + self.bits_per_line, len(self.coords))\n self.cache[cache_key] = self.coords[handle_start:range_end]\n if found == None:\n self.stats[self.coords_write_key] += 1\n \"\"\"\n # given a handle (index into bit vector) return the coord \n def handleToCoord(self, iter_handle):\n assert(isinstance(iter_handle, TwoHandle)) \n handle = iter_handle.coords_handle\n\n # print(\"{} handleToCoord, coord handle {}\".format(self.name, handle))\n if handle == None or handle >= len(self.coords):\n return None\n # if nothing is saved\n self.countCoordsCache(handle)\n return handle\n\n def handleToPayload(self, iter_handle):\n # save the previous coord that we looked up for cost measure\n # self.prev_coord_at_payload = iter_handle.coords_handle\n return super().handleToPayload(iter_handle.payloads_handle)\n\n # does this need to return a payload handle?\n # NOTE: in bitvector, handles to coords 
and payloads are different\n def coordToHandle(self, coord):\n return coord\n\n # size of fiber is actually like (shape / wordsize) + num_payloads\n # but can analytically find out shape / wordsize, so just return num_payloads\n def getUpdatedFiberHandle(self):\n return len(self.payloads)\n # return (len(self.payloads), self)\n\n def getSize(self):\n size = math.ceil(len(self.coords) / self.bits_per_word) + len(self.occupancies)\n \n # count payloads only if next level is encoded\n if self.next_fmt == None or self.next_fmt.encodeUpperPayload():\n size += len(self.payloads)\n return size\n\n # insertElement makes space for coord, payload and returns handle\n def insertElement(self, coord):\n if coord == None:\n return TwoHandle(None, None)\n coord_handle_to_add = self.handleToCoord(TwoHandle(coord))\n \n # stats counting\n self.countCoordsCache(coord_handle_to_add)\n \n # either way, need to count left\n payload_to_add_handle = self.countLeft(coord_handle_to_add) \n # if unset, make space for it\n if self.coords[coord_handle_to_add] == 0:\n self.payloads = self.payloads[:payload_to_add_handle] + [0] + self.payloads[payload_to_add_handle:]\n self.stats[self.payloads_write_key] += len(self.payloads) - payload_to_add_handle\n self.coords[coord_handle_to_add] = 1\n return TwoHandle(coord_handle_to_add, payload_to_add_handle)\n \n def updatePayload(self, handle, payload):\n print(\"\\t{} updatePayload: handle {}, payload {}\".format(self.name, handle, payload))\n payload_handle = handle.payloads_handle\n if payload_handle == None:\n return None\n if payload_handle >= 0 and payload_handle < len(self.payloads):\n # we are always calling update payload right after inserting it, so don't need to count it twice\n # self.stats[self.payloads_write_key] += 1\n self.payloads[payload_handle] = payload\n return payload_handle\n\n def countLeft(self, coords_handle):\n # count_left has cost = number of words to the left\n result = 0 # count left 1s \n for i in range(0, coords_handle):\n result += self.coords[i]\n # count up coords read per line\n if i % self.bits_per_line == 0:\n self.countCoordsCache(i)\n return result\n\n # setup coord and payload handle\n def setupSlice(self, base = 0, bound = None, max_num = None):\n super().setupSlice(base, bound, max_num)\n # start payloads handle\n self.iter_handle.coords_handle = self.coordToHandle(base)\n self.iter_handle.payloads_handle = self.countLeft(self.coords_handle)\n print(\"setup slice in B: coords handle {}, payloads handle {}\".format(self.iter_handle.coords_handle, self.iter_handle.payloads_handle))\n \n # iterate through coords, finding next nonempty coord\n # then move payloads forward by 1 (compressed payloads)\n def nextInSlice(self):\n if self.iter_handle.coords_handle >= len(self.coords) or self.iter_handle.payloads_handle >= len(self.payloads):\n return None\n if self.num_to_ret != None and self.num_to_ret < self.num_ret_so_far:\n return None\n # move to the next nonzero\n while self.iter_handle.coords_handle < len(self.coords) and self.coords[self.iter_handle.coords_handle] != 1:\n self.countCoordsCache(self.iter_handle.coords_handle)\n self.iter_handle.coords_handle += 1\n \n # if in range, return \n if self.iter_handle.coords_handle < len(self.coords):\n to_ret = TwoHandle(self.iter_handle.coords_handle, self.iter_handle.payloads_handle)\n self.iter_handle.coords_handle +=1 \n self.iter_handle.payloads_handle += 1\n self.num_ret_so_far += 1\n return to_ret\n else:\n return None\n\n def printFiber(self):\n print(\"{} :: bitvector: {}, 
occupancies: {}, payloads: {}\".format(self.name, self.coords, self.occupancies, self.payloads))\n \n # explicit coords\n @staticmethod\n def encodeCoords():\n return True\n\n # explicit prev payloads because payloads at this level are compressed\n @staticmethod\n def encodeUpperPayload():\n return True\n", "id": "2745142", "language": "Python", "matching_score": 4.901393890380859, "max_stars_count": 2, "path": "fibertree/codec/formats/bitvector.py" }, { "content": "from .compression_format import CompressionFormat\n\nclass RunLengthEncoding(CompressionFormat):\n def __init__(self):\n self.name = \"R\"\n\n @staticmethod\n def encodeFiber(a, dim_len, codec, depth, ranks, output):\n # import codec\n from ..tensor_codec import Codec\n coords_key, payloads_key = codec.get_keys(ranks, depth)\n \n # init vars\n fiber_occupancy = 0\n\n # TODO: HT to one payload\n cumulative_occupancy = 0\n if depth < len(ranks) - 1 and codec.format_descriptor[depth + 1] == \"Hf\":\n \t cumulative_occupancy = [0, 0] \n\n occ_list = list()\n # occ_list.append(cumulative_occupancy)\n prev_nz = 0\n \n for ind, (val) in a:\n child_occupancy = codec.encode(depth + 1, val, ranks, output)\n \n if isinstance(cumulative_occupancy, int):\n cumulative_occupancy = cumulative_occupancy + child_occupancy\n else:\n cumulative_occupancy = [a + b for a, b in zip(cumulative_occupancy, child_occupancy)]\n \n # add cumulative or non-cumulative depending on settings\n codec.add_payload(depth, occ_list, cumulative_occupancy, child_occupancy)\n \n # store coordinate explicitly\n coords = RunLengthEncoding.encodeCoord(prev_nz, ind)\n output[coords_key].extend(coords)\n\n # keep track of nnz in this fiber\n fiber_occupancy = fiber_occupancy + 1\n\n # if at leaves, store payloads directly\n if depth == len(ranks) - 1:\n output[payloads_key].append(val.value)\n\n prev_nz = ind\n \n # explicit payloads for next level\n if depth < len(ranks) - 1 and codec.fmts[depth+1].encodeUpperPayload():\n output[payloads_key].extend(occ_list)\n return fiber_occupancy, occ_list\n\n # encode coord explicitly\n @staticmethod\n def encodeCoord(prev_ind, ind):\n return [ind - prev_ind]\n\n @staticmethod\n def encodePayload(prev_ind, ind, payload):\n return [payload]\n\n # explicit coords\n @staticmethod\n def encodeCoords():\n return True\n\n # explicit prev payloads\n @staticmethod\n def encodeUpperPayload():\n return True\n", "id": "4880719", "language": "Python", "matching_score": 1.3920550346374512, "max_stars_count": 2, "path": "fibertree/codec/formats/rle.py" }, { "content": "from .formats.uncompressed import Uncompressed\nfrom .formats.coord_list import CoordinateList\nfrom .formats.bitvector import Bitvector\nfrom .formats.hashtable import HashTable\nfrom .formats.balanced_tree import RBTree\nfrom .formats.rle import RunLengthEncoding\n\"\"\"\n# U = uncompressed\n # size of vector = shape of fiber\n # contents = 0 if nothing in position, payload otherwise\n # fibers serialized in position order\nuncompressed = \"U\"\n\n# Bu = untruncated bit vector\n # size of vector = shape of this fiber\n # contents = 0 in position if empty, 1 if not\n # when each rank is serialized, fibers are serialized in order\nuntruncated_bitvector = \"Bu\"\n\n# Bt = truncated bit vector\n # cut off bit vector at last 1, store number of bits in previous rank's payloads\n # size of vector <= shape of fiber\n # when each rank is serialized, fibers are serialized in order\ntruncated_bitvector = \"Bt\"\n\n# C = coordinate list\n # size of vector = occupancy of this fiber\n # 
contents = sorted / deduplicated coordinates in this fiber\n # when each rank is serialized, fibers are serialized in order\ncoord_list = \"C\"\n\n# list of all valid formats\nvalid_formats = [uncompressed, coord_list, untruncated_bitvector, truncated_bitvector] \n# [\"U\", \"C\", \"R\", \"A\", \"B\", \"D\", \"Hf\", \"Hr\"]\n\n# types of bitvectors\nbitvectors = [untruncated_bitvector, truncated_bitvector]\n\n# TO BE IMPLEMENTED\n# D = delta compressed\n # num elements in vector = occupancy of fiber\n # contents = delta-compressed coordinate list\n # serialize according to position order\n# Hf = hash table per fiber\n # TODO\n\"\"\"\n# mapping descriptors to formats\n\"\"\"\nuncompressed = Uncompressed().getName()\ncoord_list = CoordinateList.getName()\ndescriptor_to_fmt = {uncompressed : Uncompressed, coord_list: CoordinateList}\n\"\"\"\n\n# TODO: figure out how to register yourself\n\ndescriptor_to_fmt = {\"U\" : Uncompressed, \"C\":CoordinateList, \"B\": Bitvector, \"T\": RBTree, \"H\":HashTable }\n\n# , \"C\": CoordinateList, \"B\": Bitvector, \"Hf\" : HashTable(), \"T\": RBTree, \"R\": RunLengthEncoding }# , \"UB\": UncompressedBitvector}\n", "id": "283470", "language": "Python", "matching_score": 0.5445511937141418, "max_stars_count": 2, "path": "fibertree/codec/compression_types.py" }, { "content": "import operator\n\n\"\"\" Spec \"\"\"\n\ndef GetIndexLevel(index_name, remaining_indices):\n # Omit index if it only occurs once in the mapping.\n # if all_indices.count(index_name) == 0:\n # return None\n cnt = remaining_indices.count(index_name)\n if cnt:\n return cnt - 1\n else:\n return None\n return cnt\n\n\nclass ProblemSpec:\n \"\"\" ProblemSpec Class \"\"\"\n\n def __init__(self, lhs, rhs):\n \"\"\"__init__\"\"\"\n\n self.lhs = lhs\n self.rhs = rhs\n # This is a bit hacky.\n SpecTensor.tensor_id = 0\n \n def GetTensors(self):\n return self.lhs.GetTensors() | self.rhs.GetTensors()\n \n def PrintBody(self, indent):\n result = indent * \" \"\n result += self.lhs.PrintBody(True)\n result += \" += \"\n result += self.rhs.PrintBody(False)\n return result\n\n def PrintCoIterators(self, index_name, remaining_indices):\n result_lhs = self.lhs.PrintCoIterators(index_name, remaining_indices, True)\n result_rhs = self.rhs.PrintCoIterators(index_name, remaining_indices, False)\n if result_lhs and result_rhs:\n result = \"(\"\n result += result_lhs\n result += \", \"\n result += result_rhs\n result += \")\"\n return result\n elif result_lhs:\n return result_lhs\n elif result_rhs:\n return result_rhs\n else:\n return \"\"\n\n def PrintCoIteration(self, index_name, remaining_indices):\n result_lhs = self.lhs.PrintCoIteration(index_name, remaining_indices, True)\n result_rhs = self.rhs.PrintCoIteration(index_name, remaining_indices, False)\n if result_lhs and result_rhs:\n result = result_lhs\n result += \" << \"\n result += result_rhs\n return result\n elif result_lhs:\n return result_lhs\n elif result_rhs:\n return result_rhs\n else:\n return \"\"\n\nclass TensorOp:\n \"\"\" TensorOp Class \"\"\"\n\n def __init__(self, lhs, rhs, op):\n \"\"\"__init__\"\"\"\n\n self.lhs = lhs\n self.rhs = rhs\n self.op = op\n\n def GetTensors(self):\n return self.lhs.GetTensors() | self.rhs.GetTensors()\n\n def PrintBody(self, is_ref):\n result = self.lhs.PrintBody(is_ref)\n result += \" \" + str(self.op) + \" \" # TODO\n result += self.rhs.PrintBody(is_ref)\n return result\n\n def PrintCoIterators(self, index_name, remaining_indices, is_ref):\n result_lhs = self.lhs.PrintCoIterators(index_name, 
remaining_indices, is_ref)\n result_rhs = self.rhs.PrintCoIterators(index_name, remaining_indices, is_ref)\n if result_lhs and result_rhs:\n result = \"(\" + result_lhs\n result += \", \"\n result += result_rhs + \")\"\n return result\n elif result_lhs:\n return result_lhs\n elif result_rhs:\n return result_rhs\n else:\n return None\n\n def PrintCoIteration(self, index_name, remaining_indices, is_ref):\n result_lhs = self.lhs.PrintCoIteration(index_name, remaining_indices, is_ref)\n result_rhs = self.rhs.PrintCoIteration(index_name, remaining_indices, is_ref)\n if result_lhs and result_rhs:\n result = \"(\" + result_lhs\n result += \" & \" # TODO: Smarter here.\n result += result_rhs + \")\"\n return result\n elif result_lhs:\n return result_lhs\n elif result_rhs:\n return result_rhs\n else:\n return None\n\nclass TensorAccess:\n \"\"\" TensorAccess Class \"\"\"\n\n def __init__(self, tensor, rank_ids):\n \"\"\"__init__\"\"\"\n\n self.target = tensor\n # I hate python at the moment\n if not isinstance(rank_ids, tuple):\n self.rank_ids = [rank_ids]\n else:\n self.rank_ids = rank_ids\n \n def __mul__(self, other):\n return TensorOp(self, other, operator.__mul__)\n \n def __lshift__(self, other):\n return ProblemSpec(self, other)\n \n def GetTensors(self):\n return set([self.target])\n\n def PrintCoIterators(self, index_name, remaining_indices, is_ref):\n if self.target.HasIndex(index_name):\n next_index = self.target.GetNextIndex(remaining_indices)\n if next_index is None:\n return self.target.GetValueName(is_ref)\n else:\n index_level = GetIndexLevel(next_index, remaining_indices)\n return self.target.GetFiberName(next_index, index_level)\n else:\n return None\n\n def PrintCoIteration(self, index_name, remaining_indices, is_ref):\n if self.target.HasIndex(index_name):\n index_level = GetIndexLevel(index_name, remaining_indices)\n return self.target.GetFiberName(index_name, index_level)\n else:\n return None\n\n def PrintBody(self, is_ref):\n return self.target.GetValueName(is_ref)\n\nclass SpecTensor:\n \"\"\" SpecTensor Class \"\"\"\n\n tensor_id = 0\n\n def __init__(self, *rank_ids):\n \"\"\"__init__\"\"\"\n\n self.rank_ids = rank_ids\n self.id = SpecTensor.tensor_id\n SpecTensor.tensor_id = SpecTensor.tensor_id + 1\n \n # This is a rank-0 acccess (e.g., no indices)\n def __lshift__(self, other):\n assert(self.rank_ids is ())\n return ProblemSpec(TensorAccess(self, ()), other)\n\n # This is a rank-0 acccess (e.g., no indices)\n def __mul__(self, other):\n assert(self.rank_ids is ())\n return TensorOp(TensorAccess(self, ()), other, operator.__mul__)\n \n def __getitem__(self, rank_ids):\n return TensorAccess(self, rank_ids)\n \n def GetID(self):\n return self.id\n \n def GetIndexVar(self, position):\n if not self.rank_ids:\n return None\n else:\n return self.rank_ids[position]\n\n def GetName(self):\n return \"T\" + str(self.id)\n \n def HasIndex(self, index_name):\n for rank_id in self.rank_ids:\n if index_name in rank_id.GetIndexNames():\n return True\n return False\n \n def GetNextIndex(self, remaining_indices):\n for rank_id in self.rank_ids:\n for index_name in remaining_indices:\n if self.HasIndex(index_name):\n return index_name\n return None\n\n def GetValueName(self, is_ref):\n if is_ref:\n return self.GetName() + \"_ref\"\n else:\n return self.GetName() + \"_val\"\n\n def GetFiberName(self, index_name, index_level):\n assert(self.HasIndex(index_name))\n result = self.GetName() + \"_\" + index_name\n result += str(index_level)\n return result\n\nclass IndexOp:\n \"\"\" IndexOp 
Class \"\"\"\n\n def __init__(self, lhs, rhs, op):\n \"\"\"__init__\"\"\"\n self.lhs = lhs\n self.rhs = rhs\n self.op = op\n\n def __add__(self, other):\n return IndexOp(self, other, operator.__add__)\n\n def __sub__(self, other):\n return IndexOp(self, other, operator.__sub__)\n\n def __mul__(self, other):\n return IndexOp(self, other, operator.__mul__)\n\n def __div__(self, other):\n return IndexOp(self, other, operator.__div__)\n\n def GetIndexNames(self):\n if isinstance(self.lhs, IndexOp):\n lhs_names = self.lhs.GetIndexNames()\n elif isinstance(self.lhs, SpecIndex):\n lhs_names = self.lhs.GetIndexNames()\n else:\n lhs_names = set()\n if isinstance(self.rhs, IndexOp):\n rhs_names = self.rhs.GetIndexNames()\n elif isinstance(self.rhs, SpecIndex):\n rhs_names = self.rhs.GetIndexNames()\n else:\n rhs_names = set()\n return lhs_names | rhs_names\n\nclass SpecIndex:\n \"\"\" SpecIndex Class \"\"\"\n\n spec_id = 0\n\n def __init__(self, name=None):\n \"\"\"__init__\"\"\"\n\n self.id = SpecIndex.spec_id\n SpecIndex.spec_id = SpecIndex.spec_id + 1\n if name is None:\n self.name = \"I\" + str(self.id)\n else:\n self.name = name\n \n def __add__(self, other):\n return IndexOp(self, other, operator.__add__)\n\n def __sub__(self, other):\n return IndexOp(self, other, operator.__sub__)\n\n def __mul__(self, other):\n return IndexOp(self, other, operator.__mul__)\n\n def __div__(self, other):\n return IndexOp(self, other, operator.__div__)\n\n def GetIndexNames(self):\n return set([self.name])\n\nclass Schedule:\n \"\"\" Schedule Class\"\"\"\n \n def __init__(self, problem_spec, mapping):\n \n self.problem_spec = problem_spec\n self.mapping = mapping\n # A bit hacky\n SpecIndex.spec_id = 0\n \n def PrintLoop(self, position, indent):\n index_name = self.mapping[position]\n remaining_indices = self.mapping[position:]\n result_iterators = self.problem_spec.PrintCoIterators(index_name, remaining_indices[1:])\n result_iteration = self.problem_spec.PrintCoIteration(index_name, remaining_indices)\n result = indent * \" \"\n index_level = GetIndexLevel(index_name, remaining_indices)\n result += \"for \" + index_name + str(index_level)\n if result_iterators:\n result += \", \"\n result += result_iterators\n assert(result_iteration)\n result += \" in \"\n result += result_iteration\n result += \":\\n\"\n return result\n \n def __str__(self, indent = 0):\n\n all_tensors = self.problem_spec.GetTensors()\n result = indent * \" \"\n for tensor in all_tensors:\n result += indent * \" \"\n next_index = tensor.GetNextIndex(self.mapping)\n if next_index is None:\n # rank 0 tensor\n result += tensor.GetValueName(True) # TODO: really True?\n else:\n next_level = GetIndexLevel(next_index, self.mapping)\n result += tensor.GetFiberName(next_index, next_level)\n result += \" = \"\n result += tensor.GetName() + \".getRoot()\\n\"\n result += \"\\n\"\n for position in range(0, len(self.mapping)):\n result += self.PrintLoop(position, indent)\n indent += 4\n result += self.problem_spec.PrintBody(indent)\n return result \n \n\n#Temporary conveniences\nT=SpecTensor\nI=SpecIndex\n\ndef spec_dot_product():\n m = I(\"m\")\n Z = T()\n A = T(m)\n return Z << A[m]\n \nm0_dot_product = Schedule(spec_dot_product(), [\"m\"])\n\ndef spec_cartesian_mul():\n m = I(\"m\")\n n = I(\"n\")\n Z = T(m, n)\n A = T(m)\n B = T(n)\n return Z[m, n] << A[m] * B[n]\n \nm0n0_cartesian_mul = Schedule(spec_cartesian_mul(), [\"m\", \"n\"])\nn0m0_cartesian_mul = Schedule(spec_cartesian_mul(), [\"n\", \"m\"])\n\ndef spec_matrix_mul():\n m = I(\"m\")\n k = 
I(\"k\")\n n = I(\"n\")\n Z = T(m, n)\n A = T(m, k)\n B = T(k, n)\n return Z[m, n] << A[m, k] * B[k, n]\n\nmkn_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"k\", \"n\"])\nmnk_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"n\", \"k\"])\nkmn_matrix_mul = Schedule(spec_matrix_mul(), [\"k\", \"m\", \"n\"])\n\nn_mkn_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"m\", \"k\", \"n\"])\nk_mnk_matrix_mul = Schedule(spec_matrix_mul(), [\"k\", \"m\", \"n\", \"k\"])\nn_kmn_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"k\", \"m\", \"n\"])\n\nkn_mkn_matrix_mul = Schedule(spec_matrix_mul(), [\"k\", \"n\", \"m\", \"k\", \"n\"])\nnk_mnk_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"k\", \"m\", \"n\", \"k\"])\nmn_kmn_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"n\", \"k\", \"m\", \"n\"])\n\nmkn_mkn_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"k\", \"n\", \"m\", \"k\", \"n\"])\nmnk_mnk_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"n\", \"k\", \"m\", \"n\", \"k\"])\nkmn_kmn_matrix_mul = Schedule(spec_matrix_mul(), [\"k\", \"m\", \"n\", \"k\", \"m\", \"n\"])\n\nn_mkn_mkn_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"m\", \"k\", \"n\", \"m\", \"k\", \"n\"])\nk_mnk_mnk_matrix_mul = Schedule(spec_matrix_mul(), [\"k\", \"m\", \"n\", \"k\", \"m\", \"n\", \"k\"])\nn_kmn_kmn_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"k\", \"m\", \"n\", \"k\", \"m\", \"n\"])\n\n\ntactile_matrix_mul = Schedule(spec_matrix_mul(), [\"m\", \"k\", \"n\", \"m\", \"k\", \"n\", \"m\", \"k\"])\ntactil2_matrix_mul = Schedule(spec_matrix_mul(), [\"n\", \"k\", \"m\", \"n\", \"k\", \"m\", \"n\", \"k\"])\n\n\ndef spec_2d_conv():\n p = I(\"p\")\n q = I(\"q\")\n r = I(\"r\")\n s = I(\"s\")\n Out = T(p, q)\n In = T(p + r - 1, q + s - 1)\n Wt = T(r, s)\n return Out[p, q] << In[p + r, q + s] * Wt[r, s]\n\npqrs_2d_conv = Schedule(spec_2d_conv(), [\"p\", \"q\" ,\"r\", \"s\"])\nprqs_2d_conv = Schedule(spec_2d_conv(), [\"p\", \"r\" ,\"q\", \"s\"])\nrspq_2d_conv = Schedule(spec_2d_conv(), [\"r\", \"s\" ,\"p\", \"q\"])\nrpsq_2d_conv = Schedule(spec_2d_conv(), [\"r\", \"p\" ,\"s\", \"q\"])\n\ndef convert_xy_to_yxyx():\n x = I(\"x\")\n y = I(\"y\")\n x0 = I(\"x0\")\n x1 = I(\"x1\")\n y0 = I(\"y0\")\n y1 = I(\"y1\")\n Z = T(y1, x1, y0, x0)\n A = T(x, y)\n return Z[y1, x1, y0, x0] << A[x, y]\n\nyxyx_convert_xy_to_yxyx = Schedule(convert_xy_to_yxyx(), [\"y\", \"x\", \"y\", \"x\"])\n\n#t_yxyx = t.transform([\"y\", \"x\", \"y\", \"x\")], {\"y\" = [YO], \"x\" = [X0]}\n\n#\n#for y, t_x in t_y:\n# y1 = \n# y0 = \n# out_x1 = out.insertOrLookup(y1, Fiber())\n# for x, t_val in t_x:\n# x1 = \n# x0 =\n# out_y0 = out_x1.insertOrLookup(x1, Fiber())\n# out_x0 = out_y0.insertOrLookup(y0, Fiber())\n# out_x0.append(x0, t_val)\n#\n#\n#src_fiber = src.getRoot()\n#src_fiber_stack.push(src_fiber)\n#\n#while !src_fiber.isLeaf():\n# src_rank_id = src_fiber.rankID()\n# for src_coord, src_payload in src_fiber:\n# for tgt_rank in tgt_ranks[src_rank_id]:\n# tgt_coord[src_rank_id][tgt_rank] = \n# coord_func[src_rank_id][tgt_rank](src_coord)\n# src_fiber = src_payload\n# \n# \n# \n\n\n", "id": "3212087", "language": "Python", "matching_score": 1.417728304862976, "max_stars_count": 2, "path": "spec/spec.py" }, { "content": "import unittest\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Rank\nfrom fibertree import Tensor\n\n\nclass TestTensorTransform(unittest.TestCase):\n\n def test_truediv(self):\n \"\"\" Test /, the __truediv__ operator \"\"\"\n a = 
Tensor.fromYAMLfile(\"./data/tensor_transform-b.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-b-truediv.yaml\")\n\n a_out = a / 4\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\"])\n self.assertEqual(a_out.getShape(), [16, 20, 10])\n\n def test_splitUniform_0(self):\n \"\"\" Test splitUniform - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUniform_0.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 0},\n \"by-name\": {\"rankid\": \"M\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUniform(25, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\", \"K\"])\n self.assertEqual(a_out.getShape(), [26, 41, 42, 10])\n\n\n\n def test_splitUniform_1(self):\n \"\"\" Test splitUniform - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUniform_1.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 1},\n \"by-name\": {\"rankid\": \"N\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUniform(15, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N.1\", \"N.0\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 31, 42, 10])\n\n\n def test_splitUniform_2(self):\n \"\"\" Test splitUniform - depth=2 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUniform_2.yaml\")\n tests = { \"by-depth\": {\"depth\": 2},\n \"by-name\": {\"rankid\": \"K\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUniform(4, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N\", \"K.1\", \"K.0\"])\n self.assertEqual(a_out.getShape(), [41, 42, 9, 10])\n\n\n def test_splitNonUniform_0(self):\n \"\"\" Test splitNonUniform - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitNonUniform_0.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 0},\n \"by-name\": {\"rankid\": \"M\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitNonUniform([0, 15, 35], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\", \"K\"])\n self.assertEqual(a_out.getShape(), [36, 41, 42, 10])\n\n\n def test_splitNonUniform_1(self):\n \"\"\" Test splitNonUniform - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitNonUniform_1.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 1},\n \"by-name\": {\"rankid\": \"N\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitNonUniform([0, 15, 25], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N.1\", \"N.0\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 26, 42, 10])\n\n\n def test_splitNonUniform_2(self):\n \"\"\" Test splitNonUniform - depth=2 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitNonUniform_2.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 2},\n 
\"by-name\": {\"rankid\": \"K\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitNonUniform([0, 4, 19], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N\", \"K.1\", \"K.0\"])\n self.assertEqual(a_out.getShape(), [41, 42, 5, 10])\n\n def test_floordiv(self):\n \"\"\" Test /, the __floordiv__ operator \"\"\"\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-b.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-b-floordiv.yaml\")\n\n a_out = a // 4\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\"])\n self.assertEqual(a_out.getShape(), [17, 20, 10])\n\n def test_splitEqual_0(self):\n \"\"\" Test splitEqual - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitEqual_0.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 0},\n \"by-name\": {\"rankid\": \"M\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitEqual(2, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 41, 42, 10])\n\n\n def test_splitEqual_1(self):\n \"\"\" Test splitEqual - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitEqual_1.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 1},\n \"by-name\": {\"rankid\": \"N\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitEqual(2, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N.1\", \"N.0\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 34, 42, 10])\n\n\n def test_splitEqual_2(self):\n \"\"\" Test splitEqual - depth=2 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitEqual_2.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 2},\n \"by-name\": {\"rankid\": \"K\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitEqual(2, **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N\", \"K.1\", \"K.0\"])\n self.assertEqual(a_out.getShape(), [41, 42, 10, 10])\n\n\n\n def test_splitUnEqual_0(self):\n \"\"\" Test splitUnEqual - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUnEqual_0.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 0},\n \"by-name\": {\"rankid\": \"M\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUnEqual([2, 1], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M.1\", \"M.0\", \"N\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 41, 42, 10])\n\n\n\n def test_splitUnEqual_1(self):\n \"\"\" Test splitUnEqual - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUnEqual_1.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 1},\n \"by-name\": {\"rankid\": \"N\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUnEqual([2, 1], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N.1\", 
\"N.0\", \"K\"])\n self.assertEqual(a_out.getShape(), [41, 42, 42, 10])\n\n\n def test_splitUnEqual_2(self):\n \"\"\" Test splitUnEqual - depth=2 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-splitUnEqual_2.yaml\")\n\n tests = { \"by-depth\": {\"depth\": 2},\n \"by-name\": {\"rankid\": \"K\"}}\n\n for test, kwargs in tests.items():\n with self.subTest(test=test):\n a_out = a.splitUnEqual([2, 1], **kwargs)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"N\", \"K.1\", \"K.0\"])\n self.assertEqual(a_out.getShape(), [41, 42, 9, 10])\n\n\n def test_swapRanks_0(self):\n \"\"\" Test swapRanks - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-swapRanks_0.yaml\")\n\n a_out = a.swapRanks(depth=0)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"N\", \"M\", \"K\"])\n self.assertEqual(a_out.getShape(), [42, 41, 10])\n\n\n def test_swapRanks_1(self):\n \"\"\" Test swapRanks - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_verify = Tensor.fromYAMLfile(\"./data/tensor_transform-a-swapRanks_1.yaml\")\n\n a_out = a.swapRanks(depth=1)\n\n self.assertEqual(a_out, a_verify)\n self.assertEqual(a_out.getRankIds(), [\"M\", \"K\", \"N\"])\n self.assertEqual(a_out.getShape(), [41, 10, 42])\n\n\n def test_swizzleRanks(self):\n \"\"\" Test swizzleRanks \"\"\"\n\n a_MK = Tensor.fromUncompressed([\"M\", \"K\"],\n [[0, 0, 4, 0, 0, 5],\n [3, 2, 0, 3, 0, 2],\n [0, 2, 0, 0, 1, 2],\n [0, 0, 0, 0, 0, 0],\n [2, 5, 0, 0, 0, 5],\n [4, 1, 0, 0, 0, 0],\n [5, 0, 0, 1, 0, 0],\n [4, 0, 0, 5, 1, 3]])\n\n a_KM = a_MK.swapRanks()\n\n M = 8\n M1 = 2\n M0 = (M+1)//M1\n\n K = 6\n K1 = 2\n K0 = (K+1)//K1\n\n a_MMKK = a_MK.splitUniform(M0).splitUniform(K0, depth=2)\n a_MKMK = a_MMKK.swapRanks(depth=1)\n a_KMMK = a_KM.splitUniform(K0).swapRanks(depth=1).splitUniform(M0, depth=1)\n\n a_KM_2 = a_MK.swizzleRanks([\"K\", \"M\"])\n self.assertEqual(a_KM_2, a_KM)\n\n a_MK_2 = a_KM_2.swizzleRanks([\"M\", \"K\"])\n self.assertEqual(a_MK_2, a_MK)\n\n a_MKMK_2 = a_MMKK.swizzleRanks([\"M.1\",\"K.1\", \"M.0\", \"K.0\"])\n self.assertEqual(a_MKMK_2, a_MKMK)\n\n a_MMKK_2 = a_MKMK.swizzleRanks([\"M.1\", \"M.0\", \"K.1\", \"K.0\"])\n self.assertEqual(a_MMKK_2, a_MMKK)\n\n def test_swizzleRanks_empty(self):\n \"\"\" Test swizzleRanks() on an empty tensor \"\"\"\n Z_MNOP = Tensor(rank_ids=[\"M\", \"N\", \"O\", \"P\"])\n Z_PNMO = Z_MNOP.swizzleRanks(rank_ids=[\"P\", \"N\", \"M\", \"O\"])\n\n self.assertEqual(Z_MNOP.getRankIds(), [\"M\", \"N\", \"O\", \"P\"])\n self.assertEqual(Z_PNMO.getRankIds(), [\"P\", \"N\", \"M\", \"O\"])\n\n\n def test_flattenRanks_0(self):\n \"\"\" Test flattenRanks - depth=0 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_out = a.flattenRanks(depth=0)\n a_again = a_out.unflattenRanks(depth=0)\n\n self.assertEqual(a_again, a)\n self.assertEqual(a_out.getRankIds(), [[\"M\", \"N\"], \"K\"])\n\n # TBD: Semantics for non-integer coordinates\n# self.assertEqual(a_out.getShape(), [7, 10])\n\n\n def test_flattenRanks_1(self):\n \"\"\" Test flattenRanks - depth=1 \"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/tensor_transform-a.yaml\")\n a_out = a.flattenRanks(depth=1)\n a_again = a_out.unflattenRanks(depth=1)\n\n self.assertEqual(a_again, a)\n self.assertEqual(a_out.getRankIds(), [\"M\", [\"N\", \"K\"]])\n\n # TBD: Semantics for non-integer 
coordinates\n# self.assertEqual(a_out.getShape(), [7, 10])\n\n def test_flattenRanks_f01(self):\n \"\"\" Test flattenRanks - f01 \"\"\"\n\n t0 = Tensor.fromYAMLfile(\"./data/tensor_3d-0.yaml\")\n\n f01 = t0.flattenRanks(depth=0, levels=1)\n u01 = f01.unflattenRanks(depth=0, levels=1)\n\n self.assertEqual(u01, t0)\n\n def test_flattenRanks_f02(self):\n \"\"\" Test flattenRanks - f02 \"\"\"\n\n t0 = Tensor.fromYAMLfile(\"./data/tensor_3d-0.yaml\")\n\n f02 = t0.flattenRanks(depth=0, levels=2)\n u02a = f02.unflattenRanks(depth=0, levels=1)\n u02b = u02a.unflattenRanks(depth=1, levels=1)\n\n self.assertEqual(u02b, t0)\n\n u02 = f02.unflattenRanks(depth=0, levels=2)\n\n self.assertEqual(u02, t0)\n\n def test_flattenRanks_f12(self):\n \"\"\" Test flattenRanks - f12 \"\"\"\n\n t0 = Tensor.fromYAMLfile(\"./data/tensor_3d-0.yaml\")\n\n f12 = t0.flattenRanks(depth=1, levels=1)\n u12 = f12.unflattenRanks(depth=1, levels=1)\n self.assertEqual(u12, t0)\n\n\n def test_flattenRanks_f02(self):\n \"\"\" Test flattenRanks - f02 \"\"\"\n\n t0 = Tensor.fromYAMLfile(\"./data/tensor_3d-0.yaml\")\n t1 = Tensor.fromYAMLfile(\"./data/tensor_3d-1.yaml\")\n\n t2 = Tensor.fromFiber([\"A\", \"B\", \"C\", \"D\"],\n Fiber([1, 4], [t0.getRoot(), t1.getRoot()]),\n name=\"t2\")\n\n f13 = t2.flattenRanks(depth=1, levels=2)\n u13 = f13.unflattenRanks(depth=1, levels=2)\n\n self.assertEqual(u13, t2)\n\n f04 = t2.flattenRanks(depth=0, levels=3)\n u04 = f04.unflattenRanks(depth=0, levels=3)\n\n self.assertEqual(u04, t2)\n\n def test_flattenRanks_l3_sa(self):\n \"\"\"Test flattenRanks - levels=3, coord_style=absolute\"\"\"\n t0 = Tensor.fromUncompressed(rank_ids=[\"A\"], root=list(range(16)))\n s1 = t0.splitUniform(8, depth=0)\n s2 = s1.splitUniform(4, depth=1)\n s3 = s2.splitUniform(2, depth=2)\n\n f4 = s3.flattenRanks(levels=3, coord_style=\"absolute\")\n f4.setRankIds([\"A\"])\n\n self.assertEqual(f4, t0)\n\n def test_unflattenRanks_empty(self):\n t = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n t2 = t.flattenRanks()\n t3 = t2.unflattenRanks()\n t3.setRankIds([\"X\", \"Y\", \"Z\"])\n\n self.assertEqual(t, t3)\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "6893951", "language": "Python", "matching_score": 4.688005447387695, "max_stars_count": 2, "path": "test/test_tensor_transform.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Tensor\n\n\nclass TestFiberTensorSplitDeep(unittest.TestCase):\n\n def setUp(self):\n\n self.input = {}\n\n self.input['f'] = Fiber([0, 1, 2],\n [Fiber([0, 1, 2],\n [Fiber([0, 1, 2, 3], [2, 4, 2, 4]),\n Fiber([0, 1, 2, 3], [3, 2, 4, 3]),\n Fiber([0, 1, 3], [3, 2, 1])]),\n Fiber([0, 1, 2],\n [Fiber([0, 1, 2, 3], [3, 2, 5, 4]),\n Fiber([0, 1, 3], [5, 2, 1]),\n Fiber([0, 1, 2], [1, 1, 5])]),\n Fiber([0, 1, 2],\n [Fiber([1, 2], [4, 2]),\n Fiber([0, 1, 2, 3], [2, 2, 2, 3]),\n Fiber([0, 1, 2, 3], [2, 4, 3, 1])])])\n\n self.input['t'] = Tensor.fromFiber([\"C\", \"H\", \"W\"], self.input['f'])\n\n\n def test_split_deep(self):\n \"\"\"Test splitDeep - assumes basic split works\"\"\"\n\n f = self.input['f']\n t = self.input['t']\n\n rankids = t.getRankIds()\n\n for depth, rankid in enumerate(rankids):\n \n with self.subTest(test=f\"splitUniform(2, [depth={depth} | rankid='{rankid}'])\"):\n t1a = t.splitUniform(2, depth=depth)\n t1b = t.splitUniform(2, rankid=rankid)\n self.assertEqual(t1b,t1a)\n\n i1a = f.splitUniform(2, depth=depth)\n self.assertEqual(i1a, t1b.getRoot())\n\n i1b = f.splitUniform(2, rankid=rankid)\n 
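# the fiber split by rankid should equal the root of the tensor split by depth\n 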
self.assertEqual(i1b, t1a.getRoot())\n\n\n with self.subTest(test=f\"splitNonUniform[0,1], [depth={depth} | rankid='{rankid}'])\"):\n \n t2a = t.splitNonUniform([0, 1], depth=depth)\n t2b = t.splitNonUniform([0, 1], rankid=rankid)\n self.assertEqual(t2b, t2a)\n\n i2a = f.splitNonUniform([0, 1], depth=depth)\n self.assertEqual(i2a, t2b.getRoot())\n\n i2b = f.splitNonUniform([0, 1], rankid=rankid)\n self.assertEqual(i2b, i2a)\n\n with self.subTest(test=f\"splitEqual(2, [depth={depth} | rankid='{rankid}'])\"):\n \n t3a = t.splitEqual(2, depth=depth)\n t3b = t.splitEqual(2, rankid=rankid)\n self.assertEqual(t3b, t3a)\n\n i3a = f.splitEqual(2, depth=depth)\n self.assertEqual(i3a, t3b.getRoot())\n\n i3b = f.splitEqual(2, rankid=rankid)\n self.assertEqual(i3b, i3a)\n\n \n with self.subTest(test=f\"splitUnEqual([1,3], [depth={depth}, rankid='{rankid}'])\"):\n\n t4a = t.splitUnEqual([1,3], depth=depth)\n t4b = t.splitUnEqual([1,3], rankid=rankid)\n self.assertEqual(t4b, t4a)\n\n i4a = f.splitUnEqual([1,3], depth=depth)\n self.assertEqual(i4a, t4b.getRoot())\n\n i4b = f.splitUnEqual([1,3], rankid=rankid)\n self.assertEqual(i4b, i4a)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "5038394", "language": "Python", "matching_score": 4.101055145263672, "max_stars_count": 2, "path": "test/test_split_deep.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\n\n\nclass TestFiberInfixSplit(unittest.TestCase):\n\n def setUp(self):\n\n self.input = {}\n\n self.input[0] = Fiber([3, 6, 8, 9, 12, 16, 19, 20, 28, 30, 32, 38, 40, 43, 46, 47, 48, 49],\n [8, 9, 6, 3, 5, 4, 1, 4, 6, 4, 1, 6, 2, 6, 5, 9, 2, 5])\n\n\n def test_split_uniform(self):\n \"\"\"Test splitUniform\"\"\"\n\n self.ans = {}\n\n self.ans[2] = Fiber([0, 25],\n [Fiber([3, 6, 8, 9, 12, 16, 19, 20],\n [8, 9, 6, 3, 5, 4, 1, 4]),\n Fiber([28, 30, 32, 38, 40, 43, 46, 47, 48, 49],\n [6, 4, 1, 6, 2, 6, 5, 9, 2, 5])])\n\n self.ans[3] = Fiber([0, 17, 34],\n [Fiber([3, 6, 8, 9, 12, 16],\n [8, 9, 6, 3, 5, 4]),\n Fiber([19, 20, 28, 30, 32],\n [1, 4, 6, 4, 1]),\n Fiber([38, 40, 43, 46, 47, 48, 49],\n [6, 2, 6, 5, 9, 2, 5])])\n\n self.ans[4] = Fiber([0, 13, 26, 39],\n [Fiber([3, 6, 8, 9, 12],\n [8, 9, 6, 3, 5]),\n Fiber([16, 19, 20],\n [4, 1, 4]),\n Fiber([28, 30, 32, 38],\n [6, 4, 1, 6]),\n Fiber([40, 43, 46, 47, 48, 49],\n [2, 6, 5, 9, 2, 5])])\n\n self.ans[5] = Fiber([0, 10, 20, 30, 40],\n [Fiber([3, 6, 8, 9],\n [8, 9, 6, 3]),\n Fiber([12, 16, 19],\n [5, 4, 1]),\n Fiber([20, 28], [4, 6]),\n Fiber([30, 32, 38],\n [4, 1, 6]),\n Fiber([40, 43, 46, 47, 48, 49],\n [2, 6, 5, 9, 2, 5])])\n\n f = self.input[0]\n \n for p in range(2,6):\n with self.subTest(test=p):\n ans = f / p\n self.assertEqual(ans, self.ans[p])\n\n\n def test_split_equal(self):\n \"\"\"Test splitUniform\"\"\"\n\n self.ans = {}\n\n self.ans[2] = Fiber([3, 30],\n [Fiber([3, 6, 8, 9, 12, 16, 19, 20, 28],\n [8, 9, 6, 3, 5, 4, 1, 4, 6]),\n Fiber([30, 32, 38, 40, 43, 46, 47, 48, 49],\n [4, 1, 6, 2, 6, 5, 9, 2, 5])])\n\n self.ans[3] = Fiber([3, 19, 40],\n [Fiber([3, 6, 8, 9, 12, 16],\n [8, 9, 6, 3, 5, 4]),\n Fiber([19, 20, 28, 30, 32, 38],\n [1, 4, 6, 4, 1, 6]),\n Fiber([40, 43, 46, 47, 48, 49],\n [2, 6, 5, 9, 2, 5])])\n\n self.ans[4] = Fiber([3, 16, 32, 47],\n [Fiber([3, 6, 8, 9, 12],\n [8, 9, 6, 3, 5]),\n Fiber([16, 19, 20, 28, 30],\n [4, 1, 4, 6, 4]),\n Fiber([32, 38, 40, 43, 46],\n [1, 6, 2, 6, 5]),\n Fiber([47, 48, 49],\n [9, 2, 5])])\n\n self.ans[5] = Fiber([3, 12, 28, 40, 48],\n [Fiber([3, 6, 8, 9],\n [8, 9, 6, 3]),\n Fiber([12, 16, 19, 
20],\n [5, 4, 1, 4]),\n Fiber([28, 30, 32, 38],\n [6, 4, 1, 6]),\n Fiber([40, 43, 46, 47],\n [2, 6, 5, 9]),\n Fiber([48, 49], \n [2, 5])])\n f = self.input[0]\n\n for p in range(2,6):\n with self.subTest(test=p):\n ans = f // p\n self.assertEqual(ans, self.ans[p])\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "7353515", "language": "Python", "matching_score": 1.9991614818572998, "max_stars_count": 2, "path": "test/test_fiber_infix_split.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Tensor\n\n\nclass TestConstructor(unittest.TestCase):\n\n def setUp(self):\n\n self.input = {}\n\n self.input[\"c1\"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n self.input[\"p1\"] = [7, 1, 8, 3, 8, 4, 6, 3, 7, 5]\n\n self.input[\"c2\"] = [ 0, 1, 2]\n self.input[\"p2\"] = [ Fiber([2], [4]), Fiber([1], [4]), Fiber([2], [2])]\n\n\n def test_constructor_1D(self):\n \"\"\"Test constructor 1D\"\"\"\n\n ans = Fiber([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [7, 1, 8, 3, 8, 4, 6, 3, 7, 5])\n\n attrs = []\n attrs.append([[0], 1, None, [10]])\n attrs.append([[-1], 1, None, [10]])\n attrs.append([[0], 1, None, [20]])\n attrs.append([[-2], 1, None, [20]])\n\n\n fs = []\n\n fs.append(Fiber(self.input[\"c1\"], self.input[\"p1\"]))\n fs.append(Fiber(self.input[\"c1\"], self.input[\"p1\"], default=-1))\n fs.append(Fiber(self.input[\"c1\"], self.input[\"p1\"], shape=[20]))\n fs.append(Fiber(self.input[\"c1\"], self.input[\"p1\"], shape=[20], default=-2))\n\n for test, f in enumerate(fs):\n with self.subTest(test=test):\n f_attr = self.attributes(f)\n\n for n, (c, p) in enumerate(f):\n self.assertEqual(c, self.input[\"c1\"][n])\n self.assertEqual(p, self.input[\"p1\"][n])\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attrs[test])\n\n\n def test_constructor_2D(self):\n \"\"\"Test constructor 2D\"\"\"\n\n ans = Fiber([0, 1, 2],\n [Fiber([2], [4]),\n Fiber([1], [4]),\n Fiber([2], [2])])\n\n #\n # Note: the default value does not get propaged down\n # the fibertree.... 
so it doesn't make sense when\n # fibers are inserted as the payloads.\n #\n attrs = []\n attrs.append([[Fiber, 0], 2, None, [3,3]])\n attrs.append([[Fiber, 0], 2, None, [3,3]])\n attrs.append([[Fiber, 0], 2, None, [8,8]])\n attrs.append([[Fiber, 0], 2, None, [8,8]])\n\n fs = []\n\n fs.append(Fiber(self.input[\"c2\"], self.input[\"p2\"]))\n fs.append(Fiber(self.input[\"c2\"], self.input[\"p2\"], default=-1))\n fs.append(Fiber(self.input[\"c2\"], self.input[\"p2\"], shape=[8, 8]))\n fs.append(Fiber(self.input[\"c2\"], self.input[\"p2\"], shape=[8, 8], default=-2))\n\n\n for test, f in enumerate(fs):\n with self.subTest(test=test):\n f_attr = self.attributes(f)\n\n for n, (c, p) in enumerate(f):\n self.assertEqual(c, self.input[\"c2\"][n])\n self.assertEqual(p, self.input[\"p2\"][n])\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attrs[test])\n\n\n def test_fromRandom_1D_dense(self):\n \"\"\"Test random 1d sparse\"\"\"\n\n ans = Fiber([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [7, 1, 8, 3, 8, 4, 6, 3, 7, 5])\n\n attr = [[0], 1, None, [10]]\n\n f = Fiber.fromRandom([10], [1.0], 9, 10)\n f_attr = self.attributes(f)\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attr)\n\n\n def test_fromRandom_1D_sparse(self):\n \"\"\"Test random 1d sparse\"\"\"\n\n ans = Fiber([3, 6, 8, 9, 12, 16, 19, 20, 28, 30, 32, 38, 40, 43, 46, 47, 48, 49],\n [8, 9, 6, 3, 5, 4, 1, 4, 6, 4, 1, 6, 2, 6, 5, 9, 2, 5])\n\n attr = [[0], 1, None, [50]]\n\n f = Fiber.fromRandom([50], [0.3], 9, 10)\n f_attr = self.attributes(f)\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attr)\n\n\n# self.makeTest(f, f_attr)\n\n\n def test_fromRandom_2D_dense(self):\n \"\"\"Test random 2d sparse\"\"\"\n\n ans = Fiber([0, 1, 2],\n [Fiber([0, 1, 2], [1, 4, 2]),\n Fiber([0, 1, 2], [1, 3, 2]),\n Fiber([0, 1, 2], [3, 3, 3])])\n\n attr = [[Fiber, 0], 2, None, [3, 3]]\n\n f = Fiber.fromRandom([3, 3], [1.0, 1.0], 4, 10)\n f_attr = self.attributes(f)\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attr)\n\n\n def test_fromRandom_2D_sparse(self):\n \"\"\"Test random 1d sparse\"\"\"\n\n ans = Fiber([0, 1, 2],\n [Fiber([2], [4]),\n Fiber([1], [4]),\n Fiber([2], [2])])\n\n attr = [[Fiber, 0], 2, None, [3, 3]]\n \n f = Fiber.fromRandom([3, 3], [1.0, 0.3], 4, 10)\n f_attr = self.attributes(f)\n\n self.assertEqual(f, ans)\n self.assertEqual(f_attr, attr)\n\n\n @staticmethod\n def makeTest(f, a):\n \"\"\"Make a check for a test\"\"\"\n\n# self.makeTest(f, f_attr)\n\n print(\"\")\n print(f\" ans = {f!r}\")\n print(f\" attr = {a}\")\n print(\"\")\n\n\n @staticmethod\n def attributes(f):\n \"\"\"Get all attributes of a fiber\"\"\"\n\n defaults = []\n\n ff = f\n while isinstance(ff, Fiber):\n defaults.append(ff.getDefault())\n ff = (ff.payloads or [Fiber([],[])])[0]\n\n \n attributes = [ defaults,\n f.getDepth(),\n f.getOwner(),\n f.getShape() ]\n\n return attributes\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "6765978", "language": "Python", "matching_score": 1.391440510749817, "max_stars_count": 2, "path": "test/test_fiber_contructor.py" }, { "content": "import unittest\nfrom copy import deepcopy\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Tensor\n\n\nclass TestFiberTensorUpdateDeep(unittest.TestCase):\n\n def setUp(self):\n\n self.input = {}\n\n self.input['f'] = Fiber([0, 1, 2],\n [Fiber([0, 1, 2],\n [Fiber([0, 1, 2, 3], [2, 4, 2, 4]),\n Fiber([0, 1, 2, 3], [3, 2, 4, 3]),\n Fiber([0, 1, 3], [3, 2, 1])]),\n Fiber([0, 1, 2],\n [Fiber([0, 1, 2, 3], [3, 2, 5, 4]),\n Fiber([0, 1, 3], 
[5, 2, 1]),\n Fiber([0, 1, 2], [1, 1, 5])]),\n Fiber([0, 1, 2],\n [Fiber([1, 2], [4, 2]),\n Fiber([0, 1, 2, 3], [2, 2, 2, 3]),\n Fiber([0, 1, 2, 3], [2, 4, 3, 1])])])\n\n self.input['t'] = Tensor.fromFiber([\"C\", \"H\", \"W\"], self.input['f'])\n\n\n def test_updateCoords_deep(self):\n \"\"\"Test updateCoords deep - assumes basic updateCoords works\"\"\"\n\n f = self.input['f']\n t = self.input['t']\n\n rankids = t.getRankIds()\n\n c_updates = [lambda n, c, p: c+1,\n lambda n, c, p: 20-c]\n\n for depth, rankid in enumerate(rankids):\n\n for n, update in enumerate(c_updates):\n test = f\"tensor: updateCoords(lambda[{n}], [depth={depth} | rankid='{rankid}'])\"\n with self.subTest(test=test):\n\n t1 = t.updateCoords(update, depth=depth)\n t2 = t.updateCoords(update, rankid=rankid)\n\n self.assertEqual(t1, t2)\n\n #\n # Note: Fiber.updateCoords() mutates the fiber, so we create a copy\n #\n f1 = deepcopy(f)\n f1.updateCoords(update, depth=depth)\n\n self.assertEqual(f1, t2.getRoot())\n\n f2 = deepcopy(f)\n f2.updateCoords(update, rankid=rankid)\n\n self.assertEqual(f2, f1)\n\n def test_updatePayloads_deep(self):\n \"\"\"Test updatePayloads deep - assumes basic updatePayloads works\"\"\"\n\n f = self.input['f']\n t = self.input['t']\n\n depth = 2\n rankid = \"W\"\n\n update = lambda p: p+1\n\n t1 = t.updatePayloads(update, depth=depth)\n t2 = t.updatePayloads(update, rankid=rankid)\n\n self.assertEqual(t1, t2)\n\n #\n # Note: Fiber.updateCoords() mutates the fiber, so we create a copy\n #\n f1 = deepcopy(f)\n f1.updatePayloads(update, depth=depth)\n\n self.assertEqual(f1, t2.getRoot())\n\n f2 = deepcopy(f)\n f2.updatePayloads(update, rankid=rankid)\n\n self.assertEqual(f2, f1)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "3979694", "language": "Python", "matching_score": 2.8586807250976562, "max_stars_count": 2, "path": "test/test_update_deep.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Rank\n\n\nclass TestRank(unittest.TestCase):\n\n def test_new(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "5741965", "language": "Python", "matching_score": 0.3344704508781433, "max_stars_count": 2, "path": "test/test_rank.py" }, { "content": "import unittest\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Tensor\n\n\nclass TestUnionIntersect(unittest.TestCase):\n\n def setUp(self):\n\n self.input = {}\n\n self.input[\"a1_M\"] = Tensor.fromUncompressed([\"M\"], [1, 0, 3, 0, 5, 0, 7])\n self.input[\"a1_m\"] = self.input[\"a1_M\"].getRoot()\n\n self.input[\"b1_M\"] = Tensor.fromUncompressed([\"M\"], [2, 0, 4, 5])\n self.input[\"b1_m\"] = self.input[\"b1_M\"].getRoot()\n\n self.input['c1_M'] = Tensor.fromUncompressed([\"M\"], [1, 2, 3])\n self.input[\"c1_m\"] = self.input[\"c1_M\"].getRoot()\n\n self.input[\"a2_MK\"] = Tensor.fromUncompressed([\"M\", \"K\"], [[1, 0, 3, 0, 5, 0, 7],\n [2, 2, 0, 3, 0, 0, 8],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [4, 0, 5, 0, 8, 0, 9]])\n\n self.input[\"a2_m\"] = self.input[\"a2_MK\"].getRoot()\n\n self.input[\"b2_MK\"] = Tensor.fromUncompressed([\"M\", \"K\"], [[2, 0, 4, 5],\n [0, 0, 0, 0],\n [3, 4, 6, 0],\n [0, 0, 0, 0],\n [1, 2, 3, 4]])\n self.input[\"b2_m\"] = self.input[\"b2_MK\"].getRoot()\n\n\n def test_union_2x_1d(self):\n \"\"\"Test union 2-way for 1d fibers\"\"\"\n\n ans = Fiber([0, 2, 3, 4, 6],\n [('AB', Payload(1), Payload(2)),\n ('AB', Payload(3), Payload(4)),\n ('B', Payload(0), Payload(5)),\n ('A', 
Payload(5), Payload(0)),\n ('A', Payload(7), Payload(0))])\n\n a_m = self.input[\"a1_m\"]\n b_m = self.input[\"b1_m\"]\n\n z_m1 = a_m | b_m\n z_m2 = Fiber.union(a_m, b_m)\n\n for test, z_m in enumerate([z_m1, z_m2]):\n with self.subTest(test=test):\n # Check for right answer\n self.assertEqual(z_m, ans)\n\n # Check that payloads are of correct type\n self.assertIsInstance(z_m[0].payload.value[1], Payload)\n self.assertIsInstance(z_m[2].payload.value[1], Payload)\n self.assertIsInstance(z_m[3].payload.value[2], Payload)\n\n # Check that default was set properly\n z_m_default=z_m.getDefault()\n self.assertEqual(z_m_default, Payload(('', 0, 0)))\n self.assertIsInstance(z_m_default, Payload)\n\n # Check final shape is correct\n z_m_shape = z_m.getShape()\n self.assertEqual(z_m_shape, [7])\n\n\n def test_union_2x_2d(self):\n \"\"\"Test union 2-way for 2d fibers\"\"\"\n\n ans = Fiber([0, 1, 2, 4],\n [('AB',\n Fiber([0, 2, 4, 6], [1, 3, 5, 7]),\n Fiber([0, 2, 3], [2, 4, 5])),\n ('A',\n Fiber([0, 1, 3, 6], [2, 2, 3, 8]),\n Fiber([], [])),\n ('B',\n Fiber([], []),\n Fiber([0, 1, 2], [3, 4, 6])),\n ('AB',\n Fiber([0, 2, 4, 6], [4, 5, 8, 9]),\n Fiber([0, 1, 2, 3], [1, 2, 3, 4]))])\n\n a_m = self.input[\"a2_m\"]\n b_m = self.input[\"b2_m\"]\n\n z_m1 = a_m | b_m\n z_m2 = Fiber.union(a_m, b_m)\n\n for test, z_m in enumerate([z_m1, z_m2]):\n with self.subTest(test=test):\n # Check for right answer\n self.assertEqual(z_m, ans)\n\n # Check that payloads are of correct type\n self.assertIsInstance(z_m[0].payload.value[1], Fiber)\n self.assertIsInstance(z_m[0].payload.value[2], Fiber)\n self.assertIsInstance(z_m[2].payload.value[1], Fiber)\n self.assertIsInstance(z_m[3].payload.value[2], Fiber)\n\n # Check that default was set properly\n z_m_default=z_m.getDefault()\n self.assertEqual(z_m_default, Payload(('', Fiber, Fiber)))\n self.assertIsInstance(z_m_default, Payload)\n\n # Check final shape is correct (note it is 1-D)\n z_m_shape = z_m.getShape()\n self.assertEqual(z_m_shape, [5])\n\n\n def test_union_2x_1d2d(self):\n \"\"\"Test union 2-way for 1d/2d fibers\"\"\"\n\n ans = Fiber([0, 2, 4, 6],\n [('AB', 1, Fiber([0, 2, 3], [2, 4, 5])),\n ('AB', 3, Fiber([0, 1, 2], [3, 4, 6])),\n ('AB', 5, Fiber([0, 1, 2, 3], [1, 2, 3, 4])),\n ('A', 7, Fiber([], []))])\n\n\n a_m = self.input[\"a1_m\"]\n b_m = self.input[\"b2_m\"]\n\n z_m1 = a_m | b_m\n z_m2 = Fiber.union(a_m, b_m)\n\n for test, z_m in enumerate([z_m1, z_m2]):\n with self.subTest(test=test):\n # Check for right answer\n self.assertEqual(z_m, ans)\n \n # Check that payloads are of correct type\n self.assertIsInstance(z_m[0].payload.value[1], Payload)\n self.assertIsInstance(z_m[0].payload.value[2], Fiber)\n self.assertIsInstance(z_m[2].payload.value[1], Payload)\n self.assertIsInstance(z_m[3].payload.value[2], Fiber)\n\n # Check that default was set properly\n z_m_default=z_m.getDefault()\n self.assertEqual(z_m_default, Payload(('', 0, Fiber)))\n self.assertIsInstance(z_m_default, Payload)\n\n # Check final shape is correct (note it is 1-D)\n z_m_shape = z_m.getShape()\n self.assertEqual(z_m_shape, [7])\n\n\n def test_union_3x_1d(self):\n \"\"\"Test union 3-way for 1d fibers\"\"\"\n\n ans = Fiber([0, 1, 2, 3, 4, 6],\n [('ABC', Payload(1), Payload(2), Payload(1)),\n ('C', Payload(0), Payload(0), Payload(2)),\n ('ABC', Payload(3), Payload(4), Payload(3)),\n ('B', Payload(0), Payload(5), Payload(0)),\n ('A', Payload(5), Payload(0), Payload(0)),\n ('A', Payload(7), Payload(0), Payload(0))])\n\n a_m = self.input[\"a1_m\"]\n b_m = self.input[\"b1_m\"]\n c_m = 
self.input[\"c1_m\"]\n\n z_m1 = Fiber.union(a_m, b_m, c_m)\n\n for test, z_m in enumerate([z_m1]):\n with self.subTest(test=test):\n # Check for right answer\n self.assertEqual(z_m, ans)\n\n # Check that payloads are of correct type\n self.assertIsInstance(z_m[0].payload.value[1], Payload)\n self.assertIsInstance(z_m[0].payload.value[2], Payload)\n self.assertIsInstance(z_m[0].payload.value[3], Payload)\n self.assertIsInstance(z_m[1].payload.value[1], Payload)\n self.assertIsInstance(z_m[1].payload.value[2], Payload)\n\n # Check that default was set properly\n z_m_default=z_m.getDefault()\n self.assertEqual(z_m_default, Payload(('', 0, 0, 0)))\n self.assertIsInstance(z_m_default, Payload)\n\n # Check final shape is correct (note it is 1-D)\n z_m_shape = z_m.getShape()\n self.assertEqual(z_m_shape, [7])\n \n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "421235", "language": "Python", "matching_score": 2.333850622177124, "max_stars_count": 2, "path": "test/test_union_intersect.py" }, { "content": "\"\"\"Tests related to shape of a tensor\"\"\"\n\nimport unittest\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Rank\nfrom fibertree import Tensor\n\n\nclass TestTensorShape(unittest.TestCase):\n\n def test_shape_empty(self):\n \"\"\"Test shape of empty tensor\"\"\"\n\n t1 = Tensor(rank_ids=[\"M\", \"K\"])\n\n self.assertEqual(t1.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t1.getShape(), [0, 0])\n\n t2 = Tensor(rank_ids=[\"M\", \"K\"], shape=[10,20])\n\n self.assertEqual(t2.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t2.getShape(), [10, 20])\n\n def test_shape_0D(self):\n \"\"\"Test shpe of 0-D tensor\"\"\"\n\n t = Tensor(rank_ids=[])\n p = t.getRoot()\n p += 1\n\n self.assertEqual(t.getRankIds(), [])\n self.assertEqual(t.getShape(), [])\n \n def test_shape_new(self):\n \"\"\"Test shape of a tensor from a file\"\"\"\n\n t1 = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n self.assertEqual(t1.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t1.getShape(), [7, 4])\n\n\n # Note: We cannot override the shape of shape from a YAML file\n\n\n def test_shape_fromUncompressed_1D(self):\n \"\"\"Test shape of a tensor from 1D nested lists\"\"\"\n\n l1 = [ 100, 101, 0, 102 ]\n\n t1 = Tensor.fromUncompressed([\"M\"], l1)\n\n self.assertEqual(t1.getRankIds(), [\"M\"])\n self.assertEqual(t1.getShape(), [ 4 ])\n\n l2 = [ 100, 101, 0, 0 ]\n\n t2 = Tensor.fromUncompressed([\"M\"], l2)\n\n self.assertEqual(t2.getRankIds(), [\"M\"])\n self.assertEqual(t2.getShape(), [ 4 ])\n\n def test_shape_fromUncompressed_2D_A1(self):\n \"\"\"Test shape of a tensor from 2D nested lists (tensor A)\"\"\"\n\n # 0 1 2 3\n #\n l1 = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 203 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 603 ] ] # 6\n\n\n t1 = Tensor.fromUncompressed([\"M\", \"K\"], l1)\n\n with self.subTest(test=\"All ranks\"):\n self.assertEqual(t1.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t1.getShape(), [ 7, 4 ])\n\n with self.subTest(test=\"All ranks specified\"):\n self.assertEqual(t1.getShape([\"M\", \"K\"]), [7, 4])\n\n with self.subTest(test=\"Just rank 'M' as list\"):\n self.assertEqual(t1.getShape([\"M\"]), [7])\n\n with self.subTest(test=\"Just rank 'K' as list\"):\n self.assertEqual(t1.getShape([\"K\"]), [4])\n\n with self.subTest(test=\"Just rank 'M'\"):\n self.assertEqual(t1.getShape(\"M\"), 7)\n\n with self.subTest(test=\"Just rank 'K'\"):\n self.assertEqual(t1.getShape(\"K\"), 
4)\n\n with self.subTest(test=\"Check authoritative\"):\n self.assertEqual(t1.getShape(authoritative=True), [7, 4])\n self.assertEqual(t1.getShape([\"M\", \"K\"], authoritative=True), [7, 4])\n self.assertEqual(t1.getShape([\"M\"], authoritative=True), [7])\n self.assertEqual(t1.getShape([\"K\"], authoritative=True), [4])\n self.assertEqual(t1.getShape(\"M\", authoritative=True), 7)\n self.assertEqual(t1.getShape(\"K\", authoritative=True), 4)\n \n\n def test_shape_fromUncompressed_2D_A2(self):\n \"\"\"Test shape of a tensor from 2D nested lists (tensor A, multiletter ranks_ids)\"\"\"\n\n # 0 1 2 3\n #\n l1 = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 203 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 603 ] ] # 6\n\n\n t1 = Tensor.fromUncompressed([\"MA\", \"KA\"], l1)\n\n with self.subTest(test=\"All ranks\"):\n self.assertEqual(t1.getRankIds(), [\"MA\", \"KA\"])\n self.assertEqual(t1.getShape(), [ 7, 4 ])\n\n with self.subTest(test=\"All ranks specified\"):\n self.assertEqual(t1.getShape([\"MA\", \"KA\"]), [7, 4])\n\n with self.subTest(test=\"Just rank 'MA' as list\"):\n self.assertEqual(t1.getShape([\"MA\"]), [7])\n\n with self.subTest(test=\"Just rank 'KA' as list\"):\n self.assertEqual(t1.getShape([\"KA\"]), [4])\n\n with self.subTest(test=\"Just rank 'MA'\"):\n self.assertEqual(t1.getShape(\"MA\"), 7)\n\n with self.subTest(test=\"Just rank 'KA'\"):\n self.assertEqual(t1.getShape(\"KA\"), 4)\n\n with self.subTest(test=\"Check authoritative\"):\n self.assertEqual(t1.getShape(authoritative=True), [7, 4])\n self.assertEqual(t1.getShape([\"MA\", \"KA\"], authoritative=True), [7, 4])\n self.assertEqual(t1.getShape([\"MA\"], authoritative=True), [7])\n self.assertEqual(t1.getShape([\"KA\"], authoritative=True), [4])\n self.assertEqual(t1.getShape(\"MA\", authoritative=True), 7)\n self.assertEqual(t1.getShape(\"KA\", authoritative=True), 4)\n\n\n def test_shape_fromUncompressed_2D_B(self):\n \"\"\"Test shape of a tensor from 2D nested lists (tensor B)\"\"\"\n\n # 0 1 2 3\n #\n l2 = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 0 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 0 ] ] # 6\n\n t2 = Tensor.fromUncompressed([\"M\", \"K\"], l2)\n\n self.assertEqual(t2.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t2.getShape(), [7, 4])\n\n\n def test_shape_fromFiber(self):\n \"\"\"Test shape of a tensor from a fiber without authoritative shape\"\"\"\n\n y1 = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n f1 = y1.getRoot()\n\n t1 = Tensor.fromFiber([\"M\", \"K\"], f1)\n\n with self.subTest(test=\"All ranks\"):\n self.assertEqual(t1.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t1.getShape(), [7, 4])\n\n with self.subTest(test=\"All ranks specified\"):\n self.assertEqual(t1.getShape([\"M\", \"K\"]), [7, 4])\n\n with self.subTest(test=\"Just rank 'M' as list\"):\n self.assertEqual(t1.getShape([\"M\"]), [7])\n\n with self.subTest(test=\"Just rank 'K' as list\"):\n self.assertEqual(t1.getShape([\"K\"]), [4])\n\n with self.subTest(test=\"Just rank 'M'\"):\n self.assertEqual(t1.getShape(\"M\"), 7)\n\n with self.subTest(test=\"Just rank 'K'\"):\n self.assertEqual(t1.getShape(\"K\"), 4)\n\n with self.subTest(test=\"Check authoritative\"):\n self.assertIsNone(t1.getShape(authoritative=True))\n self.assertIsNone(t1.getShape([\"M\", \"K\"], authoritative=True))\n self.assertIsNone(t1.getShape([\"M\"], authoritative=True))\n self.assertIsNone(t1.getShape([\"K\"], 
authoritative=True))\n self.assertIsNone(t1.getShape(\"M\", authoritative=True))\n self.assertIsNone(t1.getShape(\"K\", authoritative=True))\n \n\n def test_shape_fromFiber_authoritative(self):\n \"\"\"Test shape of a tensor from a fiber with authoritative shape\"\"\"\n\n y1 = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n f1 = y1.getRoot()\n t1 = Tensor.fromFiber([\"M\", \"K\"], f1, [100,200])\n\n with self.subTest(test=\"All ranks\"):\n self.assertEqual(t1.getRankIds(), [\"M\", \"K\"])\n self.assertEqual(t1.getShape(), [100, 200])\n\n with self.subTest(test=\"All ranks specified\"):\n self.assertEqual(t1.getShape([\"M\", \"K\"]), [100, 200])\n\n with self.subTest(test=\"Just rank 'M'\"):\n self.assertEqual(t1.getShape([\"M\"]), [100])\n\n with self.subTest(test=\"Just rank 'K'\"):\n self.assertEqual(t1.getShape([\"K\"]), [200])\n\n with self.subTest(test=\"Just rank 'M'\"):\n self.assertEqual(t1.getShape(\"M\"), 100)\n\n with self.subTest(test=\"Just rank 'K'\"):\n self.assertEqual(t1.getShape(\"K\"), 200)\n\n with self.subTest(test=\"Check authoritative\"):\n self.assertEqual(t1.getShape(authoritative=True), [100, 200])\n self.assertEqual(t1.getShape([\"M\", \"K\"], authoritative=True), [100, 200])\n self.assertEqual(t1.getShape([\"M\"], authoritative=True), [100])\n self.assertEqual(t1.getShape([\"K\"], authoritative=True), [200])\n self.assertEqual(t1.getShape(\"M\", authoritative=True), 100)\n self.assertEqual(t1.getShape(\"K\", authoritative=True), 200)\n\n def test_rankid_2D(self):\n \"\"\"Test setting rank ids of 2D tensor\"\"\"\n\n # 0 1 2 3\n #\n l1 = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 203 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 603 ] ] # 6\n\n\n rank_ids = [\"M\", \"K\"]\n t1 = Tensor.fromUncompressed(rank_ids, l1)\n\n rank_ids2 = t1.getRankIds()\n\n self.assertEqual(rank_ids2, rank_ids)\n\n rank_ids_new = [\"M2\", \"M1\"]\n t1.setRankIds(rank_ids_new)\n\n rank_ids3 = t1.getRankIds()\n\n self.assertEqual(rank_ids3, rank_ids_new)\n\n \nif __name__ == '__main__':\n unittest.main()\n\n", "id": "598662", "language": "Python", "matching_score": 3.640563488006592, "max_stars_count": 2, "path": "test/test_tensor_shape.py" }, { "content": "import unittest\n\nfrom fibertree import Payload\nfrom fibertree import Fiber\nfrom fibertree import Rank\nfrom fibertree import Tensor\n\n\nclass TestTensor(unittest.TestCase):\n\n def test_constructor_empty(self):\n \"\"\"Test construction of empty tensor\"\"\"\n\n ranks = [\"M\", \"K\"]\n\n t = Tensor(rank_ids=ranks)\n self.assertEqual(t.getRankIds(), ranks)\n self.assertEqual(t.getRoot().getRankIds(), ranks)\n\n def test_constructor_shape(self):\n \"\"\"Test construction of shape of tensor\"\"\"\n\n ranks = [\"M\", \"K\"]\n shape = [4, 8]\n\n t = Tensor(rank_ids=ranks, shape=shape)\n\n self.assertEqual(t.getRankIds(), ranks)\n self.assertEqual(t.getRoot().getRankIds(), ranks)\n\n self.assertEqual(t.getShape(), shape)\n self.assertEqual(t.getRoot().getShape(), shape)\n\n\n def test_constructor_shape(self):\n \"\"\"Test construction of shape of tensor\"\"\"\n\n ranks = [\"M\", \"K\"]\n name = \"ME\"\n\n t1 = Tensor(rank_ids=ranks, name=name)\n\n self.assertEqual(t1.getName(), name)\n\n t2 = Tensor(rank_ids=ranks)\n t2.setName(name)\n\n self.assertEqual(t2.getName(), name)\n\n\n def test_constructor_rank_0D(self):\n \"\"\"Test construction of 0-D tensor\"\"\"\n\n t = Tensor(rank_ids=[])\n p = t.getRoot()\n p += 1\n\n self.assertEqual(p, 1)\n\n\n def 
test_new(self):\n \"\"\"Test construction of a tensor from a file\"\"\"\n\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n # Filename no longer becomes tensor name\n # self.assertEqual(t.getName(), \"test_tensor-1\")\n\n self.assertEqual(t.getRankIds(),[ \"M\", \"K\" ])\n self.assertEqual(t.getShape(), [7, 4])\n\n\n def test_equal(self):\n \"\"\"Test equality comparison\"\"\"\n\n tensor1 = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n tensor2 = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n self.assertTrue(tensor1 == tensor2)\n\n def test_fromYAML(self):\n \"\"\"Test construction from a YAML file\"\"\"\n\n tensor_ref = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n tensor = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n self.assertTrue(tensor == tensor_ref)\n\n\n def test_fromYAMLfile_0D(self):\n \"\"\"Test construction of 0-D tensor from a YAML file\"\"\"\n\n tensor_ref = Tensor(rank_ids=[])\n root = tensor_ref.getRoot()\n root += 2\n\n tensor = Tensor.fromYAMLfile(\"./data/tensor_0d.yaml\")\n\n self.assertTrue(tensor == tensor_ref)\n\n\n def test_fomYAMLfile_3D(self):\n \"\"\"Test construction of 0-D tensor from a YAML file\"\"\"\n\n t = Tensor.fromYAMLfile(\"./data/tensor_3d-0.yaml\")\n\n # TBD: Check that data is good\n\n rankids_ref = [\"M\", \"N\", \"K\"]\n shape_ref = [21, 51, 11]\n\n self.assertEqual(t.getRankIds(), rankids_ref)\n self.assertEqual(t.getShape(), shape_ref)\n\n\n def test_fromUncompressed_1D(self):\n \"\"\"Test construction of a tensor from nested lists\"\"\"\n\n tensor_ref = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n # Manual copy of test_tensor-1.yaml\n\n # 0 1 2 3\n #\n t = [ 100, 101, 0, 102 ]\n\n fiber = Fiber( [0, 1, 3], [100, 101, 102])\n tensor = Tensor.fromUncompressed([\"M\"], t)\n\n self.assertEqual(tensor.getRoot(), fiber)\n\n\n def test_fromUncompressed_2D(self):\n \"\"\"Test construction of a tensor from nested lists\"\"\"\n\n tensor_ref = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n # Manual copy of test_tensor-1.yaml\n\n # 0 1 2 3\n #\n t = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 203 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 603 ] ] # 6\n\n tensor = Tensor.fromUncompressed([\"M\", \"K\"], t)\n\n self.assertEqual(tensor, tensor_ref)\n\n def test_fromUncompressed_2D_wo_ids(self):\n \"\"\"Test construction of a tensor from nested lists without ids\"\"\"\n\n tensor_in = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n root = tensor_in.getRoot()\n tensor_ref = Tensor.fromFiber([\"R1\", \"R0\"], root)\n\n # Manual copy of test_tensor-1.yaml\n\n # 0 1 2 3\n #\n t = [ [ 0, 0, 0, 0 ], # 0\n [ 100, 101, 102, 0 ], # 1\n [ 0, 201, 0, 203 ], # 2\n [ 0, 0, 0, 0 ], # 3\n [ 400, 0, 402, 0 ], # 4\n [ 0, 0, 0, 0 ], # 5\n [ 0, 601, 0, 603 ] ] # 6\n\n tensor = Tensor.fromUncompressed([\"R1\", \"R0\"], t)\n\n self.assertEqual(tensor, tensor_ref)\n\n def test_fromUncompressed_20(self):\n \"\"\"Test construction of a tensor a scalar\"\"\"\n\n t_ref = Tensor(rank_ids=[])\n p = t_ref.getRoot()\n p += 2\n\n t0 = Tensor.fromUncompressed([], 2)\n\n self.assertEqual(t0, t_ref)\n\n t1 = Tensor.fromUncompressed(rank_ids=[], root=2)\n\n self.assertEqual(t1, t_ref)\n\n t2 = Tensor.fromUncompressed(root=2)\n\n self.assertEqual(t2, t_ref)\n\n\n def test_fromFiber(self):\n \"\"\"Test construction of a tensor from a fiber\"\"\"\n\n tensor_ref = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n root = tensor_ref.getRoot()\n\n tensor = 
Tensor.fromFiber([\"M\", \"K\"], root)\n\n self.assertEqual(tensor, tensor_ref)\n\n def test_fromFiber_wo_ids(self):\n \"\"\"Test construction of a tensor from a fiber without rank ids\"\"\"\n\n tensor_in = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n root = tensor_in.getRoot()\n tensor_ref = Tensor.fromFiber([\"R1\", \"R0\"], root)\n\n tensor = Tensor.fromFiber(fiber=root)\n\n self.assertEqual(tensor, tensor_ref)\n\n\n def test_fromRandom(self):\n \"\"\"Test construction of a random tensor\"\"\"\n\n rank_ids = [\"X\", \"Y\"]\n shape = [10, 10]\n tensor_ref = Tensor.fromUncompressed(rank_ids,\n [[0, 10, 10, 1, 0, 9, 8, 0, 0, 3],\n [9, 1, 0, 10, 1, 0, 10, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 3, 0, 3, 5, 0, 5, 7, 0, 0],\n [6, 0, 0, 0, 0, 0, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 2, 8, 2, 3, 7, 0, 0, 10],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 4, 0, 2, 9, 4, 0, 5],\n [6, 3, 0, 8, 0, 10, 0, 9, 4, 0]])\n\n tensor = Tensor.fromRandom(rank_ids, shape, [0.5, 0.5], 10, seed=3)\n\n self.assertEqual(tensor, tensor_ref)\n self.assertEqual(tensor.getRankIds(), rank_ids)\n\n\n def test_fromRandom_wo_ids(self):\n \"\"\"Test construction of a random tensor without rankids\"\"\"\n\n rank_ids = [\"R1\", \"R0\"]\n shape = [10, 10]\n tensor_ref = Tensor.fromUncompressed(rank_ids,\n [[0, 10, 10, 1, 0, 9, 8, 0, 0, 3],\n [9, 1, 0, 10, 1, 0, 10, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 3, 0, 3, 5, 0, 5, 7, 0, 0],\n [6, 0, 0, 0, 0, 0, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 2, 8, 2, 3, 7, 0, 0, 10],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 4, 0, 2, 9, 4, 0, 5],\n [6, 3, 0, 8, 0, 10, 0, 9, 4, 0]])\n\n tensor = Tensor.fromRandom(None, shape, [0.5, 0.5], 10, seed=3)\n\n self.assertEqual(tensor, tensor_ref)\n self.assertEqual(tensor.getRankIds(), rank_ids)\n\n\n def test_print_0D(self):\n \"\"\"Test printing a 0-D tensor\"\"\"\n\n a = Tensor(rank_ids=[])\n p = a.getRoot()\n p += 2\n\n a_s_ref = \"<2>\"\n\n a_s = f\"{a}\"\n\n self.assertEqual(a_s, a_s_ref)\n\n a_r_ref = \"T()/[Payload(2)]\"\n\n a_r = f\"{a!r}\"\n\n self.assertEqual(a_r, a_r_ref)\n\n\n def test_print_2D(self):\n \"\"\"Test printing a 2-D tensor\"\"\"\n\n a = Tensor.fromYAMLfile(\"./data/matrix-a.yaml\")\n\n#\n# Old style print\n#\n# a_s_ref = \"T(M,K)/[\\n\" + \\\n# \" Rank: M F(M)/[( 0 -> F(K)/[(0 -> <1>) \\n\" + \\\n# \" (2 -> <3>) ])\\n\" + \\\n# \" ( 1 -> F(K)/[(0 -> <1>) \\n\" + \\\n# \" (3 -> <4>) ])\\n\" + \\\n# \" ( 3 -> F(K)/[(2 -> <3>) \\n\" + \\\n# \" (3 -> <4>) ])\\n\" + \\\n# \" Rank: K F(K)/[(0 -> <1>) \\n\" + \\\n# \" (2 -> <3>) ],\\n\" + \\\n# \" F(K)/[(0 -> <1>) \\n\" + \\\n# \" (3 -> <4>) ],\\n\" + \\\n# \" F(K)/[(2 -> <3>) \\n\" + \\\n# \" (3 -> <4>) ]\\n\" + \\\n# \"]\"\n\n a_s_ref = \"F(M)/[( 0 -> F(K)/[(0 -> <1>) (2 -> <3>) ])( 1 -> F(K)/[(0 -> <1>) (3 -> <4>) ])......\"\n\n a_s = f\"{a}\"\n\n self.assertEqual(a_s, a_s_ref)\n\n a_r_ref = \"T(M,K)/[\\n\" + \\\n \" R(M)/[Fiber([0, 1, 3], [Fiber([0, 2], [1, 3], owner=K), Fiber([0, 3], [1, 4], owner=K), Fiber([2, 3], [3, 4], owner=K)], owner=M)]\\n\" + \\\n \" R(K)/[Fiber([0, 2], [1, 3], owner=K), Fiber([0, 3], [1, 4], owner=K), Fiber([2, 3], [3, 4], owner=K)]\\n\" + \\\n \"]\"\n a_r = f\"{a!r}\"\n\n self.assertEqual(a_r, a_r_ref)\n\n\n def test_setRoot(self):\n \"\"\"Test adding a new root\"\"\"\n\n tensor_ref = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n root = tensor_ref.getRoot()\n\n tensor = Tensor(rank_ids=[\"M\", \"K\"])\n tensor.setRoot(root)\n\n self.assertEqual(tensor, tensor_ref)\n\n\n 
def test_getPayload_0d(self):\n \"\"\"Test getPayload of a 0-D tensor\"\"\"\n\n p_ref = 10\n\n t = Tensor(rank_ids=[])\n r = t.getRoot()\n r <<= p_ref\n\n p = t.getPayload()\n self.assertEqual(p_ref, p)\n\n p = t.getPayload(0)\n self.assertEqual(p_ref, p)\n\n p = t.getPayload(1)\n self.assertEqual(p_ref, p)\n\n\n\n def test_getPayload_2d(self):\n \"\"\"Test getPayload of a 2-D tensor\"\"\"\n\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n with self.subTest(test=\"Existing element\"):\n p23_ref = 203\n p23 = t.getPayload(2, 3)\n\n self.assertEqual(p23_ref, p23)\n\n # Make sure change is seen\n p23_new_ref = 310\n p23 <<= p23_new_ref\n\n p23_new = t.getPayload(2, 3)\n\n self.assertEqual(p23_new_ref, p23_new)\n\n\n with self.subTest(test=\"Non-existing element\"):\n p31_ref = 0\n p31 = t.getPayload(3, 1)\n\n self.assertEqual(p31_ref, p31)\n\n # Make sure change is NOT seen\n p31_new_ref = 100\n\n p31 <<= p31_new_ref\n p31_new = t.getPayload(3, 1)\n\n self.assertEqual(0, p31_new)\n\n\n with self.subTest(test=\"Element of non-existing fiber\"):\n p51_ref = 0\n p51 = t.getPayload(5, 1)\n\n self.assertEqual(p51_ref, p51)\n\n # Make sure change is NOT seen\n p51_new_ref = 100\n\n p51 <<= p51_new_ref\n p51_new = t.getPayload(5, 1)\n\n self.assertEqual(0, p51_new)\n\n\n with self.subTest(test=\"Existing fiber\"):\n p4_ref = Fiber([0, 2], [400, 402])\n p4 = t.getPayload(4)\n\n self.assertEqual(p4_ref, p4)\n\n\n def test_getPayloadRef_0d(self):\n \"\"\"Test getPayloadRef of a 0-D tensor\"\"\"\n\n p_ref = 10\n\n t = Tensor(rank_ids=[])\n r = t.getRoot()\n r <<= p_ref\n\n p = t.getPayloadRef()\n self.assertEqual(p_ref, p)\n\n p = t.getPayloadRef(0)\n self.assertEqual(p_ref, p)\n\n p = t.getPayloadRef(1)\n self.assertEqual(p_ref, p)\n\n\n\n def test_getPayloadRef_2d(self):\n \"\"\"Test getPayloadRef of a 2-D tensor\"\"\"\n\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n with self.subTest(test=\"Existing element\"):\n p23_ref = 203\n p23 = t.getPayloadRef(2, 3)\n\n self.assertEqual(p23_ref, p23)\n\n # Make sure change is seen\n p23_new_ref = 310\n p23 <<= p23_new_ref\n\n p23_new = t.getPayload(2, 3)\n\n self.assertEqual(p23_new_ref, p23_new)\n\n\n with self.subTest(test=\"Non-existing element\"):\n p31_ref = 0\n p31 = t.getPayloadRef(3, 1)\n\n self.assertEqual(p31_ref, p31)\n\n # Make sure change is seen\n p31_new_ref = 100\n\n p31 <<= p31_new_ref\n p31_new = t.getPayload(3, 1)\n\n self.assertEqual(p31_new_ref, p31_new)\n\n\n with self.subTest(test=\"Element of non-existing fiber\"):\n p51_ref = 0\n p51 = t.getPayloadRef(5, 1)\n\n self.assertEqual(p51_ref, p51)\n\n # Make sure change is NOT seen\n p51_new_ref = 100\n\n p51 <<= p51_new_ref\n p51_new = t.getPayload(5, 1)\n\n self.assertEqual(p51_new_ref, p51_new)\n\n\n with self.subTest(test=\"Existing fiber\"):\n p4_ref = Fiber([0, 2], [400, 402])\n p4 = t.getPayloadRef(4)\n\n self.assertEqual(p4_ref, p4)\n\n\n def test_default(self):\n \"\"\"Test of default default\"\"\"\n\n t = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n\n e = Fiber([], [])\n\n self.assertEqual(t.getDefault(), 0)\n\n t_root = t.getRoot()\n\n x = t_root.getPayload(1)\n self.assertEqual(x, e)\n self.assertEqual(x.getDefault(), Fiber)\n\n y = t_root.getPayload(1, 2)\n self.assertEqual(y, e)\n self.assertEqual(y.getDefault(), 0)\n\n z = t_root.getPayload(1, 2, 3)\n self.assertEqual(z, 0)\n\n\n def test_default_nonzero(self):\n \"\"\"Test set/get of nonzero default\"\"\"\n\n t = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n\n v = 10\n e = Fiber([], [])\n\n t.setDefault(v)\n 
self.assertEqual(t.getDefault(), v)\n\n t_root = t.getRoot()\n\n x = t_root.getPayload(1)\n self.assertEqual(x, e)\n\n y = t_root.getPayload(1, 2)\n self.assertEqual(y, e)\n\n z = t_root.getPayload(1, 2, 3)\n self.assertEqual(z, v)\n\n\n def test_default_nonscalar(self):\n \"\"\"Test set/get of nonzero default\"\"\"\n\n t = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n\n v = (10, 10)\n e = Fiber([], [])\n\n t.setDefault(v)\n self.assertEqual(t.getDefault(), v)\n\n t_root = t.getRoot()\n\n x = t_root.getPayload(1)\n self.assertEqual(x, e)\n\n y = t_root.getPayload(1, 2)\n self.assertEqual(y, e)\n\n z = t_root.getPayload(1, 2, 3)\n self.assertEqual(z, v)\n\n\n def test_values(self):\n \"\"\"Test counting values in a tensor\"\"\"\n\n tensor = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n\n count = tensor.countValues()\n\n self.assertEqual(count, 9)\n\n def test_dump(self):\n \"\"\"Test dumping a tensor\"\"\"\n\n tensor = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n tensor.dump(\"/tmp/test_tensor-1.yaml\")\n\n tensor_tmp = Tensor.fromYAMLfile(\"/tmp/test_tensor-1.yaml\")\n\n self.assertTrue(tensor == tensor_tmp)\n\n def test_init_mutable(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n self.assertFalse(t.isMutable())\n\n t2 = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n self.assertTrue(t2.isMutable())\n\n def test_mutable(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n t.setMutable(True)\n self.assertTrue(t.isMutable())\n\n def test_mutable_after_split(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n t2 = t.splitUniform(10)\n self.assertFalse(t2.isMutable())\n\n t3 = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n t4 = t3.splitUniform(10)\n self.assertTrue(t4.isMutable())\n\n def test_mutable_after_swap(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n t2 = t.swapRanks()\n self.assertFalse(t2.isMutable())\n\n t3 = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n t4 = t3.swapRanks()\n self.assertTrue(t4.isMutable())\n\n def test_mutable_after_flatten(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n t2 = t.flattenRanks()\n self.assertFalse(t2.isMutable())\n\n t3 = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n t4 = t3.flattenRanks()\n self.assertTrue(t4.isMutable())\n\n def test_mutable_after_unflatten(self):\n t = Tensor.fromYAMLfile(\"./data/test_tensor-1.yaml\")\n t2 = t.flattenRanks()\n t3 = t2.unflattenRanks()\n self.assertFalse(t3.isMutable())\n\n t4 = Tensor(rank_ids=[\"X\", \"Y\", \"Z\"])\n t5 = t4.flattenRanks()\n t6 = t5.unflattenRanks()\n self.assertTrue(t6.isMutable())\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "7587102", "language": "Python", "matching_score": 4.050909042358398, "max_stars_count": 2, "path": "test/test_tensor.py" }, { "content": "import unittest\n\nfrom fibertree import *\n\nclass TestFiber(unittest.TestCase):\n\n def test_new_1d(self):\n \"\"\"Create a 1d fiber\"\"\"\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n\n def test_new_2d(self):\n \"\"\"Create a 1d fiber\"\"\"\n\n b0 = Fiber([1, 4, 7], [2, 5, 8])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n a0 = Fiber([2, 4], [b0, b1])\n\n def test_new_empty(self):\n \"\"\"Create an empty fiber\"\"\"\n\n a = Fiber([], [])\n\n def test_comparison_eq_ne(self):\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n b = Fiber([2, 4, 6], [3, 5, 7])\n c = Fiber([2, 5, 6], [3, 5, 7])\n d = Fiber([2, 4, 6], [3, 6, 7])\n\n self.assertTrue(a == b)\n self.assertTrue(a != c)\n self.assertTrue(a != d)\n\n\n def test_comparison_eq(self):\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n b = 
Fiber([2, 4, 6], [3, 5, 7])\n\n self.assertEqual(a, b)\n\n def test_comparison_eq_1D(self):\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n b2 = Fiber([2, 4, 6, 8], [3, 5, 7, 0])\n b3 = Fiber([2, 4, 6], [3, 6, 7])\n b4 = Fiber([2, 4, 8], [3, 5, 7])\n\n self.assertEqual(a, b1)\n self.assertEqual(a, b2)\n self.assertNotEqual(a, b3)\n self.assertNotEqual(a, b4)\n\n c = Fiber([], [])\n d1 = Fiber([0, 1], [0, 0])\n d2 = Fiber([0, 1], [0, 10])\n\n self.assertEqual(c, d1)\n self.assertNotEqual(c, d2)\n\n def test_comparison_eq_2D(self):\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n b2 = Fiber([2, 4, 6, 8], [3, 5, 7, 0])\n b3 = Fiber([2, 4, 6], [3, 6, 7])\n b4 = Fiber([2, 4, 8], [3, 5, 7])\n\n x0 = Fiber([2, 4], [a, a])\n x1 = Fiber([2, 4], [a, b1])\n x2 = Fiber([2, 4], [a, b2])\n x3 = Fiber([2, 4], [a, b3])\n x4 = Fiber([2, 4], [a, b4])\n\n self.assertEqual(x0, x1)\n self.assertEqual(x0, x2)\n self.assertNotEqual(x0, x3)\n self.assertNotEqual(x0, x4)\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, x4)\n self.assertNotEqual(x2, x3)\n self.assertNotEqual(x2, x4)\n self.assertNotEqual(x3, x4)\n\n def test_fromCoordPayloadList(self):\n\n cp = [(2, 3), (4, 5), (6, 7)]\n\n (coords, payloads) = zip(*cp)\n\n a_ref = Fiber(coords=coords, payloads=payloads)\n\n a1 = Fiber.fromCoordPayloadList(*cp)\n self.assertEqual(a1, a_ref)\n self.assertEqual(a1.getDefault(), 0)\n\n # Removed functionality to set fiber default\n\n# a2 = Fiber.fromCoordPayloadList(*cp, default=1)\n# self.assertEqual(a2, a_ref)\n# self.assertEqual(a2.getDefault(), 1)\n\n# a3 = Fiber.fromCoordPayloadList(default=2, *cp)\n# self.assertEqual(a3, a_ref)\n# self.assertEqual(a3.getDefault(), 2)\n\n\n def test_fromYAMLfile_1D(self):\n \"\"\"Read a YAMLfile 1-D\"\"\"\n\n a_ref = Fiber([2, 4, 6], [3, 5, 7])\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-1.yaml\")\n\n self.assertEqual(a, a_ref)\n\n def test_fromYAMLfile_2D(self):\n \"\"\"Read a YAMLfile 2-D\"\"\"\n\n b0 = Fiber([1, 4, 7], [2, 5, 8])\n b1 = Fiber([2, 4, 6], [3, 5, 7])\n a_ref = Fiber([2, 4], [b0, b1])\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n self.assertEqual(a, a_ref)\n\n def test_fromUncompressed_1D(self):\n \"\"\"Create from uncompressed 1-D\"\"\"\n\n f_ref = Fiber([0, 1, 3, 4], [1, 2, 4, 5])\n\n f = Fiber.fromUncompressed([1, 2, 0, 4, 5, 0])\n\n self.assertEqual(f, f_ref)\n\n def test_fromUncompressed_2D(self):\n \"\"\"Create from uncompressed 2-D\"\"\"\n\n a1 = Fiber([0, 1, 3, 4], [1, 2, 4, 5])\n a2 = Fiber([2, 3], [3, 4])\n\n f_ref = Fiber([0, 2], [a1, a2])\n\n f = Fiber.fromUncompressed([[1, 2, 0, 4, 5, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 3, 4, 0, 0]])\n\n self.assertEqual(f, f_ref)\n\n def test_fromUncompressed_3D(self):\n \"\"\"Create from uncomrpessed 3-D\"\"\"\n\n f_ref = Fiber.fromYAMLfile(\"./data/test_fiber-3.yaml\")\n\n u_t = [[[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 2, 3, 4],\n [1, 2, 0, 4]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 0, 0, 0],\n [1, 2, 0, 4]]]\n\n f = Fiber.fromUncompressed(u_t)\n\n self.assertEqual(f, f_ref)\n\n def test_fromUncompressed_1D_empty(self):\n \"\"\"Create empty tensor from uncompressed 1-D\"\"\"\n\n f_ref = Fiber([], [])\n\n f = Fiber.fromUncompressed([0, 0, 0, 0, 0])\n\n self.assertEqual(f, f_ref)\n\n def test_fromUncompressed_2D_empty(self):\n \"\"\"Create empty tensor from uncompressed 2-D\"\"\"\n\n f_ref = Fiber([], [])\n\n f = Fiber.fromUncompressed([[0, 0, 0, 0, 0, 
0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]])\n\n self.assertEqual(f, f_ref)\n\n def test_fromRandom_2D(self):\n \"\"\"Create a random 2D tensor\"\"\"\n\n shape = [10, 10]\n\n fiber_ref = Fiber.fromUncompressed([[0, 10, 10, 1, 0, 9, 8, 0, 0, 3],\n [9, 1, 0, 10, 1, 0, 10, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 3, 0, 3, 5, 0, 5, 7, 0, 0],\n [6, 0, 0, 0, 0, 0, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 2, 8, 2, 3, 7, 0, 0, 10],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 4, 0, 2, 9, 4, 0, 5],\n [6, 3, 0, 8, 0, 10, 0, 9, 4, 0]])\n\n fiber = Fiber.fromRandom(shape, [0.5, 0.5], 10, seed=3)\n\n self.assertEqual(fiber, fiber_ref)\n\n\n def test_getCoords(self):\n \"\"\"Extract coordinates\"\"\"\n\n c_ref = [2, 4, 6]\n p_ref = [3, 5, 7]\n\n a = Fiber(c_ref, p_ref)\n\n c = a.getCoords()\n\n self.assertEqual(c, c_ref)\n\n def test_getPayloads(self):\n \"\"\"Extract payloads\"\"\"\n\n c_ref = [2, 4, 6]\n p_ref = [3, 5, 7]\n\n a = Fiber(c_ref, p_ref)\n\n p = a.getPayloads()\n\n self.assertEqual(p, p_ref)\n\n def test_isempty_1D(self):\n \"\"\"Test for empty fiber\"\"\"\n\n a = Fiber([], [])\n self.assertTrue(a.isEmpty())\n\n b = Fiber([0, 1], [0, 0])\n self.assertTrue(b.isEmpty())\n\n c = Fiber([0, 1], [0, 1])\n self.assertFalse(c.isEmpty())\n\n def test_isempty_2D(self):\n \"\"\"Test for empty fiber\"\"\"\n\n a1 = Fiber([], [])\n a2 = Fiber([0, 1], [0, 0])\n a3 = Fiber([0, 1], [0, 1])\n\n a = Fiber([2, 3], [a1, a1])\n self.assertTrue(a.isEmpty())\n\n b = Fiber([3, 4], [a2, a2])\n self.assertTrue(b.isEmpty())\n\n c = Fiber([3, 4], [a1, a2])\n self.assertTrue(c.isEmpty())\n\n d = Fiber([4, 5], [a1, a3])\n self.assertFalse(d.isEmpty())\n\n def test_nonempty_2D(self):\n \"\"\"Test for empty fiber\"\"\"\n\n a1 = Fiber([], [])\n a2 = Fiber([0, 1], [0, 0])\n a3 = Fiber([0, 1], [0, 1])\n\n a = Fiber([1, 2, 3], [a1, a2, a3])\n\n ne = a.nonEmpty()\n\n ne3 = Fiber([1], [1])\n ne_ref = Fiber([3], [ne3])\n\n self.assertEqual(ne, ne_ref)\n\n def test_setDefault(self):\n \"\"\"Test setting defaults - unimplemented\"\"\"\n\n pass\n\n def test_setOwner(self):\n \"\"\"Test setting owner - unimplemented\"\"\"\n\n pass\n\n\n def test_minCoord(self):\n \"\"\"Find minimum coordinate\"\"\"\n\n c_ref = [2, 4, 6]\n p_ref = [3, 5, 7]\n\n c_min = min(c_ref)\n\n a = Fiber(c_ref, p_ref)\n\n self.assertEqual(a.minCoord(), c_min)\n\n\n def test_maxCoord(self):\n \"\"\"Find minimum coordinate\"\"\"\n\n c_ref = [2, 4, 6]\n p_ref = [3, 5, 7]\n\n c_max = max(c_ref)\n\n a = Fiber(c_ref, p_ref)\n\n self.assertEqual(a.maxCoord(), c_max)\n\n\n def test_minmaxCoord_empty(self):\n\n f = Fiber([], [])\n\n self.assertIsNone(f.minCoord())\n self.assertIsNone(f.maxCoord())\n\n\n def test_values_2D(self):\n \"\"\"Count values in a 2-D fiber\"\"\"\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n self.assertEqual(a.countValues(), 6)\n\n\n def test_values_with_zero(self):\n \"\"\"Count values in a 1-D fiber with an explict zero\"\"\"\n\n a = Fiber([1, 8, 9], [2, 0, 10])\n\n self.assertEqual(a.countValues(), 2)\n\n\n def test_iter(self):\n \"\"\"Test iteration over a fiber\"\"\"\n\n c0 = [1, 8, 9]\n p0 = [2, 0, 10]\n\n a = Fiber(c0, p0)\n\n i = 0\n for (c, p) in a:\n self.assertEqual(c, c0[i])\n self.assertEqual(p, p0[i])\n i += 1\n\n\n def test_iterShape(self):\n \"\"\"Test iteration over a fiber's shape\"\"\"\n\n c0 = [1, 8, 9]\n p0 = [2, 0, 10]\n\n c0_ans = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n p0_ans = [0, 2, 0, 0, 0, 0, 0, 0, 0, 10]\n\n a = Fiber(c0, p0)\n\n i = 0\n for (c, p) in a.iterShape():\n with 
self.subTest(test=f\"Element {i}\"):\n self.assertEqual(c, c0_ans[i])\n self.assertEqual(p, p0_ans[i])\n self.assertIsInstance(p, Payload)\n i += 1\n\n with self.subTest(test=\"Test fiber internals\"):\n self.assertEqual(a.coords, c0)\n self.assertEqual(a.payloads, p0)\n\n def test_iterShapeRef(self):\n \"\"\"Test iteration over a fiber's shape with allocation\"\"\"\n\n c0 = [1, 8, 9]\n p0 = [2, 0, 10]\n\n c0_ans = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n p0_ans = [0, 2, 0, 0, 0, 0, 0, 0, 0, 10]\n\n a = Fiber(c0, p0)\n\n i = 0\n for (c, p) in a.iterShapeRef():\n with self.subTest(test=f\"Element {i}\"):\n self.assertEqual(c, c0_ans[i])\n self.assertEqual(p, p0_ans[i])\n self.assertIsInstance(p, Payload)\n i += 1\n\n with self.subTest(test=\"Test fiber internals\"):\n self.assertEqual(a.coords, c0_ans)\n self.assertEqual(a.payloads, p0_ans)\n\n\n\n def test_getitem_simple(self):\n \"\"\"Get item - simple\"\"\"\n\n c_ref = [2, 4, 6, 8]\n p_ref = [3, 5, 7, 9]\n\n a = Fiber(c_ref, p_ref)\n\n (coord0, payload0) = a[0]\n\n self.assertEqual(coord0, 2)\n self.assertEqual(payload0, 3)\n\n (coord1, payload1) = a[1]\n\n self.assertEqual(coord1, 4)\n self.assertEqual(payload1, 5)\n\n (coord2, payload2) = a[-2]\n self.assertEqual(coord2, 6)\n self.assertEqual(payload2, 7)\n\n (coord3, payload3) = a[-1]\n self.assertEqual(coord3, 8)\n self.assertEqual(payload3, 9)\n\n\n def test_getitem_slice(self):\n \"\"\"Get item - slices\"\"\"\n\n c_ref = [2, 4, 6, 8]\n p_ref = [3, 5, 7, 9]\n\n a = Fiber(c_ref, p_ref)\n\n slice1 = a[0:2]\n\n slice1_coord_ref = a.coords[0:2]\n slice1_payload_ref = a.payloads[0:2]\n slice1_ref = Fiber(slice1_coord_ref, slice1_payload_ref)\n\n self.assertEqual(slice1, slice1_ref)\n\n\n def test_getitem_nD(self):\n \"\"\"Get item - multi-dimensional\"\"\"\n\n c00 = [1, 2, 3]\n p00 = [2, 3, 4]\n f00 = Fiber(c00, p00)\n\n c01 = [4, 6, 8]\n p01 = [5, 7, 9]\n f01 = Fiber(c01, p01)\n\n c02 = [5, 7]\n p02 = [6, 8]\n f02 = Fiber(c02, p02)\n\n c0 = [4, 5, 8]\n p0 = [f00, f01, f02]\n f = Fiber(c0, p0)\n\n f_1_1 = f[1, 1]\n f_1_1_ref = CoordPayload(c0[1], f01[1])\n\n self.assertEqual(f_1_1, f_1_1_ref)\n\n f_02_1 = f[0:2, 1]\n f_02_1_ref = Fiber(c0[0:2], [f00[1], f01[1]])\n\n self.assertEqual(f_02_1, f_02_1_ref)\n\n f_12_1 = f[1:2, 1]\n f_12_1_ref = Fiber(c0[1:2], [f01[1]])\n\n self.assertEqual(f_12_1, f_12_1_ref)\n\n f_02_01 = f[0:2, 0:1]\n f_02_01_ref = Fiber(c0[0:2], [f00[0:1], f01[0:1]])\n\n self.assertEqual(f_02_01, f_02_01_ref)\n\n f_13_02 = f[1:3, 0:2]\n f_13_02_ref = Fiber(c0[1:3], [f01[0:2], f02[0:2]])\n\n self.assertEqual(f_13_02, f_13_02_ref)\n\n f_13_12 = f[1:3, 1:2]\n f_13_12_ref = Fiber(c0[1:3], [f01[1:2], f02[1:2]])\n\n self.assertEqual(f_13_12, f_13_12_ref)\n\n def test_setitem_scalar(self):\n \"\"\"test_setitem_scalar\"\"\"\n\n f = Fiber([0,1,3], [1,0,4])\n\n newf = Fiber([], [])\n\n newcoords = [ None, 0, 1, 2, 3, 4 ]\n newpayloads = [ 6, (4, 8), newf, None]\n\n ans_c = [0, 1, 3]\n ans_p = [6, (4, 8), newf, newf]\n\n for i in range(len(f)):\n for j, p in enumerate(newpayloads):\n f[i] = p\n a = f[i]\n self.assertEqual(a.coord, ans_c[i])\n self.assertEqual(a.payload, ans_p[j])\n\n\n def test_setitem_coordpayload(self):\n \"\"\"test_setitem_coordpayload\"\"\"\n\n f = Fiber([0,1,3], [1,0,4])\n\n newf = Fiber([], [])\n\n newcoords = [ None, 0, 1, 2, 3, 4 ]\n newpayloads = [ 6, (4, 8), newf, None]\n\n #\n # Dimensions position, newcoords-index, newpayload-index\n #\n ans_cvv = [[[0, 0, 0, 0],\n [0, 0, 0, 0],\n [None, None, None, None],\n [None, None, None, None],\n [None, None, None, None],\n 
[None, None, None, None]],\n [[1, 1, 1, 1],\n [None, None, None, None],\n [1, 1, 1, 1],\n [2, 2, 2, 2],\n [None, None, None, None],\n [None, None, None, None]],\n [[3, 3, 3, 3],\n [None, None, None, None],\n [None, None, None, None],\n [None, None, None, None],\n [3, 3, 3, 3],\n [4, 4, 4, 4]]]\n\n ans_pvv = [[[6, (4, 8), newf, newf],\n [6, (4, 8), newf, newf],\n [None, None, None, None],\n [None, None, None, None],\n [None, None, None, None],\n [None, None, None, None]],\n [[6, (4, 8), newf, newf],\n [None, None, None, None],\n [6, (4, 8), newf, newf],\n [6, (4, 8), newf, newf ],\n [None, None, None, None],\n [None, None, None, None]],\n [[6, (4, 8), newf, newf],\n [None, None, None, None],\n [None, None, None, None],\n [None, None, None, None],\n [6, (4, 8), newf, newf],\n [6, (4, 8), newf, newf]]]\n\n for i in range(len(f)):\n\n for j, c in enumerate(newcoords):\n for k, p in enumerate(newpayloads):\n a = f[i]\n if ans_cvv[i][j][k] is not None:\n f[i] = CoordPayload(c, p)\n b = f[i]\n self.assertEqual(b.coord, ans_cvv[i][j][k])\n self.assertEqual(b.payload, ans_pvv[i][j][k])\n else:\n with self.assertRaises(CoordinateError):\n f[i] = CoordPayload(c, p)\n\n\n def test_len(self):\n \"\"\"Find lenght of a fiber\"\"\"\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n self.assertEqual(len(a), 2)\n\n\n def test_getPayload(self):\n \"\"\"Access payloads\"\"\"\n\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n test = [0, 4, 6, 3]\n answer_allocate = [0, 5, 7, 0]\n answer_noallocate = [None, 5, 7, None]\n answer_default = [-1, 5, 7, -1]\n\n for i in range(len(test)):\n self.assertEqual(a.getPayload(test[i]),\n answer_allocate[i])\n self.assertEqual(a.getPayload(test[i], allocate=True),\n answer_allocate[i])\n self.assertEqual(a.getPayload(test[i], allocate=False),\n answer_noallocate[i])\n self.assertEqual(a.getPayload(test[i], allocate=False, default=-1),\n answer_default[i])\n\n def test_getPayloadRef_update(self):\n \"\"\"Update payload references\"\"\"\n\n #\n # Test that each payload or allocated payload is an unique object\n # but updates do not get reflected back to the original fiber\n #\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n test = [0, 4, 5, 6, 3]\n update = [10, 11, 12, 13, 14]\n answer = [0, 5, 0, 7, 0]\n\n for i in range(len(test)):\n x = a.getPayload(test[i])\n x <<= update[i]\n self.assertEqual(a.getPayload(test[i]), answer[i])\n\n\n def test_getPayload_2(self):\n \"\"\"Access payloads - multilevel\"\"\"\n\n a = Fiber.fromUncompressed([[1, 2, 0, 4, 5, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 3, 4, 0, 0]])\n\n # Simple test\n self.assertEqual(a.getPayload(2, 2), 3)\n\n # Multiple tests\n test = [(0, 0), (2, 2), (0, 3), (2, 1)]\n answer_allocate = [1, 3, 4, 0]\n answer_noallocate = [1, 3, 4, None]\n\n for i in range(len(test)):\n p = a.getPayload(*test[i])\n self.assertEqual(p, answer_allocate[i])\n p = a.getPayload(*test[i], allocate=True)\n self.assertEqual(p, answer_allocate[i])\n p = a.getPayload(*test[i], allocate=False)\n self.assertEqual(p, answer_noallocate[i])\n\n def test_getPayload_2_update(self):\n \"\"\"Update payloads - multilevel\"\"\"\n\n a = Fiber.fromUncompressed([[1, 2, 0, 4, 5, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 3, 4, 0, 0]])\n\n\n test = [(0,), (1,), (2, 0), (2, 2), (1, 1)]\n update = [ Fiber([3], [20]), Fiber([4], [21]), 22, 23, 24 ]\n check = [(0,3), (1, 3), (2,0), (2,2), (1,1)]\n answer = [20, 0, 0, 23, 0]\n\n for i in range(len(test)):\n with self.subTest(test=i):\n p = 
a.getPayload(*test[i], allocate=True)\n p <<= update[i]\n q = a.getPayload(*check[i])\n self.assertEqual(q, answer[i])\n\n\n\n def test_getPayload_3(self):\n \"\"\"Access payloads - complex\"\"\"\n\n a = Fiber.fromUncompressed([[1, 2, 0, 4, 5, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 3, 4, 0, 0]])\n\n a_1 = Fiber([], [])\n a_2 = Fiber([2, 3],[3, 4])\n\n # Simple test\n self.assertEqual(a.getPayload(2), a_2)\n\n # Multiple tests\n test = [(2,), (1,), (1, 2)]\n answer_allocate = [a_2, a_1, 0 ]\n answer_noallocate = [a_2, None, None ]\n answer_default = [a_2, -1, -1]\n\n for i in range(len(test)):\n p = a.getPayload(*test[i])\n self.assertEqual(p, answer_allocate[i])\n p = a.getPayload(*test[i], allocate=True)\n self.assertEqual(p, answer_allocate[i])\n p = a.getPayload(*test[i], allocate=False)\n self.assertEqual(p, answer_noallocate[i])\n p = a.getPayload(*test[i], allocate=False, default=-1)\n self.assertEqual(p, answer_default[i])\n\n\n def test_getPayload_shortcut(self):\n \"\"\"getPayload with shortcut\"\"\"\n\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n test = [0, 4, 6, 3, 6]\n start_pos = [0, 0, 1, 1, Payload(2)]\n\n answer_saved_pos = [3, 1, 2, 3, 2]\n answer_saved_stats = [(1, 3),\n (2, 4),\n (3, 5),\n (4, 7),\n (5, 7)]\n\n\n for i in range(len(test)):\n p = a.getPayload(test[i], start_pos=start_pos[i])\n saved_pos = a.getSavedPos()\n saved_pos_stats = a.getSavedPosStats(clear=False)\n self.assertEqual(saved_pos, answer_saved_pos[i])\n self.assertEqual(saved_pos_stats, answer_saved_stats[i])\n\n\n def test_getPayloadRef(self):\n \"\"\"Get payload references\"\"\"\n\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n test = [0, 4, 6, 3]\n answer = [0, 5, 7, 0]\n\n for i in range(len(test)):\n self.assertEqual(a.getPayloadRef(test[i]), answer[i])\n\n def test_getPayloadRef_update(self):\n \"\"\"Update payload references\"\"\"\n\n #\n # Test that each payload or allocated payload is an unique object\n #\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n test = [0, 4, 5, 6, 3]\n update = [10, 11, 12, 13, 14]\n answer = [10, 11, 12, 13, 14]\n\n for i in range(len(test)):\n x = a.getPayloadRef(test[i])\n x <<= update[i]\n self.assertEqual(a.getPayload(test[i]), answer[i])\n\n\n def test_getPayloadRef2(self):\n \"\"\"Get payload references 2-D\"\"\"\n\n t = Tensor(rank_ids=[\"m\", \"n\"])\n a = t.getRoot()\n\n test = [(0,), (2,), (1, 3), (2, 1)]\n answer = [Fiber([], []), Fiber([], []), 0, 0]\n\n for i in range(len(test)):\n p = a.getPayloadRef(*test[i])\n self.assertEqual(p, answer[i])\n\n with self.assertRaises(AssertionError):\n a.getPayloadRef(3, 2, 4)\n\n def test_getPayloadRef2_update(self):\n \"\"\"Update payload references 2-D\"\"\"\n\n t = Tensor(rank_ids=[\"m\", \"n\"])\n a = t.getRoot()\n\n test = [(0,), (2,), (1, 3), (2, 1)]\n update = [ Fiber([3], [20]), Fiber([4], [21]), 22, 23 ]\n check = [(0,3), (2,4), (1,3), (2,1)]\n answer = [20, 21, 22, 23]\n\n for i in range(len(test)):\n with self.subTest(test=i):\n p = a.getPayloadRef(*test[i])\n p <<= update[i]\n q = a.getPayload(*check[i])\n self.assertEqual(q, answer[i])\n\n\n def test_getPayloadRef_shortcut(self):\n \"\"\"getPayloadRef with shortcut\"\"\"\n\n # TBD: Fill in this test...\n\n pass\n\n def test_ilshift(self):\n \"\"\"<<= infix operator\"\"\"\n\n coords = [2, 4, 6, 8, 9, 12, 15, 16, 17, 20 ]\n payloads = [3, 5, 7, 9, 10, 13, 16, 17, 18, 21]\n\n a = Fiber(coords, payloads)\n b = Fiber()\n\n b <<= a\n\n self.assertEqual(a, b)\n\n\n def 
test_getRange(self):\n \"\"\"getRange\"\"\"\n\n coords = [2, 4, 6, 8, 9, 12, 15, 16, 17, 20 ]\n payloads = [3, 5, 7, 9, 10, 13, 16, 17, 18, 21]\n\n a = Fiber(coords, payloads)\n\n startc = [4, 3, 5, 13, 9, 15]\n size = [2, 3, 4, 2, 4, 3]\n end_coord = [6, 6, 9, 15, 13, 18]\n\n ans = [Fiber(coords[1:2], payloads[1:2]),\n Fiber(coords[1:2], payloads[1:2]),\n Fiber(coords[2:4], payloads[2:4]),\n Fiber([], []),\n Fiber(coords[4:6], payloads[4:6]),\n Fiber(coords[6:9], payloads[6:9]),\n ]\n\n for i in range(len(startc)):\n b = a.getRange(startc[i], size[i])\n self.assertEqual(b, ans[i])\n\n c = a.getRange(startc[i], end_coord=end_coord[i])\n self.assertEqual(c, ans[i])\n\n\n def test_getRange_flattened(self):\n \"\"\"getRange flattened coordinates\"\"\"\n\n coords = [(0, 2), (0, 4), (0, 6), (0, 8), (0, 9),\n (1, 2), (1, 5), (1, 6), (1, 7),\n (2, 0)]\n\n payloads = [3, 5, 7, 9, 10, 13, 16, 17, 18, 21]\n\n a = Fiber(coords, payloads)\n\n startc = [(0, 4), (0, 3), (0, 5), (1, 3) , (0, 9), (1, 5)]\n end_coord = [(0, 6),(0, 6), (0, 9), (1, 5), (1, 3), (1, 8)]\n\n ans = [Fiber(coords[1:2], payloads[1:2]),\n Fiber(coords[1:2], payloads[1:2]),\n Fiber(coords[2:4], payloads[2:4]),\n Fiber([], []),\n Fiber(coords[4:6], payloads[4:6]),\n Fiber(coords[6:9], payloads[6:9]),\n ]\n\n for i in range(len(startc)):\n c = a.getRange(startc[i], end_coord=end_coord[i])\n self.assertEqual(c, ans[i])\n\n\n def test_getRange_shortcut(self):\n \"\"\"getRange_shortcut\"\"\"\n\n coords = [2, 4, 6, 8, 9, 12 ]\n payloads = [3, 5, 7, 9, 10, 13]\n\n a = Fiber(coords, payloads)\n\n startc = [4, 3, 5, 13, 9]\n size = [2, 3, 4, 2, 3]\n startp = [0, 1, 2, 3, Payload(4)]\n ans = [[ 2, 2, None, None, None],\n [2, 2, None, None, None],\n [4, 4, 4, None, None],\n [5, 5, 5, 5, 5],\n [5, 5, 5, 5, 5]]\n\n saved_pos_stats = (12,15)\n\n for i in range(len(startc)):\n for sp, aaa in zip(startp,ans[i]):\n if aaa is not None:\n b = a.getRange(startc[i], size[i], start_pos=sp)\n self.assertEqual(a.getSavedPos(), aaa),\n else:\n with self.assertRaises(AssertionError):\n b = a.getRange(startc[i], size[i], start_pos=sp)\n\n self.assertEqual(a.getSavedPosStats(), saved_pos_stats)\n\n\n def test_append(self):\n \"\"\"Append element at end of fiber\"\"\"\n\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n aa_coords = [2, 4, 6, 7]\n aa_payloads = [3, 5, 7, 10]\n\n aa_ref = Fiber(aa_coords, aa_payloads)\n\n retval = a.append(7, 10)\n\n self.assertIsNone(retval)\n self.assertEqual(a, aa_ref)\n\n def test_append_empty(self):\n \"\"\"Append to empty fiber\"\"\"\n\n a = Fiber([], [])\n a_ref = Fiber( [4], [8])\n\n retval = a.append(4, 8)\n\n self.assertIsNone(retval)\n self.assertEqual(a, a_ref)\n\n\n def test_append_assert(self):\n \"\"\"Append element at end of fiber - and assert\"\"\"\n\n coords = [2, 4, 6]\n payloads = [3, 5, 7]\n\n a = Fiber(coords, payloads)\n\n with self.assertRaises(AssertionError):\n a.append(3, 10)\n\n def test_extend(self):\n \"\"\"Extend fiber\"\"\"\n\n a_coords = [2, 4, 6]\n a_payloads = [3, 5, 7]\n\n a = Fiber(a_coords, a_payloads)\n\n b_coords = [7, 10, 12]\n b_payloads = [4, 6, 8]\n\n b = Fiber(b_coords, b_payloads)\n\n ae_coords = [2, 4, 6, 7, 10, 12]\n ae_payloads = [3, 5, 7, 4, 6, 8]\n\n ae_ref = Fiber(ae_coords, ae_payloads)\n\n retval = a.extend(b)\n\n self.assertIsNone(retval)\n self.assertEqual(a, ae_ref)\n\n\n def test_extend_assert(self):\n \"\"\"Extend fiber - and assert\"\"\"\n\n a_coords = [2, 4, 6]\n a_payloads = [3, 5, 7]\n\n a = Fiber(a_coords, a_payloads)\n\n b_coords = [6, 
10, 12]\n b_payloads = [4, 6, 8]\n\n b = Fiber(b_coords, b_payloads)\n\n with self.assertRaises(AssertionError):\n a.extend(b)\n\n with self.assertRaises(AssertionError):\n a.extend(1)\n\n\n# def test_insert(self):\n# \"\"\"Insert payload at coordinates 0, 3, 7\"\"\"\n#\n# coords = [2, 4, 6]\n# payloads = [3, 5, 7]\n#\n# a = Fiber(coords, payloads)\n#\n# insert_at = [0, 3, 7]\n#\n# ans = {}\n# ans[0] = Fiber([0, 2, 4, 6], [1, 3, 5, 7])\n# ans[3] = Fiber([0, 2, 3, 4, 6], [1, 3, 10, 5, 7])\n# ans[7] = Fiber([0, 2, 3, 4, 6, 7], [1, 3, 10, 5, 7, 50])\n#\n# for i in insert_at:\n# p = i*i+1\n# retval = a.insert(i, p)\n#\n# self.assertIsNone(retval)\n# self.assertEqual(a, ans[i])\n\n\n def test_shape(self):\n \"\"\"Test determining shape of a fiber\"\"\"\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n s = a.getShape()\n\n self.assertEqual(s, [5, 8])\n\n def test_shape_empty(self):\n \"\"\"Test determining shape of an empty fiber\"\"\"\n\n a = Fiber([], [])\n\n s = a.getShape()\n\n self.assertEqual(s, [0])\n\n def test_rankids(self):\n \"\"\"Test finding rankids of a fiber\"\"\"\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n r = a.getRankIds()\n\n self.assertEqual(r, [\"X.1\", \"X.0\"])\n\n def test_uncompress(self):\n \"\"\"Test uncompress\"\"\"\n\n uncompressed_ref = [[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 2, 0, 0, 5, 0, 0, 8],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 3, 0, 5, 0, 7, 0]]\n\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n\n uncompressed = a.uncompress()\n\n self.assertEqual(uncompressed, uncompressed_ref)\n\n\n def test_uncompress_default(self):\n \"\"\"Test uncompress with non-zero default\"\"\"\n\n uncompressed_ref = [[-1, -1, -1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1, -1, -1],\n [-1, 2, -1, -1, 5, -1, -1, 8],\n [-1, -1, -1, -1, -1, -1, -1, -1],\n [-1, -1, 3, -1, 5, -1, 7, -1]]\n\n\n a = Fiber.fromYAMLfile(\"./data/test_fiber-2.yaml\")\n # Dirty way of setting non-zero default...\n for c, p in a:\n p._setDefault(-1)\n\n uncompressed = a.uncompress()\n\n self.assertEqual(uncompressed, uncompressed_ref)\n\n def test_project(self):\n \"\"\"Test projections\"\"\"\n\n c = [0, 1, 10, 20]\n p = [1, 2, 11, 21]\n a = Fiber(c, p)\n\n cp = [1, 2, 11, 21]\n ap_ref = Fiber(cp, p)\n\n ap = a.project(lambda c: c + 1)\n\n self.assertEqual(ap, ap_ref)\n\n\n def test_prune(self):\n \"\"\"Test pruning a fiber\"\"\"\n\n f = Fiber([2, 4, 6, 8], [4, 8, 12, 16])\n\n fl2_ref = Fiber([2, 4], [4, 8])\n fu2_ref = Fiber([6, 8], [12, 16])\n\n #\n # Prune out lower half\n #\n f0 = f.prune(lambda n, c, p: n < 2)\n self.assertEqual(f0, fl2_ref)\n\n f1 = f.prune(lambda n, c, p: c < 5)\n self.assertEqual(f1, fl2_ref)\n\n f2 = f.prune(lambda n, c, p: p < 10)\n self.assertEqual(f2, fl2_ref)\n\n #\n # Prune out upper half\n #\n f3 = f.prune(lambda n, c, p: n >= 2)\n self.assertEqual(f3, fu2_ref)\n\n f4 = f.prune(lambda n, c, p: c > 5)\n self.assertEqual(f4, fu2_ref)\n\n f5 = f.prune(lambda n, c, p: p > 10)\n self.assertEqual(f5, fu2_ref)\n\n #\n # Prune out lower half and stop\n #\n f6 = f.prune(lambda n, c, p: True if p < 10 else None)\n self.assertEqual(f6, fl2_ref)\n\n\n def test_upzip(self):\n \"\"\"Test unzipping a fiber\"\"\"\n\n c = [0, 1, 10, 20]\n p_a = [0, 1, 10, 20]\n p_b = [1, 2, 11, 21]\n\n p_ab = [(0, 1), (1, 2), (10, 11), (20, 21)]\n\n a_ref = Fiber(c, p_a)\n b_ref = Fiber(c, p_b)\n ab = Fiber(c, p_ab)\n\n (a, b) = ab.unzip()\n\n self.assertEqual(a, a_ref)\n self.assertEqual(b, b_ref)\n\n def test_updateCoords(self):\n \"\"\"Update 
coords\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n #\n # Do the split\n #\n coords = 10\n\n split = f.splitUniform(coords)\n flat_split = split.flattenRanks()\n flat_split.updateCoords(lambda i, c, p: c[1])\n\n self.assertEqual(f, flat_split)\n\n def test_updateCoords_reversed(self):\n \"\"\"Update coords - where coordinates need to be reversed\"\"\"\n\n #\n # Create the fiber to be split\n #\n c = [0, 1, 9, 10, 12, 31, 41]\n p = [ 0, 10, 20, 100, 120, 310, 410 ]\n\n f = Fiber(c,p)\n\n f_ans = Fiber([ 100-c for c in reversed(c)], list(reversed(p)))\n\n f.updateCoords(lambda i, c, p: 100-c)\n\n self.assertEqual(f, f_ans)\n\n def test_add(self):\n \"\"\"Add fibers\"\"\"\n\n a_coords = [2, 4, 6]\n a_payloads = [3, 5, 7]\n\n a = Fiber(a_coords, a_payloads)\n\n b_coords = [7, 10, 12]\n b_payloads = [4, 6, 8]\n\n b = Fiber(b_coords, b_payloads)\n\n ae_coords = [2, 4, 6, 7, 10, 12]\n ae_payloads = [3, 5, 7, 4, 6, 8]\n\n ae_ref = Fiber(ae_coords, ae_payloads)\n\n self.assertEqual(a+b, ae_ref)\n\n def test_iadd(self):\n \"\"\"iadd fibers\"\"\"\n\n a_coords = [2, 4, 6]\n a_payloads = [3, 5, 7]\n\n a = Fiber(a_coords, a_payloads)\n\n b_coords = [7, 10, 12]\n b_payloads = [4, 6, 8]\n\n b = Fiber(b_coords, b_payloads)\n\n ae_coords = [2, 4, 6, 7, 10, 12]\n ae_payloads = [3, 5, 7, 4, 6, 8]\n\n ae_ref = Fiber(ae_coords, ae_payloads)\n\n a += b\n\n self.assertEqual(a, ae_ref)\n\n\n def test_and(self):\n \"\"\"Intersection test\"\"\"\n\n a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])\n b = Fiber([0, 5, 9], [2, 7, 11])\n\n ab_ref = Fiber([5, 9], [(6, 7), (10, 11)])\n\n ab = a & b\n\n self.assertEqual(ab, ab_ref)\n\n def test_and_empty(self):\n \"\"\"Intersection test - with explict zeros\"\"\"\n\n a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])\n b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])\n\n ab_ref = Fiber([9], [(10, 11)])\n\n ab = a & b\n\n self.assertEqual(ab, ab_ref)\n\n\n def test_or(self):\n \"\"\"Union test\"\"\"\n\n a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])\n b = Fiber([0, 5, 9], [2, 7, 11])\n\n ab_ref = Fiber([0, 1, 5, 8, 9],\n [(\"B\", 0, 2),\n (\"A\", 2, 0),\n (\"AB\", 6, 7),\n (\"A\", 9, 0),\n (\"AB\", 10, 11)])\n\n ab = a | b\n\n self.assertEqual(ab, ab_ref)\n\n def test_or_empty(self):\n \"\"\"Uniontest - with explict zeros\"\"\"\n\n a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])\n b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])\n\n ab_ref = Fiber([1, 5, 9],\n [(\"B\", 0, 2),\n (\"A\", 6, 0),\n (\"AB\", 10, 11)])\n\n ab = a | b\n\n self.assertEqual(ab, ab_ref)\n\n def test_or_2d(self):\n \"\"\"Union test 2d\"\"\"\n\n a1 = [[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 2, 3, 4],\n [1, 2, 0, 4]]\n\n a2 = [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]\n\n a3 = [[2, 3, 4, 5],\n [0, 0, 0, 0],\n [1, 0, 3, 4],\n [1, 2, 0, 4]]\n\n b1 = a2\n b2 = a1\n b3 = a3\n\n au = [a1, a2, a3]\n bu = [b1, b2, b3]\n\n a = Fiber.fromUncompressed(au)\n b = Fiber.fromUncompressed(bu)\n\n x = a|b\n\n ab_ref = [\"A\", \"B\", \"AB\"]\n a1_fiber = Fiber.fromUncompressed(a1)\n a2_fiber = Fiber([],[])\n a3_fiber = Fiber.fromUncompressed(a3)\n\n ab_a_ref = [a1_fiber, a2_fiber, a3_fiber]\n ab_b_ref = [a2_fiber, a1_fiber, a3_fiber]\n\n for n, (c, (ab, ab_a, ab_b)) in enumerate(x):\n self.assertEqual(ab, ab_ref[n])\n self.assertEqual(ab_a, ab_a_ref[n])\n self.assertEqual(ab_b, ab_b_ref[n])\n\n\n def test_xor(self):\n \"\"\"Xor test\"\"\"\n\n a = Fiber([1, 5, 8, 9], [2, 6, 9, 10])\n b = Fiber([0, 5, 9], [2, 7, 11])\n\n ab_ref = Fiber([0, 1, 8],\n [(\"B\", 0, 2),\n 
(\"A\", 2, 0),\n (\"A\", 9, 0)])\n\n ab = a ^ b\n\n self.assertEqual(ab, ab_ref)\n\n def test_xor_2d(self):\n \"\"\"Union test 2d\"\"\"\n\n a1 = [[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 2, 3, 4],\n [1, 2, 0, 4]]\n\n a2 = [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]\n\n a3 = [[2, 3, 4, 5],\n [0, 0, 0, 0],\n [1, 0, 3, 4],\n [1, 2, 0, 4]]\n\n b1 = a2\n b2 = a1\n b3 = a3\n\n au = [a1, a2, a3]\n bu = [b1, b2, b3]\n abu_ref = [a1, b2, []]\n\n a = Fiber.fromUncompressed(au)\n b = Fiber.fromUncompressed(bu)\n\n x = a ^ b\n\n ab_ref = [\"A\", \"B\"]\n a1_fiber = Fiber.fromUncompressed(a1)\n a2_fiber = Fiber([],[])\n\n ab_a_ref = [a1_fiber, a2_fiber]\n ab_b_ref = [a2_fiber, a1_fiber]\n\n for n, (c, (ab, ab_a, ab_b)) in enumerate(x):\n self.assertEqual(ab, ab_ref[n])\n self.assertEqual(ab_a, ab_a_ref[n])\n self.assertEqual(ab_b, ab_b_ref[n])\n\n\n\n def test_xor_empty(self):\n \"\"\"Uniontest - with explict zeros\"\"\"\n\n a = Fiber([1, 5, 8, 9], [0, 6, 0, 10])\n b = Fiber([1, 5, 8, 9], [2, 0, 0, 11])\n\n ab_ref = Fiber([1, 5],\n [(\"B\", 0, 2),\n (\"A\", 6, 0)])\n\n ab = a ^ b\n\n self.assertEqual(ab, ab_ref)\n\n\n def test_diff(self):\n \"\"\"Difference test\"\"\"\n\n a = Fiber([1, 5, 8, 9, 12, 14], [2, 6, 9, 10, 0, 0])\n b = Fiber([0, 5, 9, 12], [2, 7, 0, 5])\n\n # Notes:\n # coordinate 9 stays since b@9 is zero\n # coordinat 12 goes away even though explict zero at a@12\n # coordinate 14 does not go away with explict zero at a@14\n\n\n ab_ref = Fiber([1, 8, 9, 14],\n [2, 9, 10, 0])\n\n ab = a - b\n\n self.assertEqual(ab, ab_ref)\n\n\n def test_assignment(self):\n \"\"\"Assignment test\"\"\"\n\n a = Fiber([0, 5, 9], [0, 10, 0])\n b = Fiber([1, 5, 8, 9, 14], [2, 6, 9, 10, 0])\n\n # Note:\n # coordinate 9 stays although a@9 is zero\n # coordinate 14 does not appear since b@14 is 0\n\n ab_ref = Fiber([1, 5, 8, 9],\n [(0, 2),\n (10, 6),\n (0, 9),\n (0, 10)])\n\n ab = a << b\n\n self.assertEqual(ab, ab_ref)\n\n def test_flatten(self):\n \"\"\"Test flattening/unflattening 1 level\"\"\"\n\n u_t = [[[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 2, 3, 4],\n [1, 2, 0, 4]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 0, 0, 0],\n [1, 2, 0, 4]]]\n\n f = Fiber.fromUncompressed(u_t)\n\n ff = f.flattenRanks()\n\n ff_ref = Fiber([(0, 0), (0, 1), (0, 2), (0, 3), (2, 0), (2, 1), (2, 3)],\n [Fiber([0, 1, 2], [1, 2, 3]),\n Fiber([0, 2, 3], [1, 3, 4]),\n Fiber([1, 2, 3], [2, 3, 4]),\n Fiber([0, 1, 3], [1, 2, 4]),\n Fiber([0, 1, 2], [1, 2, 3]),\n Fiber([0, 2, 3], [1, 3, 4]),\n Fiber([0, 1, 3], [1, 2, 4])])\n\n self.assertEqual(ff, ff_ref)\n\n fu = ff.unflattenRanks()\n\n self.assertEqual(fu, f)\n\n\n def test_flatten_levels_2(self):\n \"\"\"Test flattening/unflattening 2 levels\"\"\"\n\n u_t = [[[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 2, 3, 4],\n [1, 2, 0, 4]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[1, 2, 3, 0],\n [1, 0, 3, 4],\n [0, 0, 0, 0],\n [1, 2, 0, 4]]]\n\n f = Fiber.fromUncompressed(u_t)\n\n ff = f.flattenRanks(levels=2)\n\n ref_coords = [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0),\n (0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 2),\n (0, 2, 3), (0, 3, 0), (0, 3, 1), (0, 3, 3),\n (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0),\n (2, 1, 2), (2, 1, 3), (2, 3, 0), (2, 3, 1),\n (2, 3, 3)]\n\n ref_payloads = [1, 2, 3, 1, 3, 4, 2, 3, 4, 1, 2, 4, 1, 2,\n 3, 1, 3, 4, 1, 2, 4]\n\n ff_ref = Fiber(coords=ref_coords, payloads=ref_payloads)\n\n self.assertEqual(ff, ff_ref)\n\n #\n # Now unflatten back to the original\n #\n fu = 
ff.unflattenRanks(levels=2)\n\n self.assertEqual(fu, f)\n\n #\n # Now unflatten in two steps\n #\n fu1 = ff.unflattenRanks(levels=1)\n fu1.updatePayloads(lambda p: p.unflattenRanks(levels=1))\n\n self.assertEqual(fu1, f)\n\n def test_flatten_levels_3(self):\n \"\"\"Test flattening/unflattening 3 levels\"\"\"\n\n # TBD\n pass\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "5608839", "language": "Python", "matching_score": 5.910552501678467, "max_stars_count": 2, "path": "test/test_fiber.py" }, { "content": "\"\"\"Fiber\n\nA class used to implement the fibers (or the entire **fibertree**) of\na tensor.\n\n\"\"\"\n\nimport logging\n\nfrom functools import partialmethod\nfrom copy import deepcopy\nimport yaml\nimport random\n\nfrom .coord_payload import CoordPayload\nfrom .payload import Payload\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.core.fiber')\n\n\n#\n# Define an error class\n#\nclass CoordinateError(Exception):\n \"\"\"CoordinateError\"\"\"\n pass\n\n\nclass PayloadError(Exception):\n \"\"\"PayloadError\"\"\"\n pass\n\n\n#\n# Define the fiber class\n#\nclass Fiber:\n \"\"\"A base class for representing a tensor as a tree of fibers\n\n The Fiber class implements the concept of a fiber of a tensor. A\n Fiber is comprised of a list of **coordinates** and associated\n **payloads**.\n\n Fiber coordinates are unique tags for each element of the Fiber,\n they are often integers in the open range [0 to **shape**), where\n **shape** is an attribute of the Fiber. However, coordinates may\n be other types, such as `tuples`. For example, `tuple` coordinates\n can be generated by `Fiber.flattenRanks()`.\n\n Fiber payloads are a value associated with each\n coordinate. Payloads may themselves be Fibers, which permits the\n creation of **fibertrees**. The payloads of the leaf level of the\n fibertree will be terminal values. Since we want to enable code to\n hold and update a payload (outside the context of the Fiber) we\n usually **box** the leaf level payloads using the `Payload` class\n (see `fibertree.core.payload`).\n\n A Fiber may have not have values for every possible coordinate\n (i.e., the coordinate list may have gaps). Such missing\n coordinates are referred to as being **empty**.\n\n Fibers can be iterated and return a series of `CoordPayload`\n elements (see `fibertree.core.coord_payload`).\n\n Attributes\n ----------\n\n Fibers have a set of attributes that can be set and\n accessed. These include:\n\n - A **default** value, which is the value associated with an\n **empty** coordinate and is accessed with `Fiber.getDefault()`\n and may be returned by methods that access a point in the Fiber\n such as `Fiber.getPayload()` and `Fiber.getPayloadRef()`.\n\n - A **shape**, which specifies the **size** of the fiber and\n controls the range of legal (integer) coordinates. The shape of\n an individual fiber is an integer, while the shape of a\n fibertree will be a list of integers corresponding to the\n shapes of the fibers at each level of the tree. A fiber's (or\n fibertree's) shape can be accessed with `Fiber.getShape()`\n\n - An **ordered** attribute which indicates and maintains the\n invariant that the coordinates of the fiber are monitonically\n increasing.\n\n - A **unique** attribute which indicates and maintains the\n invariant that the coordinates of the fiber are unique.\n\n - An **owner** rank. Since fibers are often part of a `Tensor`,\n the fiber may have an owning rank (see `Fiber.setOwner()`). 
When\n owned by a rank, some fiber atributes are actually obtained from\n that rank rather than being stored locally.\n\n - A **savedPos**, which is a value stored by certain methods that\n records a postion in a fiber that can be used in the future as a\n **shortcut** to accelerate a search in the fiber by another\n method. See `Fiber.getRange()` for an example of a method that\n both saves and can use these **shortcuts**.\n\n Constructor\n -----------\n\n The `Fiber` constructor creates an fiber with a given set of\n coordinates and payloads (in struct of lists form).\n\n Parameters\n ----------\n\n coords: list, default=[]\n List of coordinate values\n\n payloads: list, default=[]\n List of corresponding payloads for the coordinates\n\n default: value, default=0\n Default value of a payload in this fiber\n\n shape: integer, default=None\n Declared shape for the fiber\n\n initial: value, default=None\n A value to initialize all payloads.\n\n max_coord: value, default=no maximum coordinate\n The maximum legal coordinate value (this is really \"shape-1\")\n\n ordered: Boolean, default=True\n Attribute specifing that the coordinates are monotonically increasing\n\n unique: Boolean, default=True\n Attribute specifing that the coordinates are unique\n\n\n Notes\n -----\n\n Elements of the fiber have a `position` that can be used to\n reference the element. Generally one can think of the `position` as\n the relative address of the element in the concrete represention of\n the fiber. A variety of methods use a `postion` to reference an\n element, such as `Fiber.__getitem__()` and `Fiber.__setitem__()`\n\n Currently, the internal implementation of a Fiber is to hold a list\n of coordinates and a list of payloads (struct of lists) and the\n instance variables holding those lists (coords and payloads) are\n currently left public...\n\n \"\"\"\n\n\n def __init__(self,\n coords=None,\n payloads=None,\n default=0,\n shape=None,\n initial=None,\n max_coord=None,\n ordered=True,\n unique=True):\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.fiber')\n\n #\n # Collect attributes\n #\n self._ordered = ordered\n self._unique = unique\n\n #\n # Handle cases with missing inputs\n #\n if coords is None:\n if payloads is None:\n #\n # If neither coords or payloads are given create an empty fiber\n #\n coords = []\n payloads = []\n else:\n #\n # If only payloads are given create a \"dense\" fiber\n #\n # TBD: Reconcile coords with shape\n #\n coords = list(range(len(payloads)))\n else:\n if payloads is None:\n #\n # If only coords are given create a set of payloads\n #\n # TBD: Creating a set of zeros is odd...\n #\n payloads = len(coords)*[initial]\n\n\n assert (len(coords) == len(payloads)), \\\n \"Coordinates and payloads must be same length\"\n\n #\n # Note:\n # If a payload is already boxed, Payload.maybe_box() will\n # NOT double box it.\n #\n # We do not eliminate explicit zeros in the payloads\n # so zeros will be preserved.\n #\n self.coords = coords\n \"\"\"The list of coordinates of the fiber\"\"\"\n\n self.payloads = [Payload.maybe_box(p) for p in payloads]\n \"\"\"The list of payloads of the fiber\"\"\"\n\n #\n # Check that fiber attributes are satisfied\n #\n self._checkOrdered()\n self._checkUnique()\n\n #\n # Set a specific contstant value for the shape of this fiber\n #\n # Note1: this value is overridden by the owning rank's shape\n # Note2: no checks are done to see if this value is violated\n #\n self._shape = shape\n #\n # Set a specific constant value to the 
maximum legal coordinate\n #\n # TBD: If not None there are lots of places this should be checked\n #\n self._max_coord = max_coord\n if max_coord is not None:\n Fiber._deprecated(\"Explicitly setting a fiber's max_coord is deprecated\")\n #\n # Owner rank... set later when fiber is appended to a rank\n #\n self.setOwner(None)\n\n #\n # Create default value\n #\n self._default = Payload.maybe_box(default)\n\n #\n # Initialize \"saved position\"\n #\n self._saved_pos = 0\n\n #\n # Clear all stats\n #\n self.clearStats()\n\n\n @classmethod\n def fromCoordPayloadList(cls, *cp, **kwargs):\n \"\"\"Construct a Fiber from a coordinate/payload list.\n\n The base Fiber contructor creates a fiber from a list of a\n coordinates and a list of payloads (i.e., struct of lists\n form) this constructor takes a list of coordinate/payload\n tuples (i.e., list of structs form).\n\n Parameters\n ----------\n\n cp: list of tuples\n A sequence of fiber elements as (coord, payload) tuples\n\n kwargs: keyword arguments\n Keyword arguments accepted by `Fiber.__init__()`\n\n Todo\n ----\n\n Support a list of `CoordPayload` elements\n\n \"\"\"\n\n (coords, payloads) = zip(*cp)\n\n return cls(coords, payloads, **kwargs)\n\n\n @classmethod\n def fromYAMLfile(cls, yamlfile, default=0, **kwargs):\n \"\"\"Construct a Fiber from a YAML file\n\n Parameters\n ----------\n\n yamlfile: str\n The name of a YAML file holding a description of a fiber\n\n kwargs: keyword arguments\n Keyword arguments accepted by `Fiber.__init__()`\n\n \"\"\"\n\n (coords, payloads) = Fiber.parse(yamlfile, default)\n\n return cls(coords, payloads, default=default, **kwargs)\n\n\n @classmethod\n def fromUncompressed(cls, payload_list):\n \"\"\"Construct a Fiber from an uncompressed nest of lists.\n\n The coordinates of the Fiber will be infered from the postions\n of the values in the lists. Zeros in the lists will be\n interpreted as the empty value of the Fiber.\n\n\n Parameters\n ----------\n\n payload_list: list\n A nest of lists holding the values of the Fiber.\n\n kwargs: keyword arguments\n Keyword arguments accepted by `Fiber.__init__()`\n\n\n Notes\n -----\n\n Zero values and sub-fibers that are all zeros are squeezed\n out, i.e., they will have no coordinates. 
Unless the entire\n input is zeros.\n\n \"\"\"\n\n f = Fiber._makeFiber(payload_list)\n\n #\n # Check if the list was all zeros, so return an empty fiber\n #\n if f is None:\n # Return something for an entirely empty input\n return Fiber([], [])\n\n return f\n\n\n @staticmethod\n def _makeFiber(payload_list):\n \"\"\"Recursively make a fiber out of an uncompressed nest of lists\"\"\"\n\n assert(isinstance(payload_list, list))\n\n if isinstance(payload_list[0], list):\n size_check = len(payload_list[0])\n\n for p in payload_list:\n assert size_check == len(p), \\\n \"All lists must be the same length\"\n\n # Create zipped list of (non-empty) coordinates/payloads\n zipped = [(c, p) for c, p in enumerate(payload_list) if p != 0]\n\n #\n # Recursively unzip the lists into a Fiber\n #\n if len(zipped) == 0:\n # Got an empty subtree\n return None\n\n if isinstance(payload_list[0], list):\n coords = []\n payloads = []\n\n for c, p in zipped:\n real_p = Fiber._makeFiber(p)\n if real_p is not None:\n coords.append(c)\n payloads.append(real_p)\n else:\n coords = [c for c, _ in zipped]\n payloads = [p for _, p in zipped]\n\n if len(coords) == 0:\n return None\n\n #\n # Create fiber\n #\n # Note: max_coord dervived from input argument list and\n # assuming coordinates start at 0\n #\n return Fiber(coords, payloads, shape=len(payload_list))\n\n\n @classmethod\n def fromRandom(cls, shape, density, interval=10, seed=None):\n \"\"\"Create a fiber populated with random values.\n\n Multi-level fibers are supported by recursively creating\n fibers.\n\n Parameters\n -----------\n\n shape: list\n The `shape` (i.e., size) of the fibers at each level of\n the tree.\n\n density: list or scalar\n The probability that an element of the fiber will not be\n empty for each level of the tree. 
A scalar is density for\n leaf level of tree and other levels have density 1.0.\n\n interval: number\n The range (from 0 to `interval`) of each value at the leaf\n level of the tree.\n\n seed: a valid argument for `random.seed`\n A seed to pass to `random.seed`.\n\n \"\"\"\n\n if not isinstance(density, list):\n #\n # Convert scalar density to per rank density list\n #\n density = (len(shape)-1)*[1.0] + [density]\n\n\n assert len(shape) == len(density), \\\n \"Density and shape arrays must be same length\"\n\n if seed is not None:\n random.seed(seed)\n\n coords = []\n payloads = []\n\n for c in range(shape[0]):\n if random.random() < density[0]:\n if len(shape) == 1:\n payload = random.randint(1, interval)\n if payload == 0:\n break\n else:\n payload = Fiber.fromRandom(shape[1:],\n density[1:],\n interval)\n if payload.isEmpty():\n continue\n\n coords.append(c)\n payloads.append(payload)\n\n f = Fiber(coords, payloads)\n\n return f\n\n\n#\n# Stats-related methods\n#\n\n def clearStats(self):\n \"\"\"Clear savedPos-related statistics\n\n See `Fiber.getSavedPos()` for more information.\n\n \"\"\"\n\n self._clearSavedPosStats()\n\n\n#\n# Accessor methods\n#\n def getCoords(self):\n \"\"\"Return the list of coordinates in fiber\n\n Returns\n -------\n coord_list: list\n List of coordinates\n\n Notes\n -----\n\n This method should be used in preference to accessing the\n `Fiber.coords` class instance variable directly.\n\n \"\"\"\n\n return self.coords\n\n def getPayloads(self):\n \"\"\"Return the list of payloads in fiber\n\n Returns\n -------\n payload_list: list\n List of payloads\n\n Notes\n -----\n\n This method should be used in preference to accessing the\n `Fiber.payloads` class instance variable directly.\n\n \"\"\"\n\n return self.payloads\n\n\n def isOrdered(self):\n \"\"\"Return the status of the \"ordered\" attribute\n\n Returns\n -------\n is_ordered: Boolean\n Set to True if the coordinates are ordered\n\n\n Note: this attribute cannot be changed after fiber creation.\n\n \"\"\"\n\n return self._ordered\n\n\n def isUnique(self):\n \"\"\"Return the status of the \"unique\" attribute\n\n Returns\n -------\n is_unique: Boolean\n Set to True if the coordinates are ordered\n\n Note: this attribute cannot be changed after fiber creation.\n\n \"\"\"\n\n return self._unique\n\n\n#\n# Coordinate-based methods\n#\n# The following set of methods all reference an element in the fiber\n# by coordinate. Some just obtain information about the element (non-mutating)\n# others change the content of the fiber (mutating)\n#\n\n def getPayload(self, *coords, default=None, allocate=True, start_pos=None):\n \"\"\"Get the payload at a **point** in a fibertree\n\n Return the final payload after recursively traversing the\n levels of the fiber tree for at each coordinate in coords. If\n the list of coordinates reaches a leaf of the tree, it returns\n a value otherwise it will return a fiber. [Non-mutating]\n\n This method operates in two modes: allocate=True and False.\n\n For \"allocate=True\" mode, if any coordinate refers to a\n non-existent element, a payload is created (a fiber for at a\n non-leaf level or a zero at the leaf) recursively, but not\n inserted into the fiber tree. The final such payload is\n returned to the caller.\n\n For \"allocate=False\" mode, if any coordinate refers to a\n non-existent element, nothing is created and the `default`\n value is returned.\n\n If `start_pos` is specified it is used as a shortcut to start the\n search for the coordinate. 
And a new position is saved for use in\n a later search. Only works for a one-deep search.\n\n Parameters\n ----------\n\n coords: list\n list of coordinates to traverse\n\n allocate: Boolean, default=True\n Automatically generate the default value if needed at each\n level of the tree, but don't insert into the tree.\n\n default: value, default=None\n A constant default value to return if coordinate is empty on\n no-allocate\n\n start_pos: scalar or Payload() containing a scalar, default=None\n An optional shortcut value to optimize search\n\n Returns\n -------\n\n payload: a scalar or Fiber\n The payload of the element at the specified coordinate(s)\n\n Raises\n ------\n\n None\n\n \"\"\"\n\n assert default is None or not allocate\n assert start_pos is None or len(coords) == 1\n\n start_pos = Payload.get(start_pos)\n\n # TBD: Actually optimize the search\n\n try:\n index = self.coords.index(coords[0])\n payload = self.payloads[index]\n except Exception:\n #\n # The requested coordinate did not exist\n #\n # If we are allocating missing elements or\n # are not at the last coordinate in the given coord list\n # create a default value to return (or recurse into),\n # but do not change anything in the actual fiber.\n #\n # Otherwise return the provided default.\n #\n # TBD: We (arbitarily) record we found it at the final\n # index, this may not work for the next\n # shortcut-based lookup\n #\n index = len(self.coords)\n\n if allocate or len(coords) > 1:\n payload = self._createDefault(addtorank=False)\n else:\n payload = Payload.maybe_box(default)\n\n if len(coords) > 1:\n assert Payload.contains(payload, Fiber), \\\n \"getPayload too many coordinates\"\n\n # Recurse to the next level's fiber\n return payload.getPayload(*coords[1:],\n default=default,\n allocate=allocate)\n\n if start_pos is not None:\n self.setSavedPos(index, distance=index - start_pos)\n\n return payload\n\n\n def getPayloadRef(self, *coords, start_pos=None):\n \"\"\"Get a (mutable) reference to a payload in a fibertree\n\n Return the final payload after recursively traversing the\n levels of the fiber tree for at each coordinate in coords,\n which are essential the coordinates of a `point`. If the\n payload is empty, then recursively return the `default`\n payload\n\n If `start_pos` is specified it is used as a shortcut to start the\n search for the coordinate. And a new position is saved for use in\n a later search. 
Only works for a one-deep search.\n\n Parameters\n ----------\n coords: coordinates\n List of coordinates to traverse, i.e., a \"point\"\n\n start_pos: scalar or Payload() containing a scalar\n Optional shortcut value to optimize search\n\n Returns\n -------\n payload: a (boxed) scalar or Fiber\n The payload of the element at the specified coordinate(s)\n\n Raises\n ------\n\n None\n\n \"\"\"\n\n assert start_pos is None or len(coords) == 1\n\n # TBD: Actually optimize the search\n\n start_pos = Payload.get(start_pos)\n\n try:\n #\n # Try to find the index for the first coordinate in the \"point\"\n # and get the payload at that index\n #\n index = self.coords.index(coords[0])\n payload = self.payloads[index]\n except Exception:\n #\n # Coordinate didn't exist so create a payload\n # at that coordinate then the index will exist\n #\n payload = self._create_payload(coords[0])\n index = self.coords.index(coords[0])\n\n if len(coords) > 1:\n # Recurse to the next level's fiber\n assert Payload.contains(payload, Fiber), \"Too many coordinates\"\n\n return payload.getPayloadRef(*coords[1:])\n\n if start_pos is not None:\n self.setSavedPos(index, distance=index - start_pos)\n\n assert Payload.is_payload(payload)\n\n return payload\n\n\n def _create_payload(self, coord):\n \"\"\"Create a payload in the fiber at coord\n\n Optionally insert into the owners rank.\n\n \"\"\"\n\n # Create a payload at coord\n # Iemporary value (should be None)\n\n payload = self._createDefault()\n\n assert Payload.is_payload(payload)\n\n try:\n index = next(x for x, val in enumerate(self.coords) if val > coord)\n self.coords.insert(index, coord)\n self.payloads.insert(index, payload)\n except StopIteration:\n index = len(self.coords)\n self.coords.append(coord)\n self.payloads.append(payload)\n\n\n #\n # Get the payload out of the payloads array\n # TBD: Not sure why I felt this was needed\n #\n payload = self.payloads[index]\n\n assert Payload.is_payload(payload)\n\n return payload\n\n\n def getRange(self,\n start_coord,\n size=None,\n end_coord=None,\n trans_fn=None,\n start_pos=None):\n \"\"\"Extract a range of coordinates from a Fiber\n\n Return a fiber in the range starting at `start_coord` and ending\n either when the `size` is exceeded or the fiber reaches the end\n of the open interval ending at `end_coord`.\n\n Parameters\n ----------\n start_coord: coordinate\n A coordinate indicating where to start the new fiber\n\n size: integer, default=None\n The size of the range in coordinate space\n\n end_coord: coordinate, default=None\n A coordinate indicating the end of the open interval\n\n trans_fn: function: coord -> coord, default=None\n A function that converts a coordinate in the orginal fiber\n into a cordinate in the new fiber\n\n start_pos: scalar or Payload() containing a scalar, default=None\n Optional **shortcut** value to optimize search for `start_coord`\n\n Returns\n -------\n\n Fiber\n Fiber containing the requested range\n\n\n Notes\n -----\n\n 1) Either `size` or `end_coord` must be specified, but not both.\n\n 2) The resulting fiber will NOT include `end_coord`\n\n\n \"\"\"\n\n assert not (size is None and end_coord is None)\n assert size is not None or end_coord is not None\n assert self._ordered\n\n if trans_fn is None:\n # Default trans_fn is identify function (inefficient but easy)\n trans_fn = lambda x: x\n\n start_pos = Payload.get(start_pos)\n\n if start_pos is not None:\n assert start_pos < len(self.coords)\n assert start_pos == 0 or self.coords[start_pos - 1] < start_coord\n\n 
range_start = start_pos\n else:\n range_start = 0\n\n # Invariant: trans_fn is order preserving, but we check for reversals\n\n min = start_coord\n\n if size is not None:\n max = start_coord + size\n else:\n max = end_coord\n\n coords = []\n payloads = []\n\n # Start at start_pos (if any)\n\n first_pos = None\n\n for pos in range(range_start, len(self.coords)):\n c = self.coords[pos]\n p = self.payloads[pos]\n new_c = trans_fn(c)\n if new_c >= max:\n break\n if new_c >= min:\n\n # For statistics\n if first_pos is None:\n first_pos = pos\n\n coords.append(new_c)\n payloads.append(p)\n\n\n # Note: This reversal implies a complex read order\n\n if len(coords) > 1 and coords[1] < coords[0]:\n coords.reverse()\n payloads.reverse()\n\n if start_pos is not None:\n if first_pos is None:\n self.setSavedPos(pos)\n else:\n self.setSavedPos(pos, distance=first_pos - start_pos)\n\n\n return self._newFiber(coords, payloads)\n\n\n def prune(self, trans_fn=None, start_pos=None):\n \"\"\"Create a new fiber by pruning the elements of an existing fiber\n\n Return a fiber containing a subset of the coordinates of the\n input fiber. The input fiber is traversed calling `trans_fn`\n for each element. Based on the return value the element is\n included [True] (or not [False]) in the new fiber or\n traversal is stopped [None].\n\n Note: It is the responsibility of the `trans_fn` to cope with\n the fact that the coordinates of the fiber are \"ordered\"\n and/or \"unique\".\n\n Parameters\n ----------\n trans_fn: function: postion, coord, payload -> {True, False, None}, default=None\n\n A function that specifies what to do with each element of\n the original fiber.\n\n\n start_pos: scalar or Payload() containing a scalar, default=None\n Optional **shortcut** value to optimize search for `start_coord`\n\n Returns\n -------\n\n Fiber\n Fiber containing the pruned element of the input Fiber.\n\n \"\"\"\n\n start_pos = Payload.get(start_pos)\n\n if start_pos is not None:\n assert start_pos < len(self.coords)\n range_start = start_pos\n else:\n range_start = 0\n\n coords = []\n payloads = []\n\n # Start at start_pos (if any)\n\n first_pos = None\n end_pos_offset = 1\n\n #\n # Traverse positions in fiber starting at range_start\n #\n for pos in range(range_start, len(self.coords)):\n c = self.coords[pos]\n p = self.payloads[pos]\n\n #\n # Call pruning function\n #\n status = trans_fn(pos, c, p)\n\n #\n # End processing if status is None\n #\n if status is None:\n end_pos_offset = 0\n break\n\n #\n # Include element if status is True\n #\n if status:\n if first_pos is None:\n first_pos = pos\n\n coords.append(c)\n payloads.append(p)\n\n #\n # Record start_pos information\n #\n if start_pos is not None:\n if first_pos is None:\n self.setSavedPos(pos + end_pos_offset)\n else:\n self.setSavedPos(pos + end_pos_offset,\n distance=first_pos - start_pos)\n\n\n return self._newFiber(coords, payloads)\n\n\n def getPosition(self, coord, start_pos=None):\n \"\"\"Find position of element associated with a coordinate [non-mutating]\n\n Return the position of the elemnt at `coord`, if any. If the\n the element at `coord` is **empty** return None. This method\n is non-mutating, i.e., the fiber will not change as a side\n effect of a call to this method.\n\n If `start_pos` is specified it is used as a shortcut to start the\n search for the coordinate. And a new position is saved for use in\n a later search. 
Only works for a one-deep search.\n\n Parameters\n ----------\n coord: coordinate\n Coordinate to look up\n\n start_pos: scalar or Payload() containing a scalar, default=None\n Optional shortcut value to optimize search\n\n Returns\n -------\n\n position: integer or None\n An index that can be used to _getitem_()\n\n Raises\n ------\n\n None\n\n \"\"\"\n\n # TBD: Actually optimize the search\n\n start_pos = Payload.get(start_pos)\n\n try:\n index = self.coords.index(coord)\n except Exception:\n index = None\n\n if start_pos is not None and index is not None:\n self.setSavedPos(index, distance=index - start_pos)\n\n return index\n\n\n def getPositionRef(self, coord, start_pos=None):\n \"\"\"Find position of element associated with a coordinate [mutating]\n\n Return the position of the elemnt at `coord`. If the the\n element at `coord` is **empty** then create it and assign the\n appropriate default payload, e.g., an empty Fiber or a zero.\n\n If `start_pos` is specified it is used as a shortcut to start the\n search for the coordinate. And a new position is saved for use in\n a later search. Only works for a one-deep search.\n\n\n Parameters\n ----------\n coord: coordinate\n Coordinate to look up\n\n start_pos: scalar or Payload() containing a scalar\n Optional shortcut value to optimize search\n\n Returns\n -------\n\n position: integer\n An index that can be used to _getitem_()\n\n\n Raises\n ------\n\n None\n\n \"\"\"\n\n start_pos = Payload.get(start_pos)\n\n # TBD: Actually optimize the search\n\n try:\n index = self.coords.index(coord)\n except Exception:\n self._create_payload(coord)\n index = len(self.payloads) - 1 # TODO: This is wrong...\n\n if start_pos is not None and index is not None:\n self.setSavedPos(index, distance=index - start_pos)\n\n return index\n\n\n def project(self, trans_fn=None, interval=None):\n \"\"\"Create a new fiber with coordinates projected according to `trans_fn`\n\n This method creates a new fiber with the same payloads as the\n original fiber, but with the coordinates transformed by `trans_fn`.\n\n\n Parameters\n ----------\n\n trans_fn: function with signature: lambda coord -> coord\n Function to convert a original fiber coordinate into a new\n fiber coordinate.\n\n interval: tuple, default=None (all coordinates)\n Restict projection to this range of original coordinates\n\n Returns\n -------\n\n fiber\n Fiber with coordinates projected according to `trans_fn`\n\n Raises\n ------\n\n None\n\n\n Notes\n -----\n\n This method returns a fiber that carries forward the \"ordered\"\n and \"unique\" attributes of the original fiber. However, it\n largely does not check that the `trans_fn` maintains those\n attributes. 
Although it does a crude check to see if the\n coordinates seem to have been reversed.\n\n TBD\n ----\n\n Add support for **shortcuts**.\n\n \"\"\"\n\n if trans_fn is None:\n # Default trans_fn is identify function (inefficient but easy)\n trans_fn = lambda x: x\n\n # Invariant: trans_fn is order preserving, but we check for reversals\n\n if interval is None:\n # All coordinates are legal\n\n coords = [trans_fn(c) for c in self.coords]\n payloads = self.payloads\n else:\n # Only pass coordinates in [ interval[0], interval[1] )\n\n min = interval[0]\n max = interval[1]\n\n coords = []\n payloads = []\n\n for c, p in zip(self.coords, self.payloads):\n new_c = trans_fn(c)\n if new_c >= min and new_c < max:\n coords.append(new_c)\n payloads.append(p)\n\n # Note: This reversal implies a complex read order\n\n if len(coords) > 1 and coords[1] < coords[0]:\n coords.reverse()\n payloads.reverse()\n\n return self._newFiber(coords, payloads)\n\n#\n# Deprecated coordinate-based methods\n#\n\n def insertOrLookup(self, coord, value=None):\n \"\"\".. deprecated::\"\"\"\n\n Fiber._deprecated(\"Fiber.insertOrLookup() is deprecated use getPayloadRef()\")\n\n if value is None:\n value = self._createDefault()\n\n payload = Payload.maybe_box(value)\n\n index = 0\n try:\n index = next(x for x, val in enumerate(self.coords) if val >= coord)\n if self.coords[index] == coord:\n return self.payloads[index]\n self.coords.insert(index, coord)\n self.payloads.insert(index, payload)\n return self.payloads[index]\n except StopIteration:\n self.coords.append(coord)\n self.payloads.append(payload)\n return self.payloads[-1]\n\n\n def insert(self, coord, value):\n \"\"\".. deprecated::\"\"\"\n\n Fiber._deprecated(\"Fiber.insert() is deprecated use getPayloadRef()\")\n\n payload = Payload.maybe_box(value)\n\n try:\n index = next(x for x, val in enumerate(self.coords) if val > coord)\n self.coords.insert(index, coord)\n self.payloads.insert(index, payload)\n except StopIteration:\n self.coords.append(coord)\n self.payloads.append(payload)\n\n return None\n\n\n #\n # Owner rank related methods\n #\n def setOwner(self, owner):\n \"\"\"Set rank that owns this fiber\n\n This method allows one to set the **owning rank** of a\n Fiber. This allows support for certain attributes of a Fiber\n that are common to all the fibers in a rank to be accessed\n from the rank. This includes `Fiber.getDefault()`,\n `Fiber.getShape() and `Fiber.getRankId()`.\n\n Parameters\n ----------\n\n owner: Rank\n The rank that owns this fiber\n\n\n \"\"\"\n\n self._owner = owner\n\n def getOwner(self):\n \"\"\"Get rank that owns this fiber.\n\n This method allows one to get the **owning rank** of a\n Fiber.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n owner_rank: Rank\n The rank that owns this fiber\n\n \"\"\"\n\n return self._owner\n\n\n #\n # Default payload methods\n #\n def setDefault(self, default):\n \"\"\".. 
deprecated::\"\"\"\n\n Fiber._deprecated(\"Fiber.setDefault() default values should be set by the owning rank\")\n\n self._setDefault(default)\n\n\n def _setDefault(self, default):\n \"\"\"_setDefault - internal use version\"\"\"\n\n owner = self.getOwner()\n\n #\n # Try to set default at owning rank, otherwise hold value locally\n #\n if owner is not None:\n owner.setDefault(default)\n else:\n self._default = Payload.maybe_box(default)\n\n\n def getDefault(self):\n \"\"\"Get the default payload for this fiber.\n\n Ideally the **default** value from a fiber is obtained from the owner rank.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value: value\n A copy of the default payload of fibers in this rank\n\n Raises\n ------\n None\n\n \"\"\"\n\n #\n # Try to get default from owning rank\n #\n owner = self.getOwner()\n\n if owner is not None:\n return owner.getDefault()\n\n #\n # For unowned fibers, try to guess a default value\n #\n # Note: the payload being a Fiber overwrides self._default\n #\n if len(self.payloads) > 0 and Payload.contains(self.payloads[0], Fiber):\n return Fiber\n\n if self._default != 0:\n return deepcopy(self._default)\n\n return Payload(0)\n\n\n\n def _createDefault(self, addtorank=True):\n \"\"\"_createDefault\n\n Obtain the default payload for a fiber. This method goes one\n step further than getDefault() because if the default payload\n is itself a fiber it creates a Fiber().\n\n Finally, if the current fiber is part of a a non-leaf rank\n it (optionally) adds the new fiber into the **next** rank.\n\n TBD: Fold this into an option to getDefault()\n\n \"\"\"\n\n owner = self.getOwner()\n\n default = self.getDefault()\n\n a_payload = (self.payloads or [None])[0]\n if isinstance(a_payload, Fiber):\n next_default = a_payload.getDefault()\n else:\n next_default = None\n\n return Fiber._instantiateDefault(owner, default, next_default, addtorank)\n\n\n @staticmethod\n def _instantiateDefault(owner, default, next_default=None, addtorank=False):\n \"\"\"_instantiateDefault\n\n Create (recursively for default values that are tuples) an\n instance of a default payload for a fiber. This method goes\n one step further than getDefault() because if the default\n payload is itself (or contains) a fiber it creates a Fiber()\n object.\n\n Finally, if the newly created fiber is part of a a non-leaf\n rank it (optionally) adds the new fiber into the **next**\n rank.\n\n Parameters\n ----------\n\n owner: rank\n The rank that owns the fiber we are creating a payload for\n\n default: a payload (boxed or unboxed)\n A default value from a fiber\n\n next_default: a payload (boxed or unboxed)\n If `default` is a Fiber then a default payload for that fiber\n\n addtorank: Boolean\n If the newly created value is a fiber, then should that fiber\n be added to the its owning rank (owner.next_rank)\n\n Returns\n -------\n\n A (boxed) payload\n\n \"\"\"\n #\n # Selectively unbox the default\n #\n default = Payload.get(default)\n\n if isinstance(default, tuple):\n #\n # Recursively create defaults. 
Note each of the elements of the tuple\n # will be **boxed** as will the final result...\n #\n return Payload(tuple([Fiber._instantiateDefault(owner, e) for e in default]))\n\n if callable(default):\n #\n # Call the method to create the value\n #\n # Note, currently, this must be a fiber..\n #\n value = default()\n\n #\n # Conditionaly set the owning rank of the\n # newly created fiber by appending it to the\n # next rank of the tensor.\n #\n # Adding it to the owner.next_rank sets the\n # \"default\" for the rank, otherwise we set a\n # \"default\" explcitly.\n #\n # TBD: This is a messy interaction with rank\n # See Rank.append()\n #\n if Payload.contains(value, Fiber):\n if owner and owner.next_rank is not None:\n #\n # The new fiber is nominally part of\n # \"owner.next_rank\"\n #\n # TBD: Rank.append() sets owner, maybe that should be done here.\n #\n if addtorank:\n #\n # Actually add it to the rank\n #\n owner.next_rank.append(value)\n else:\n #\n # Set the owner, but do not add to rank\n #\n value.setOwner(owner.next_rank)\n else:\n value._setDefault(next_default)\n else:\n assert False, \"Unsupported Payload type\"\n else:\n assert not isinstance(default, Payload)\n\n value = default\n\n return Payload.maybe_box(value)\n\n\n #\n # Saved position shortcut related methods\n #\n\n def setSavedPos(self, position, distance=None):\n \"\"\"Set the postion for a **shortcut**\n\n Save the postion in a fiber for use as a future **shortcut**.\n Typically to shorten the duration of some search. The\n (optional) distance is used to maintain statistics on the\n number of elements traversed before arriving at this new\n postion.\n\n Parameters\n ----------\n postion: integer\n Postion (index) in the fiber to remember\n\n distance, integer, default=None\n Distance searched to arrive at this postion\n\n Returns\n -------\n None\n\n See also\n --------\n\n `Fiber.getSavedPos()`\n `Fiber.getSavedPosStats()`\n\n \"\"\"\n\n position = Payload.get(position)\n\n self._saved_pos = position\n\n #\n # Optionally save distanced moved statistics\n #\n if distance is not None:\n self._saved_count += 1\n self._saved_dist += abs(distance)\n\n\n def getSavedPos(self):\n \"\"\"Set the postion for a **shortcut**\n\n Get the postion in a fiber for use as a **shortcut**.\n Typically to shorten the duration of some search.\n\n Parameters\n ----------\n postion: integer\n Postion (index) in the fiber last remembered\n\n\n Returns\n -------\n postion: integer\n\n\n See also\n --------\n\n `Fiber.setSavedPos()`\n `Fiber.getSavedPosStats()`\n\n \"\"\"\n\n return self._saved_pos\n\n\n def getSavedPosStats(self, clear=True):\n \"\"\"Get the statistcs assocaited with **shortcuts**\n\n Get the number of shortcuts used and the distance searched\n using those shortcuts, and optionally clear the statistics\n\n\n Parameters\n ----------\n clear: Bool\n Clear the statistics\n\n\n Returns\n -------\n stats: tuple\n Tuple of number of **shortcuts** set and total search distance\n\n\n See also\n --------\n\n `Fiber.getSavedPos()`\n `Fiber.setSavedPos()`\n\n \"\"\"\n\n stats = (self._saved_count, self._saved_dist)\n\n if clear:\n self._clearSavedPosStats()\n\n return stats\n\n\n def _clearSavedPosStats(self):\n \"\"\"_clearSavedPosStats\"\"\"\n\n self._saved_count = 0\n self._saved_dist = 0\n\n #\n # Computed attribute acccessors\n #\n def minCoord(self):\n \"\"\"Return the minimum coordinate that exists in the fiber\n\n Parameters\n ----------\n None\n\n Returns\n -------\n min_coordinate: coordinate\n\n\n Notes\n -----\n\n This is 
only meaningful for coordinates that have an lexographical order.\n\n \"\"\"\n\n # TBD: Should check that the candidate is not an explicit zero\n\n if len(self.coords) == 0:\n return None\n\n return min(self.coords)\n\n def maxCoord(self):\n \"\"\"Return the maximum coordinate that exists in the fiber\n\n Parameters\n ----------\n None\n\n Returns\n -------\n max_coordinate: coordinate\n\n\n Notes\n -----\n\n This is only meaningful for coordinates that have an lexographical order.\n\n \"\"\"\n\n #\n # If _max_coord is set we assume it is correct\n #\n # TBD: _max_coord is not always maintained properly for\n # some fiber mutations\n #\n if self._max_coord is not None:\n return self._max_coord\n\n if len(self.coords) == 0:\n return None\n\n #\n # TBD: Check is there is a lexographical order...\n #\n if not isinstance(self.coords[0], int):\n #\n # Coordinates aren't integers, so maxCoord doesn't make sense\n #\n return None\n\n #\n # TBD: Maybe should actually look for largest non-empty coordinate\n #\n return max(self.coords)\n\n\n def countValues(self):\n \"\"\"Count values in the fiber tree\n\n Count the number of leaf elements in the fibertree that are\n not **empty** and have do not have the a payload with the\n fiber's **default** value.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value_count: integer\n Number of non-empty, non-default values in the fibertee.\n\n Notes\n -----\n\n An explcit zero scalar value will NOT count as a value in a\n\n \"\"\"\n\n count = 0\n for p in self.payloads:\n if Payload.contains(p, Fiber):\n count += Payload.get(p).countValues()\n else:\n count += 1 if not Payload.isEmpty(p) else 0\n\n return count\n\n#\n# Position based methods\n#\n def __getitem__(self, keys):\n \"\"\"__getitem__\n\n For an integer key return a (coordinate, payload) tuple\n containing the contents of a fiber at `position`, i.e., an\n offset in the coordinate and payload arrays. 
For a slice key\n return a new fiber for the slice\n\n Parameters\n ----------\n keys: single integer/slicr or tuple of integers/slices\n The positions or slices in an n-D fiber\n\n Returns\n -------\n tuple or Fiber\n A tuple of a coordinate and payload or a Fiber of the slice\n\n Raises\n ------\n\n IndexError\n Index out of range\n\n TypeError\n Invalid key type\n\n \"\"\"\n\n if not isinstance(keys, tuple):\n # Keys is a single value for 1-D access\n key = keys\n key_cdr = ()\n else:\n # Keys is a tuple for for n-D access\n key = keys[0]\n key_cdr = keys[1:]\n\n if isinstance(key, int):\n # Handle key as single index\n\n if key < 0:\n # Handle negative indices\n key += len(self)\n\n if key < 0 or key >= len(self):\n raise(IndexError, f\"The index ({key}) is out of range\")\n\n new_payload = self.payloads[key]\n\n if len(key_cdr):\n # Recurse down the fiber tree\n new_payload = new_payload[key_cdr]\n\n return CoordPayload(self.coords[key], new_payload)\n\n if isinstance(key, slice):\n # Key is a slice\n\n # Get the start, stop, and step from the slice\n slice_range = range(*key.indices(len(self)))\n\n coords = [self.coords[ii] for ii in slice_range]\n\n if len(key_cdr):\n # Recurse down the fiber tree for each payload in slice\n payloads = [self.payloads[ii][key_cdr] for ii in slice_range]\n else:\n # Just use each payload in slice\n payloads = [self.payloads[ii] for ii in slice_range]\n\n return Fiber(coords, payloads)\n\n raise(TypeError, \"Invalid key type.\")\n\n\n def __setitem__(self, key, newvalue):\n \"\"\"__setitem__\n\n The `newvalue` parameter is either a CoordPayload or an\n arbitrary value to assign to the position \"key\" in the fiber.\n If `newvalue` is not a CoordPayload or the Coord in the\n CoordPayload is None the current coordinate will be left\n unchanged. The payload will be boxed if appropriate. 
If the\n payload is None, then the payload will be left unchanged.\n\n Parameters\n ----------\n key: single integer\n The position in the fiber to be set\n\n newvalue: a CoordPayload or a payload value\n The coordinate/payload or just payload to assign\n\n Returns\n -------\n Nothing\n\n Raises\n ------\n\n IndexError\n Index out of range\n\n TypeError\n Invalid key type\n\n CoordinateError\n Invalid coordinate\n\n\n Notes\n ------\n\n If this fiber does has the \"unique\" attribute but not the\n \"ordered\" attribute this method does not check that the new\n coordinate is unique.\n\n \"\"\"\n\n position = key\n\n #\n # TBD: Get isinstance of CoordPayload to work...\n #\n try:\n coord = newvalue.coord\n payload = newvalue.payload\n except Exception:\n coord = None\n payload = newvalue\n\n if coord is not None:\n #\n # Check that coordinate order is maintained\n #\n if self._ordered:\n if position > 0 and coord <= self.coords[position - 1]:\n raise CoordinateError\n\n if position + 1 < len(self.coords) and coord >= self.coords[position + 1]:\n raise CoordinateError\n\n self.coords[position] = coord\n\n #\n # A payload of None just updates the coordinate\n #\n if payload is not None:\n self.payloads[position] = Payload.maybe_box(payload)\n\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n\n return len(self.coords)\n\n\n def isEmpty(self):\n \"\"\"Check if Fiber is empty\n\n Empty is defined as of zero length, only containing\n **default** values or only containing subfibers that are\n empty.\n\n Returns\n -------\n\n empty_p: Bool\n Boolean indicating the fiber was empty\n\n\n Notes\n -----\n\n Need to check for **default** values that are not zero\n\n \"\"\"\n\n return all(map(Payload.isEmpty, self.payloads))\n\n\n def nonEmpty(self):\n \"\"\"Create Fiber with only non-empty elements\n\n Because our fiber representation might have explicit zeros in\n it this method (recursively) creates a new fiber with those\n elements pruned out.\n\n Returns\n -------\n\n pruned_fiber: Fiber\n Copy of original fiber with only non-empty elements\n\n \"\"\"\n coords = []\n payloads = []\n\n for c, p in zip(self.coords, self.payloads):\n if not Payload.isEmpty(p):\n coords.append(c)\n if Payload.contains(p, Fiber):\n payloads.append(p.nonEmpty())\n else:\n payloads.append(p)\n\n return self._newFiber(coords, payloads)\n\n#\n# Iterator methods\n#\n\n def __iter__(self):\n \"\"\"__iter__\"\"\"\n\n for i in range(len(self.coords)):\n yield CoordPayload(self.coords[i], self.payloads[i])\n\n def __reversed__(self):\n \"\"\"Return reversed fiber\"\"\"\n\n for coord, payload in zip(reversed(self.coords),\n reversed(self.payloads)):\n yield CoordPayload(coord, payload)\n\n\n def iterShape(self):\n \"\"\"Iterate over fiber shape\n\n Iterate over every coordinate in the shape, returning a\n CoordPayload for each one, with a **default** value for\n empty payloads.\n\n Parameters\n ----------\n None\n\n \"\"\"\n\n for c in range(self.getShape(all_ranks=False)):\n p = self.getPayload(c)\n yield CoordPayload(c, p)\n\n\n def iterShapeRef(self):\n \"\"\"Iterate over fiber shape\n\n Iterate over every coordinate in the shape, returning a\n CoordPayload for each one, and creating elements for empty\n payloads.\n\n Parameters\n ----------\n None\n\n \"\"\"\n\n for c in range(self.getShape(all_ranks=False)):\n p = self.getPayloadRef(c)\n yield CoordPayload(c, p)\n\n\n#\n# Core methods\n#\n\n def clear(self):\n \"\"\"Clear all coordinates/payloads in a fiber\n\n Returns\n -------\n Nothing\n\n \"\"\"\n\n self.coords.clear()\n 
self.payloads.clear()\n\n\n def payload(self, coord):\n \"\"\".. deprecated::\"\"\"\n\n Fiber._deprecated(\"Fiber.payload() is deprecated use getPayload()\")\n\n return self.getPayload(coord)\n\n\n def append(self, coord, value):\n \"\"\"Append an element at the end of fiber\n\n Parameters\n ----------\n\n coord: scalar\n The coordinate of the element to add to the fiber\n\n value: payload\n The payload of the elemnt to add to the fiber\n\n Note\n ----\n\n For \"ordered\" fibers, the coordinates in the Fiber must be\n monotonically increasing.\n\n The \"unique\" property is not checked for \"unordered\" fibers.\n\n The payload will be optionally be **boxed**.\n\n \"\"\"\n\n if self._ordered:\n assert self.maxCoord() is None or self.maxCoord() < coord, \\\n \"Fiber coordinates in 'ordered' fibers must be monotonically increasing\"\n\n payload = Payload.maybe_box(value)\n\n self.coords.append(coord)\n self.payloads.append(payload)\n\n\n def extend(self, other):\n \"\"\"Extend a fiber with another fiber\n\n Extends the fiber with the contents of another fiber\n\n Parameters\n ----------\n other: Fiber\n A fiber to extend the original fiber with\n\n\n Returns\n -------\n Nothing\n\n\n Notes\n ------\n\n The `other` fiber is not copied, so beware of multiple\n references to the same objects.\n\n The \"unique\" property is not checked for \"unordered\" fibers.\n\n \"\"\"\n\n assert Payload.contains(other, Fiber), \\\n \"Fibers can only be extended with another fiber\"\n\n if other.isEmpty():\n # Extending with an empty fiber is a nop\n return None\n\n if self._ordered:\n assert self.maxCoord() is None or self.maxCoord() < other.coords[0], \\\n \"Fiber coordinates in 'ordered' fibers must be monotonically increasing\"\n\n self.coords.extend(other.coords)\n self.payloads.extend(other.payloads)\n\n return None\n\n\n def updateCoords(self, func, depth=0, rankid=None):\n \"\"\"Update (rewrite) the values of the coordinates of a fiber\n\n Update each coordinate in the the fibers at a depth of `depth`\n below `self` by invoking `func` on it. Therefore, a depth of\n zero will update the coordinates in the current fiber. 
Higher\n depths with result in a depth first search down to `depth`\n before traversing the coordinates.\n\n Parameters\n ----------\n\n func: function: position, coordinate, payload -> coordinate\n A function that is invoked with each coordinate as its argument\n\n depth: integer\n The depth in the fiber tree to dive before traversing\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n --------\n Nothing\n\n Raises\n ------\n\n Nothing\n\n\n Notes\n -----\n\n This method checks and that coordinates remain monotonically\n increasing and re-orders to make sure\n self.coords/self.payloads preserve monotonacity.\n\n The \"unique\" property is not checked.\n\n \"\"\"\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n # Recurse down to depth...\n for p in self.payloads:\n p.updateCoords(func, depth=depth - 1)\n else:\n # Update my coordinates\n\n no_sort_needed = True\n\n last_coord = None\n\n for i in range(len(self.coords)):\n new_coord = func(i, self.coords[i], self.payloads[i])\n self.coords[i] = new_coord\n\n no_sort_needed = no_sort_needed and ((last_coord is None) or (last_coord <= new_coord))\n last_coord = new_coord\n\n if self._ordered and not no_sort_needed:\n #\n # Resort the coords/payloads\n #\n self.logger.debug(\"Fiber.updateCoords() - sort needed\")\n\n zipped_cp = zip(self.coords, self.payloads)\n sorted_cp = sorted(zipped_cp)\n self.coords, self.payloads = [ list(tuple) for tuple in zip(*sorted_cp)]\n\n return None\n\n\n def updatePayloads(self, func, depth=0, rankid=None):\n \"\"\"Update the values of the payloads of a fiber\n\n Update each payload in the the fibers at a depth of `depth`\n below \"self\" by invoking \"func\" on it. Therefore, a depth of\n zero will update the payloads in the current fiber. 
Higher\n depths with result in a depth first search down to `depth`\n before traversing the payloads.\n\n Parameters\n ----------\n\n func: function: postion, coordinate, payload -> payload\n A function that is invoked with each payload as its argument\n\n depth: integer\n The depth in the fibertree to dive before traversing\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n --------\n\n None\n\n Raises\n ------\n\n TBD: currently nothing\n\n \"\"\"\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n # Recurse down to depth...\n for p in self.payloads:\n p.updatePayloads(func, depth=depth - 1)\n else:\n # Update my payloads\n for i in range(len(self.payloads)):\n self.payloads[i] = func(self.payloads[i])\n\n return None\n\n\n def unzip(self):\n \"\"\"Unzip the payloads of a fiber\n\n Unzip a fiber whose payloads are a tuple into two fibers each\n with the same coordinates\n\n Parameters\n ----------\n\n None\n\n Returns\n -------\n\n unziped_fibers: tuple\n A tuple of fibers\n\n \"\"\"\n\n coords_a = list(self.coords)\n coords_b = list(self.coords)\n\n (payloads_a, payloads_b) = zip(*self.payloads)\n\n return (self._newFiber(coords_a, payloads_a),\n self._newFiber(coords_b, payloads_b))\n\n#\n# Shape-related methods\n#\n\n def getShape(self, all_ranks=True):\n \"\"\"Return the shape of a fibertree\n\n Find the **shape** of the current fiber (`all_ranks`=False) or\n the entire fibertree rooted at the current fiber (`all_ranks`=True)\n\n Parameters\n ----------\n\n all_ranks: Bool, default=True\n\n Returns\n -------\n\n shape: integer or list of integers\n The shape of the current fiber or the entire tree\n\n \"\"\"\n\n owner = self.getOwner()\n\n shape = None\n\n if owner is not None:\n shape = owner.getShape(all_ranks=all_ranks)\n\n if shape is not None:\n return shape\n\n if self._shape is not None:\n return self._shape\n\n #\n # Backup for cases where there is no owner\n # or owner didn't know shape\n #\n shape = self.estimateShape(all_ranks=all_ranks)\n\n return shape\n\n\n def estimateShape(self, all_ranks=True):\n \"\"\"estimateShape\n\n Traverse a fiber tree to estimate its shape\n\n \"\"\"\n shape = self._calcShape(all_ranks=all_ranks)\n\n #\n # Since _calcShape() always returns a list we may\n # need to get out first value\n #\n if not all_ranks:\n shape = shape[0]\n\n return shape\n\n\n def _calcShape(self, shape=None, level=0, all_ranks=True):\n \"\"\" _calcShape()\n\n Find the maximum coordinate at each level of the tree\n\n TBD: Using maximum coordinate isn't really right because\n the original array may have a empty value at its\n maximum coordinate location\n\n \"\"\"\n\n #\n # Start recursion\n #\n if shape is None:\n shape = []\n\n #\n # Conditionaly append a new level to the shape array\n #\n if len(shape) < level + 1:\n shape.append(0)\n\n #\n # If fiber is empty then shape doesn't change\n #\n if not len(self.coords):\n return shape\n\n #\n # Try to determine the maximum coordinate\n #\n max_coord = self.maxCoord()\n\n #\n # The fiber is not empty, but max_coord isn't meaningful,\n # so assume fiber is dense and coodinates start at zero,\n # and return count of elements minus one.\n #\n if max_coord is None:\n max_coord = len(self.coords)-1\n\n #\n # Update shape for this Fiber at this level\n #\n shape[level] = max(shape[level], max_coord + 1)\n\n #\n # Recursively process payloads that are Fibers\n #\n if all_ranks and 
Payload.contains(self.payloads[0], Fiber):\n for p in self.payloads:\n shape = Payload.get(p)._calcShape(shape, level + 1)\n\n return shape\n\n#\n# Rankid methods\n#\n def getRankIds(self, all_ranks=True):\n \"\"\"Return rankids of a fibertree\n\n Find the **rank ids** of the current fiber (`all_ranks`=False) or\n the entire fibertree rooted at the current fiber (`all_ranks`=True)\n\n Parameters\n ----------\n\n all_ranks: Bool, default=True\n\n Returns\n -------\n\n rank_ids: str or list of str\n The rank ids of the current fiber or the entire tree\n\n \"\"\"\n\n owner = self.getOwner()\n\n if owner is not None:\n return owner.getRankIds(all_ranks=True)\n\n #\n # Approximate rankids for fiber not in a tensor\n #\n\n rankids = [f\"X.{d}\" for d in reversed(range(self.getDepth()))]\n\n return rankids\n\n#\n# Dimensionality method\n#\n def getDepth(self):\n \"\"\"Get the depth of the fiber\n\n Get the depth, i.e., number of dimensions, of the fiber\n\n Parameters\n ----------\n None\n\n Returns\n -------\n depth: integer\n The depth of the fibertree\n\n Raises\n ------\n None\n\n \"\"\"\n\n owner = self.getOwner()\n\n if owner is not None:\n #\n # In a tensor, so get the number of ranks starting at this fiber\n #\n depth = 0\n\n while owner is not None:\n depth += 1\n owner = owner.next_rank\n\n return depth\n\n #\n # Just have a raw fiber, so count levels\n #\n fiber = self\n\n depth = 1\n\n while len(fiber.payloads) > 0 and isinstance(fiber.payloads[0], Fiber):\n depth += 1\n fiber = fiber.payloads[0]\n\n return depth\n\n\n#\n# Miscelaneous methods\n#\n def uncompress(self, shape=None, level=0):\n \"\"\"Return an uncompressed fibertree (i.e., a nest of lists)\n\n Recursively create a nest of lists that corresponding to the\n **uncompressed** represention of the current\n fibertree. **Empty** coordinates at a fiber will be\n converted into the **default** value for that fiber.\n\n Parameters\n ----------\n\n shape: list of integers, default=None\n Impose a fixed shape on the result\n\n\n Returns\n -------\n uncompressed: list of lists\n\n Notes\n ------\n\n All elements of the lists are **unboxed**, i.e., never of type\n `Payload`. 
However, nested elements, e.g., as part of a\n `tuple`, of type `Payload` are not **unboxed**.\n\n This method only works for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n assert self._ordered and self._unique\n\n if shape is None:\n shape = self.getShape(all_ranks=True)\n\n f = []\n\n shape_fiber = Fiber(coords=list(range(shape[level])), initial=1)\n for c, (mask, p, _) in self | shape_fiber:\n\n if (mask == \"AB\"):\n if Payload.contains(p, Fiber):\n f.append(Payload.get(p).uncompress(shape, level + 1))\n else:\n f.append(Payload.get(p))\n\n if (mask == \"B\"):\n f.append(self._fillempty(shape, level + 1))\n\n return f\n\n\n def _fillempty(self, shape, level):\n \"\"\"Recursive fill empty\"\"\"\n\n if level + 1 > len(shape):\n #\n # Find a fiber at the leaf level\n #\n f = self\n while isinstance(f.payloads[0], Fiber):\n f = f.payloads[0]\n\n #\n # Use the **unboxed** default from the leaf level fiber\n #\n return Payload.get(f.getDefault())\n\n f = []\n\n for i in range(shape[level]):\n f.append(self._fillempty(shape, level + 1))\n\n return f\n\n#\n# Arithmetic operations\n#\n\n def __ilshift__(self, other):\n \"\"\"Fiber assignment\n\n This operator will make a recursive assignment of all the\n elements of one fiber (`other`) into another fiber (`self`)\n using getPayloadRef(), so subfibers in new fiber are\n properly inserted into their owning rank/tensor\n\n Note: we use <<= in place of base '=' since we don't want a\n pointer to the existing fiber but an copy of `other` in the\n new fiber.\n\n Parameters\n ----------\n\n other: Fiber\n A fiber whose elements will be inserted into `self`\n\n Notes\n -----\n\n There is an analogous assignment operator for the `Payload`\n and `CoordPayload` classes, so one can \"assign\" a new value to\n a \"payload\" irrespective of whether the \"payload\" is a\n `Payload`, a `CoordPayload` or a `Fiber`.\n\n This method is not supported for fibers without the \"unique\"\n attribute.\n\n \"\"\"\n\n assert Payload.contains(other, Fiber)\n assert self._unique\n\n if len(self.coords) != 0:\n #\n # Clear out any existing data\n #\n self.coords = []\n self.payloads = []\n\n\n for c, p in other:\n #\n # For each non-empty element of other, insert it into the\n # target, note that this works regardless of whether p is\n # a Fiber or a Payload\n #\n if Payload.isEmpty(p):\n continue\n\n ref = self.getPayloadRef(c)\n ref <<= p\n\n return self\n\n\n def __add__(self, other):\n \"\"\"Scalar/fiber or fiber/fiber elementwise addition\n\n This operation does one of two things based on the type of\n `other`. If `other` is a fiber then `other` is added\n element-wise with `self`. Otherwise `other` is treated as a\n scalar and added to each element of `self`. In either case a\n new fiber is created with those sums.\n\n Parameters\n ----------\n\n other: scalar | fiber\n The scalar to add to each element of `self`\n or the fiber to add elementwise to `self`\n\n Returns\n -------\n\n result_fiber: Fiber\n The fiber after the addition of `other` to `self`\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f + 2\n Fiber([0, 1, 2, 5], [3, 4, 5, 2, 2, 8])\n\n\n Note\n -----\n\n From the persepctive of modeling activity, this operation has\n implict loops that get exectuted atomically. 
Therefore, it\n should be used selectively when one is trying to show all the\n activity in a program's flow.\n\n When doing scalar/fiber addition, empty elements of `self`\n will be treated as zero and the value of `other` will appear\n in the output for those coordinates. When doing fiber/fiber\n addition the result will have the union of the coordinates of\n `self` and `other`.\n\n \"\"\"\n\n coords = []\n payloads = []\n\n #\n # If `other` is a fiber element-wise add the two fibers\n # Otherwise add `other` to each element of `self`\n #\n if isinstance(other, Fiber):\n for c, (_, self_val, other_val) in self | other:\n coords.append(c)\n payloads.append(self_val + other_val)\n else:\n for c, p in self.iterShape():\n coords.append(c)\n payloads.append(other + p.value)\n\n return self._newFiber(coords, payloads)\n\n\n def __radd__(self, other):\n \"\"\" Scalar/fiber addition\n\n See __add__ for more information.\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> 2 + f\n Fiber([0, 1, 2, 5], [3, 4, 5, 2, 2, 8])\n\n \"\"\"\n\n return self.__add__(other)\n\n\n def __iadd__(self, other):\n \"\"\"Add a scalar or a fiber to a fiber\n\n This operation does one of two things based on the type of\n `other`. If `other` is a fiber then `other` is added\n elementwise to `self`. Otherwise it is treated as a scalar and\n added to each element of `self`. In either case `self` is\n updated with the sum.\n\n Parameters\n ----------\n\n other: scalar | fiber\n The scalar to add to each element of `self`\n or the fiber to add elementwise to `self`\n\n Returns\n -------\n\n None\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f += 2\n >>> f\n Fiber([0, 1, 2, 5], [3, 4, 5, 2, 2, 8])\n\n\n Note\n -----\n\n From the persepctive of modeling activity, this operation has\n implict loops that get exectuted atomically. Therefore, it\n should be used selectively when one is trying to show all the\n activity in a program's flow.\n\n When doing the addtions, empty elements of `self` will be\n treated as zero and and will be included in the sum (only with\n non-empty coordiantes of `other` if it is a fiber), creating\n new non-empty elements in `self`.\n\n \"\"\"\n\n #\n # If `other` is a fiber add each element of `other` to `self`\n #\n if isinstance(other, Fiber):\n for _, (self_ref, other_val) in self << other:\n self_ref += other_val\n return self\n\n #\n # Othewise add `other` to each element of `self`\n #\n for c, p in self.iterShapeRef():\n p += other\n\n return self\n\n\n def __mul__(self, other):\n \"\"\"Scalar/fiber and fiber/fiber elementwise multiplication\n\n This operation does one of two things based on the type of\n `other`. If `other` is a fiber then `other` is multiplied\n element-wise with `self`. Otherwise `other` is treated as a\n scalar and used to scale each element of `self`. In either\n case a new fiber is created with those products.\n\n Parameters\n ----------\n\n other: scalar | Fiber\n The scalar to scale each element of `self` by\n or the fiber to multiply elementwise with `self`.\n\n Returns\n -------\n\n result_fiber: Fiber\n A fiber scaled or elementwise multiplied by `other`\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f * 2\n Fiber([0, 1, 2, 5], [2, 4, 6, 12])\n\n Notes\n -----\n\n From the persepctive of modeling activity, this operation has\n implict loops that get exectuted atomically. 
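A matching sketch for multiplication (import path assumed as above; the scalar result mirrors the example given earlier in this docstring):

from fibertree import Fiber

a = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])   # coords [0, 1, 2, 5]
b = Fiber.fromUncompressed([0, 4, 5, 0, 0, 2])   # coords [1, 2, 5]

a * 2    # scales stored payloads only: Fiber([0, 1, 2, 5], [2, 4, 6, 12])
a * b    # intersection of coordinates, payloads multiplied elementwise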
Therefore, it\n should be used selectively when one is trying to show all the\n activity in a program's flow.\n\n \"\"\"\n\n coords = []\n payloads = []\n\n if isinstance(other, Fiber):\n for c, (a_val, b_val) in self & other:\n coords.append(c)\n payloads.append(a_val * b_val)\n else:\n for c, p in self:\n coords.append(c)\n payloads.append(other * p.value)\n\n result = self._newFiber(coords, payloads)\n return result\n\n\n def __rmul__(self, other):\n \"\"\" Scalar/fiber multiplication\n\n See __mul__ for more information.\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> 2 * f\n Fiber([0, 1, 2, 5], [2, 4, 6, 12])\n\n \"\"\"\n\n return self.__mul__(other)\n\n\n def __imul__(self, other):\n \"\"\"__imul__\n\n This operation does one of two things based on the type of\n `other`. If `other` is a fiber then `other` is multiplied\n elementwise by `self`. Otherwise it is treated as a scalar and\n scales each element of `self`. In either case `self` is\n updated with the products.\n\n Parameters\n ----------\n\n other: scalar | Fiber\n The scalar to scale each element of `self`\n or the fiber to multiply elementwise to `self`\n\n Returns\n -------\n\n None\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f *= 2\n >>> f\n Fiber([0, 1, 2, 5], [2, 4, 6, 12])\n\n Notes\n -----\n\n From the persepctive of modeling activity, this operation has\n implict loops that get exectuted atomically. Therefore, it\n should be used selectively when one is trying to show all the\n activity in a program's flow.\n\n \"\"\"\n\n #\n # If `other` is a fiber, elementwise multiply `other` by `self`\n #\n if isinstance(other, Fiber):\n for c, (self_val, other_val) in self & other:\n #\n # Get a reference to the c coordinate in `self`. 
Note\n # that is may be a zero and therefore a hard zero will\n # be included in the final fiber.\n #\n self_ref = self.getPayloadRef(c)\n self_ref <<= self_val * other_val\n\n return self\n\n #\n # Othewise multiply `other` to each element of `self`\n #\n for _, p in self:\n p *= other\n\n return self\n\n#\n# Split methods\n#\n# Note: all these methods return a new fiber\n#\n def __truediv__(self, partitions):\n \"\"\"Split a fiber uniformly in coordinate space into `partitions` partitions\n\n Parameters\n ----------\n\n partitions: integer\n The number of partions to split the fiber into\n\n\n Returns\n -------\n\n split_fiber: Fiber\n A fiber split uniformly in coorindate space\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f / 2\n F[ 0 -> F [ 0 -> 1, 1 -> 2, 2-> 3],\n 3 -> F [ 5 -> 6]]\n\n\n Notes\n -----\n\n This method depends on maxCoord() being meaningful\n\n TBD\n ---\n\n Is there a reasonable semantic if `partitions` is a fiber\n \"\"\"\n\n shape = self.getShape(all_ranks=False)\n\n assert shape is not None, \\\n \"Cannot partition a fiber without a maximum coordinate\"\n\n return self.splitUniform((shape+partitions-1)//partitions)\n\n\n def __floordiv__(self, partitions):\n \"\"\"Split a fiber evenly in position space into `partitions` partitions\n\n Parameters\n ----------\n\n partitions: integer\n The number of partions to split the fiber into\n\n\n Returns\n -------\n\n split_fiber: Fiber\n A fiber split equally in postion space\n\n\n Examples\n --------\n\n >>> f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])\n >>> f / 2\n F[ 0 -> F [ 0 -> 1, 1 -> 2 ],\n 2 -> F [ 2 -> 3, 5 -> 6 ]]\n\n\n Notes\n -----\n\n None\n\n TBD\n ---\n\n Is there a reasonable semantic if `partitions` is a fiber\n \"\"\"\n\n\n occupancy = len(self.coords)\n\n return self.splitEqual ((occupancy+partitions-1)//partitions)\n\n\n def splitUniform(self, step, partitions=1, relativeCoords=False, depth=0, rankid=None):\n \"\"\"Split a fiber uniformly in coordinate space\n\n Parameters\n ----------\n step: integer\n The `step` between initial coordinates in each split\n\n relative_coords: Bool\n Should the coordinate in the split fibers match the\n original coordinates (`relativeCoords`=False) or always\n start at zero (`relativeCoords`=True)\n\n depth: integer, default=0\n The depth in the fibertree to perform the split\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n -------\n split_fiber: Fiber\n A fibertree with one more level corresonding to the\n splits of the original fiber.\n\n Notes:\n -------\n References to pieces of the original tensor may be returned as\n pieces of the returned tensor.\n\n \"\"\"\n\n class _SplitterUniform():\n\n def __init__(self, step):\n self.step = step\n self.cur_group = 0\n\n def nextGroup(self, i, c):\n count = 0\n last_group = self.cur_group\n\n while c >= self.cur_group:\n count += 1\n last_group = self.cur_group\n self.cur_group += self.step\n\n return count, last_group\n\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n split_fiber = deepcopy(self)\n update_lambda = lambda p: p.splitUniform(step, partitions, relativeCoords)\n split_fiber.updatePayloads(update_lambda, depth=depth-1)\n return split_fiber\n\n splitter = _SplitterUniform(step)\n\n return self._splitGeneric(splitter,\n partitions,\n relativeCoords=relativeCoords)\n\n\n def splitNonUniform(self, splits, partitions=1, relativeCoords=False, 
depth=0, rankid=None):\n \"\"\"Split a fiber non-uniformly in coordinate space\n\n Parameters\n ----------\n splits: list of integers\n A list of the starting coordinates for each split\n\n relative_coords: Bool\n Should the coordinate in the split fibers match the\n original coordinates (`relativeCoords`=False) or always\n start at zero (`relativeCoords`=True)\n\n depth: integer, default=0\n The depth in the fibertree to perform the split\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n -------\n split_fiber: Fiber\n A fibertree with one more level corresonding to the\n splits of the original fiber\n\n\n Notes:\n -------\n One does not needs to include a split starting at coordinate zero.\n\n References to pieces of the original tensor may be returned as\n pieces of the returned tensor.\n\n \"\"\"\n\n class _SplitterNonUniform():\n\n def __init__(self, splits):\n if Payload.contains(splits, Fiber):\n self.splits = splits.coords.copy()\n else:\n self.splits = splits.copy()\n\n self.cur_split = self.splits.pop(0)\n\n def nextGroup(self, i, c):\n count = 0\n last_group = self.cur_split\n\n while c >= self.cur_split:\n count += 1\n last_group = self.cur_split\n if self.splits:\n self.cur_split = self.splits.pop(0)\n else:\n self.cur_split = float(\"inf\")\n\n return count, last_group\n\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n split_fiber = deepcopy(self)\n update_lambda = lambda p: p.splitNonUniform(splits, partitions, relativeCoords)\n split_fiber.updatePayloads(update_lambda, depth=depth-1)\n return split_fiber\n\n splitter = _SplitterNonUniform(splits)\n\n return self._splitGeneric(splitter,\n partitions,\n relativeCoords=relativeCoords)\n\n\n def splitEqual(self, step, partitions=1, relativeCoords=False, depth=0, rankid=None):\n \"\"\"Split a fiber equally in postion space\n\n Parameters\n ----------\n step: integer\n The `step` in number of elements in each split\n\n relative_coords: Bool\n Should the coordinate in the split fibers match the\n original coordinates (`relativeCoords`=False) or always\n start at zero (`relativeCoords`=True)\n\n depth: integer, default=0\n The depth in the fibertree to perform the split\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n -------\n split_fiber: Fiber\n A fibertree with one more level corresonding to the\n splits of the original fiber\n\n Notes:\n -------\n References to pieces of the original tensor may be returned as\n pieces of the returned tensor.\n\n \"\"\"\n\n class _SplitterEqual():\n\n def __init__(self, step):\n self.step = step\n self.cur_count = 0\n\n def nextGroup(self, i, c):\n count = 0\n\n while i >= self.cur_count:\n count += 1\n self.cur_count += self.step\n\n return count, c\n\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n split_fiber = deepcopy(self)\n update_lambda = lambda p: p.splitEqual(step, partitions, relativeCoords)\n split_fiber.updatePayloads(update_lambda, depth=depth-1)\n return split_fiber\n\n splitter = _SplitterEqual(step)\n\n return self._splitGeneric(splitter,\n partitions,\n relativeCoords=relativeCoords)\n\n\n def splitUnEqual(self, sizes, partitions=1, relativeCoords=False, depth=0, rankid=None):\n \"\"\"Split a fiber unequally in postion space\n\n Split a fiber by the sizes in `sizes`.\n\n Parameters\n ----------\n sizes: list of 
integers\n The `sizes` of the splits in number of elements in each split\n\n relative_coords: Bool\n Should the coordinate in the split fibers match the\n original coordinates (`relativeCoords`=False) or always\n start at zero (`relativeCoords`=True)\n\n depth: integer, default=0\n The depth in the fibertree to perform the split\n\n rankid: string, default=None\n The name of a rank, i.e., a rankid, at which to perform\n the split, overrides the `depth` argument.\n\n Returns\n -------\n split_fiber: Fiber\n A fibertree with one more level corresonding to the\n splits of the original fiber\n\n Notes\n ------\n If there are more coordinates than the sum of the `sizes` all\n remaining coordinates are put into the final split.\n\n References to pieces of the original tensor may be returned as\n pieces of the returned tensor.\n\n \"\"\"\n\n class _SplitterUnEqual():\n\n def __init__(self, sizes):\n self.sizes = sizes.copy()\n self.cur_count = -1\n\n def nextGroup(self, i, c):\n count = 0\n\n while i > self.cur_count:\n count += 1\n if self.sizes:\n self.cur_count += self.sizes.pop(0)\n else:\n self.cur_count = float(\"inf\")\n\n return count, c\n\n if rankid is not None:\n depth = self._rankid2depth(rankid)\n\n if depth > 0:\n split_fiber = deepcopy(self)\n update_lambda = lambda p: p.splitUnEqual(sizes, partitions, relativeCoords)\n split_fiber.updatePayloads(update_lambda, depth=depth-1)\n return split_fiber\n\n splitter = _SplitterUnEqual(sizes)\n\n return self._splitGeneric(splitter,\n partitions,\n relativeCoords=relativeCoords)\n\n\n def _rankid2depth(self, rankid):\n \"\"\"_rankid2depth\n\n Finds the depth of a given rankid\n \"\"\"\n\n owner = self.getOwner()\n\n assert owner is not None, \"Rankids exist only for fibers in a tensor\"\n\n rankids = owner.getRankIds()\n\n return rankids.index(rankid)\n\n\n\n def _splitGeneric(self, splitter, partitions, relativeCoords):\n \"\"\"_splitGeneric\n\n Takes the current fiber and splits it according to the\n boundaries defined by splitter(). 
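To make the difference between coordinate-space and position-space splitting concrete, here is a brief sketch (import path assumed; the printed forms follow the `__truediv__`/`__floordiv__` examples above):

from fibertree import Fiber

f = Fiber.fromUncompressed([1, 2, 3, 0, 0, 6])   # coords [0, 1, 2, 5]

f.splitUniform(3)   # coordinate space: F[0 -> F[0->1, 1->2, 2->3], 3 -> F[5->6]]
f.splitEqual(2)     # position space:   F[0 -> F[0->1, 1->2], 2 -> F[2->3, 5->6]]

f / 2               # equivalent to f.splitUniform(ceil(shape / 2))
f // 2              # equivalent to f.splitEqual(ceil(occupancy / 2))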
The result is a new rank\n (for paritions = 1) or two new ranks (for partitions > 1).\n\n rank2 - uppermost rank with one coordinate per partition\n only exists for partitions > 1\n rank1 - middle rank with one coordinate per split\n rank0 - lowest rank with fibers split out from the original fiber\n\n \"\"\"\n\n rank0_fiber_group = []\n rank0_fiber_coords = []\n rank0_fiber_payloads = []\n\n rank1_fiber_coords = []\n rank1_fiber_payloads = []\n\n # Create arrays for rank1 fibers per partition\n\n for i in range(partitions):\n rank1_fiber_coords.append([])\n rank1_fiber_payloads.append([])\n\n cur_coords = None\n rank1_count = -1\n\n # Split apart the fiber into groups according to \"splitter\"\n\n for i0, (c0, p0) in enumerate(zip(self.coords, self.payloads)):\n # Check if we need to start a new rank0 fiber\n count, next_rank1_coord = splitter.nextGroup(i0, c0)\n if (count > 0):\n rank1_count += count\n\n # Old style: upper rank's coordinates were a dense range\n # rank1_coord = rank1_count\n\n # New style: upper rank's coordinates are first coordinate of group\n rank1_coord = next_rank1_coord\n rank0_offset = rank1_coord\n\n rank0_fiber_group.append(rank1_coord)\n\n cur_coords = []\n rank0_fiber_coords.append(cur_coords)\n\n cur_payloads = []\n rank0_fiber_payloads.append(cur_payloads)\n\n # May not be in a group yet\n if cur_coords is not None:\n if relativeCoords:\n cur_coords.append(c0 - rank0_offset)\n else:\n cur_coords.append(c0)\n\n cur_payloads.append(p0)\n\n\n # Deal the split fibers out to the partitions\n\n partition = 0\n\n for c1, c0, p0 in zip(rank0_fiber_group,\n rank0_fiber_coords,\n rank0_fiber_payloads):\n\n rank1_fiber_coords[partition].append(c1)\n rank1_fiber_payloads[partition].append(Fiber(c0, p0))\n partition = (partition + 1) % partitions\n\n # For 1 partition don't return a extra level of Fiber\n\n if partitions == 1:\n fiber = self._newFiber(rank1_fiber_coords[0], rank1_fiber_payloads[0])\n fiber._setDefault(Fiber())\n return fiber\n\n # For >1 partitions return a Fiber with a payload for each partition\n\n payloads = []\n\n for c1, p1 in zip(rank1_fiber_coords, rank1_fiber_payloads):\n payload = self._newFiber(c1, p1)\n payloads.append(payload)\n\n return self._newFiber(payloads=payloads)\n\n#\n# Operation methods\n#\n\n def concat(self, other):\n \"\"\" concat\n\n Concatenate two fibers\n\n TBD: Make sure coordinates are monitonically increasing\n\n \"\"\"\n\n assert Payload.contains(other, Fiber), \\\n \"Fiber concatenation must involve two fibers\"\n\n #\n # TBD: Set default for Fiber\n #\n return self._newFiber(coords=self.coords + other.coords,\n payloads=self.payloads + other.payloads)\n\n\n\n#\n# Aggretated intersection/union methods\n#\n\n @staticmethod\n def intersection(*args):\n \"\"\"Intersect a set of fibers.\n\n Create a new fiber containing all the coordinates that are\n common to **all** the fibers in `args` and for each of those\n coordinates create a payload that is the combination of the\n payloads of all the input fibers. 
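For example, the sketch below contrasts the two-operand `&` operator with this aggregated form (import path assumed; the payload shapes follow the description above):

from fibertree import Fiber

a = Fiber([0, 2, 4], [1, 2, 3])
b = Fiber([2, 4, 6], [4, 5, 6])
c = Fiber([0, 2, 8], [7, 8, 9])

a & b                        # pair payloads: coord 2 -> (2, 4), coord 4 -> (3, 5)
Fiber.intersection(a, b, c)  # one flat tuple per surviving coord: coord 2 -> (2, 4, 8)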
Note, however, unlike a\n sequence of two-operand intersections (see Fiber.__and__()`)\n the payloads are combined together in one long `tuple`.\n\n Parameters\n ----------\n\n args: list of Fibers\n The set of fibers to intersect\n\n Returns\n -------\n\n result: Fiber\n A fiber containing the intersection of all the input fibers.\n\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n nested_result = args[0] & args[1]\n\n for arg in args[2:]:\n nested_result = nested_result & arg\n\n c_result = []\n p_result = []\n\n for c_nested, p_nested in nested_result:\n #\n # Add coordinate\n #\n c_result.append(c_nested)\n\n #\n # Get out payload value\n #\n p_nested = Payload.get(p_nested)\n\n #\n # Create flattened payload\n #\n p = ()\n while isinstance(p_nested, tuple):\n p = (p_nested[1],) + p\n p_nested = p_nested[0]\n p_nested = Payload.get(p_nested)\n\n\n p = (Payload.maybe_box(p_nested),) + p\n\n p_result.append(Payload(p))\n\n result = Fiber(coords=c_result, payloads=p_result)\n result._setDefault(tuple([arg.getDefault() for arg in args]))\n\n return result\n\n @staticmethod\n def union(*args):\n \"\"\"Union a set of fibers.\n\n Create a new fiber containing the coordinates that exist in\n **any** of the fibers in `args` and for each of those\n coordinates create a payload that is the combination of the\n payloads of all the input fibers. Note, however, unlike a\n sequence of two-operand unions (see `Fiber.__or__()`) the\n payloads are combined together in one long `tuple` with a mask\n at the begining indicating all the fibers that had a non-empty\n payload at that coordinate.\n\n Parameters\n ----------\n\n args: list of Fibers\n The set of fibers to union\n\n Returns\n -------\n\n result: Fiber\n A fiber containing the union of all the input fibers.\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n nested_result = args[0] | args[1]\n\n for arg in args[2:]:\n nested_result = nested_result | arg\n\n c_result = []\n p_result = []\n\n for c_nested, p_nested in nested_result:\n #\n # Add coordinate\n #\n c_result.append(c_nested)\n\n #\n # Get out payload value\n #\n p_nested = p_nested.value\n\n #\n # Create flattened payload\n #\n p = ()\n mask=\"\"\n mask_num = ord(\"A\")+len(args)\n\n while isinstance(p_nested, tuple):\n mask_num -= 1\n ab_mask = p_nested[0]\n\n if \"B\" in ab_mask:\n mask = chr(mask_num) + mask\n\n p = (p_nested[2],) + p\n p_nested = Payload.get(p_nested[1])\n\n if \"A\" in ab_mask:\n mask = \"A\" + mask\n\n p = (mask, Payload.maybe_box(p_nested),) + p\n\n p_result.append(Payload(p))\n\n result = Fiber(coords=c_result, payloads=p_result)\n result._setDefault(tuple([\"\"]+[arg.getDefault() for arg in args]))\n\n return result\n\n\n\n#\n# Merge methods\n#\n def __and__(self, other):\n \"\"\"Two-operand intersection\n\n Return the intersection of `self` and `other` by considering\n all possible coordinates and returning a fiber consisting of\n payloads containing a tuple of the payloads of the inputs for\n coordinates where the following truth table returns True:\n\n ```\n coordinate not | coordinate\n present in `other` | present in `other`\n +-----------------------+-----------------------+\n | | |\n coordinate | | |\n not present | False | False |\n in `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n | | |\n coordinate | | |\n present in | False | True |\n `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n 
```\n\n Parameters\n ----------\n other: Fiber\n A fiber to intersect with the current fiber\n\n\n Returns\n --------\n result: Fiber\n A fiber created according to the intersection rules\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n def get_next(iter):\n \"\"\"get_next\"\"\"\n\n try:\n coord, payload = next(iter)\n except StopIteration:\n return (None, None)\n\n return CoordPayload(coord, payload)\n\n\n def get_next_nonempty(iter):\n \"\"\"get_next_nonempty\"\"\"\n\n (coord, payload) = get_next(iter)\n\n while Payload.isEmpty(payload):\n (coord, payload) = get_next(iter)\n\n return CoordPayload(coord, payload)\n\n assert self._ordered and self._unique\n\n a_fiber = self\n b_fiber = other\n\n a = self.__iter__()\n b = other.__iter__()\n\n z_coords = []\n z_payloads = []\n\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n\n while not (a_coord is None or b_coord is None):\n if a_coord == b_coord:\n z_coords.append(a_coord)\n z_payloads.append((a_payload, b_payload))\n\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n if a_coord < b_coord:\n a_coord, a_payload = get_next_nonempty(a)\n continue\n\n if a_coord > b_coord:\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n result = Fiber(z_coords, z_payloads)\n result._setDefault((a_fiber.getDefault(), b_fiber.getDefault()))\n\n return result\n\n\n def __or__(self, other):\n \"\"\"__or__\n\n Return the union of `self` and `other` by considering all possible\n coordinates and returning a fiber consisting of payloads containing\n a tuple of the payloads of the inputs for coordinates where the\n following truth table returns True:\n\n\n ```\n coordinate not | coordinate\n present in `other` | present in `other`\n +-----------------------+-----------------------+\n | | |\n coordinate | | |\n not present | False | True |\n in `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n | | |\n coordinate | | |\n present in | True | True |\n `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n ```\n\n Parameters\n ----------\n other: Fiber\n A fiber to union with the current fiber\n\n\n Returns\n --------\n result: Fiber\n A fiber created according to the union rules\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n\n def get_next(iter):\n \"\"\"get_next\"\"\"\n\n try:\n coord, payload = next(iter)\n except StopIteration:\n return (None, None)\n\n return CoordPayload(coord, payload)\n\n def get_next_nonempty(iter):\n \"\"\"get_next_nonempty\"\"\"\n\n (coord, payload) = get_next(iter)\n\n while Payload.isEmpty(payload):\n (coord, payload) = get_next(iter)\n\n return CoordPayload(coord, payload)\n\n assert self._ordered and self._unique\n\n a_fiber = self\n b_fiber = other\n\n a = self.__iter__()\n b = other.__iter__()\n\n z_coords = []\n z_payloads = []\n\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n\n while not (a_coord is None or b_coord is None):\n if a_coord == b_coord:\n z_coords.append(a_coord)\n\n z_payloads.append((\"AB\", a_payload, b_payload))\n\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n if a_coord < b_coord:\n z_coords.append(a_coord)\n\n b_default = b_fiber._createDefault()\n z_payloads.append((\"A\", a_payload, b_default))\n\n a_coord, a_payload = get_next_nonempty(a)\n continue\n\n if 
a_coord > b_coord:\n z_coords.append(b_coord)\n\n a_default = a_fiber._createDefault()\n z_payloads.append((\"B\", a_default, b_payload))\n\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n while a_coord is not None:\n z_coords.append(a_coord)\n\n b_default = b_fiber._createDefault()\n z_payloads.append((\"A\", a_payload, b_default))\n\n a_coord, a_payload = get_next_nonempty(a)\n\n while b_coord is not None:\n z_coords.append(b_coord)\n\n a_default = a_fiber._createDefault()\n z_payloads.append((\"B\", a_default, b_payload))\n\n b_coord, b_payload = get_next_nonempty(b)\n\n result = Fiber(z_coords, z_payloads)\n result._setDefault((\"\", a_fiber.getDefault(), b_fiber.getDefault()))\n\n return result\n\n\n def __xor__(self, other):\n \"\"\"__xor__\n\n Return the xor of `self` and `other` by considering all possible\n coordinates and returning a fiber consisting of payloads containing\n a tuple of the payloads of the inputs for coordinates where the\n following truth table returns True:\n\n\n ```\n coordinate not | coordinate\n present in `other` | present in `other`\n +-----------------------+-----------------------+\n | | |\n coordinate | | |\n not present | False | True |\n in `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n | | |\n coordinate | | |\n present in | True | False |\n `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n ```\n\n Parameters\n ----------\n other: Fiber\n A fiber to xor with the current fiber\n\n\n Returns\n --------\n result: Fiber\n A fiber created according to the xor rules\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n\n def get_next(iter):\n \"\"\"get_next\"\"\"\n\n try:\n coord, payload = next(iter)\n except StopIteration:\n return (None, None)\n return CoordPayload(coord, payload)\n\n def get_next_nonempty(iter):\n \"\"\"get_next_nonempty\"\"\"\n\n (coord, payload) = get_next(iter)\n\n while Payload.isEmpty(payload):\n (coord, payload) = get_next(iter)\n\n return CoordPayload(coord, payload)\n\n assert self._ordered and self._unique\n\n a_fiber = self\n b_fiber = other\n\n a = self.__iter__()\n b = other.__iter__()\n\n z_coords = []\n z_payloads = []\n\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n\n while not (a_coord is None or b_coord is None):\n if a_coord == b_coord:\n a_coord, a_payload = get_next_nonempty(a)\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n if a_coord < b_coord:\n z_coords.append(a_coord)\n\n b_default = b_fiber._createDefault()\n z_payloads.append((\"A\", a_payload, b_default))\n\n a_coord, a_payload = get_next_nonempty(a)\n continue\n\n if a_coord > b_coord:\n z_coords.append(b_coord)\n\n a_default = a_fiber._createDefault()\n z_payloads.append((\"B\", a_default, b_payload))\n\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n while a_coord is not None:\n z_coords.append(a_coord)\n\n b_default = b_fiber._createDefault()\n z_payloads.append((\"A\", a_payload, b_default))\n\n a_coord, a_payload = get_next_nonempty(a)\n\n while b_coord is not None:\n z_coords.append(b_coord)\n\n a_default = a_fiber._createDefault()\n z_payloads.append((\"B\", a_default, b_payload))\n\n b_coord, b_payload = get_next_nonempty(b)\n\n result = Fiber(z_coords, z_payloads)\n result._setDefault((\"\", a_fiber.getDefault(), b_fiber.getDefault()))\n\n return result\n\n\n\n def __lshift__(self, other):\n \"\"\"Fiber assignment\n\n Return the \"assignment\" of `other` to 
`self` by considering\n all possible coordinates and returning a fiber consisting of\n payloads containing a tuple of the payloads of the inputs for\n coordinates where the following truth table returns True:\n\n\n ```\n coordinate not | coordinate\n present in `other` | present in `other`\n +-----------------------+-----------------------+\n | | |\n coordinate | | |\n not present | False | True |\n in `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n | | |\n coordinate | | |\n present in | False | True |\n `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n ```\n\n Parameters\n ----------\n other: Fiber\n A fiber to assign into the current fiber\n\n\n Returns\n --------\n result: Fiber\n A fiber created according to the assignment rules\n\n\n Notes\n ------\n\n An explcit zero in the input will NOT generate a corresponding\n coordinate in the output!\n\n \"\"\"\n\n\n def get_next(iter):\n \"\"\"get_next\"\"\"\n\n try:\n coord, payload = next(iter)\n except StopIteration:\n return (None, None)\n\n return CoordPayload(coord, payload)\n\n def get_next_nonempty(iter):\n \"\"\"get_next_nonempty\"\"\"\n\n (coord, payload) = get_next(iter)\n\n while Payload.isEmpty(payload):\n (coord, payload) = get_next(iter)\n\n return CoordPayload(coord, payload)\n\n a_fiber = self\n b_fiber = other\n\n # \"a\" is self!\n b = other.__iter__()\n\n z_coords = []\n z_a_payloads = []\n z_b_payloads = []\n z_payloads = []\n\n b_coord, b_payload = get_next_nonempty(b)\n\n while b_coord is not None:\n z_coords.append(b_coord)\n\n # TBD: Optimize with co-iteration...\n\n a_payload = self.getPayload(b_coord, allocate=False)\n\n z_a_payloads.append(a_payload)\n z_b_payloads.append(b_payload)\n\n b_coord, b_payload = get_next_nonempty(b)\n\n #\n # Collect z_payloads allowing for repeated coordinates\n #\n for b_coord, a_payload, b_payload in zip(z_coords, z_a_payloads, z_b_payloads):\n\n if a_payload is None:\n a_payload = self._create_payload(b_coord)\n\n z_payloads.append((a_payload, b_payload))\n\n result = self._newFiber(z_coords, z_payloads)\n\n return result\n\n\n def __sub__(self, other):\n \"\"\"__sub__\n\n Return the \"difference\" of `other` from `self` by considering\n all possible coordinates and returning a fiber consisting of\n payloads containing a tuple of the payloads of the inputs for\n coordinates where the following truth table returns True:\n\n\n ```\n coordinate not | coordinate\n present in `other` | present in `other`\n +-----------------------+-----------------------+\n | | |\n coordinate | | |\n not present | False | False |\n in `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n | | |\n coordinate | | |\n present in | True | False |\n `self` | | |\n | | |\n ------------+-----------------------+-----------------------+\n ```\n\n Parameters\n ----------\n other: Fiber\n A fiber to subtract from the current fiber\n\n\n Returns\n --------\n result: Fiber\n A fiber created according to the subtraction rules\n\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n\n def get_next(iter):\n \"\"\"get_next\"\"\"\n\n try:\n coord, payload = next(iter)\n except StopIteration:\n return (None, None)\n return CoordPayload(coord, payload)\n\n def get_next_nonempty(iter):\n \"\"\"get_next_nonempty\"\"\"\n\n (coord, payload) = get_next(iter)\n\n while Payload.isEmpty(payload):\n (coord, payload) = get_next(iter)\n\n return CoordPayload(coord, payload)\n\n assert 
self._ordered and self._unique\n\n a_fiber = self\n b_fiber = other\n\n a = self.__iter__()\n b = other.__iter__()\n\n z_coords = []\n z_payloads = []\n\n a_coord, a_payload = get_next(a)\n b_coord, b_payload = get_next_nonempty(b)\n\n while not (a_coord is None or b_coord is None):\n if a_coord == b_coord:\n a_coord, a_payload = get_next(a)\n b_coord, b_payload = get_next_nonempty(b)\n continue\n\n if a_coord < b_coord:\n z_coords.append(a_coord)\n z_payloads.append(a_payload)\n\n a_coord, a_payload = get_next(a)\n continue\n\n if a_coord > b_coord:\n b_coord, b_payload = get_next(b)\n continue\n\n while a_coord is not None:\n z_coords.append(a_coord)\n z_payloads.append(a_payload)\n\n a_coord, a_payload = get_next(a)\n\n result = Fiber(z_coords, z_payloads)\n result._setDefault(a_fiber.getDefault())\n\n return result\n\n\n#\n# Multilayer methods\n#\n# Note: all these methods return a new fiber\n#\n def swapRanks(self):\n \"\"\"Swap the (highest) two ranks of the fiber.\n\n By swapping two ranks this method effects the equivalent of\n merging those two ranks.\n\n Returns\n -------\n\n result: Fiber\n The result of swapping the top two ranks of the fiber\n\n Notes\n -----\n\n This function relies on flattenRanks() and unflattenRanks().\n FIXME: flattenRanks() could be more general to support all p1 types,\n including tuples.\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n assert self._ordered and self._unique\n\n #\n # Flatten the (highest) two ranks\n #\n flattened = self.flattenRanks(style=\"pair\")\n\n # Make sure that the flattened fiber has at least one coordinate\n assert(len(flattened.coords) > 0)\n\n # Make sure the coord is a >=2-element tuple\n assert(len(flattened.coords[0]) >= 2)\n\n #\n # Sort on secord coordinate of flattened fiber\n # and create new sorted fiber\n #\n sorted_cp = sorted([(c[::-1], p) for c, p in flattened])\n\n coords = [c for c, _ in sorted_cp]\n payloads = [p for _, p in sorted_cp]\n\n flattened_sorted = Fiber(coords, payloads)\n\n #\n # Unflatten to get original coordinates in swapped ranks\n #\n swapped = flattened_sorted.unflattenRanks()\n\n #\n # TBD: set default\n #\n return swapped\n\n\n def flattenRanks(self, levels=1, style=\"tuple\"):\n \"\"\"Flatten two ranks into one - COO-style\n\n Takes `levels` ranks and **flattens** them into a single\n rank.The coordinates of the combined rank can be created with\n a specified `style`:\n\n - tuple - flattened tuple of coordinates from all flattend ranks\n - pair - nested tuples of coordinates from all flattend ranks\n - absolute - the coordinate of the lowest rank\n - relative - the sum of the corrdinate of the flattened ranks\n\n Parameters\n ----------\n\n levels: integer\n Number of levels to flatten into the top level\n\n style: One of ['tuple', 'pair', 'absolute', 'relative'], default='tuple'\n\n\n Returns\n -------\n\n result: Fiber\n Fiber with `level` ranks flattened into the current rank\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n assert self._ordered and self._unique\n\n #\n # Flatten deeper levels first, if requested\n #\n if levels == 1:\n cur_payloads = self.payloads\n else:\n assert Payload.contains(self.payloads[0], Fiber), \\\n \"Insuffient levels to flatten\"\n\n cur_payloads = []\n\n for p in self.payloads:\n cur_payloads.append(p.flattenRanks(levels=levels - 1, style=style))\n\n #\n # Flatten this level\n #\n coords = []\n payloads = []\n\n for c1, p1 in zip(self.coords, cur_payloads):\n\n 
if Payload.contains(p1, Fiber):\n #\n # Handle case where payload of next rank is a Fiber\n #\n p1 = Payload.get(p1)\n\n for c0, p0 in p1:\n coords.append(self._flattenCoords(c1,\n c0,\n style=style))\n payloads.append(p0)\n\n elif Payload.contains(p1, tuple):\n #\n # Handle case where payload is a tuple. In this case,\n # look for the first fiber in the \"p1\" tuple and use\n # that fiber to flatten the ranks. The payloads of the\n # new flattened rank is a new tuple containing a\n # payload from that fiber and the the other values in\n # the \"p1\" tuple.\n #\n # Note: unflattening the fiber tree created in this\n # scenario will not recreate the original pre-flattened\n # fiber tree\n #\n p1 = Payload.get(p1)\n\n #\n # Find the fiber..\n #\n p1_fiber = None\n\n for n, p1_p in enumerate(p1):\n if Payload.contains(p1_p, Fiber):\n p1_fiber = p1_p\n\n if len(p1) == 2:\n # value is other element of tuple\n p1_value = p1[(n + 1) % 2]\n else:\n # value is tuple without fiber\n p1_value = p1[:n] + p1[n + 1:]\n\n break\n\n if p1_fiber is None:\n raise PayloadError\n\n #\n # Create the flattened fiber\n #\n for c0, p0 in p1_fiber:\n coords.append(self._flattenCoords(c1,\n c0,\n style=style))\n\n payloads.append((p0, p1_value))\n else:\n #\n # I don't know how to handle this payload\n #\n raise PayloadError\n #\n # TBD: set default\n #\n return Fiber(coords, payloads)\n\n @staticmethod\n def _flattenCoords(c1, c0, style=\"nested\"):\n \"\"\"_flattenCoords\"\"\"\n\n if style == \"tuple\":\n #\n # Combine coordinates into a single flat tuple, flattening\n # contents of the individual coordinates that are tuples\n #\n # Convert c1 to tuple, if necessary\n #\n if not isinstance(c1, tuple):\n c1 = (c1, )\n\n #\n # Convert c0 to tuple, if necessary\n #\n if not isinstance(c0, tuple):\n c0 = (c0,)\n\n #\n # Concatenate the two coordinates\n #\n c1_c0 = c1 + c0\n elif style == \"pair\":\n #\n # Create a new coordinate as a two element tuple\n #\n c1_c0 = (c1, c0)\n elif style == \"absolute\":\n c1_c0 = c0\n elif style == \"relative\":\n c1_c0 = c1 + c0\n else:\n assert False, \\\n f\"Supported coordinate styles are: tuple, pair, absolute, relative. 
Got: {style}\"\n\n return c1_c0\n\n\n def unflattenRanks(self, levels=1):\n \"\"\"Unflatten a ranks into two or more ranks\n\n Takes a single level of a fiber and expands extract out\n `levels` more levels.\n\n Parameters\n ----------\n\n levels: integer\n The number of extra levels to create\n\n\n Returns\n -------\n\n result: Fiber\n A fiber with `levels`' levels unflatten from the top level\n\n Notes\n -----\n\n This method may not produce the correct result if the original\n fiber was flattened with either the 'relative' or 'absolute'\n styles.\n\n Note\n ----\n\n Currently only supported for \"ordered\", \"unique\" fibers.\n\n \"\"\"\n\n assert isinstance(self.coords[0], tuple)\n assert self._ordered and self._unique\n\n #\n # Place to collect cordinates/payloads for new top rank\n #\n coords1 = []\n payloads1 = []\n\n first = True\n\n #\n # Traverse the rank being unflattened in order to collect all\n # the coordinates/payloads to put in the final top (c1) rank\n #\n for cx, p0 in zip(self.coords, self.payloads):\n #\n # Little dance to get the coordinates of the two ranks,\n # which also deals with the case where the coordinates\n # were not nested.\n #\n c1 = cx[0]\n if len(cx) > 2:\n c0 = cx[1:]\n else:\n c0 = cx[1]\n\n #\n # Check if we finished all the elements of the lower (c0)\n # rank and have moved on to a new coordinate in the higher\n # (c1) rank. If so add the collected c0 coordinates and\n # payloads as a fiber to the c1 rank, and start a new\n # collection of coordinates and fibers.\n #\n if first or (c1 > c1_last):\n if not first:\n #\n # Except when starting to work on the first c1\n # coordinate, add a new coordinate/payload pair\n # (maybe after a recursive unflattening) to the\n # new top (c1) rank\n #\n coords1.append(c1_last)\n\n cur_fiber = Fiber(coords0, payloads0)\n if levels > 1:\n cur_fiber = cur_fiber.unflattenRanks(levels=levels - 1)\n\n payloads1.append(cur_fiber)\n\n #\n # Start working on a new c1 coordinate (c1_last) and\n # create a place to collect lower rank (c0)\n # coordinates/payloads to form the payload (fiber) of\n # that c1 coordinate.\n #\n first = False\n c1_last = c1\n coords0 = []\n payloads0 = []\n\n coords0.append(c0)\n payloads0.append(p0)\n\n #\n # Pick up the last element of the new top rank\n #\n coords1.append(c1_last)\n\n cur_fiber = Fiber(coords0, payloads0)\n if levels > 1:\n cur_fiber = cur_fiber.unflattenRanks(levels=levels - 1)\n\n payloads1.append(cur_fiber)\n\n #\n # Create (and return) the new top (c1) rank\n #\n #\n # TBD: set default\n #\n return self._newFiber(coords1, payloads1)\n\n#\n# Closures to operate on all payloads at a specified depth\n#\n# Note: all these methods mutate the fibers\n#\n# TBD: Reimpliment with Guowei's cleaner Python closure/wrapper\n#\n\n def updatePayloadsBelow(self, func, *args, depth=0, **kwargs):\n \"\"\"updatePayloadsBelow\n\n Utility function used as a closure on updatePayloads() to\n change all the payloads in fibers at `depth` in the tree by\n applying `func` with parameters *args and **kwargs to the\n payloads.\n\n \"\"\"\n\n update_lambda = lambda p: func(p, *args, **kwargs)\n return self.updatePayloads(update_lambda, depth=depth)\n\n\n splitUniformBelow = partialmethod(updatePayloadsBelow,\n splitUniform)\n\n splitNonUniformBelow = partialmethod(updatePayloadsBelow,\n splitNonUniform)\n\n splitEqualBelow = partialmethod(updatePayloadsBelow,\n splitEqual)\n\n splitUnEqualBelow = partialmethod(updatePayloadsBelow,\n splitUnEqual)\n\n swapRanksBelow = partialmethod(updatePayloadsBelow,\n 
swapRanks)\n\n flattenRanksBelow = partialmethod(updatePayloadsBelow,\n flattenRanks)\n\n unflattenRanksBelow = partialmethod(updatePayloadsBelow,\n unflattenRanks)\n\n\n#\n# Comparison operations\n#\n\n def __eq__(self, other):\n \"\"\"__eq__ - Equality check for Fibers\n\n Note: explict zeros do not result in inequality\n \"\"\"\n\n if not isinstance(other, Fiber):\n return False\n\n for c, (mask, ps, po) in self | other:\n if mask == \"A\" and not Payload.isEmpty(ps):\n return False\n\n if mask == \"B\" and not Payload.isEmpty(po):\n return False\n\n if mask == \"AB\" and not (ps == po):\n return False\n\n return True\n\n#\n# String methods\n#\n def print(self, title=None):\n \"\"\"Print a fiber\"\"\"\n\n if title is not None:\n print(\"%s\" % title)\n\n print(\"%s\" % self)\n print(\"\")\n\n def __format__(self, format):\n \"\"\"__format__\n\n Format a fiber\n\n Spec:\n\n [(<coord spec>,<scalar spec>)][n][*]\n\n where:\n \"n\" means add newlines\n \"*\" means do not truncate with elipsis\n\n \"\"\"\n import re\n\n kwargs = {}\n\n regex0 = r'(\\(.*,.*\\))?(n)?(\\*)?'\n match0 = re.search(regex0, format)\n group1 = match0.group(1)\n\n if group1 is not None:\n regex1 = r'\\((.*),(.*)\\)'\n match1 = re.search(regex1, group1)\n kwargs['coord_fmt'] = match1.group(1)\n kwargs['payload_fmt'] = match1.group(2)\n\n if match0.group(2) == 'n':\n kwargs['newline'] = True\n\n if match0.group(3) == '*':\n kwargs['cutoff'] = 10000\n\n return self.__str__(**kwargs)\n\n\n def __str__(self,\n coord_fmt=\"d\",\n payload_fmt=\"d\",\n newline=False,\n cutoff=2,\n indent=0):\n \"\"\"__str__\"\"\"\n\n def format_coord(coord):\n \"\"\"Return \"coord\" properly formatted with \"coord_fmt\" \"\"\"\n\n if not isinstance(coord, tuple):\n return f\"{coord:{coord_fmt}}\"\n\n return '(' + ', '.join(format_coord(c) for c in coord) + ')'\n\n\n def format_payload(payload):\n \"\"\"Return \"payload\" properly formatted with \"payload_fmt\" \"\"\"\n\n try:\n result = f\"{payload:{payload_fmt}}\"\n except Exception:\n result = f\"{payload}\"\n\n return result\n\n\n def cond_string(string):\n \"\"\"Return \"string\" if newline is True\"\"\"\n\n if newline:\n return string\n\n return ''\n\n str = ''\n\n if self._owner is None:\n str += \"F/[\"\n else:\n str += f\"F({self._owner.getId()})/[\"\n\n coord_indent = 0\n next_indent = 0\n items = len(self.coords)\n\n if self.payloads and Payload.contains(self.payloads[0], Fiber):\n\n for (c, p) in zip(self.coords[0:cutoff], self.payloads[0:cutoff]):\n if coord_indent == 0:\n coord_indent = indent + len(str)\n str += f\"( {format_coord(c)} -> \"\n if newline:\n next_indent = indent + len(str)\n else:\n str += cond_string('\\n' + coord_indent * ' ')\n str += f\"( {format_coord(c)} -> \"\n\n str += p.__str__(coord_fmt=coord_fmt,\n payload_fmt=payload_fmt,\n newline=newline,\n cutoff=cutoff,\n indent=next_indent)\n str += ')'\n\n if items > cutoff:\n str += cond_string('\\n')\n str += next_indent * ' ' + \"...\"\n str += cond_string('\\n')\n str += next_indent * ' ' + \"...\"\n\n return str\n\n if newline:\n next_indent = indent + len(str)\n\n for i in range(min(items, cutoff)):\n if coord_indent != 0:\n str += cond_string('\\n')\n\n str += cond_string(coord_indent * ' ')\n str += f\"({format_coord(self.coords[i])} -> \"\n str += f\"{format_payload(self.payloads[i])}) \"\n coord_indent = next_indent\n\n if items > cutoff:\n str += cond_string('\\n' + next_indent * ' ')\n str += \" ... \"\n str += cond_string('\\n' + next_indent * ' ')\n str += \" ... 
\"\n\n str += \"]\"\n return str\n\n def __repr__(self):\n \"\"\"__repr__\"\"\"\n\n # TBD: Owner is not properly reflected in representation\n\n payloads = [Payload.get(r) for r in self.payloads]\n str = f\"Fiber({self.coords!r}, {payloads!r}\"\n\n if self._owner:\n str += f\", owner={self._owner.getId()}\"\n\n str += \")\"\n\n return str\n\n#\n# Yaml input/output methods\n#\n\n @staticmethod\n def parse(yamlfile, default):\n \"\"\"Parse a yaml file containing a tensor\"\"\"\n\n with open(yamlfile, 'r') as stream:\n try:\n y_file = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n exit(1)\n\n #\n # Make sure key \"fiber\" exists\n #\n if not isinstance(y_file, dict) or 'fiber' not in y_file:\n print(\"Yaml is not a fiber\")\n exit(1)\n\n newfiber = Fiber.dict2fiber(y_file)\n\n return (newfiber.getCoords(), newfiber.getPayloads())\n\n\n\n def dump(self, yamlfile):\n \"\"\"Dump a tensor to a file in YAML format\"\"\"\n\n fiber_dict = self.fiber2dict()\n\n with open(yamlfile, 'w') as stream:\n yaml.dump(fiber_dict, stream)\n\n#\n# Conversion methods - to/from dictionaries\n#\n\n @staticmethod\n def dict2fiber(y_payload_dict, level=0):\n \"\"\"Parse a yaml-based tensor payload, creating Fibers as appropriate\"\"\"\n\n if isinstance(y_payload_dict, dict) and 'fiber' in y_payload_dict:\n # Got a fiber, so need to get into the Fiber class\n\n y_fiber = y_payload_dict['fiber']\n\n #\n # Error checking\n #\n if not isinstance(y_fiber, dict):\n print(\"Malformed payload\")\n exit(0)\n\n if 'coords' not in y_fiber:\n print(\"Malformed fiber\")\n exit(0)\n\n if 'payloads' not in y_fiber:\n print(\"Malformed fiber\")\n exit(0)\n\n #\n # Process corrdinates and payloads\n #\n f_coords = y_fiber['coords']\n y_f_payloads = y_fiber['payloads']\n\n f_payloads = []\n for y_f_payload in y_f_payloads:\n f_payloads.append(Fiber.dict2fiber(y_f_payload, level + 1))\n\n #\n # Turn into a fiber\n #\n subtree = Fiber(coords=f_coords, payloads=f_payloads)\n else:\n # Got scalars, so format is unchanged\n subtree = y_payload_dict\n\n return subtree\n\n\n\n def fiber2dict(self):\n \"\"\"Return dictionary with fiber information\"\"\"\n\n f = {'fiber':\n {'coords': self.coords,\n 'payloads': [Payload.payload2dict(p) for p in self.payloads]}}\n\n return f\n\n#\n# Utility functions\n#\n def _newFiber(self,\n coords=None,\n payloads=None,\n default=None,\n shape=None,\n initial=None,\n max_coord=None,\n ordered=None,\n unique=None):\n \"\"\"Create a new fiber carrying over attributes from `self`\n\n Note: Input parameters must be kept in sync with `__init__`,\n and certain input parameters that come in as `None` take their\n values from `self`.\n\n \"\"\"\n\n if ordered is None:\n ordered = self._ordered\n\n if unique is None:\n unique = self._unique\n\n if default is None:\n default = self._default\n\n if shape is None:\n default = self._shape\n\n return Fiber(coords=coords,\n payloads=payloads,\n default=default,\n shape=shape,\n initial=initial,\n max_coord=max_coord,\n ordered=ordered,\n unique=unique)\n\n\n def _checkOrdered(self):\n \"\"\" Check that coordinates satisfy the \"ordered\" attribute \"\"\"\n\n if not self._ordered or len(self.coords) == 0:\n return True\n\n coords = self.coords\n\n last = coords[0]\n\n for c in coords[1:]:\n if c <= last:\n assert False, \"Illegal non-monotonic coordinate\"\n\n last = c\n\n\n def _checkUnique(self):\n \"\"\" Check that coordinates satisfy the \"unique\" attribute \"\"\"\n\n if not self._unique:\n return True\n\n if self._ordered:\n coords = 
self.coords\n else:\n coords = sorted(self.coords)\n\n last = None\n\n for c in coords:\n if c == last:\n assert False, \"Illegal repeated coordinate\"\n\n last = c\n\n\n @staticmethod\n def _deprecated(message):\n import warnings\n\n warnings.warn(message, FutureWarning, stacklevel=3)\n\n\n#\n# Pdoc stuff\n#\n__pdoc__ = {'Fiber.dict2fiber': False,\n 'Fiber.fiber2dict': False,\n 'Fiber.parse': False,\n 'Fiber.__setitem__': True,\n 'Fiber.__getitem__': True,\n 'Fiber.__ilshift__': True,\n 'Fiber.__truediv__': True,\n 'Fiber.__floordiv__': True,\n 'Fiber.__add__': True,\n 'Fiber.__radd__': True,\n 'Fiber.__iadd__': True,\n 'Fiber.__mul__': True,\n 'Fiber.__rmul__': True,\n 'Fiber.__imul__': True,\n 'Fiber.__and__': True,\n 'Fiber.__or__': True,\n 'Fiber.__xor__': True,\n 'Fiber.__lshift__': True,\n 'Fiber.__sub__': True,\n }\n\n\nif __name__ == \"__main__\":\n\n a = Fiber([2, 4, 6], [3, 5, 7])\n\n print(\"Simple print\")\n a.print()\n print(\"----\\n\\n\")\n", "id": "11881490", "language": "Python", "matching_score": 8.001556396484375, "max_stars_count": 2, "path": "fibertree/core/fiber.py" }, { "content": "\"\"\"Tensor\n\nA class used to implement the a tensor based on the **fibertree**\nabstraction for representing tensors.\n\n\"\"\"\nimport logging\n\nimport copy\nimport yaml\nfrom copy import deepcopy\n\nfrom .rank import Rank\nfrom .fiber import Fiber\nfrom .payload import Payload\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.core.tensor')\n\n\nclass Tensor:\n \"\"\"Tensor Class\n\n The Tensor class is a foundational class in this system and is\n used to model a tensor using a largely format-agnostic\n representation of the tensor. More specifically, this class uses a\n tree structure of fibers (called a **fibertree**) to represent the\n ranks of a tensor. More details on this representation can be\n found in sections 8.2 and 8.3 of the book \"Efficient Processing of\n Deep Neural Networks\" [1].\n\n Attributes\n ----------\n\n The principal attributes of a tensor are:\n\n - **name**: A name of the tensor.\n\n - **color**: A color to use to represent the tensor when it is\n drawn.\n\n - **root**: The root of a tensor is a reference to the top fiber\n of the fibertree comprising the structure of the tensor. The\n fibertree is implemented using the `Fiber` class.\n\n - **ranks**: The tensor contains a list of ranks, each of which\n contains all of the fibers at each level of the tensor's\n fibertree. The list contains instances of the `Rank` class,\n and each rank has a **rank id**, a **shape**, a **default\n value** and a **next_rank**. 
The **next rank** field is used\n to create a linked list of ranks in the tensor.\n\n - **rank ids**: The rank ids of a tensor is a list of the names\n (or rank ids) of each rank of the tensor.\n\n - **shape**: The shape of a tensor is a list of the shapes of the\n fibers in each rank.\n\n - **default value**: The default value of a tensor is the default\n value of payloads of the fibers in the leaf rank of the\n tensor.\n\n\n See the `Rank` and `Fiber` classes for more details on the\n attributes associated with those classes.\n\n\n Constructor\n -----------\n\n The main tensor constructor should be used to create an empty\n tensor, which has the given `rank_ids` and optionally the given\n `shape`, `name` and `color`.\n\n Parameters\n -----------\n\n rank_ids: list of strings\n List containing names of ranks.\n\n shape: list of integers, default=None\n A list of shapes of the ranks\n\n default: value, default=0\n A default value for elements in the leaf rank\n\n name: string, default=\"\"\n A name for the tensor\n\n color: string, default=\"red\"\n The color to paint values when displaying the tensor\n\n\n Notes\n -----\n\n For historical reasons, this constructor tries to get a Tensor\n from the specified \"yamlfile\", which is the first argument, so\n existing code uses it with the \"yamlfile\" keyword. That usage\n is deprecated in favor of using Tensor.fromYAMLfile().\n\n Bibliography\n ------------\n\n [1] \"[Efficient Processing of Deep Neural Networks](http://www.morganclaypoolpublishers.com/catalog_Orig/product_info.php?products_id=1530)\",\n <NAME>, <NAME>, <NAME>, and <NAME>,\n Synthesis Lectures on Computer Architecture,\n June 2020, 15:2, 1-341.\n\n \"\"\"\n\n def __init__(self,\n yamlfile=\"\",\n rank_ids=None,\n shape=None,\n default=0,\n name=\"\",\n color=\"red\"):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.tensor')\n\n self.yamlfile = yamlfile\n\n # TBD: Encourage use of Tensor.fromYAMLfile instead...\n\n if (yamlfile != \"\"):\n assert(rank_ids is None and shape is None)\n\n (rank_ids, root, shape, name) = self.parse(yamlfile)\n\n if shape is None:\n shape = root.estimateShape()\n\n self.setRankInfo(rank_ids, shape, default)\n self.setRoot(root)\n self.setName(name)\n self.setColor(color)\n self.setMutable(False)\n return\n\n #\n # Initialize an empty tensor with an empty root fiber\n #\n assert(rank_ids is not None)\n\n self.setRankInfo(rank_ids, shape, default)\n self.setName(name)\n self.setColor(color)\n self.setMutable(True)\n\n if rank_ids == []:\n # Create a rank zero tensor, i.e., just a payload\n\n self._root = Payload(0)\n return\n\n root_fiber = Fiber()\n self.setRoot(root_fiber)\n\n\n @classmethod\n def fromYAMLfile(cls, yamlfile):\n \"\"\"Construct a tensor from a YAML file\n\n This constructor creates a Tensor from the specified\n `yamlfile`.\n\n Parameters\n -----------\n\n yamlfile: string\n Filename of file containing a YAML representation of a tensor\n\n\n Todo\n ----\n\n YAML file does not provide a non-zero default value\n\n \"\"\"\n (rank_ids, root, shape, name) = Tensor.parse(yamlfile)\n\n if not isinstance(root, Fiber):\n t = Tensor(rank_ids=[], shape=shape, name=name)\n t.setMutable(False)\n t._root = Payload(root)\n return t\n\n return Tensor.fromFiber(rank_ids, root, shape=shape)\n\n\n @classmethod\n def fromUncompressed(cls,\n rank_ids=None,\n root=None,\n shape=None,\n name=\"\",\n color=\"red\"):\n \"\"\"Construct a Tensor from uncompressed nest of lists\n\n Parameters\n 
----------\n\n rank_ids: list, default=[\"Rn\", \"Rn-1\", ... \"R0\"]\n List containing names of ranks.\n\n root: list of lists\n A list of lists with an uncompressed represenation of the\n tensor, zero values are assumed empty.\n\n shape: list, default=(calculated from shape of \"root\")\n A list of shapes of the ranks\n\n name: string, default=\"\"\n A name for the tensor\n\n color: string, default=\"red\"\n The color to paint values when displaying the tensor\n\n \"\"\"\n\n assert(root is not None)\n\n if not isinstance(root, list):\n # Handle a rank zero tensor\n t = Tensor(rank_ids=[], shape=[])\n t._root = Payload(root)\n return t\n\n assert(rank_ids is not None)\n\n fiber = Fiber.fromUncompressed(root)\n\n if shape is None:\n # TBD: Maybe this is not needed because fibers get a max_coord...\n shape = Tensor._calc_shape(root)\n\n return Tensor.fromFiber(rank_ids,\n fiber,\n shape=shape,\n name=name,\n color=color)\n\n\n @staticmethod\n def _calc_shape(ll):\n \"\"\"_calc_shape\"\"\"\n\n shape = [len(ll)]\n\n if not isinstance(ll[0], list):\n return shape\n\n if len(ll) == 1:\n shape.extend(Tensor._calc_shape(ll[0]))\n return shape\n\n ll0 = Tensor._calc_shape(ll[0])\n ll1 = Tensor._calc_shape(ll[1:])[1:]\n rest = [max(a, b) for a, b in zip(ll0, ll1)]\n shape.extend(rest)\n\n return shape\n\n @classmethod\n def fromFiber(cls,\n rank_ids=None,\n fiber=None,\n shape=None,\n name=\"\",\n color=\"red\"):\n \"\"\"Construct a tensor from a fiber\n\n Parameters\n -----------\n\n rank_ids: list, default=[\"Rn\", \"Rn-1\", ... \"R0\"]\n List containing names of ranks.\n\n fiber: Fiber\n A fiber to form the root of the new Tensor\n\n shape: list, default=(the shape of \"fiber\")\n A list of shapes of the ranks\n\n name: string, default=\"\"\n A name for the tensor\n\n color: string, default=\"red\"\n The color to paint values when displaying the tensor\n\n \"\"\"\n\n assert(fiber is not None)\n\n #\n # If rank_ids is not given, synthesize something reasonable\n #\n if rank_ids is None:\n if shape is not None:\n maxrank = len(shape) - 1\n else:\n maxrank = fiber.getDepth() - 1\n\n rank_ids = [f\"R{maxrank-i}\" for i in range(maxrank + 1)]\n\n #\n # Create empty Tensor, which gets populated with a fiber below\n #\n tensor = cls(rank_ids=rank_ids,\n shape=shape,\n name=name,\n color=color)\n\n tensor.setRoot(fiber)\n tensor.setMutable(False)\n\n return tensor\n\n\n @classmethod\n def fromRandom(cls,\n rank_ids=None,\n shape=None,\n density=None,\n interval=10,\n seed=None,\n name=\"\",\n color=\"red\"):\n \"\"\"Create a random tensor\n\n Parameters\n ----------\n\n rank_ids: list\n The \"rank ids\" for the tensor\n\n shape: list\n The \"shape\" (i.e., size) of each level of the tree\n\n density: list\n The probability that an element of the fiber will not be\n *empty* for each level of the tree\n\n interval: integer\n The closed range [0:`interval`] of each value at the leaf\n level of the tree\n\n seed: a valid argument for `random.seed`\n A seed to pass to `random.seed`\n\n \"\"\"\n\n f = Fiber.fromRandom(shape, density, interval, seed)\n\n return Tensor.fromFiber(rank_ids=rank_ids,\n fiber=f,\n shape=shape,\n name=name,\n color=color)\n\n\n\n @staticmethod\n def _shape2lists(shape):\n \"\"\" Return a nest of lists of \"shape\" filled with zeros\"\"\"\n\n if len(shape) > 1:\n subtree = Tensor._shape2lists(shape[1:])\n else:\n subtree = 0\n\n result = [subtree for _ in range(shape[0])]\n\n return result\n\n#\n# Accessor methods\n#\n def setRankInfo(self, rank_ids, shape, default=0):\n 
\"\"\"Initialize rank info\n\n This method creates and initializes the list of ranks in this\n tensor with the provided lists of `rank_ids` and `shape`. The\n fibers associated with the ranks get set separately with\n `Tensor.setRoot()`.\n\n Parameters\n ----------\n\n rank_ids: list of strings\n Names to assign to ranks\n\n shape: list of integers\n Shapes to assign to ranks\n\n default: value, default=0\n A value to use as the default for the leaf rank\n\n Returns\n -------\n Nothing\n\n \"\"\"\n\n if shape is None:\n shape = [None] * len(rank_ids)\n\n #\n # Create a linked list of ranks\n #\n self.ranks = []\n last_rank = None\n\n #\n # Populate the list of ranks (in reverse) so the \"next_rank\" field\n # can be filled in\n #\n for id, dimension in reversed(list(zip(rank_ids, shape))):\n new_rank = Rank(id=id, shape=dimension, next_rank=last_rank)\n self.ranks.insert(0, new_rank)\n last_rank = new_rank\n\n #\n # If provided, set leaf rank with a non-zero default\n #\n if default != 0:\n self.ranks[-1].setDefault(default)\n\n\n def syncRankInfo(self, ranks):\n \"\"\".. deprecated::\"\"\"\n\n # TBD: Currently unused and untested, so probably broken\n\n self.ranks = []\n last_rank = None\n\n for rank in reversed(ranks):\n rank.set_next(last_rank)\n last_rank = rank\n\n\n def getRankIds(self):\n \"\"\"Get the rank ids of the tensor\n\n Parameters\n ----------\n None\n\n Returns\n -------\n\n rank_ids: list of strings\n List of names of ranks\n\n \"\"\"\n\n #\n # Get the rank id for each rank\n #\n return [r.getId() for r in self.ranks]\n\n\n def setRankIds(self, rank_ids):\n \"\"\"Set the rank ids of the tensor\n\n Parameters\n ----------\n rank_ids: list of strings\n List of names of ranks\n\n Returns\n -------\n self: tensor\n Returns `self` so method can be used in a chain\n\n \"\"\"\n rank = self._root.getOwner()\n\n for rank_id in rank_ids:\n rank.setId(rank_id)\n rank = rank.getNextRank()\n\n return self\n\n\n def getShape(self, rank_ids=[], authoritative=False):\n \"\"\"Get the shape of the tensor.\n\n Get the shape(s) of the ranks that comprise the tensor. If a\n single `rank_id` is provided the shape of that rank is\n returned as a scalar. If a list of `rank_ids` is provided a\n list will be returned with the shapes of the requested\n ranks. 
Or if no `rank_id` is provided a list of the shapes of\n **all** the ranks of the tensor is returned.\n\n Since the shape may sometimes be estimated, this method gives\n the option of insisting that the returned shape be known\n authoritatively (if not the method returns None).\n\n Parameters\n ----------\n rank_ids: list of strings or a string, default=[]\n A list of rankids or a single rankid\n\n authoritative: Boolean, default=False\n Control whether to return an estimated (non-authoritative) shape\n\n Returns\n -------\n shape: integer, list of integers or None\n The shape or a list of the shapes of the requested rank(s).\n\n\n Notes\n -----\n\n A rank zero tensor will return an empty list\n\n \"\"\"\n #\n # Convert rankids into a list, but remember if it was list originally\n #\n if isinstance(rank_ids, str):\n return_scalar = True\n rank_ids = [rank_ids]\n else:\n return_scalar = False\n\n #\n # Rank-0 tensors have no shape\n #\n if len(self.ranks) == 0:\n return []\n\n #\n # Return shapes for desired rank_ids\n #\n all_rank_ids = self.getRankIds()\n all_shapes = self.ranks[0].getShape(all_ranks=True,\n authoritative=authoritative)\n\n #\n # Maybe there is no authoritative shape\n #\n if all_shapes is None:\n return None\n\n if len(rank_ids) == 0:\n requested_rank_ids = all_rank_ids\n else:\n requested_rank_ids = rank_ids\n\n #\n # Get shape for each requested rank\n #\n shapes = []\n\n for rank_id in requested_rank_ids:\n rank_num = all_rank_ids.index(rank_id)\n shapes.append(all_shapes[rank_num])\n\n #\n # If exactly one shape was requested return a scalar\n #\n if return_scalar:\n return shapes[0]\n\n #\n # Return list of requested ranks\n #\n return shapes\n\n\n def getDepth(self):\n \"\"\"Get the depth of the tensor\n\n Get the depth, i.e., number of dimensions, of the tensor.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n depth: integer\n Number of dimensions in the tensor\n\n Raises\n ------\n None\n\n \"\"\"\n\n return len(self.ranks)\n\n\n def setRoot(self, root):\n \"\"\"Set the root fiber of tensor\n\n The method will (re-)populate the ranks of the tensor\n (`self.ranks`) with the fibertree contents of the provided\n `root` fiber.\n\n Parameters\n ----------\n\n root: Fiber\n The fiber that will be root of the tensor\n\n\n Returns\n --------\n Nothing\n\n \"\"\"\n\n #\n # Note: rank 0 tensors are not allowed in this path\n #\n assert isinstance(root, Fiber)\n\n #\n # Copy fiber if it already belongs to another tensor\n #\n # Note: shapes and owners will be overwritten in _addFiber()\n #\n if root.getOwner() is not None:\n root = deepcopy(root)\n\n self._root = root\n\n #\n # Clear out existing rank information\n #\n for r in self.ranks:\n r.clearFibers()\n\n self._addFiber(root)\n\n\n def _addFiber(self, fiber, level=0):\n \"\"\"Recursively fill in ranks from \"fiber\".\"\"\"\n\n self.ranks[level].append(fiber)\n\n # Note: The code below handles the (probably abandoned)\n # transistion from raw fibers as payloads to fibers in\n # Payload\n\n for p in fiber.getPayloads():\n if Payload.contains(p, Fiber):\n self._addFiber(Payload.get(p), level + 1)\n\n\n def getRoot(self):\n \"\"\"Get the root fiber of the tensor\n\n Parameters\n ----------\n None\n\n Returns\n -------\n\n root: Fiber\n The fibertree at the root of the tensor\n\n \"\"\"\n\n root = self._root\n\n #\n # Either we have a 0-D tensor or the root is a Fiber\n #\n # TBD: This is broken if Fibers are wrapped in a Payload\n #\n assert (isinstance(root, Payload) or\n root == 
self.ranks[0].getFibers()[0])\n\n return root\n\n\n def root(self):\n \"\"\".. deprecated::\"\"\"\n\n Tensor._deprecated(\"Tensor.root() is deprecated, use getRoot()\")\n\n return self.getRoot()\n\n\n def setName(self, name):\n \"\"\"Set name for the tensor\n\n Parameters\n ----------\n\n name: string\n Name to use for tensor\n\n Returns\n -------\n self: Tensor\n So method can be used in a chain\n\n Raises\n ------\n None\n\n \"\"\"\n\n self._name = name\n return self\n\n\n def getName(self):\n \"\"\"Get name of tensor\n\n Parameters\n ----------\n None\n\n Returns\n -------\n name: string\n Name of tensor\n\n Raises\n ------\n None\n\n \"\"\"\n\n return self._name\n\n\n def setColor(self, color):\n \"\"\"Set color for elements of tensor\n\n Parameters\n ----------\n color: string\n Color to use for scalar values in tensor\n\n Returns\n -------\n self: Tensor\n So method can be used in a chain\n\n Raises\n ------\n None\n\n \"\"\"\n\n self._color = color\n return self\n\n\n def getColor(self):\n \"\"\"Get color for elements of tensor\n\n Parameters\n ----------\n None\n\n Returns\n -------\n color: string\n Color being used for scalar values in the tensor\n\n Raises\n ------\n None\n\n \"\"\"\n\n return self._color\n\n\n def setDefault(self, value):\n \"\"\"Set the default value for the leaf payloads of the tensor\n\n Parameters\n ----------\n value: value\n A value to use for leaf payload values in tensor\n\n Returns\n -------\n self: Tensor\n So method can be used in a chain\n\n Raises\n ------\n None\n\n Notes\n -----\n\n The **default** value will be **boxed** by\n `Rank.getDefault()`.\n\n \"\"\"\n\n assert value != Fiber, \"Leaf payloads cannot be a Fiber\"\n\n #\n # Set default for leaf rank\n #\n self.ranks[-1].setDefault(value)\n\n return self\n\n\n def getDefault(self):\n \"\"\"Get the default payload for leaf ranks\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value: value\n A copy of the default payload of the leaf rank\n\n Raises\n ------\n None\n\n Notes\n -----\n\n A `deepcopy()` of the **default** value will have been\n performed in `Rank.getDefault()` so the value returned will be\n unique.\n\n \"\"\"\n\n return self.ranks[-1].getDefault()\n\n\n def setMutable(self, value):\n \"\"\"Set the mutabilility hint\n\n Set the \"hint\" as to whether the tensor is mutable or not,\n i.e., its value will change. 
Note: this property is not\n enforced, but is useful for the *Canvas methods that want to\n save the current value of the tensor, so they know if they\n need to copy the tensor or not.\n\n Parameters\n ----------\n value: Bool\n Is the tensor mutable or not.\n\n Returns\n -------\n self: Tensor\n So method can be used in a chain\n\n Raises\n ------\n None\n\n \"\"\"\n\n self._mutable = value\n\n return self\n\n\n def isMutable(self):\n \"\"\"Returns mutability attribute\n\n Returns the \"hint\" that the tensor is mutable\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value: Bool\n Whether the tensor is set mutable or not\n\n Raises\n ------\n None\n\n \"\"\"\n\n return self._mutable\n\n\n#\n# Comparison operations\n#\n def __eq__(self, other):\n \"\"\"__eq__\n\n Check for equivalence of two tensors by matching their rank\n ids and root fiber.\n\n Note: The tenor's names and colors do not need to match\n\n \"\"\"\n\n rankid_match = (self.getRankIds() == other.getRankIds())\n fiber_match = (self.getRoot() == other.getRoot())\n\n return rankid_match and fiber_match\n\n\n#\n# Tensor equivalent of Fiber methods where operating on the\n# root fiber is the logical activity\n#\n def getPayload(self, *args, **kwargs):\n \"\"\"Get payload at a point\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.getPayload()` for details.\n\n \"\"\"\n\n root = self.getRoot()\n\n if isinstance(root, Payload):\n # Handle rank-0 tensor\n return root\n\n return root.getPayload(*args, **kwargs)\n\n\n def getPayloadRef(self, *args, **kwargs):\n \"\"\"Get a reference to a payloat at at point\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.getPayloadRef()` for details.\n\n \"\"\"\n\n root = self.getRoot()\n\n if isinstance(root, Payload):\n # Handle rank-0 tensor\n return root\n\n return root.getPayloadRef(*args, **kwargs)\n\n\n def countValues(self):\n \"\"\"Get count on non-empty values in tensor\n\n Count of non-empty payload values in the leaf rank of tensor\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.countValues()` for details.\n\n \"\"\"\n return self.getRoot().countValues()\n\n\n def __iter__(self):\n \"\"\"__iter__\"\"\"\n\n return self.getRoot().__iter__()\n\n\n def __reversed__(self):\n \"\"\"Return reversed fiber\"\"\"\n\n return self.getRoot().__reversed__()\n\n\n def __getitem__(self, keys):\n \"\"\"__getitem__\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.__getitem__()` for details.\n\n \"\"\"\n\n return self.getRoot().__getitem__(keys)\n\n def __setitem__(self, key, newvalue):\n \"\"\"__setitem__\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.__setitem__()` for details.\n\n \"\"\"\n\n self.getRoot().__setitem__(key, newvalue)\n\n\n def updateCoords(self, func, depth=0, **kwargs):\n \"\"\"Update coordinates of root fiber\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.updateCoords()` for details.\n\n \"\"\"\n\n new_tensor = copy.deepcopy(self)\n\n new_tensor.getRoot().updateCoords(func, depth=depth, **kwargs)\n\n return new_tensor\n\n\n def updatePayloads(self, func, depth=0, **kwargs):\n \"\"\"Update payloads of root fiber\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. 
See `Fiber.updatePayloads()` for details.\n\n \"\"\"\n\n new_tensor = copy.deepcopy(self)\n\n new_tensor.getRoot().updatePayloads(func, depth=depth, **kwargs)\n\n return new_tensor\n\n#\n# Split methods\n#\n# Note: all these methods return a new tensor\n#\n def __truediv__(self, arg):\n \"\"\"Split root fiber in coordinate space\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.__truediv()` for details.\n\n \"\"\"\n\n return self._splitGeneric(Fiber.__truediv__, arg)\n\n def __floordiv__(self, arg):\n \"\"\"Split root fiber in position space\n\n Tensor-level version of method that operates on the root\n fiber of the tensor. See `Fiber.__floordiv()` for details.\n\n \"\"\"\n\n return self._splitGeneric(Fiber.__floordiv__, arg)\n\n\n def splitUniform(self, *args, **kwargs):\n \"\"\"Split tensor's fibertree uniformly in coordinate space\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.splitUniform()` for more details.\n\n Parameters\n ----------\n\n See `Fiber.splitUniform()` for arguments.\n\n Returns\n -------\n\n split_tensor: Tensor\n A new split tensor\n\n \"\"\"\n\n return self._splitGeneric(Fiber.splitUniform,\n *args,\n **kwargs)\n\n def splitNonUniform(self, *args, **kwargs):\n \"\"\"Split tensor's fibertree non-uniformly in coordinate space\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.splitNonUniform()` for more details.\n\n Parameters\n ----------\n\n See `Fiber.splitNonUniform()` for arguments.\n\n Returns\n -------\n\n split_tensor: Tensor\n A new split tensor\n\n \"\"\"\n\n return self._splitGeneric(Fiber.splitNonUniform,\n *args,\n **kwargs)\n\n\n def splitEqual(self, *args, **kwargs):\n \"\"\"Split tensor's fibertree equally in position space\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.splitEqual()` for more details.\n\n Parameters\n ----------\n\n See `Fiber.splitEqual()` for arguments.\n\n Returns\n -------\n\n split_tensor: Tensor\n A new split tensor\n\n \"\"\"\n\n return self._splitGeneric(Fiber.splitEqual,\n *args,\n **kwargs)\n\n\n def splitUnEqual(self, *args, **kwargs):\n \"\"\"Split tensor's fibertree unequally in postion space\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.splitUnEqual()` for more details.\n\n Parameters\n ----------\n\n See `Fiber.splitUnEqual()` for arguments.\n\n Returns\n -------\n\n split_tensor: Tensor\n A new split tensor\n\n \"\"\"\n\n return self._splitGeneric(Fiber.splitUnEqual,\n *args,\n **kwargs)\n\n\n def _splitGeneric(self, func, *args, **kwargs):\n \"\"\" _splitGeneric... 
\"\"\"\n\n rank_ids = copy.deepcopy(self.getRankIds())\n\n #\n # Determine depth\n #\n if \"rankid\" in kwargs:\n depth = rank_ids.index(kwargs[\"rankid\"])\n elif \"depth\" in kwargs:\n depth = kwargs[\"depth\"]\n else:\n depth = 0\n\n #\n # Create new list of rank ids\n #\n id = rank_ids[depth]\n rank_ids[depth] = f\"{id}.1\"\n rank_ids.insert(depth + 1, f\"{id}.0\")\n\n #\n # Create new shape list\n #\n # TBD: Create shape\n #\n shape = None\n\n #\n # Create new root fiber\n #\n root_copy = copy.deepcopy(self.getRoot())\n root = func(root_copy, *args, **kwargs)\n\n #\n # Create Tensor from rank_ids and root fiber\n #\n tensor = Tensor.fromFiber(rank_ids, root, shape)\n tensor.setName(self.getName() + \"+split\")\n tensor.setColor(self.getColor())\n tensor.setMutable(self.isMutable())\n\n return tensor\n\n#\n# Swizzle and swap methods\n#\n def swizzleRanks(self, rank_ids):\n \"\"\"Swizzle the ranks of the tensor\n\n Re-arrange (swizzle) the ranks of the tensor so they match the\n given `rank_ids`. This is accompished via a series of rank swaps.\n\n Parameters\n ----------\n\n rank_ids: list of strings\n List of names of ranks in desired order (top to bottom)\n\n\n Returns\n -------\n\n swizzled_tensor: Tensor\n New tensor with ranks swizzed\n\n \"\"\"\n\n swizzled = self\n swizzled_rank_ids = swizzled.getRankIds()\n swizzled_name = swizzled.getName()\n\n for target_rank_idx, target_rank_id in enumerate(rank_ids):\n #\n # While target rank is not in desired place\n #\n while target_rank_idx != swizzled_rank_ids.index(target_rank_id):\n #\n # Swap the target rank id up one level\n #\n swizzled_rank_idx = next(idx\n for idx, id in enumerate(swizzled_rank_ids)\n if id == target_rank_id)\n\n swizzled = swizzled.swapRanks(depth=swizzled_rank_idx - 1)\n swizzled_rank_ids = swizzled.getRankIds()\n\n swizzled.setName(f\"{swizzled_name}+swizzled\")\n\n return swizzled\n\n\n def swapRanks(self, depth=0):\n \"\"\"Swap a pair of ranks in the tensor's fibertree.\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.swapRanks()` for more details.\n\n Parameters\n ----------\n\n depth: integer, default=0\n Level of fibertree to split\n\n See `Fiber.swapRanks()` for other arguments.\n\n Returns\n -------\n\n swapped_tensor: Tensor\n A new tensor with two ranks swapped\n\n \"\"\"\n\n #\n # Create new list of rank ids\n #\n rank_ids = copy.deepcopy(self.getRankIds())\n id = rank_ids[depth]\n rank_ids[depth] = rank_ids[depth + 1]\n rank_ids[depth + 1] = id\n\n #\n # Create new shape list\n #\n # TBD: Create shape\n #\n shape = None\n\n # Only call Fiber.swapRanks if there are actually payloads to swap\n if not all(fiber.isEmpty() for fiber in self.ranks[depth].fibers):\n root = self._modifyRoot(Fiber.swapRanks,\n Fiber.swapRanksBelow,\n depth=depth)\n else:\n root = copy.deepcopy(self.getRoot())\n\n #\n # Create Tensor from rank_ids and root fiber\n #\n tensor = Tensor.fromFiber(rank_ids, root, shape)\n tensor.setName(self.getName() + \"+swapped\")\n tensor.setColor(self.getColor())\n tensor.setMutable(self.isMutable())\n\n return tensor\n\n\n def flattenRanks(self, depth=0, levels=1, coord_style=\"tuple\"):\n \"\"\"Flatten ranks in the tensor's fibertree.\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. 
See `Fiber.flattenRanks()` for more details.\n\n Parameters\n ----------\n\n depth: integer, default=0\n Level of fibertree to split\n\n See `Fiber.flattenRanks()` for other arguments.\n\n Returns\n -------\n\n flattened_tensor: Tensor\n A new tensor with some ranks flattened\n\n \"\"\"\n\n #\n # Create new list of rank ids\n #\n # Note: we need to handle the case where existing ranks are lists\n #\n rank_ids = copy.deepcopy(self.getRankIds())\n\n cur_rankid = rank_ids[depth]\n if not isinstance(cur_rankid, list):\n rank_ids[depth] = []\n rank_ids[depth].append(cur_rankid)\n\n for d in range(levels):\n next_rankid = rank_ids[depth + 1]\n\n if isinstance(next_rankid, list):\n rank_ids[depth] = cur_rankid + next_rankid\n else:\n rank_ids[depth].append(next_rankid)\n\n del rank_ids[depth + 1]\n\n #\n # Create new shape list\n #\n # TBD: Create shape\n #\n shape = None\n\n root = self._modifyRoot(Fiber.flattenRanks,\n Fiber.flattenRanksBelow,\n depth=depth,\n levels=levels,\n style=coord_style)\n #\n # Create Tensor from rank_ids and root fiber\n #\n tensor = Tensor.fromFiber(rank_ids, root, shape)\n tensor.setName(self.getName() + \"+flattened\")\n tensor.setColor(self.getColor())\n tensor.setMutable(self.isMutable())\n\n return tensor\n\n\n def unflattenRanks(self, depth=0, levels=1):\n \"\"\"Unflatten ranks in the tensor's fibertree.\n\n Tensor-level version of method that operates on the tensor's fibertree\n at depth `depth`. See `Fiber.unflattenRanks()` for more details.\n\n Parameters\n ----------\n\n depth: integer, default=0\n Level of fibertree to split\n\n See `Fiber.unflattenRanks()` for other arguments.\n\n Returns\n -------\n\n unflattened_tensor: Tensor\n A new tensor with some ranks unflattened\n\n \"\"\"\n\n #\n # Create new list of rank ids\n #\n rank_ids = copy.deepcopy(self.getRankIds())\n\n for d in range(levels):\n id = rank_ids[depth + d]\n rank_ids[depth + d] = id[0]\n if len(id) == 2:\n rank_ids.insert(depth + d + 1, id[1])\n else:\n rank_ids.insert(depth + d + 1, id[1:])\n\n #\n # Create new shape list\n #\n # TBD: Create shape\n #\n shape = None\n\n # Only call Fiber.unflattenRanks if there are actually ranks to unflatten\n if not all(fiber.isEmpty() for fiber in self.ranks[depth].fibers):\n root = self._modifyRoot(Fiber.unflattenRanks,\n Fiber.unflattenRanksBelow,\n depth=depth,\n levels=levels)\n else:\n root = Fiber()\n\n #\n # Create Tensor from rank_ids and root fiber\n #\n tensor = Tensor.fromFiber(rank_ids, root, shape)\n tensor.setName(self.getName() + \"+unflattened\")\n tensor.setColor(self.getColor())\n tensor.setMutable(self.isMutable())\n\n return tensor\n\n\n def _modifyRoot(self, func, funcBelow, depth=0, **kwargs):\n #\n # Create new root fiber\n #\n root_copy = copy.deepcopy(self.getRoot())\n if depth == 0:\n root = func(root_copy, **kwargs)\n else:\n root = root_copy\n funcBelow(root, depth=depth - 1, **kwargs)\n\n #\n # Create Tensor from rank_ids and root fiber\n #\n return root\n\n\n\n#\n# String methods\n#\n def print(self, title=None):\n \"\"\"print\"\"\"\n\n if title is not None:\n print(\"%s\" % title)\n\n print(\"%s\" % self)\n print(\"\")\n\n\n def __format__(self, format):\n \"\"\"__format__\"\"\"\n\n #\n # Just format the root fiber\n #\n return self.getRoot().__format__(format)\n\n\n def __str__(self):\n \"\"\"_str__\"\"\"\n\n # TBD: Fix to use a format from a fiber...\n\n str = \"T(%s)/[\" % \",\".join(self.getRankIds())\n\n if self.ranks:\n str += \"\\n\"\n for r in self.ranks:\n str += r.__str__(indent=2) + \"\\n\"\n else:\n root = 
self.getRoot()\n fmt = \"n*\" if isinstance(root, Fiber) else \"\"\n str += f\"{root:{fmt}}\"\n\n str += \"]\"\n return str\n\n\n def __repr__(self):\n \"\"\"__repr__\"\"\"\n\n # TBD: Fix to use a repr from a fiber...\n\n str = \"T(%s)/[\" % \",\".join(self.getRankIds())\n\n if self.ranks:\n str += \"\\n\"\n for r in self.ranks:\n str += \" \" + repr(r) + \"\\n\"\n else:\n str += repr(self.getRoot())\n\n str += \"]\"\n\n return str\n\n#\n# Yaml input/output methods\n#\n\n @staticmethod\n def parse(file):\n \"\"\"Parse a yaml file containing a tensor\"\"\"\n\n with open(file, 'r') as stream:\n try:\n y_file = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n exit(1)\n\n #\n # Make sure key \"tensor\" exists\n #\n if not isinstance(y_file, dict) or 'tensor' not in y_file:\n print(\"Yaml is not a tensor\")\n exit(1)\n\n y_tensor = y_file['tensor']\n\n #\n # Make sure key \"rank_ids\" exists\n #\n if not isinstance(y_tensor, dict) or 'rank_ids' not in y_tensor:\n print(\"Yaml has no rank_ids\")\n exit(1)\n\n rank_ids = y_tensor['rank_ids']\n\n #\n # Get shape information\n #\n if 'shape' in y_tensor:\n shape = y_tensor['shape']\n else:\n shape = None\n\n #\n # Get tensor name\n #\n if 'name' in y_tensor:\n name = y_tensor['name']\n else:\n # TBD: Maybe extract something from filename\n name = \"\"\n\n #\n # Make sure key \"root\" exists\n #\n if 'root' not in y_tensor:\n print(\"Yaml has no root\")\n exit(1)\n\n y_root = y_tensor['root']\n\n #\n # Generate the tree recursively\n # Note: fibers are added into self.ranks inside method\n #\n fiber = Fiber.dict2fiber(y_root[0])\n\n return (rank_ids, fiber, shape, name)\n\n\n def dump(self, filename):\n \"\"\"Dump a tensor to a file in YAML format\"\"\"\n\n root = self.getRoot()\n\n if isinstance(root, Payload):\n root_dict = Payload.payload2dict(root)\n else:\n root_dict = root.fiber2dict()\n\n tensor_dict = {'tensor':\n {'rank_ids': self.getRankIds(),\n 'shape': self.getShape(),\n 'name': self.getName(),\n 'root': [root_dict]}}\n\n with open(filename, 'w') as file:\n yaml.dump(tensor_dict, file)\n\n#\n# Utility methods\n#\n\n @staticmethod\n def _deprecated(message):\n import warnings\n\n warnings.warn(message, FutureWarning, stacklevel=3)\n\n#\n# Pdoc stuff\n#\n__pdoc__ = {'Tensor.parse': False,\n 'Tensor.__setitem__': True,\n 'Tensor.__getitem__': True,\n 'Tensor.__ilshift__': True,\n 'Tensor.__truediv__': True,\n 'Tensor.__floordiv__': True,\n }\n", "id": "9487059", "language": "Python", "matching_score": 6.233187198638916, "max_stars_count": 2, "path": "fibertree/core/tensor.py" }, { "content": "\"\"\"Rank\n\nA class used to implement a rank (or dimension) of a tensor.\n\n\"\"\"\n\nimport logging\n\nfrom copy import deepcopy\n\nfrom .fiber import Fiber\nfrom .payload import Payload\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.core.rank')\n\nclass Rank:\n \"\"\"Class representing a \"rank\" (or dimension) of a tensor.\n\n An instance of this class holds a list of all the fibers at a\n rank, common attributes of the fibers in the rank, and a pointer\n to the next rank.\n\n A `Tensor` contains a list of the ranks it is comprised of, and\n the \"next rank\" pointer is used to create a linked list of those\n ranks..\n\n Attributes\n ----------\n\n rank_id: string\n The name of the rank\n\n estimated_shape: Boolean\n Is the shape estimated or was it provided explicitly\n\n shape: integer\n The shape of the fibers in the rank\n\n fibers: list of Fibers\n A list of the fibers in the rank\n\n 
Constructor\n -----------\n\n The `Rank` constructor creates an empty rank.\n\n Parameters\n -----------\n\n id: string\n The name (rank_id) of the rank\n\n shape: integer, default=None\n The shape of the fibers in the rank\n\n next_rank: Rank, default=None\n The next rank in the tensor\n\n\n Notes\n -----\n\n The fibers in a rank are NOT provided as part of the contructor\n but are added incrementally using `Rank.append()`.\n\n \"\"\"\n\n\n def __init__(self, id, shape=None, next_rank=None):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.core.rank')\n\n self._id = id\n\n if shape is None:\n self._estimated_shape = True\n self._shape = 0\n else:\n self._estimated_shape = False\n self._shape = shape\n\n self.setNextRank(next_rank)\n\n self.fibers = []\n\n#\n# Accessor methods\n#\n def getId(self):\n \"\"\"Return id of rank.\n\n Get the rank id of this rank, i.e., the name of this\n rank/dimension.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n rank_id: string\n Rank id of this rank\n\n \"\"\"\n\n return self._id\n\n\n def getRankIds(self, all_ranks=True):\n \"\"\"Get a list of ranks ids.\n\n Get a list of rank ids starting at this rank and optionally\n including the rank ids all succeeding (lower level) ranks.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n rank_id: list of strings\n List of rank ids\n\n Todo\n ----\n\n There is an asymmetry between this method and\n `Rank.getShape()` because it always returns a list,\n irrespective of the value of `all_ranks`.\n\n \"\"\"\n\n rankids = [self._id]\n\n if all_ranks and self.next_rank is not None:\n rankids.extend(self.next_rank.getRankIds(all_ranks=True))\n\n return rankids\n\n def setId(self, rank_id):\n \"\"\"Set id of rank.\n\n Set the rank id of this rank, i.e., the name of this\n rank/dimension.\n\n Parameters\n ----------\n rank_id: string\n Rank id of this rank\n\n\n Returns\n -------\n self: rank\n Returns `self1 so method can be used in a chain\n\n \"\"\"\n\n self._id = rank_id\n return self\n\n\n def getName(self):\n \"\"\".. 
deprecated::\"\"\"\n\n Rank._deprecated(\"Use of Rank.getName() is deprecated - use Rank.getId()\")\n\n return self._id\n\n\n def getShape(self, all_ranks=True, authoritative=False):\n \"\"\"Return shape of rank.\n\n Since the shape may sometimes be estimated, this method gives\n the option of insisting that the returned shape be known\n authoritatively (if not the method returns None).\n\n Parameters\n ----------\n all_ranks: Boolean, default=True\n Control whether to return shape of all ranks or just this one\n\n authoritative: Boolean, default=False\n Control whether to return an estimated (non-authoritative) shape\n\n Returns\n -------\n shape: integer, list of integers or None\n The shape of this rank or this rank and all succeeding ranks\n\n Todo\n ----\n\n There is an asymmetry between this method and\n `Rank.getRankIds()` because it sometimes returns a list and\n sometimes a scalar depending on the value of `all_ranks`.\n\n \"\"\"\n\n if all_ranks == False:\n #\n # Handle case where user just wants shape of this rank\n #\n if authoritative and self._estimated_shape:\n #\n # We do not know the shape authoritatively\n #\n return None\n\n if self._shape == 0:\n #\n # We do not actually know the shape\n #\n return 0\n\n return self._shape\n\n #\n # Get shape of all ranks\n #\n if authoritative and self._estimated_shape:\n #\n # This will cause the final return to be None\n #\n return None\n\n if self._shape == 0 and len(self.fibers) > 0:\n shape = [max([f.estimateShape(all_ranks=False) for f in self.fibers])]\n else:\n shape = [self._shape]\n\n if self.next_rank is not None:\n rest_of_shape = self.next_rank.getShape(all_ranks=True, authoritative=authoritative)\n if rest_of_shape is None:\n return None\n\n shape.extend(rest_of_shape)\n\n #\n # If we didn't have a shape for any rank, assume we don't know anything\n #\n #if any([s == 0 for s in shape]):\n # return None\n\n return shape\n\n\n def getFibers(self):\n \"\"\"Return list of fibers in the rank.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n fibers: list of Fibers\n All the fibers in this rank\n\n \"\"\"\n\n return self.fibers\n\n def clearFibers(self):\n \"\"\"Empty rank of all fibers.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.fibers = []\n\n #\n # Default payload methods\n #\n def setDefault(self, value):\n \"\"\"Set the default payload value for fibers in this rank.\n\n Parameters\n ----------\n value: value\n A value to use as the payload value for fibers in this rank\n\n Returns\n -------\n self: Rank\n So method can be used in a chain\n\n Raises\n ------\n None\n\n Notes\n -----\n\n We make sure that the value saved is **boxed**.\n\n \"\"\"\n\n self._default_is_set = True\n self._default = Payload.maybe_box(value)\n\n return self\n\n\n def getDefault(self):\n \"\"\"Get the default payload for fibers in this rank\n\n Parameters\n ----------\n None\n\n Returns\n -------\n value: value\n A copy of the (boxed) default payload of fibers in this\n rank\n\n Raises\n ------\n None\n\n Notes\n -----\n\n We `deepcopy()` the return value so that everyone has their\n own unique **boxed** value\n\n \"\"\"\n\n assert self._default_is_set\n\n #\n # Return a copy of the default\n #\n return deepcopy(self._default)\n\n\n#\n# Fundamental methods\n#\n def append(self, fiber):\n \"\"\"Append the provided fiber into a rank\n\n Parameters\n ----------\n fiber: Fiber\n A fiber to add to the rank\n\n Returns\n -------\n Nothing\n\n\n Notes\n -----\n\n If the **shape** of the rank is being 
estimated, this method\n might update the rank's shape.\n\n TODO\n ----\n\n Maybe should rename to appendFiber()\n\n \"\"\"\n\n #\n # Get the raw fiber (if it was wrapped in a payload)\n #\n fiber = Payload.get(fiber)\n\n if self._estimated_shape:\n #\n # Get shape from fiber and see it is larger that current shape\n # making sure we don't get info from a prior owning rank\n #\n # TBD: If the fiber really has a definitive shape then\n # change estimated_shape to True\n #\n fiber.setOwner(None)\n self._shape = max(self._shape, fiber.getShape(all_ranks=False))\n\n #\n # Set this rank as owner of the fiber\n #\n fiber.setOwner(self)\n\n #\n # Check default value for new coordinates in the fiber\n #\n if self.next_rank is None:\n assert self.getDefault() != Fiber, \\\n \"Leaf rank default should not be Fiber\"\n else:\n assert self.getDefault() == Fiber, \\\n \"Non-leaf rank default should be Fiber\"\n\n #\n # Add fiber to list of fibers of rank\n #\n self.fibers.append(fiber)\n\n#\n# Linked list methods\n#\n def setNextRank(self, next_rank):\n \"\"\"Set the next rank\n\n Record a reference to the next rank. If that rank exists then the\n default payload of fibers in this rank must be a fiber,\n otherwise set the default payload to zero.\n\n Parameters\n ----------\n next_rank: None\n\n Returns\n -------\n Nothing\n\n\n Todo\n ----\n The default payload probably shouldn't be zero.\n\n \"\"\"\n\n self.next_rank = next_rank\n\n if next_rank is None:\n self.setDefault(0)\n else:\n self.setDefault(Fiber)\n\n\n def getNextRank(self):\n \"\"\"Get the next rank\n\n Parameters\n ----------\n None\n\n Returns\n -------\n next_rank: Rank\n The next rank\n\n \"\"\"\n\n return self.next_rank\n\n#\n# String methods\n#\n\n def __str__(self, indent=0):\n \"\"\"__str__\"\"\"\n\n string = indent * ' '\n string += f\"Rank: {self._id} \"\n\n next_indent = len(string)\n\n separator = \",\\n\" + \" \" * next_indent\n fibers = [x.__str__(indent=next_indent, cutoff=1000, newline=True) for x in self.fibers]\n string += separator.join(fibers)\n\n return string\n\n def __repr__(self):\n \"\"\"__repr__\"\"\"\n\n string = \"R(%s)/[\" % self._id\n string += \", \".join([x.__repr__() for x in self.fibers])\n string += \"]\"\n return string\n\n#\n# Utility functions\n#\n\n @staticmethod\n def _deprecated(message):\n import warnings\n\n warnings.warn(message, FutureWarning, stacklevel=3)\n\n\n", "id": "7367710", "language": "Python", "matching_score": 1.4808375835418701, "max_stars_count": 2, "path": "fibertree/core/rank.py" }, { "content": "\"\"\"Make Tensor Module\"\"\"\n\n#\n# Import standard libraries\n#\nimport logging\n\nimport yaml\nfrom pathlib import Path\n\n#\n# Import display classes/utilities\n#\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\n#\n# Import fibertree libraries\n#\nfrom fibertree import Tensor\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.notebook.tensor_maker')\n\n\nclass TensorMaker():\n\n def __init__(self, name=None, autoload=False):\n \"\"\" __init__ \"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.notebook.tensor_maker')\n\n\n #\n # Save parameters\n #\n self.name = name\n self.autoload = autoload\n\n #\n # Tensor creation variables\n #\n self.controls = {}\n\n self.rank_ids = {}\n self.variables = {}\n self.reset = {}\n\n self.colors = [\"blue\",\n \"green\",\n \"orange\",\n \"purple\",\n \"red\",\n \"yellow\"]\n\n #\n # Create directory for load/save configuration information\n #\n 
etcdir = Path(\"etc/tensormaker\")\n etcdir.mkdir(mode=0o755, parents=True, exist_ok=True)\n self.etcdir = etcdir\n\n\n\n#\n# Methods to create interactive controls for specifying the attributes\n# of a tensor.\n#\n# Note: the next set of methods are convenience functions for creating\n# interactive controls for tensors with standard names and rank names\n#\n\n#\n# Matrix multiply (by convention)\n#\n def addA_MK(self,\n shape=[16, 8],\n density=0.2,\n interval=5,\n seed=10,\n color=\"green\"):\n\n t = self.addTensor(\"A\",\n [\"M\", \"K\"],\n shape=shape,\n density=density,\n interval=interval,\n seed=seed,\n color=color)\n\n return t\n\n\n def addB_KN(self,\n shape=[8,12],\n density=0.2,\n interval=5,\n seed=10,\n color=\"blue\"):\n\n t = self.addTensor(\"B\",\n [\"K\", \"N\"],\n shape=shape,\n density=density,\n interval=interval,\n seed=seed,\n color=color)\n\n return t\n\n\n #\n # Convolution (by convention)\n #\n def addI_CHW(self,\n shape=[3,8,8],\n density=1.0,\n interval=5,\n seed=10,\n color=\"blue\"):\n\n t = self.addTensor(\"I\",\n [\"C\", \"H\", \"W\"],\n shape=shape,\n density=density,\n interval=interval,\n seed=seed,\n color=color)\n\n\n return t\n\n\n def addF_KCRS(self,\n shape=[2,4,3,3],\n density=1.0,\n interval=5,\n seed=10,\n color=\"green\"):\n\n t = self.addTensor(\"F\",\n [\"K\", \"C\", \"R\", \"S\"],\n shape=shape,\n density=density,\n interval=interval,\n seed=seed,\n color=color)\n\n return t\n\n #\n # Graphs\n #\n def addG_SD(self,\n shape=[10,10],\n density=0.2,\n interval=5,\n seed=5,\n color=\"orange\"):\n\n t = self.addTensor(\"G\",\n [\"S\", \"D\"],\n shape=shape,\n density=density,\n interval=interval,\n seed=seed,\n color=color)\n\n return t\n\n\n#\n# Generic method to create interactive controls for specifying the\n# attributes of a tensor.\n#\n\n def addTensor(self, name, rank_ids, **kwargs):\n \"\"\" Create the set of interactive controls for the given tensor \"\"\"\n\n #\n # Convert simple kwargs into full label names for:\n #\n\n kwargs = self._convert_kwargs(name, rank_ids, kwargs)\n\n self.controls[name] = widgets.Label(value=f\"Tensor {name}\")\n\n self.rank_ids[name] = rank_ids\n\n for r in rank_ids:\n vname = r+\"_SHAPE\"\n\n if vname in self.controls:\n self.controls[f\"{vname}_{name}\"] = widgets.Text(description=f'Shape {r}:',\n value=\"This shape defined above\",\n disabled=True)\n else:\n self.controls[vname]=widgets.IntSlider(description=f'Shape {r}:',\n min=1,\n max=64,\n step=1,\n value=kwargs.get(vname, 16))\n\n vname = name+\"_DENSITY\"\n\n if vname in self.controls:\n del self.controls[vname]\n\n self.controls[vname]=widgets.FloatSlider(description='Density:',\n min=0,\n max=1,\n step=0.02,\n value=kwargs.get(vname, 0.2))\n\n\n vname = name+\"_INTERVAL\"\n\n if vname in self.controls:\n del self.controls[vname]\n\n self.controls[vname]=widgets.IntSlider(description='Interval:',\n min=1,\n max=100,\n step=1,\n value=kwargs.get(vname, 5))\n\n vname = name+\"_SEED\"\n\n if vname in self.controls:\n del self.controls[vname]\n\n self.controls[vname]=widgets.IntSlider(description='Seed:',\n min=0,\n max=100,\n step=1,\n value=kwargs.get(vname, 10))\n\n\n vname = name+\"_COLOR\"\n\n if vname in self.controls:\n del self.controls[vname]\n\n self.controls[vname]=widgets.Dropdown(options=self.colors,\n value=kwargs.get(vname, \"red\"),\n description='Color:',\n disabled=False)\n \n return {'name': name,\n 'rank_ids': rank_ids}\n\n\n\n def _convert_kwargs(self, name, rank_ids, kwargs):\n \"\"\" Canonicalize kwargs\n \n \n Convert simple kwargs 
into full label names for:\n - shape\n - density\n - interval\n - seed\n - color\n \n \"\"\"\n\n new_kwargs = {}\n\n for key, value in kwargs.items():\n if key == \"shape\":\n for rank_id, shape in zip(rank_ids, value):\n new_kwargs[f\"{rank_id}_SHAPE\"] = shape\n\n continue\n\n if key == \"density\":\n new_kwargs[f\"{name}_DENSITY\"] = value\n continue\n\n if key == \"seed\":\n new_kwargs[f\"{name}_SEED\"] = value\n continue\n\n if key == \"interval\":\n new_kwargs[f\"{name}_INTERVAL\"] = value\n continue\n\n if key == \"color\":\n new_kwargs[f\"{name}_COLOR\"] = value\n continue\n\n if not key in kwargs:\n new_kwargs[key] = value\n\n return new_kwargs\n\n\n#\n# Display all of the interactive controls to set tensor attributes\n#\n\n def displayControls(self):\n \"\"\"Create and display the interactive controls\"\"\"\n\n #\n # Display the tensor configuration controls\n #\n controls = self._getControls()\n\n display(controls)\n\n #\n # Display the reset button\n #\n load = widgets.Button(description='Load',\n tooltip='Load all controls values from a file')\n\n load.on_click(lambda arg: self.loadControls())\n\n store = widgets.Button(description='Store',\n tooltip='Store all controls to a file')\n\n store.on_click(lambda arg: self.storeControls())\n\n reset = widgets.Button(description='Reset',\n tooltip='Reset all control values to their default state')\n\n reset.on_click(lambda arg: self.resetControls())\n\n\n display(widgets.HBox([load, store, reset]))\n\n\n def _getControls(self):\n\n title = widgets.Label(value=\"Tensor Creation Controls\")\n\n controls = interactive(self._set_params,\n Title=title,\n **self.controls)\n\n #\n # Collect reset values for all controls\n #\n for name, control in self.controls.items():\n self.reset[name] = control.value\n\n #\n # Optionally load controls from file\n #\n if self.autoload:\n self.loadControls()\n\n return controls\n\n\n def _set_params(self, **kwargs):\n\n for variable, value in kwargs.items():\n self.variables[variable] = value\n\n\n\n\n def storeControls(self):\n \"\"\" storeControls \"\"\"\n\n filename = self._getFilename()\n if filename is None:\n return\n\n state = {name: control.value for (name, control) in self.controls.items()}\n state_yaml = yaml.dump(state, Dumper=yaml.SafeDumper)\n\n with open(filename, \"w\") as control_file:\n control_file.write(state_yaml)\n\n\n def loadControls(self):\n \"\"\" loadControls \"\"\"\n\n filename = self._getFilename(exists=True)\n if filename is None:\n return\n\n with open(filename, \"r\") as control_file:\n state_yaml = control_file.read()\n\n state = yaml.load(state_yaml, Loader=yaml.SafeLoader)\n\n for name, value in state.items():\n self.controls[name].value = value\n\n\n def resetControls(self):\n \"\"\" resetControls \"\"\"\n\n for name, control in self.controls.items():\n control.value = self.reset[name]\n\n\n def _getFilename(self, exists=False):\n if self.name is None:\n self.logger.warning(\"No filename specified at init time\")\n return None\n\n filename = self.etcdir / Path(self.name+\".yaml\")\n\n if exists and not filename.is_file():\n self.logger.warning(f\"Control file ({filename}) does not exist\")\n return None\n\n return filename\n\n\n\n#\n# Methods to create a tensor from the interactively set attributes\n#\n\n\n def makeA_MK(self):\n\n return self.makeTensor(\"A\")\n\n\n\n def makeB_KN(self):\n\n return self.makeTensor(\"B\")\n\n\n\n def makeTensor(self, name):\n \"\"\" Create a tensor from the current interactively set attributes \"\"\" \n\n rank_ids = self.rank_ids[name]\n\n 
t = Tensor.fromRandom(name=name,\n rank_ids=rank_ids,\n shape=[self.variables[r+\"_SHAPE\"] for r in rank_ids],\n density=(len(rank_ids)-1)*[1.0]+[self.variables[name+\"_DENSITY\"]],\n interval=self.variables[name+\"_INTERVAL\"],\n seed=self.variables[name+\"_SEED\"],\n color=self.variables[name+\"_COLOR\"])\n\n return t\n\n\n\n", "id": "8710221", "language": "Python", "matching_score": 1.0114459991455078, "max_stars_count": 2, "path": "fibertree/notebook/tensor_maker.py" }, { "content": "import matplotlib.pyplot as plt\nimport yaml\nimport sys\nimport os\nimport numpy as np\nfrom matplotlib import colors as mcolors\ncolors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\nindir = sys.argv[1]\n\nalldata = dict()\n# go through and read all files with 'cache_'\nfor filename in os.listdir(indir):\n if filename.startswith('cache_'):\n with open(os.path.join(indir, filename)) as f:\n data = yaml.load(f)\n desc = filename.split('_')[-1]\n alldata[desc] = data\n\n# print(alldata)\nind = np.arange(len(alldata))\nplts = list()\ndata_to_plot = dict()\nx_labels = list()\nlegend_labels = list()\nfor key in alldata:\n data = alldata[key]\n x_labels.append(key)\n # print(key)\n # print(data)\n for name in data:\n val = data[name]\n if name.startswith(\"Amplify\") or name.startswith(\"Reduce\"): # add into Z_buffer\n continue\n else: \n if name in data_to_plot:\n data_to_plot[name].append(val)\n else:\n data_to_plot[name] = [val]\n\n # add swoop stats in post\n A_buffer_key = \"A_buffer_access\"\n data_to_plot[A_buffer_key][-1] += data[\"Amplify_K0\"]\n data_to_plot[A_buffer_key][-1] += data[\"Amplify_K1\"]\n \n Z_buffer_key = \"Z_buffer_access\"\n data_to_plot[Z_buffer_key][-1] += data[\"Amplify_N0\"]\n data_to_plot[Z_buffer_key][-1] += data[\"Amplify_N1_Upd\"]\n data_to_plot[Z_buffer_key][-1] += data['Reduce_K0']\n\n# print(data_to_plot)\nlegend_colors = list()\ndata_types = list()\nplts = list()\nnum_vals = 0\nfor key in data_to_plot:\n print(\"normalizing {}\".format(key)) \n val = data_to_plot[key]\n if 'DRAM' in key: # scale up \n for i in range(0, len(val)):\n val[i] = val[i] * 100\n\ncolor_map = {\"A_DRAM_access\":\"b\", \"A_buffer_access\":\"lightskyblue\", 'B_DRAM_access':'green','B_buffer_access':'lime', 'Z_DRAM_access':'red', 'Z_buffer_access':'salmon'}\nstacking_order = [\"B_buffer_access\", \"B_DRAM_access\", \"Z_buffer_access\",\n \"Z_DRAM_access\", \"A_buffer_access\", \"A_DRAM_access\"]\ncumulative = [0]*len(data_to_plot[stacking_order[0]])\nfor key in stacking_order:\n# for i in range(0, len(stacking_order)):\n # key = stacking_order[i]\n # print(\"plotting {}\".format(key))\n val = data_to_plot[key]\n print(\"{}: {}\".format(key, val))\n c = color_map[key]\n p = plt.bar(ind, val,bottom=cumulative,color=c)\n cumulative = [sum(x) for x in zip(cumulative, val)]\n plts.append(p)\n \n # if key in color_map:\n # legend_colors.append(colors[color_map[key]])\n # else:\n legend_colors.append(p[0])\n \n # rename legend\n temp = key\n parts = temp.split(\"_\")\n label = \"\"\n inp_name = \"\"\n if parts[0] == \"A\":\n inp_name = \"Fr\"\n elif parts[0] == \"B\":\n inp_name = \"Gr\"\n elif parts[0] == \"Z\":\n inp_name = \"Fr'\"\n\n inp_name += \" \" + parts[1]\n \n data_types.append(inp_name)\nassert len(ind) == len(x_labels)\n\n# print(colors)\n# print(legend_colors)\nif len(sys.argv) > 2:\n plt.ylim(0, int(sys.argv[2]))\n_, top_ylim = plt.ylim()\n# label UH\nfor r in plts[-1]:\n h = r.get_height()\n if h > top_ylim / 2:\n print(\"h {} over limit\".format(h))\n plt.text(r.get_x() + 
r.get_width() / 2., top_ylim *.8, \"{:.2e}\".format(h) , ha=\"center\", va=\"center\", color=\"white\",fontsize=10, fontweight=\"bold\")\n\nplt.xticks(ind, x_labels)\nplt.legend(data_types)\n# print(indir)\nexp_name = indir[:-1].split('/')[-1]\n# print(exp_name)\nplt.savefig('energy_' + exp_name + '.png')\n# plt.savefig('out.png')\n", "id": "43133", "language": "Python", "matching_score": 0.9997107982635498, "max_stars_count": 2, "path": "fibertree/codec/plot-energy.py" }, { "content": "\"\"\"Image Utilities Module\"\"\"\n\nimport logging\nimport os\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.image_utils')\n\n\nclass ImageUtils():\n \"\"\"ImageUtils\n\n A utility class for supporting graphics for multiple drawing\n classes. A number of global attributes are class variables of this\n class.\n\n \"\"\"\n\n hl_colors = [\"goldenrod\",\n \"#efcf62\", # worker 0 - yellow\n \"#85b5c9\", # worker 2 - aqua\n \"#dd7820\", # worker 1 - orange\n \"#90bf89\", # worker x - light green\n \"#daa520\", # worker 3 - goldenrod\n \"#91a9f1\", # worker 4 - light blue\n \"#ea1f33\", # worker 4 e\n \"#ae8319\", # worker 5 b\n \"#e8c15f\"] # worker 6 g\n \"\"\"A pre-defined set of colors for highlighting workers.\"\"\"\n #\n # Next color to allocate\n #\n hl_next = 0\n \"\"\"The index of the next color to assign as a highlight.\"\"\"\n \n #\n # Map of worker names to colors\n #\n hl_map = {}\n \"\"\"A hash map of worker names (spacestamps) to colors.\"\"\"\n\n\n def __init__(self):\n \"\"\"__init__ \"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.image_utils')\n\n \n @staticmethod\n def setColor(worker, color):\n \"\"\"Set color for a worker.\n\n Parameters\n ----------\n\n worker: hashable value\n Name of a worker (spacestamp)\n\n color: Pillow color\n Color to associate with `worker`\n\n \"\"\"\n\n hl_map = ImageUtils.hl_map\n\n if worker in hl_map:\n print(f\"WARNING: {worker} already has a color - OVERWRITING!\")\n\n hl_map[worker] = color\n\n\n @staticmethod\n def getColor(worker):\n \"\"\" Get color associated with a worker.\n\n If no color is currently assigned to `worker`, then assign one\n round-robin from `hl_colors`.\n\n Parameters\n ----------\n\n worker: hashable value\n Name of a worker (spacestamp)\n\n\n \"\"\"\n\n hl_map = ImageUtils.hl_map\n\n if worker in hl_map:\n return hl_map[worker]\n\n #\n # Allocate next color\n #\n hl_next = ImageUtils.hl_next\n hl_colors = ImageUtils.hl_colors\n\n color = hl_colors[hl_next]\n hl_map[worker] = color\n\n ImageUtils.hl_next = (hl_next + 1) % len(hl_colors)\n\n return color\n\n\n @staticmethod\n def resetColors():\n \"\"\"Clear all worker colors.\"\"\"\n\n ImageUtils.hl_next = 0\n ImageUtils.hl_map = {}\n\n\n @staticmethod\n def getFont():\n \"\"\"Get a font for use in images.\n\n Get a standard font for various image classes to use. 
First\n looks for a file as specified by environment variable\n \"FIBERTREE_FONT\", then at a well-known location.\n\n To set the environment variable in Python try the following:\n\n import os\n os.environ['FIBERTREE_FONT'] = 'Pillow/Tests/fonts/FreeMono.ttf'\n\n\n TBD: Make more robust for use on different systems\n\n \"\"\"\n\n font_file = os.getenv('FIBERTREE_FONT')\n\n if font_file is None:\n font_file = 'Pillow/Tests/fonts/FreeMono.ttf'\n\n try:\n font = ImageFont.truetype(font_file, 20)\n return font\n except Exception as e:\n print(f\"Could not find font file: {font_file}\")\n raise e\n", "id": "6136172", "language": "Python", "matching_score": 2.1516318321228027, "max_stars_count": 2, "path": "fibertree/graphics/image_utils.py" }, { "content": "\"\"\"Tensor Image Module\"\"\"\n\nimport logging\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom fibertree import Tensor\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\nfrom .highlights import HighlightManager\n\nfrom .tree_image import TreeImage\nfrom .uncompressed_image import UncompressedImage\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.tensor_image')\n\n\nclass TensorImage():\n \"\"\"TensorImage\n\n Class to create images of a tensor or fiber. Basically a\n trampoline to the desired style, except when multiple images need\n to be combined.\n\n Constructor\n -----------\n\n Create an image corresponding the a given tensor or fiber in style\n \"style\". Optionally highlight elements of the tensor/fiber\n\n\n Parameters\n ----------\n object: tensor or fiber\n A tensor or fiber object to draw\n\n highlights: dictionary or list or tuple\n A dictionary of \"workers\" each with list of points to highlight\n list is a list of point tuples to highlight (assumes one \"worker\")\n tuple is a single point to highlight (assumes one \"worker\")\n\n style: string or list\n String containing \"tree\", \"uncompressed\" or\n \"tree+uncompressed\" indicating the style of the image to create\n\n extent: tuple\n Maximum row/col to use for image\n\n **kwargs: keyword arguments\n Additional keyword arguments to pass on to the desired style\n\n \"\"\"\n\n def __init__(self, object, *args, highlights={}, style='tree', **kwargs):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.tensor_image')\n\n\n highlights = HighlightManager.canonicalizeHighlights(highlights)\n\n #\n # Conditionally unwrap Payload objects\n #\n object = Payload.get(object)\n\n #\n # Create the subimages\n #\n if \"tree\" in style:\n im1 = TreeImage(object, *args, highlights=highlights, **kwargs).im\n\n if \"uncompressed\" in style:\n im2 = UncompressedImage(object, *args, highlights=highlights, **kwargs).im\n\n #\n # Create the final image \n #\n # TBD: Allow style to be a list\n #\n if style == \"tree\":\n self.im = im1\n elif style == \"uncompressed\":\n self.im = im2\n elif style == \"tree+uncompressed\":\n color=\"wheat\"\n im = Image.new('RGB', (max(im1.width, im2.width), im1.height + im2.height), color)\n\n diff = im1.width - im2.width\n\n if diff > 0:\n # im1 is bigger\n im1_xoffset = 0\n im2_xoffset = diff//2\n else:\n # im2 is bigger\n im1_xoffset = -diff//2\n im2_xoffset = 0\n\n im.paste(im1, (im1_xoffset, 0))\n im.paste(im2, (im2_xoffset, im1.height))\n\n self.im = im\n else:\n print(f\"TensorImage: Unsupported image style - {style}\")\n\n\n def show(self):\n \"\"\"Display the image\n\n Parameters\n ----------\n None\n\n \"\"\"\n\n self.im.show()\n\n\nif __name__ == 
\"__main__\":\n\n a = Tensor.fromYAMLfile(\"../../examples/data/draw-a.yaml\")\n a.print()\n i = TensorImage(a)\n i.show()\n", "id": "11918014", "language": "Python", "matching_score": 2.6210954189300537, "max_stars_count": 2, "path": "fibertree/graphics/tensor_image.py" }, { "content": "\"\"\"Movie Canvas Module\"\"\"\n\nimport logging\nimport numpy\nimport cv2\nimport copy\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom tqdm.notebook import tqdm\n\nfrom fibertree import Tensor\nfrom fibertree import TensorImage\nfrom fibertree import UncompressedImage\nfrom fibertree import Fiber\nfrom fibertree import Payload\n\n#\n# Set up logging\n#\nmodule_logger = logging.getLogger('fibertree.graphics.movie_canvas')\n\n\nclass MovieCanvas():\n \"\"\"MovieCanvas\n\n A class to create a movie of activity in a set of tensors. This\n class is used by the `TensorCanvas` class as one of the ways it\n can display activity. Various ways of displaying the tenor (e.g.,\n `TreeImage` and `UncompressedImage`) are supported.\n\n\n Constructor\n -----------\n\n Parameters\n ----------\n tensors: list\n A list of tensors or fibers objects to track\n\n style: string (default: 'tree')\n Display style ('tree', 'uncompressed', 'tree+uncompressed')\n\n progress: Boolean (default: True)\n Enable tqdm style progress bar on movie creation\n\n \"\"\"\n\n def __init__(self, *tensors, style='tree', progress=True):\n \"\"\"__init__\"\"\"\n\n #\n # Set up logging\n #\n self.logger = logging.getLogger('fibertree.graphics.movie_canvas')\n\n #\n # Set image type\n #\n self.style = style\n\n #\n # Set tqdm control\n #\n self.use_tqdm = progress\n\n #\n # Set up tensor class variables\n #\n # Note: We conditionally unwrap Payload objects\n #\n self.tensors = []\n self.image_list_per_tensor = []\n for tensor in tensors:\n self.tensors.append(Payload.get(tensor))\n self.image_list_per_tensor.append([])\n\n #\n # Font to use for text\n #\n self.font = ImageFont.truetype('Pillow/Tests/fonts/DejaVuSans.ttf', 16)\n\n #\n # Add an initial frame with nothing highlighted (it looks good)\n #\n self.addFrame()\n\n\n def addFrame(self, *highlighted_coords_per_tensor):\n \"\"\"Add a frame to the movie\n\n Parameters\n ----------\n\n highlighted_coords_per_tensor: list of highlights\n Highlights to add to the registered tensors\n\n \"\"\"\n\n #\n # Handle the case where nothing should be highlighted anywhere.\n #\n if not highlighted_coords_per_tensor:\n final_coords = [[] for n in range(len(self.tensors))]\n else:\n final_coords = highlighted_coords_per_tensor\n\n assert len(final_coords) == len(self.tensors)\n\n for n in range(len(self.tensors)):\n tensor = self.tensors[n]\n highlighted_coords = final_coords[n]\n im = TensorImage(tensor,\n style=self.style,\n highlights=highlighted_coords).im\n\n self.image_list_per_tensor[n].append(im)\n\n\n def getLastFrame(self, message=None):\n \"\"\"Get the final frame\n\n Get an image of the final frame. 
This method also adds a final\n frame with nothing highlighted, because it looks better\n\n Parameters\n ---------\n\n message: string, default=None\n A message to add to the image\n\n Returns\n -------\n final_frame: image\n An image of the final frame\n\n \"\"\"\n\n self.addFrame()\n\n end = len(self.image_list_per_tensor[0])\n (final_images, final_width, final_height) = self._combineFrames(end-1, end)\n\n if message is None:\n return final_images[-1]\n\n #\n # Add message to final image\n #\n im = final_images[-1].copy()\n\n ImageDraw.Draw(im).text((15, final_height-65),\n message,\n font=self.font,\n fill=\"black\")\n\n return im\n\n\n def saveMovie(self, filename=None):\n \"\"\"Save the movie to a file\n\n Parameters\n ----------\n\n filename: string, default=None\n Name of a file to save the movie\n\n \"\"\"\n\n end = len(self.image_list_per_tensor[0])\n (final_images, final_width, final_height) = self._combineFrames(0, end)\n\n fourcc = cv2.VideoWriter_fourcc(*\"vp09\")\n out = cv2.VideoWriter(filename, fourcc, 1, (final_width, final_height))\n\n for image in self._tqdm(final_images):\n for duplication_cnt in range(1):\n out.write(cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR))\n\n out.release()\n\n#\n# Internal utility functions\n#\n def _combineFrames(self, start, end):\n\n (final_width, final_height, flattened_height) = self._finalize()\n #\n # Create empty frames for pasting\n #\n final_images = []\n for n in range(start, end):\n final_images.append(Image.new(\"RGB\",\n (final_width, final_height),\n \"wheat\"))\n\n #\n # Dump individual frames into the same image so they stay in sync.\n #\n for n in range(start, end):\n for t in range(len(self.tensors)):\n image = self.image_list_per_tensor[t][n]\n\n x_center = final_width // 2 - (image.width // 2)\n # Start where the last image finished.\n y_final = 0 if t == 0 else flattened_height[t-1]\n\n final_images[n-start].paste(image, (x_center, y_final))\n\n #\n # Add cycle information to the images\n # (skipping extra frames at beginning and end)\n #\n for n, im in enumerate(final_images[1:]):\n message = f\"Cycle: {n}\"\n\n ImageDraw.Draw(im).text((15, final_height-80),\n message,\n font=self.font,\n fill=\"black\")\n\n return (final_images, final_width, final_height)\n\n\n def _finalize(self):\n \"\"\"_finalize\"\"\"\n\n #\n # Set all images to the max canvas size to ensure smooth animations\n #\n\n final_dims = []\n for n in range(len(self.tensors)):\n max_width = 0\n max_height = 0\n for image in self.image_list_per_tensor[n]:\n max_height = image.height if (image.height > max_height) else max_height\n max_width = image.width if (image.width > max_width) else max_width\n final_dims.append((max_width, max_height))\n\n #\n # Take max of width, but concatenate height\n #\n final_width = 0\n final_height = 0\n flattened_height = []\n\n for w, h in final_dims:\n final_width = w if w > final_width else final_width\n final_height = final_height + h\n flattened_height.append(final_height)\n\n #\n # Add a little padding at the bottom for when the controls are visible.\n #\n final_height = final_height + 75\n\n return (final_width, final_height, flattened_height)\n\n#\n# Tqdm-related methods\n#\n# TBD: Move to some more central location\n#\n\n def _tqdm(self, iterable):\n \"\"\"\n _tqdm\n\n Conditional tqdm based on wheter we are in a notebook\n\n \"\"\"\n\n if self.use_tqdm and MovieCanvas._in_ipynb():\n return tqdm(iterable)\n else:\n return iterable\n\n\n @staticmethod\n def _in_ipynb():\n \"\"\"\n _in_ipynb\n\n Are we in an 
IPython notebook?\n\n \"\"\"\n\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True\n else:\n return False\n except NameError:\n return False\n\n\n\nif __name__ == \"__main__\":\n\n a = Tensor.fromYAMLfile(\"../../examples/data/draw-a.yaml\")\n b = Tensor.fromYAMLfile(\"../../examples/data/draw-b.yaml\")\n canvas = MovieCanvas(a, b)\n canvas.addFrame()\n canvas.addFrame([10], [4])\n canvas.addFrame([10, 40], [4, 1])\n canvas.addFrame([10, 40, 1], [4, 1, 0])\n canvas.addFrame()\n canvas.saveMovie(\"/tmp/tmp.mp4\")\n print(\"Try playing /tmp/tmp.mp4\")\n", "id": "9587053", "language": "Python", "matching_score": 3.22212290763855, "max_stars_count": 2, "path": "fibertree/graphics/movie_canvas.py" }, { "content": "\"\"\"Fibertree Display Module\"\"\"\n\n#\n# Import standard libraries\n#\nimport string\nimport random\nimport tempfile\n\nfrom pathlib import Path\n\n#\n# Import display classes/utilities\n#\nfrom IPython.display import display # to display images\nfrom IPython.display import Image\nfrom IPython.display import HTML\nfrom IPython.display import Javascript\nfrom IPython.display import Video\n\n#\n# Try to import ipywidgets\n#\nhave_ipywidgets = True\ntry:\n import ipywidgets as widgets\n from ipywidgets import interact, interactive, fixed, interact_manual\nexcept ImportError:\n have_ipywidgets = False\n\n#\n# Import matplotlib\n#\ntry:\n import matplotlib.pyplot as plt\n from matplotlib.pyplot import imshow\n from matplotlib import rc\nexcept ImportError:\n print(\"Library matplotlib not available\")\n\n#\n# Try to import networkx\n#\nhave_networkx = True\ntry:\n import networkx as nx\nexcept ImportError:\n have_networkx = False\n\n#\n# Import fibertree libraries\n#\nfrom fibertree import TensorImage, TensorCanvas\n\n\nclass TensorDisplay():\n \"\"\" FibertreeDisplay \"\"\"\n\n def __init__(self, style=None, animation=None, have_ipywidgets=False):\n \"\"\" __init__ \"\"\"\n\n self.have_ipywidgets = have_ipywidgets\n\n self.style = 'tree'\n self.animation = 'none'\n\n self.setupWidgets()\n\n if style is not None:\n self.setStyle(style)\n\n if animation is not None:\n self.setAnimation(animation)\n\n self.rand = random.Random()\n\n #\n # Create tmp directory for movies\n #\n tmpdir = Path(\"tmp\")\n tmpdir.mkdir(mode=0o755, exist_ok=True)\n self.tmpdir = tmpdir\n\n\n #\n # Display control settings\n #\n def setStyle(self, style='tree', sync=True):\n \"\"\" setStyle \"\"\"\n\n if style not in ['tree', 'uncompressed', 'tree+uncompressed']:\n print(\"Unsuppored display style\")\n return\n\n self.style = style\n\n if sync:\n self.syncWidgets()\n\n def setAnimation(self, animation='none', sync=True):\n \"\"\" setAnimation \"\"\"\n\n self.animation = animation\n\n if sync:\n self.syncWidgets()\n\n\n #\n # Display actions\n #\n def displayTensor(self, tensor, highlights=[], **kwargs):\n \"\"\" displayTensor \"\"\"\n\n im = TensorImage(tensor, style=self.style, highlights=highlights, **kwargs).im\n\n display(im)\n\n\n def createCanvas(self, *tensors, **kwargs):\n \"\"\" createCanvas \"\"\"\n\n return TensorCanvas(*tensors, animation=self.animation, style=self.style, **kwargs)\n\n\n def displayCanvas(self, canvas, filename=None, width=\"100%\", loop=True, autoplay=True, controls=True, center=False):\n \"\"\" displayCanvas \"\"\"\n\n if canvas is None:\n return None\n\n if self.animation == 'none':\n #\n # Just create a frame from the last state and display it\n #\n AnimationDisabledError = \"Note: Canvas animation has been disabled - showing final 
frame\"\n\n im = canvas.getLastFrame(AnimationDisabledError)\n display(im)\n return\n\n if self.animation == 'spacetime':\n #\n # Get the spacetime diagrams\n #\n\n print(\"Spacetime\")\n\n for image in canvas.getLastFrame():\n display(image)\n\n return\n\n if filename is None:\n basename = Path(self._random_string(10)+\".mp4\")\n filename = self.tmpdir / basename\n\n posix_filename = filename.as_posix()\n canvas.saveMovie(posix_filename)\n\n # TBD: Actually pay attention to width and centering\n final_width = \"\" if width is None else \" width=\\\"{0}\\\"\".format(width)\n final_center = \"\" if not center else \" style=\\\"display:block; margin: 0 auto;\\\"\"\n\n final_loop = \"\" if not loop else \" loop\"\n final_autoplay = \"\" if not autoplay else \" autoplay\"\n final_controls = \"\" if not controls else \" controls\"\n\n final_attributes = f\"{final_loop}{final_autoplay}{final_controls}\"\n\n video = Video(f\"./{posix_filename}\", html_attributes=final_attributes, width=800)\n display(video)\n\n\n\n def _random_string(self, length):\n return ''.join(self.rand.choice(string.ascii_letters) for m in range(length))\n\n\n def displayGraph(self, am_s):\n \"\"\" displayGraph \"\"\"\n\n if not have_networkx:\n print(\"Library networkx not available\")\n return\n\n gr = nx.DiGraph()\n\n for (s, am_d) in am_s:\n gr.add_node(s)\n for (d, _) in am_d:\n gr.add_edge(s, d)\n\n pos = nx.spring_layout(gr)\n nx.draw(gr, pos, node_size=500, with_labels=True)\n plt.show()\n\n #\n # Widget control\n #\n def setupWidgets(self):\n \"\"\" setupWidgets \"\"\"\n\n if have_ipywidgets:\n self.w = interactive(self.updateWidgets,\n style=['tree', 'uncompressed', 'tree+uncompressed'],\n animation=['none', 'movie', 'spacetime'])\n\n display(self.w)\n else:\n print(\"Warning: ipywidgets not available - set attributes manually by typing:\")\n print(\"\")\n print(\"FTD.setStyle('uncompressed') # Show tensor as a uncompressed\")\n print(\"FTD.setStyle('tree') # Show tensor as a fiber tree\")\n print(\"FTD.setStyle('tree+uncompressed') # Show tensor in both styles\")\n print(\"\")\n print(\"FTD.setAnimation('none') # Turn off animations\")\n print(\"FTD.setAnimation('movie') # Turn on movie animation\")\n print(\"FTD.setAnimation('spacetime') # Turn on spacetime animation\")\n print(\"\")\n \n\n def updateWidgets(self, style='tree', animation='none'):\n \"\"\" setup \"\"\"\n\n #\n # Set attributes (but do not recurse back and sync widgets)\n #\n self.setStyle(style=style, sync=False)\n self.setAnimation(animation=animation, sync=False)\n\n def syncWidgets(self):\n \"\"\" syncWidgets \"\"\"\n\n style = self.style\n animation = self.animation\n\n if self.have_ipywidgets:\n self.w.children[0].value = style\n self.w.children[1].value = animation\n else:\n print(f\"Style: {style}\")\n print(f\"Animation: {animation}\")\n print(\"\")\n\n", "id": "10332415", "language": "Python", "matching_score": 3.2654247283935547, "max_stars_count": 2, "path": "fibertree/notebook/tensor_display.py" }, { "content": "\"\"\" Ipython Notebook Utilities \"\"\"\n\n#\n# Import standard libraries\n#\nimport logging\nfrom pathlib import Path\n\n#\n# Import display classes/utilities\n#\nfrom IPython.display import display # to display images\nfrom IPython.display import Image\nfrom IPython.display import HTML\nfrom IPython.display import Javascript\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\n#\n# Import fibertree libraries\n#\nfrom fibertree import Tensor\n\n\nclass NotebookUtils():\n\n 
def __init__(self):\n\n #\n # Debugging variables\n #\n logging.basicConfig(format='%(asctime)s %(message)s')\n\n self.logger = logging.getLogger(\"fibertree.notebook\")\n\n self.types = { \"Debug\": logging.DEBUG,\n \"Info\": logging.INFO,\n \"Warning\": logging.WARNING,\n \"Error\": logging.ERROR,\n \"Critical\": logging.CRITICAL}\n\n self.modules = { 'Notebook': \"fibertree.notebook\",\n \"Tensor\": \"fibertree.core.tensor\",\n \"Rank\": \"fibertree.core.rank\",\n \"Fiber\": \"fibertree.core.fiber\",\n \"Coord_Payload\": \"fibertree.core.coord_payload\",\n \"Payload\": \"fibertree.core.payload\",\n \"Tensor_canvas\": \"fibertree.graphics.tensor_canvas\",\n \"Movie_canvas\": \"fibertree.graphics.movie_canvas\",\n \"Spacetime_canvas\": \"fibertree.graphics.spacetime_canvas\",\n \"Tensor_image\": \"fibertree.graphics.tensor_image\",\n \"Tree_image\": \"fibertree.graphics.tree_image\",\n \"Uncompressed_image\": \"fibertree.graphics.uncompressed_image\",\n \"Highlights\": \"fibertree.graphics.highlights\",\n \"Image_utils\": \"fibertree.graphics.image_utils\"}\n\n\n self.levels = {}\n\n #\n # Functions for a \"run_all\" button\n #\n @staticmethod\n def run_all_below(ev):\n \"\"\" run_all_below \"\"\"\n\n display(Javascript('IPython.notebook.select_next()'))\n display(Javascript('IPython.notebook.execute_cells_below()'))\n\n\n def createRunallButton(self):\n \"\"\" createRunallButton \"\"\"\n\n button = widgets.Button(description=\"Run all cells below\")\n button.on_click(self.run_all_below)\n display(button)\n\n\n #\n # Logging functions\n #\n def getLogger(self):\n \"\"\" getLogger \"\"\"\n\n return self.logger\n\n\n def showLogging(self, **kwargs):\n \"\"\" showLogging \"\"\"\n\n for m_name, m_level in kwargs.items():\n self.levels[m_name] = m_level\n\n controls = {}\n\n style = {'description_width': 'initial'}\n\n for m_name in self.modules.keys():\n controls[m_name] = widgets.Dropdown(options=[*self.types],\n value=self.levels.get(m_name,'Warning'),\n description=m_name,\n style=style,\n disabled=False)\n\n\n print(\"\")\n print(\"Debugging level\")\n display(interactive(self._set_debug, **controls))\n print(\"\")\n\n return\n\n\n def _set_debug(self, **kwargs):\n \"\"\" _set_debug \"\"\"\n\n for m_name, m_level in kwargs.items():\n print(m_name, m_level, self.modules[m_name], self.types[m_level])\n logger = logging.getLogger(self.modules[m_name])\n logger.setLevel(self.types[m_level])\n\n return\n\n\n#\n# Functions for use in the IPython notebooks\n#\n\n#\n# Function for a Boolean enable dropdown\n#\n# TBD: Deprecate use of this method\n#\n\nenable = {}\n\ndef createEnableControl(name, choices=None):\n \"\"\" createEnableControl\n\n Create a widget with a dropdown box for setting\n the variable \"enable[name]\" with \"choices\".\n\n \"\"\"\n\n def set_enable(**kwargs):\n\n global enable\n\n for key, value in kwargs.items():\n enable[key] = value\n\n if choices is None:\n choices = [False, True]\n\n kwargs = {name: choices}\n\n w = interactive(set_enable,\n **kwargs)\n\n display(w)\n\n\n#\n# Helper function for locating the data directory\n#\n# TBD: Deprecate use of this method\n#\ndata_dir = Path(\"../../data\")\n\ndef datafileName(filename):\n\n return data_dir / filename\n\n\n#\n# Deprecated canvas functions\n#\ndef addFrame(canvas, *args, **kwargs):\n \"\"\" addFrame \"\"\"\n\n msg = \"The method addFrame() is deprecated. 
Use canvas.AddFrame()\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n canvas.addFrame(*args, **kwargs)\n\n\ndef addActivity(canvas, *args, **kwargs):\n \"\"\" addActivity \"\"\"\n\n msg = \"The method addActivity() is deprecated. Use canvas.AddActivity()\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n canvas.addActivity(*args, **kwargs)\n\n", "id": "1299399", "language": "Python", "matching_score": 3.0726425647735596, "max_stars_count": 2, "path": "fibertree/notebook/notebook_utils.py" }, { "content": "\nfrom .core.tensor import *\nfrom .core.rank import *\nfrom .core.fiber import *\nfrom .core.coord_payload import *\nfrom .core.payload import *\n\nfrom .codec.tensor_codec import *\nfrom .codec.compression_types import *\n\nfrom .graphics.image_utils import *\nfrom .graphics.highlights import *\nfrom .graphics.aahr import *\n\nfrom .graphics.tensor_image import *\nfrom .graphics.tree_image import *\nfrom .graphics.uncompressed_image import *\n\nfrom .graphics.tensor_canvas import *\nfrom .graphics.movie_canvas import *\nfrom .graphics.spacetime_canvas import *\n\nfrom .notebook.notebook_utils import *\nfrom .notebook.tensor_maker import *\nfrom .notebook.tensor_display import *\n\nfrom collections import namedtuple\n\n", "id": "6882993", "language": "Python", "matching_score": 0.621591329574585, "max_stars_count": 2, "path": "fibertree/__init__.py" }, { "content": "from setuptools import setup\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\nwith open(\"requirements.txt\", \"r\") as fh:\n requirements = fh.readlines()\n\n\nsetup(name='fiber-tree',\n version='0.1',\n description='Fibertree style tensor simulator',\n long_description=readme(),\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.0',\n 'Topic :: Scientific/Engineering',\n ],\n keywords='tensors',\n url='https://github.com/Fibertree-Project/fibertree',\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n packages=['fibertree',\n 'fibertree.core',\n 'fibertree.graphics',\n 'fibertree.notebook',\n 'fibertree.codec',\n 'fibertree.codec.formats'],\n install_requires=[req for req in requirements if req[:2] != \"# \"],\n include_package_data=True,\n zip_safe=False)\n", "id": "7580288", "language": "Python", "matching_score": 1.2143845558166504, "max_stars_count": 2, "path": "setup.py" }, { "content": "from fibertree import Fiber\n\na = Fiber( [0, 1, 10, 20 ], [ 1, 2, 11, 21 ])\n\nap = a.project(lambda c: c + 1)\n\na.print(\"a\\n\")\nap.print(\"ap\\n\")\n\n\n", "id": "11580477", "language": "Python", "matching_score": 0.3010729253292084, "max_stars_count": 2, "path": "examples/scripts/methods/project.py" }, { "content": "from fibertree import Fiber\n\nab = Fiber( [0, 1, 10, 20 ], [ (0, 1), (1, 2), (10, 11), (20, 21) ])\n\n(a, b) = ab.unzip()\n\nab.print(\"ab\\n\")\na.print(\"a\\n\")\nb.print(\"b\\n\")\n\n\n", "id": "12689078", "language": "Python", "matching_score": 0.41183269023895264, "max_stars_count": 2, "path": "examples/scripts/methods/unzip.py" } ]
2.728215
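A minimal, hypothetical usage sketch for the fibertree notebook front-end contained in the row above (tensor_display.py together with the canvas and image modules). It is not part of the dataset row: the Jupyter environment, the single-tensor canvas, and the example YAML path are assumptions, and TensorCanvas.addFrame is assumed to mirror MovieCanvas.addFrame.

from fibertree import Tensor, TensorDisplay

# Front-end object bundling the display style and animation controls.
FTD = TensorDisplay(style="tree", animation="movie")

# Assumed example data file; the row itself only uses relative paths such as
# "../../examples/data/draw-a.yaml".
a = Tensor.fromYAMLfile("examples/data/draw-a.yaml")

FTD.displayTensor(a)           # one-off image rendered via TensorImage
canvas = FTD.createCanvas(a)   # TensorCanvas honouring the chosen style/animation
canvas.addFrame([10])          # highlight coordinate 10 in the registered tensor
FTD.displayCanvas(canvas)      # with animation="movie", saves ./tmp/<random>.mp4 and embeds it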
nessvm
[ { "content": "from django.db.models import Model, CharField, ForeignKey, ManyToManyField\n\n\nclass Moron(Model):\n name = CharField(max_length='100')\n\n\nclass Idiot(Model):\n name = CharField(max_length='100')\n\n\nclass Dummy(Model):\n name = CharField(max_length='100')\n moron = ForeignKey('Moron')\n idiots = ManyToManyField('Idiot')\n\nclass Simple(Model):\n name = CharField(max_length='100')\n", "id": "10730791", "language": "Python", "matching_score": 1.4509514570236206, "max_stars_count": 8, "path": "testapp/models.py" }, { "content": "from six.moves.urllib.parse import urljoin\n\nimport django\nif django.VERSION[0] == 1 and django.VERSION[1] == 3:\n from django.conf.urls.defaults import patterns, include\nelse:\n from django.conf.urls import patterns, include\n\nfrom django.test import TestCase\n\nfrom collection_json import Collection\nfrom rest_framework import status\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.relations import HyperlinkedIdentityField\nfrom rest_framework.response import Response\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.serializers import (\n HyperlinkedModelSerializer, ModelSerializer\n)\nfrom rest_framework.status import HTTP_204_NO_CONTENT\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\n\nfrom rest_framework_cj.renderers import CollectionJsonRenderer\nfrom rest_framework_cj.fields import LinkField\n\nfrom testapp.models import Dummy, Idiot, Moron, Simple\n\n\nclass MoronHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n class Meta(object):\n model = Moron\n fields = ('url', 'name')\n\n\nclass MoronReadOnlyModelViewSet(ReadOnlyModelViewSet):\n renderer_classes = (CollectionJsonRenderer, )\n queryset = Moron.objects.all()\n serializer_class = MoronHyperlinkedModelSerializer\n\n\nclass IdiotHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n class Meta(object):\n model = Idiot\n fields = ('url', 'name')\n\n\nclass IdiotReadOnlyModelViewSet(ReadOnlyModelViewSet):\n renderer_classes = (CollectionJsonRenderer, )\n queryset = Idiot.objects.all()\n serializer_class = IdiotHyperlinkedModelSerializer\n\n\nclass DummyHyperlinkedModelSerializer(HyperlinkedModelSerializer):\n other_stuff = LinkField('get_other_link')\n empty_link = LinkField('get_empty_link')\n some_link = HyperlinkedIdentityField(view_name='moron-detail')\n\n class Meta(object):\n model = Dummy\n fields = ('url', 'name', 'moron', 'idiots', 'other_stuff', 'some_link', 'empty_link')\n\n def get_other_link(self, obj):\n return 'http://other-stuff.com/'\n\n def get_empty_link(self, obj):\n return None\n\n\nclass DummyReadOnlyModelViewSet(ReadOnlyModelViewSet):\n renderer_classes = (CollectionJsonRenderer, )\n queryset = Dummy.objects.all()\n serializer_class = DummyHyperlinkedModelSerializer\n\n\nclass NoSerializerView(APIView):\n renderer_classes = (CollectionJsonRenderer, )\n\n def get(self, request):\n return Response({'foo': '1'})\n\n\nclass SimpleGetTest(TestCase):\n urls = 'testapp.tests.test_renderers'\n endpoint = ''\n\n def setUp(self):\n self.response = self.client.get(self.endpoint)\n self.collection = Collection.from_json(self.response.content.decode('utf8'))\n\n\ndef create_models():\n bob = Moron.objects.create(name='<NAME>')\n dummy = Dummy.objects.create(name='<NAME>', moron=bob)\n dummy.idiots.add(Idiot.objects.create(name='frick'))\n dummy.idiots.add(Idiot.objects.create(name='frack'))\n\n\nclass TestCollectionJsonRenderer(SimpleGetTest):\n endpoint = 
'/rest-api/dummy/'\n\n def setUp(self):\n create_models()\n super(TestCollectionJsonRenderer, self).setUp()\n\n def test_it_has_the_right_response_code(self):\n self.assertEqual(self.response.status_code, status.HTTP_200_OK)\n\n def test_it_has_the_right_content_type(self):\n content_type = self.response['Content-Type']\n self.assertEqual(content_type, 'application/vnd.collection+json')\n\n def test_it_has_the_version_number(self):\n self.assertEqual(self.collection.version, '1.0')\n\n def test_it_has_an_href(self):\n href = self.collection.href\n self.assertEqual(href, 'http://testserver/rest-api/dummy/')\n\n def get_dummy(self):\n return self.collection.items[0]\n\n def test_the_dummy_item_has_an_href(self):\n href = self.get_dummy().href\n self.assertEqual(href, 'http://testserver/rest-api/dummy/1/')\n\n def test_the_dummy_item_contains_name(self):\n name = self.get_dummy().data.find('name')[0].value\n self.assertEqual(name, '<NAME>')\n\n def get_dummy_link(self, rel):\n links = self.get_dummy()['links']\n return next(x for x in links if x['rel'] == rel)\n\n def test_the_dummy_item_links_to_child_elements(self):\n href = self.get_dummy().links.find(rel='moron')[0].href\n self.assertEqual(href, 'http://testserver/rest-api/moron/1/')\n\n def test_link_fields_are_rendered_as_links(self):\n href = self.get_dummy().links.find(rel='other_stuff')[0].href\n self.assertEqual(href, 'http://other-stuff.com/')\n\n def test_empty_link_fields_are_not_rendered_as_links(self):\n links = self.get_dummy().links.find(rel='empty_link')\n self.assertEqual(len(links), 0)\n\n def test_attribute_links_are_rendered_as_links(self):\n href = self.get_dummy().links.find(rel='some_link')[0].href\n self.assertEqual(href, 'http://testserver/rest-api/moron/1/')\n\n def test_many_to_many_relationships_are_rendered_as_links(self):\n idiots = self.get_dummy().links.find(rel='idiots')\n self.assertEqual(idiots[0].href, 'http://testserver/rest-api/idiot/1/')\n self.assertEqual(idiots[1].href, 'http://testserver/rest-api/idiot/2/')\n\n\nclass TestNoSerializerViews(SimpleGetTest):\n endpoint = '/rest-api/no-serializer/'\n\n def setUp(self):\n create_models()\n super(TestNoSerializerViews, self).setUp()\n\n def test_views_without_a_serializer_work(self):\n value = self.collection.items[0].data.find('foo')[0].value\n self.assertEqual(value, '1')\n\n\nclass SimpleModelSerializer(ModelSerializer):\n\n class Meta(object):\n model = Dummy\n fields = ('name', )\n\n\nclass SimpleViewSet(ReadOnlyModelViewSet):\n renderer_classes = (CollectionJsonRenderer, )\n queryset = Simple.objects.all()\n serializer_class = SimpleModelSerializer\n\n\nclass TestNormalModels(SimpleGetTest):\n endpoint = '/rest-api/normal-model/'\n\n def setUp(self):\n Simple.objects.create(name='Foobar Baz')\n super(TestNormalModels, self).setUp()\n\n def test_items_dont_have_a_href(self):\n href_count = len(self.collection.items[0].find(name='href'))\n self.assertEqual(href_count, 0)\n\n\nclass PaginatedDataView(APIView):\n renderer_classes = (CollectionJsonRenderer, )\n\n def get(self, request):\n return Response({\n 'next': 'http://test.com/colleciton/next',\n 'previous': 'http://test.com/colleciton/previous',\n 'results': [{'foo': 1}],\n })\n\n\nclass TestCollectionJsonRendererPagination(SimpleGetTest):\n endpoint = '/rest-api/paginated/'\n\n def test_paginated_views_display_data(self):\n foo = self.collection.items[0].find(name='foo')[0]\n self.assertEqual(foo.value, 1)\n\n def test_paginated_views_display_next(self):\n next_link = 
self.collection.links.find(rel='next')[0]\n self.assertEqual(next_link.href, 'http://test.com/colleciton/next')\n\n def test_paginated_views_display_previous(self):\n next_link = self.collection.links.find(rel='previous')[0]\n self.assertEqual(next_link.href, 'http://test.com/colleciton/previous')\n\n\nclass NonePaginatedDataView(APIView):\n renderer_classes = (CollectionJsonRenderer, )\n\n def get(self, request):\n return Response({\n 'next': None,\n 'previous': None,\n 'results': [{'foo': 1}],\n })\n\n\nclass TestCollectionJsonRendererPaginationWithNone(SimpleGetTest):\n endpoint = '/rest-api/none-paginated/'\n\n def test_paginated_view_does_not_display_next(self):\n self.assertEqual(len(self.collection.links.find(rel='next')), 0)\n\n def test_paginated_view_does_not_display_previous(self):\n self.assertEqual(len(self.collection.links.find(rel='previous')), 0)\n\n\nclass ParseErrorView(APIView):\n renderer_classes = (CollectionJsonRenderer, )\n\n def get(self, request):\n raise ParseError('lol nice one')\n\n\nclass TestErrorHandling(SimpleGetTest):\n endpoint = '/rest-api/parse-error/'\n\n def test_errors_are_reported(self):\n self.assertEqual(self.collection.error.message, 'lol nice one')\n\n\nclass UrlRewriteRenderer(CollectionJsonRenderer):\n def get_href(self, request):\n return urljoin('http://rewritten.com', request.path)\n\n\nclass UrlRewriteView(APIView):\n renderer_classes = (UrlRewriteRenderer, )\n\n def get(self, request):\n return Response({'foo': 'bar'})\n\n\nclass TestUrlRewrite(SimpleGetTest):\n endpoint = '/rest-api/url-rewrite/'\n\n def test_the_href_url_can_be_rewritten(self):\n rewritten_url = \"http://rewritten.com/rest-api/url-rewrite/\"\n self.assertEqual(self.collection.href, rewritten_url)\n\n\nclass EmptyView(APIView):\n renderer_classes = (CollectionJsonRenderer, )\n\n def get(self, request):\n return Response(status=HTTP_204_NO_CONTENT)\n\n\nclass TestEmpty(TestCase):\n urls = 'testapp.tests.test_renderers'\n\n def test_empty_content_works(self):\n response = self.client.get('/rest-api/empty/')\n self.assertEqual(response.status_code, HTTP_204_NO_CONTENT)\n self.assertEqual(response.content.decode('utf8'), '')\n\n\nrouter = DefaultRouter()\nrouter.register('dummy', DummyReadOnlyModelViewSet)\nrouter.register('moron', MoronReadOnlyModelViewSet)\nrouter.register('idiot', IdiotReadOnlyModelViewSet)\nrouter.register('normal-model', SimpleViewSet)\nurlpatterns = patterns(\n '',\n (r'^rest-api/', include(router.urls)),\n (r'^rest-api/no-serializer/', NoSerializerView.as_view()),\n (r'^rest-api/paginated/', PaginatedDataView.as_view()),\n (r'^rest-api/none-paginated/', NonePaginatedDataView.as_view()),\n (r'^rest-api/parse-error/', ParseErrorView.as_view()),\n (r'^rest-api/url-rewrite/', UrlRewriteView.as_view()),\n (r'^rest-api/empty/', EmptyView.as_view()),\n)\n", "id": "12257650", "language": "Python", "matching_score": 2.9015233516693115, "max_stars_count": 8, "path": "testapp/tests/test_renderers.py" }, { "content": "from rest_framework.fields import SerializerMethodField\n\n\nclass LinkField(SerializerMethodField):\n def __init__(self, method_name, *args, **kwargs):\n self.method_name = method_name\n super(LinkField, self).__init__(method_name, *args, **kwargs)\n", "id": "10690466", "language": "Python", "matching_score": 0.4927225410938263, "max_stars_count": 8, "path": "rest_framework_cj/fields.py" }, { "content": "import os\nfrom setuptools import setup, find_packages\n\n\ndef read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return 
f.read()\n\nsetup(\n name='djangorestframework-collection-json',\n version='0.0.1_dev_2',\n description='Collection+JSON support for Django REST Framework',\n long_description=read('README.rst'),\n author='Advisory Board Company',\n author_email='<EMAIL>',\n url='https://github.com/advisory/django-rest-framework-collection-json',\n license='MIT',\n packages=find_packages(exclude=['tests*']),\n install_requires=['djangorestframework'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n include_package_data=True,\n test_suite='runtests.runtests.main',\n)\n", "id": "4883801", "language": "Python", "matching_score": 2.1361083984375, "max_stars_count": 8, "path": "setup.py" }, { "content": "#!/usr/bin/env python\n\"\"\"\nUseful tool to run the test suite and generate a coverage report.\nShamelessly adapted from https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/runtests/runcoverage.py\n\"\"\"\n\nimport os\nimport sys\n\n# fix sys path so we don't need to setup PYTHONPATH\n#sys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'testapp.tests.settings'\n\n\ndef main():\n import django\n if django.VERSION[0] >= 1 and django.VERSION[1] >= 7:\n django.setup()\n\n from django.conf import settings\n from django.test.utils import get_runner\n\n TestRunner = get_runner(settings)\n\n test_runner = TestRunner()\n failures = test_runner.run_tests(['testapp'])\n\n sys.exit(failures)\n\nif __name__ == '__main__':\n main()\n", "id": "292466", "language": "Python", "matching_score": 2.2241928577423096, "max_stars_count": 8, "path": "runtests/runtests.py" }, { "content": "import django\nfrom django.utils import unittest\n\n\nif django.VERSION[0] <= 1 and django.VERSION[1] <= 5:\n def suite():\n return unittest.TestLoader().discover(\"testapp\", pattern=\"test_*.py\")\n", "id": "1939399", "language": "Python", "matching_score": 1.6627219915390015, "max_stars_count": 8, "path": "testapp/tests/__init__.py" } ]
1.899415
Navops
[ { "content": "import os\nfrom json import JSONDecodeError\nfrom locust import HttpUser, SequentialTaskSet, task, between, events\nfrom locust_plugins.appinsights_listener import ApplicationInsights\n\n\nclass UserSequence(SequentialTaskSet):\n\n @task\n def get_sample(self):\n self.client.get(\"/api/QuickFunction\", name=\"Sample GET request\")\n\n # @task\n # def post_sample(self):\n # self.client.post(\"/api/QuickFunction\", json={\"type\":\"loadtest\", \"summary\":\"hi there - locust here\"}, name=\"Sample POST request\")\n\nclass WebsiteUser(HttpUser):\n tasks = [UserSequence]\n #wait_time = between(0.5, 2.5)\n\n# Init logger to ApplicationInsights\n@events.init.add_listener\ndef on_locust_init(environment, **_kwargs):\n ApplicationInsights(env=environment, instrumentation_key=os.environ[\"APPINSIGHTS_INSTRUMENTATIONKEY\"])\n", "id": "9105916", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "src/testing/locustfile.py" } ]
0
nicolunardi
[ { "content": "import os\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\norigins = [\"http://localhost:3000\", \"*\"]\n\n\nclass Settings:\n POSTGRES_USER: str = os.getenv(\"POSTGRES_USER\")\n POSTGRES_PASSWORD = os.getenv(\"POSTGRES_PASSWORD\")\n POSTGRES_SERVER: str = os.getenv(\"POSTGRES_SERVER\", \"localhost\")\n POSTGRES_PORT: str = os.getenv(\"POSTGRES_PORT\", \"5432\")\n POSTGRES_DB: str = os.getenv(\"POSTGRES_DB\")\n DATABASE_URL = f\"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_SERVER}:{POSTGRES_PORT}/{POSTGRES_DB}\"\n\n\nsettings = Settings()\n", "id": "4582754", "language": "Python", "matching_score": 0.8320226073265076, "max_stars_count": 0, "path": "app/config/settings.py" }, { "content": "import os\nfrom dotenv import load_dotenv, find_dotenv\nfrom fastapi import Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom jose import JWTError, jwt\nfrom fastapi.security import OAuth2PasswordBearer\nfrom app.dependencies.authentication import get_user_by_email\nfrom app.config.database import get_db\nfrom app.schemas.users import User\n\nfrom app.schemas.tokens import TokenPayload\nfrom app.errors.exceptions import CREDENTIALS_EXCEPTION\n\n\nload_dotenv(find_dotenv())\n\nJWT_SECRET_KEY: str = os.environ.get(\"JWT_SECRET_KEY\") or os.getenv(\n \"JWT_SECRET_KEY\"\n)\nJWT_ALGORITHM: str = os.environ.get(\"JWT_ALGORITHM\") or os.getenv(\n \"JWT_ALGORITHM\"\n)\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"/user/auth/login\")\n\n\ndef create_access_token(data: dict):\n to_encode = data.copy()\n token = jwt.encode(to_encode, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM)\n return token\n\n\nasync def get_current_user(\n token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)\n):\n try:\n # get the teh user data from the token\n payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=[JWT_ALGORITHM])\n # if the email doesnt exist reject the request\n if payload.get(\"email\") is None:\n raise CREDENTIALS_EXCEPTION\n # destructure the payload\n token_payload = TokenPayload(**payload)\n except JWTError:\n raise CREDENTIALS_EXCEPTION\n\n # get the user from the DB to ensure the one from the token is valid\n user = get_user_by_email(db, token_payload.email)\n if user is None:\n raise CREDENTIALS_EXCEPTION\n return User(email=user.email, id=user.id, name=user.name)\n\n\ndef fake_token(token: str):\n return TokenPayload(name=\"john\", email=\"<EMAIL>\", id=4)\n", "id": "11794103", "language": "Python", "matching_score": 3.0292246341705322, "max_stars_count": 0, "path": "app/dependencies/JWTtokens.py" }, { "content": "from fastapi import APIRouter, Depends, status\nfrom fastapi.security import OAuth2PasswordRequestForm\nfrom sqlalchemy.orm import Session\nfrom app.controllers.authControllers import login_user, register_user\nfrom app.schemas.users import UserCreate\nfrom app.schemas.tokens import Token\nfrom app.config.database import get_db\n\n\nrouter = APIRouter()\n\n\n@router.post(\n \"/register\",\n status_code=status.HTTP_201_CREATED,\n response_model=Token,\n tags=[\"User\"],\n)\nasync def register(user: UserCreate, db: Session = Depends(get_db)):\n return register_user(db, user)\n\n\n@router.post(\n \"/login\",\n status_code=status.HTTP_200_OK,\n response_model=Token,\n tags=[\"User\"],\n)\nasync def login(\n form_data: OAuth2PasswordRequestForm = Depends(),\n db: Session = Depends(get_db),\n):\n return login_user(form_data, db)\n", "id": "35018", "language": "Python", "matching_score": 1.6735448837280273, 
"max_stars_count": 0, "path": "app/routers/auth.py" }, { "content": "from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom app.routers import auth, listings, bookings\nfrom app.config.settings import origins\nfrom app.config.database import Base, engine, SessionLocal\n\nBase.metadata.create_all(bind=engine)\n\n\napp = FastAPI()\n\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(auth.router, prefix=\"/user/auth\")\napp.include_router(listings.router, prefix=\"/listings\")\napp.include_router(bookings.router, prefix=\"/bookings\")\n", "id": "4637965", "language": "Python", "matching_score": 1.1501765251159668, "max_stars_count": 0, "path": "app/main.py" }, { "content": "from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom app.dependencies.JWTtokens import get_current_user\nfrom app.schemas.bookings import (\n BookingOut,\n CreateBookingOut,\n CreateBookingIn,\n AcceptDeclineBookingOut,\n)\nfrom app.models.bookings import Booking as BookingModel\nfrom app.config.database import get_db\nfrom app.controllers import bookingsControllers\n\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", tags=[\"Bookings\"], response_model=BookingOut)\nasync def get_all_bookings(\n db: Session = Depends(get_db), user=Depends(get_current_user)\n):\n return bookingsControllers.get_bookings(db)\n\n\n@router.post(\n \"/new/{listing_id}\", tags=[\"Bookings\"], response_model=CreateBookingOut\n)\nasync def create_booking(\n listing_id: int,\n data: CreateBookingIn,\n db: Session = Depends(get_db),\n curr_user=Depends(get_current_user),\n):\n return bookingsControllers.create_booking(listing_id, data, db, curr_user)\n\n\n@router.put(\n \"/accept/{booking_id}\",\n tags=[\"Bookings\"],\n response_model=AcceptDeclineBookingOut,\n)\nasync def accept_booking(\n booking_id: int,\n db: Session = Depends(get_db),\n curr_user=Depends(get_current_user),\n):\n return bookingsControllers.accept_booking(booking_id, db, curr_user)\n\n\n@router.put(\n \"/decline/{booking_id}\",\n tags=[\"Bookings\"],\n response_model=AcceptDeclineBookingOut,\n)\nasync def decline_booking(\n booking_id: int,\n db: Session = Depends(get_db),\n curr_user=Depends(get_current_user),\n):\n return bookingsControllers.decline_booking(booking_id, db, curr_user)\n\n\n@router.delete(\n \"/{booking_id}\",\n tags=[\"Bookings\"],\n response_model=AcceptDeclineBookingOut,\n)\nasync def delete_booking(\n booking_id: int,\n db: Session = Depends(get_db),\n curr_user=Depends(get_current_user),\n):\n return bookingsControllers.delete_booking(booking_id, db, curr_user)\n", "id": "12538925", "language": "Python", "matching_score": 4.192615985870361, "max_stars_count": 0, "path": "app/routers/bookings.py" }, { "content": "from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom app.models.listings import Listing as ListingModel\nfrom app.models.reviews import Review as ReviewModel\nfrom app.models.images import Image as ImageModel\nfrom app.models.bedroom import Bedroom as BedroomModel\nfrom app.models.availability import Availability as AvailabilityModel\n\nfrom app.schemas.listings import (\n CreateListing,\n AllListingsOut,\n ListingOut,\n CreateListingOut,\n)\nfrom app.schemas.reviews import ReviewIn, ReviewOut\nfrom app.schemas.availability import AvailabilityIn\nfrom app.schemas.users import User\nfrom app.controllers import listingsControllers\nfrom 
app.config.database import get_db\nfrom app.dependencies.JWTtokens import get_current_user\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", tags=[\"Listings\"], response_model=AllListingsOut)\nasync def get_all_listings(db: Session = Depends(get_db)):\n return listingsControllers.get_listings_list(db)\n\n\n@router.post(\"/new\", tags=[\"Listings\"], response_model=CreateListingOut)\nasync def create_listing(\n data: CreateListing,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n listing_id = listingsControllers.create_new_listing(data, current_user, db)\n return CreateListingOut(listing_id=listing_id)\n\n\n@router.get(\"/{listing_id}\", tags=[\"Listings\"], response_model=ListingOut)\nasync def get_listing(listing_id: int, db: Session = Depends(get_db)):\n return listingsControllers.get_listing(listing_id, db)\n\n\n@router.put(\"/{listing_id}\", tags=[\"Listings\"])\nasync def update_listing(\n data: CreateListing,\n listing_id: int,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n return listingsControllers.update_listing(\n listing_id, data, db, current_user\n )\n\n\n@router.delete(\"/{listing_id}\", tags=[\"Listings\"])\nasync def delete_listing(\n listing_id: int,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n return listingsControllers.delete_listing(listing_id, db, current_user)\n\n\n@router.put(\"/publish/{listing_id}\", tags=[\"Listings\"])\nasync def publish_listing(\n listing_id: int,\n data: AvailabilityIn,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n return listingsControllers.publish_listing(\n listing_id, data, db, current_user\n )\n\n\n@router.put(\"/unpublish/{listing_id}\", tags=[\"Listings\"])\nasync def unpublish_listing(\n listing_id: int,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n return listingsControllers.unpublish_listing(listing_id, db, current_user)\n\n\n@router.post(\n \"/{listing_id}/review/{booking_id}\",\n tags=[\"Listings\"],\n response_model=ReviewOut,\n)\nasync def review_listing(\n listing_id: str,\n booking_id: str,\n data: ReviewIn,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user),\n):\n return listingsControllers.post_review(\n listing_id, booking_id, data, db, current_user\n )\n", "id": "5107217", "language": "Python", "matching_score": 3.615508794784546, "max_stars_count": 0, "path": "app/routers/listings.py" }, { "content": "from sqlalchemy.orm import Session\nfrom app.errors.exceptions import (\n USER_NOT_OWNER_EXCEPTION,\n BOOKING_WITH_LISTING_AND_USER_EXCEPTION,\n)\nfrom app.models.listings import Listing as ListingModel\nfrom app.models.reviews import Review as ReviewModel\nfrom app.schemas.users import User\nfrom app.schemas.listings import CreateListing\nfrom app.schemas.availability import AvailabilityIn\nfrom app.schemas.reviews import ReviewIn, ReviewOut\nfrom app.dependencies.listings import (\n create_beds,\n create_images,\n create_all_listing_dict,\n create_listing_dict,\n update_beds,\n update_images,\n listing_belongs_to_user,\n get_listing_by_id,\n create_availabilities,\n find_booking_by_listing_user,\n)\n\n\ndef get_listings_list(db: Session):\n db_listings = db.query(ListingModel).all()\n listings = []\n for listing in db_listings:\n listings.append(create_all_listing_dict(listing))\n return {\"listings\": listings}\n\n\ndef create_new_listing(data: CreateListing, current_user: User, db: 
Session):\n # for listing model\n new_listing_data = {\n \"thumbnail\": data.thumbnail, # done\n \"title\": data.title, # done\n \"price\": data.price, # done\n \"description\": data.description,\n \"type\": data.type, # done\n \"owner_id\": current_user.id, # done\n \"street_number\": data.address.street_number, # done\n \"street_name\": data.address.street_name, # done\n \"suburb\": data.address.suburb, # done\n \"post_code\": data.address.post_code,\n \"state\": data.address.state, # done\n \"country\": data.address.country, # done\n \"bathrooms\": data.bathrooms, # done\n \"parking\": data.parking, # done\n \"total_bedrooms\": data.total_bedrooms, # done\n \"total_beds\": data.total_beds, # done\n \"wifi\": data.amenities.wifi,\n \"aircon\": data.amenities.aircon,\n \"kitchen\": data.amenities.kitchen,\n \"tv\": data.amenities.tv,\n \"heating\": data.amenities.heating,\n \"fridge\": data.amenities.fridge,\n \"microwave\": data.amenities.microwave,\n \"pool\": data.amenities.pool,\n }\n new_listing = ListingModel(**new_listing_data)\n db.add(new_listing)\n db.commit()\n create_beds(data.bedrooms, db, new_listing.id)\n create_images(data.images, db, new_listing.id)\n return new_listing.listing_id\n\n\ndef get_listing(listing_id: int, db: Session):\n db_listing: ListingModel = get_listing_by_id(listing_id, db)\n\n return create_listing_dict(db_listing)\n\n\ndef update_listing(\n listing_id: int, data: CreateListing, db: Session, current_user: User\n):\n db_listing: ListingModel = get_listing_by_id(listing_id, db)\n\n # ensure the current user is the owner of the listing that is to be edited\n if not listing_belongs_to_user(db_listing, current_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n # format the data for the listing model\n update_listing_data = {\n \"thumbnail\": data.thumbnail, # done\n \"title\": data.title, # done\n \"price\": data.price, # done\n \"description\": data.description,\n \"type\": data.type, # done\n \"owner_id\": current_user.id, # done\n \"street_number\": data.address.street_number, # done\n \"street_name\": data.address.street_name, # done\n \"suburb\": data.address.suburb, # done\n \"post_code\": data.address.post_code,\n \"state\": data.address.state, # done\n \"country\": data.address.country, # done\n \"bathrooms\": data.bathrooms, # done\n \"parking\": data.parking, # done\n \"total_bedrooms\": data.total_bedrooms, # done\n \"total_beds\": data.total_beds, # done\n \"wifi\": data.amenities.wifi,\n \"aircon\": data.amenities.aircon,\n \"kitchen\": data.amenities.kitchen,\n \"tv\": data.amenities.tv,\n \"heating\": data.amenities.heating,\n \"fridge\": data.amenities.fridge,\n \"microwave\": data.amenities.microwave,\n \"pool\": data.amenities.pool,\n }\n # update the values of the db listing\n for key, value in update_listing_data.items():\n setattr(db_listing, key, value)\n\n # update bedrooms\n update_beds(data.bedrooms, db, listing_id)\n\n # update images\n update_images(data.images, db, listing_id)\n\n db.commit()\n\n return {}\n\n\ndef delete_listing(listing_id: int, db: Session, current_user: User):\n db_listing: ListingModel = get_listing_by_id(listing_id, db)\n\n # ensure the current user is the owner of the listing that is to be edited\n if not listing_belongs_to_user(db_listing, current_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n db.delete(db_listing)\n db.commit()\n return {}\n\n\ndef publish_listing(\n listing_id: int, data: AvailabilityIn, db: Session, current_user: User\n):\n db_listing: ListingModel = get_listing_by_id(listing_id, db)\n\n if not 
listing_belongs_to_user(db_listing, current_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n create_availabilities(listing_id, data, db)\n\n db_listing.published = True\n\n db.commit()\n\n return {}\n\n\ndef unpublish_listing(listing_id: int, db: Session, current_user: User):\n db_listing: ListingModel = get_listing_by_id(listing_id, db)\n\n if not listing_belongs_to_user(db_listing, current_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n db_listing.published = False\n\n db.commit()\n\n return {}\n\n\n# booking id is needed only to make sure that a valid booking exists, as users can\n# only post reviews after they have a valid booking\ndef post_review(\n listing_id: int,\n booking_id: int,\n data: ReviewIn,\n db: Session,\n current_user: User,\n):\n # check a booking exists with user id and listing id and booking id\n db_booking = find_booking_by_listing_user(\n booking_id, listing_id, current_user, db\n )\n if not db_booking:\n raise BOOKING_WITH_LISTING_AND_USER_EXCEPTION\n new_review = ReviewModel(\n text=data.review.text,\n rating=data.review.rating,\n listing_id=listing_id,\n owner_id=current_user.id,\n )\n db.add(new_review)\n db.commit()\n return ReviewOut(\n id=new_review.id,\n text=new_review.text,\n rating=new_review.rating,\n listing_id=new_review.listing_id,\n owner_id=new_review.owner_id,\n owner_name=new_review.owner.name,\n )\n", "id": "7504282", "language": "Python", "matching_score": 5.651500225067139, "max_stars_count": 0, "path": "app/controllers/listingsControllers.py" }, { "content": "from sqlalchemy.orm import Session\nfrom app.schemas.availability import AvailabilityIn\nfrom app.models.bedroom import Bedroom\nfrom app.models.images import Image\nfrom app.models.listings import Listing\nfrom app.models.users import User\nfrom app.models.availability import Availability\nfrom app.models.bookings import Booking\nfrom app.errors.exceptions import LISTING_NOT_FOUND_EXCEPTION\n\n\ndef create_beds(bedrooms: list[int], db: Session, listing_id: int):\n for bedroom in bedrooms:\n new_bed = Bedroom(listing_id=listing_id, beds=bedroom.beds)\n db.add(new_bed)\n\n db.commit()\n\n\ndef update_beds(bedrooms: list[int], db: Session, listing_id: int):\n # first delete all bedrooms associated with a listing\n db.query(Bedroom).filter_by(listing_id=listing_id).delete()\n # add new bedrooms\n create_beds(bedrooms, db, listing_id)\n\n\ndef create_images(images: list[str], db: Session, listing_id: int):\n for image in images:\n if isinstance(image, str):\n new_image = Image(listing_id=listing_id, image=image)\n else:\n new_image = Image(listing_id=listing_id, image=image[\"image\"])\n db.add(new_image)\n\n db.commit()\n\n\ndef update_images(images: list[str], db: Session, listing_id: int):\n # first delete the images associated with the listing\n db.query(Image).filter_by(listing_id=listing_id).delete()\n # then add the new images to the db\n create_images(images, db, listing_id)\n\n\ndef get_address_dict(listing: Listing):\n address = {\n \"street_number\": listing.street_number,\n \"street_name\": listing.street_name,\n \"suburb\": listing.suburb,\n \"post_code\": listing.post_code,\n \"state\": listing.state,\n \"country\": listing.country,\n }\n return address\n\n\ndef get_amenities_dict(listing: Listing):\n amenities = {\n \"wifi\": listing.wifi,\n \"aircon\": listing.aircon,\n \"kitchen\": listing.kitchen,\n \"tv\": listing.tv,\n \"heating\": listing.heating,\n \"fridge\": listing.fridge,\n \"microwave\": listing.microwave,\n \"pool\": listing.pool,\n }\n return amenities\n\n\ndef 
get_metadata_dict(listing: Listing):\n metadata = {\n \"total_bedrooms\": listing.total_bedrooms,\n \"total_beds\": listing.total_beds,\n \"type\": listing.type,\n \"description\": listing.description,\n \"bathrooms\": listing.bathrooms,\n \"parking\": listing.parking,\n \"images\": listing.images,\n \"amenities\": get_amenities_dict(listing),\n \"bedrooms\": listing.bedrooms,\n }\n return metadata\n\n\ndef create_all_listing_dict(listing: Listing):\n # add the owner name to the review object that will be returned. Mainly\n # to allow the front end to display the users name easily\n for review in listing.reviews:\n review.owner_name = review.owner.name\n\n listing_dict = {\n \"thumbnail\": listing.thumbnail,\n \"price\": listing.price,\n \"title\": listing.title,\n \"address\": get_address_dict(listing),\n \"id\": listing.id,\n \"owner_id\": listing.owner_id,\n \"reviews\": listing.reviews,\n }\n\n return listing_dict\n\n\ndef create_listing_dict(listing: Listing):\n listing_dict = {\n **create_all_listing_dict(listing),\n \"availability\": listing.availability,\n \"published\": listing.published,\n \"posted_on\": listing.posted_on,\n \"metadata\": get_metadata_dict(listing),\n \"owner_name\": listing.owner.name,\n }\n return listing_dict\n\n\ndef listing_belongs_to_user(listing: Listing, user: User):\n return user.id == listing.owner_id\n\n\ndef get_listing_by_id(listing_id: int, db: Session):\n db_listing: Listing = (\n db.query(Listing).filter(Listing.id == listing_id).first()\n )\n if not db_listing:\n raise LISTING_NOT_FOUND_EXCEPTION\n\n return db_listing\n\n\ndef create_availabilities(listing_id: int, data: AvailabilityIn, db: Session):\n for availability in data.availability:\n new_availability = Availability(\n start=availability.start,\n end=availability.end,\n listing_id=listing_id,\n )\n db.add(new_availability)\n\n db.commit()\n\n\ndef find_booking_by_listing_user(\n booking_id: int, listing_id: int, user: User, db: Session\n):\n return db.query(Booking).filter_by(\n id=booking_id, listing_id=listing_id, owner_id=user.id\n )\n", "id": "4493227", "language": "Python", "matching_score": 4.312009334564209, "max_stars_count": 0, "path": "app/dependencies/listings.py" }, { "content": "from pydantic import BaseModel\nfrom datetime import date\n\nfrom .reviews import Review\nfrom .availability import Availability\n\n\nclass Address(BaseModel):\n street_number: int\n street_name: str\n suburb: str\n post_code: int\n state: str\n country: str\n\n\nclass Amenities(BaseModel):\n wifi: bool = False\n aircon: bool = False\n kitchen: bool = False\n tv: bool = False\n heating: bool = False\n fridge: bool = False\n microwave: bool = False\n pool: bool = False\n\n\nclass Image(BaseModel):\n image: str = \"\"\n\n\nclass Bedroom(BaseModel):\n beds: int = 0\n\n\nclass CreateBedroom(Bedroom):\n pass\n\n\nclass ListingMetadata(BaseModel):\n total_bedrooms: int = 0\n total_beds: int = 0\n type: str\n description: str = \"\"\n bathrooms: int = 0\n parking: int = 0\n images: list = []\n amenities: Amenities\n bedrooms: list\n\n\nclass ListingBase(BaseModel):\n thumbnail: str = \"\"\n price: float\n title: str\n address: Address\n\n\nclass AllListings(ListingBase):\n id: int\n owner_id: int\n reviews: list\n\n class Config:\n orm_mode = True\n\n\nclass AllListingsOut(BaseModel):\n listings: list[AllListings]\n\n\nclass CreateListing(ListingBase):\n total_bedrooms: int = 0\n total_beds: int = 0\n type: str\n description: str = \"\"\n bathrooms: int = 0\n parking: int = 0\n images: list = []\n amenities: 
Amenities\n bedrooms: list[CreateBedroom] = []\n\n\nclass CreateListingOut(BaseModel):\n listing_id: int\n\n\nclass ListingOut(ListingBase):\n id: int\n owner_id: int\n reviews: list\n availability: list\n published: bool\n posted_on: date\n metadata: ListingMetadata\n owner_name: str\n", "id": "104555", "language": "Python", "matching_score": 2.938755750656128, "max_stars_count": 0, "path": "app/schemas/listings.py" }, { "content": "from pydantic import BaseModel\n\n\nclass Review(BaseModel):\n id: int\n text: str\n rating: float\n listing_id: int\n owner_id: int\n\n\nclass CreateReview(BaseModel):\n rating: int\n text: str\n\n\nclass ReviewIn(BaseModel):\n review: CreateReview\n\n\nclass ReviewOut(Review):\n owner_name: str\n", "id": "11961298", "language": "Python", "matching_score": 0.03398703411221504, "max_stars_count": 0, "path": "app/schemas/reviews.py" }, { "content": "import bcrypt\nfrom sqlalchemy.orm import Session\nfrom email_validator import validate_email\nfrom app.models.users import User as UserModel\n\n# creates a hash of the password\ndef get_password_hash(password: str):\n salt = bcrypt.gensalt()\n return bcrypt.hashpw(password.encode(\"utf-8\"), salt)\n\n\n# checks if the password provided matches the one in the db\ndef verify_password(password, hashed_password):\n return bcrypt.checkpw(\n password.encode(\"utf-8\"), hashed_password\n )\n\n\ndef get_user_by_email(db: Session, email: str):\n return db.query(UserModel).filter(UserModel.email == email).first()\n\n\ndef check_valid_email(email: str) -> str:\n valid = validate_email(email)\n # Update with the normalized form.\n return valid.email\n", "id": "5274117", "language": "Python", "matching_score": 2.1219279766082764, "max_stars_count": 0, "path": "app/dependencies/authentication.py" }, { "content": "from fastapi import HTTPException, status, Depends\nfrom email_validator import EmailNotValidError\nfrom sqlalchemy.orm import Session\nfrom app.schemas.tokens import Token\nfrom app.models.users import User as UserModel\nfrom app.schemas.users import UserCreate, UserLogin\nfrom app.dependencies.authentication import (\n get_password_hash,\n check_valid_email,\n get_user_by_email,\n verify_password,\n)\nfrom app.dependencies.JWTtokens import create_access_token\nfrom app.config.database import get_db\n\n\ndef create_user(db: Session, user: UserCreate):\n # check the email address isn't already in use\n db_user = get_user_by_email(db, user.email)\n if db_user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Email already registered.\",\n )\n\n # ensure the email address is valid\n try:\n email = check_valid_email(user.email)\n except EmailNotValidError as e:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=str(e),\n )\n\n hashed_password = get_password_hash(user.password)\n\n # create the user\n new_user = UserModel(\n email=user.email, name=user.name, hashed_password=hashed_password\n )\n # add the user to the db\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n return new_user\n\n\ndef register_user(db: Session, user: UserCreate):\n new_user = create_user(db, user)\n # if the user was created without problems, generate the jwt token\n if new_user:\n token = create_access_token(\n data={\n \"email\": new_user.email,\n \"name\": new_user.name,\n \"id\": new_user.id,\n }\n )\n return Token(access_token=token, token_type=\"bearer\")\n else:\n raise HTTPException(\n status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Something went wrong.\",\n )\n\n\ndef 
login_user(form_data: UserLogin, db):\n # check if the user exists in the db\n curr_user = get_user_by_email(db, form_data.username)\n if not curr_user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"No user exists with that email address.\",\n )\n # check if the passwords match\n if not verify_password(form_data.password, curr_user.hashed_password):\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Incorrect password.\",\n )\n token = create_access_token(\n data={\n \"email\": curr_user.email,\n \"name\": curr_user.name,\n \"id\": curr_user.id,\n }\n )\n return Token(access_token=token, token_type=\"bearer\")\n", "id": "6632231", "language": "Python", "matching_score": 4.223599433898926, "max_stars_count": 0, "path": "app/controllers/authControllers.py" }, { "content": "from fastapi import HTTPException, status\n\n\nCREDENTIALS_EXCEPTION = HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Could not validate credentials.\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n)\n\n# DATABASE ERRORS\n\nLISTING_NOT_FOUND_EXCEPTION = HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"No listing found with that id.\",\n)\n\nBOOKING_NOT_FOUND_EXCEPTION = HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"No booking found with that id.\",\n)\n\nUSER_NOT_OWNER_EXCEPTION = HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"You do not have permission to modify this listing.\",\n)\n\nBOOKING_WITH_LISTING_AND_USER_EXCEPTION = HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"No booking found for this listing with the given user id\",\n)\n", "id": "2402871", "language": "Python", "matching_score": 2.0168581008911133, "max_stars_count": 0, "path": "app/errors/exceptions.py" }, { "content": "from sqlalchemy.orm import Session\nfrom app.errors.exceptions import BOOKING_NOT_FOUND_EXCEPTION\nfrom app.models.bookings import Booking\n\n\ndef get_booking_by_id(booking_id: int, db: Session):\n db_booking: Booking = (\n db.query(Booking).filter(Booking.id == booking_id).first()\n )\n if not db_booking:\n raise BOOKING_NOT_FOUND_EXCEPTION\n\n return db_booking\n", "id": "92805", "language": "Python", "matching_score": 1.5182414054870605, "max_stars_count": 0, "path": "app/dependencies/bookings.py" }, { "content": "from sqlalchemy.orm import Session\nfrom app.errors.exceptions import USER_NOT_OWNER_EXCEPTION\nfrom app.dependencies.listings import get_listing_by_id, listing_belongs_to_user\nfrom app.dependencies.bookings import get_booking_by_id\nfrom app.schemas.bookings import (\n BookingOut,\n CreateBookingIn,\n CreateBookingOut,\n Booking,\n)\nfrom app.models.bookings import Booking as BookingModel\nfrom app.schemas.users import User\n\n\ndef get_bookings(db: Session):\n db_bookings = db.query(BookingModel).all()\n all_bookings = []\n for booking in db_bookings:\n all_bookings.append(\n Booking(\n id=booking.id,\n start=booking.start,\n end=booking.end,\n total=booking.total,\n listing_id=booking.listing_id,\n owner_id=booking.owner_id,\n status=booking.status,\n )\n )\n return BookingOut(bookings=all_bookings)\n\n\ndef create_booking(\n listing_id: int, data: CreateBookingIn, db: Session, curr_user: User\n):\n new_booking = BookingModel(\n status=\"pending\",\n start=data.date_range.start,\n end=data.date_range.end,\n listing_id=listing_id,\n owner_id=curr_user.id,\n total=data.total,\n )\n db.add(new_booking)\n db.commit()\n return CreateBookingOut(id=new_booking.id)\n\n\ndef 
accept_booking(booking_id: int, db: Session, curr_user: User):\n db_booking = get_booking_by_id(booking_id, db)\n # get the listing from the db to verify the curr_user is the owner\n # of the listing\n db_listing = get_listing_by_id(db_booking.listing_id, db)\n if not listing_belongs_to_user(db_listing, curr_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n db_booking.status = \"accepted\"\n db.commit()\n return {}\n\n\ndef decline_booking(booking_id: int, db: Session, curr_user: User):\n db_booking = get_booking_by_id(booking_id, db)\n # get the listing from the db to verify the curr_user is the owner\n # of the listing\n db_listing = get_listing_by_id(db_booking.listing_id, db)\n if not listing_belongs_to_user(db_listing, curr_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n db_booking.status = \"declined\"\n db.commit()\n return {}\n\n\ndef delete_booking(booking_id: int, db: Session, curr_user: User):\n db_booking = get_booking_by_id(booking_id, db)\n\n # get the listing from the db to verify the curr_user is the owner\n # of the listing\n db_listing = get_listing_by_id(db_booking.listing_id, db)\n if not listing_belongs_to_user(db_listing, curr_user):\n raise USER_NOT_OWNER_EXCEPTION\n\n db.delete(db_booking)\n db.commit()\n return {}\n", "id": "11120779", "language": "Python", "matching_score": 3.1236894130706787, "max_stars_count": 0, "path": "app/controllers/bookingsControllers.py" }, { "content": "from datetime import date\nfrom pydantic import BaseModel\n\n\nclass DateRange(BaseModel):\n start: date\n end: date\n\n\nclass Booking(BaseModel):\n id: int\n start: date\n end: date\n total: int\n listing_id: int\n owner_id: int\n status: str\n\n\nclass CreateBookingIn(BaseModel):\n date_range: DateRange\n total: int\n\n\nclass CreateBookingOut(BaseModel):\n id: int\n\n\nclass BookingOut(BaseModel):\n bookings: list[Booking]\n\n\nclass AcceptDeclineBookingOut(BaseModel):\n pass\n", "id": "6478816", "language": "Python", "matching_score": 2.1905908584594727, "max_stars_count": 0, "path": "app/schemas/bookings.py" }, { "content": "from datetime import date\nfrom pydantic import BaseModel\n\n\nclass Availability(BaseModel):\n start: date\n end: date\n\n\nclass AvailabilityIn(BaseModel):\n availability: list[Availability]\n", "id": "11048416", "language": "Python", "matching_score": 0.650002121925354, "max_stars_count": 0, "path": "app/schemas/availability.py" }, { "content": "from datetime import date\nfrom sqlalchemy import Column, Float, ForeignKey, Integer, String, Date\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass Booking(Base):\n __tablename__ = \"bookings\"\n\n id = Column(Integer, primary_key=True, index=True)\n status = Column(String(15), nullable=False)\n start = Column(Date, default=date.today)\n end = Column(Date, default=date.today)\n total = Column(Integer, nullable=False)\n listing_id = Column(Integer, ForeignKey(\"listings.id\"))\n owner_id = Column(Integer, ForeignKey(\"users.id\"))\n\n listing = relationship(\"Listing\", back_populates=\"bookings\")\n owner = relationship(\"User\")\n", "id": "5297456", "language": "Python", "matching_score": 4.395289421081543, "max_stars_count": 0, "path": "app/models/bookings.py" }, { "content": "from datetime import date\nfrom sqlalchemy import Column, Date, ForeignKey, Integer\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass Availability(Base):\n __tablename__ = \"availability\"\n\n id = Column(Integer, primary_key=True, index=True)\n start = Column(Date, 
default=date.today)\n end = Column(Date, default=date.today)\n listing_id = Column(Integer, ForeignKey(\"listings.id\"))\n\n listing = relationship(\"Listing\", back_populates=\"availability\")\n", "id": "4053097", "language": "Python", "matching_score": 2.7142374515533447, "max_stars_count": 0, "path": "app/models/availability.py" }, { "content": "from sqlalchemy import Column, Integer, String, LargeBinary\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True, index=True)\n email = Column(String(50), unique=True, index=True, nullable=False)\n name = Column(String(20), nullable=False)\n hashed_password = Column(LargeBinary, nullable=False)\n\n listings = relationship(\n \"Listing\", back_populates=\"owner\", cascade=\"all, delete\"\n )\n", "id": "2255675", "language": "Python", "matching_score": 3.5183677673339844, "max_stars_count": 0, "path": "app/models/users.py" }, { "content": "from datetime import date\nfrom sqlalchemy import (\n Boolean,\n Column,\n Date,\n Float,\n ForeignKey,\n Integer,\n String,\n Text,\n)\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass Listing(Base):\n __tablename__ = \"listings\"\n\n id = Column(Integer, primary_key=True, index=True)\n thumbnail = Column(Text)\n title = Column(String(50), nullable=False)\n price = Column(Float, nullable=False)\n posted_on = Column(Date, default=date.today)\n published = Column(Boolean, default=False)\n owner_id = Column(Integer, ForeignKey(\"users.id\"))\n street_number = Column(Integer, nullable=False)\n street_name = Column(String(50), nullable=False)\n suburb = Column(String(30), nullable=False)\n post_code = Column(Integer, nullable=False)\n state = Column(String(30))\n country = Column(String(30), nullable=False)\n description = Column(Text)\n type = Column(String(20))\n total_bedrooms = Column(Integer, default=0)\n total_beds = Column(Integer, default=0)\n bathrooms = Column(Integer, default=0)\n parking = Column(Integer, default=0)\n wifi = Column(Boolean, default=False)\n aircon = Column(Boolean, default=False)\n kitchen = Column(Boolean, default=False)\n tv = Column(Boolean, default=False)\n heating = Column(Boolean, default=False)\n fridge = Column(Boolean, default=False)\n microwave = Column(Boolean, default=False)\n pool = Column(Boolean, default=False)\n\n owner = relationship(\"User\", back_populates=\"listings\")\n reviews = relationship(\n \"Review\", back_populates=\"listing\", cascade=\"all, delete\"\n )\n bookings = relationship(\n \"Booking\", back_populates=\"listing\", cascade=\"all, delete\"\n )\n availability = relationship(\n \"Availability\", back_populates=\"listing\", cascade=\"all, delete\"\n )\n bedrooms = relationship(\"Bedroom\", cascade=\"all, delete\")\n images = relationship(\"Image\", cascade=\"all, delete\")\n\n @property\n def listing_id(self):\n return self.id\n", "id": "8767121", "language": "Python", "matching_score": 4.808669567108154, "max_stars_count": 0, "path": "app/models/listings.py" }, { "content": "from sqlalchemy import Column, ForeignKey, Integer, Text\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass Review(Base):\n __tablename__ = \"reviews\"\n\n id = Column(Integer, primary_key=True, index=True)\n text = Column(Text, index=True)\n rating = Column(Integer, default=3)\n listing_id = Column(Integer, ForeignKey(\"listings.id\"))\n owner_id = Column(Integer, 
ForeignKey(\"users.id\"))\n\n listing = relationship(\"Listing\", back_populates=\"reviews\")\n owner = relationship(\"User\")\n", "id": "3591014", "language": "Python", "matching_score": 3.256659746170044, "max_stars_count": 0, "path": "app/models/reviews.py" }, { "content": "from sqlalchemy import Column, Float, ForeignKey, Integer, Text\nfrom sqlalchemy.orm import relationship\nfrom app.config.database import Base\n\n\nclass Bedroom(Base):\n __tablename__ = \"bedrooms\"\n\n id = Column(Integer, primary_key=True, index=True)\n listing_id = Column(Integer, ForeignKey(\"listings.id\"))\n beds = Column(Integer, default=0)\n\n def __repr__(self) -> str:\n return f\"Bedroom: id = {self.id}, beds = {self.beds}, listing = {self.listing_id}\"\n", "id": "3148907", "language": "Python", "matching_score": 4.226668834686279, "max_stars_count": 0, "path": "app/models/bedroom.py" }, { "content": "from sqlalchemy import Column, ForeignKey, Integer, Text\nfrom app.config.database import Base\n\n\nclass Image(Base):\n __tablename__ = \"images\"\n\n id = Column(Integer, primary_key=True, index=True)\n image = Column(Text)\n listing_id = Column(Integer, ForeignKey(\"listings.id\"))\n\n def __repr__(self) -> str:\n return f\"Image: id = {self.id}, listing = {self.listing_id}\"\n", "id": "11417325", "language": "Python", "matching_score": 3.478743076324463, "max_stars_count": 0, "path": "app/models/images.py" } ]
median_score: 3.076457
reponame: xwang0929
[ { "content": "import os\n\nfrom cement.core.controller import CementBaseController, expose\nfrom wo.core.logging import Log\nfrom wo.core.services import WOService\nfrom wo.core.variables import WOVar\n\n\nclass WOStackStatusController(CementBaseController):\n class Meta:\n label = 'stack_services'\n stacked_on = 'stack'\n stacked_type = 'embedded'\n description = 'Check the stack status'\n\n @expose(help=\"Start stack services\")\n def start(self):\n \"\"\"Start services\"\"\"\n services = []\n wo_system = \"/lib/systemd/system/\"\n pargs = self.app.pargs\n if not (pargs.nginx or pargs.php or\n pargs.php73 or\n pargs.mysql or\n pargs.redis or\n pargs.fail2ban or\n pargs.proftpd or\n pargs.netdata):\n pargs.nginx = True\n pargs.php = True\n pargs.mysql = True\n pargs.fail2ban = True\n pargs.netdata = True\n\n if pargs.nginx:\n if os.path.exists('{0}'.format(wo_system) + 'nginx.service'):\n services = services + ['nginx']\n else:\n Log.info(self, \"Nginx is not installed\")\n\n if pargs.php:\n if os.path.exists('{0}'.format(wo_system) + 'php7.2-fpm.service'):\n services = services + ['php7.2-fpm']\n else:\n Log.info(self, \"PHP7.2-FPM is not installed\")\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.php73:\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.mysql:\n if ((WOVar.wo_mysql_host == \"localhost\") or\n (WOVar.wo_mysql_host == \"127.0.0.1\")):\n if os.path.exists('/etc/systemd/system/mysql.service'):\n services = services + ['mysql']\n else:\n Log.info(self, \"MySQL is not installed\")\n else:\n Log.warn(self, \"Remote MySQL found, \"\n \"Unable to check MySQL service status\")\n\n if pargs.redis:\n if os.path.exists('{0}'.format(wo_system) +\n 'redis-server.service'):\n services = services + ['redis-server']\n else:\n Log.info(self, \"Redis server is not installed\")\n\n if pargs.fail2ban:\n if os.path.exists('{0}'.format(wo_system) + 'fail2ban.service'):\n services = services + ['fail2ban']\n else:\n Log.info(self, \"fail2ban is not installed\")\n\n # proftpd\n if pargs.proftpd:\n if os.path.exists('/etc/init.d/proftpd'):\n services = services + ['proftpd']\n else:\n Log.info(self, \"ProFTPd is not installed\")\n\n # netdata\n if pargs.netdata:\n if os.path.exists('{0}'.format(wo_system) + 'netdata.service'):\n services = services + ['netdata']\n else:\n Log.info(self, \"Netdata is not installed\")\n\n for service in services:\n Log.debug(self, \"Starting service: {0}\".format(service))\n WOService.start_service(self, service)\n\n @expose(help=\"Stop stack services\")\n def stop(self):\n \"\"\"Stop services\"\"\"\n services = []\n wo_system = \"/lib/systemd/system/\"\n pargs = self.app.pargs\n if not (pargs.nginx or pargs.php or\n pargs.php73 or\n pargs.mysql or\n pargs.fail2ban or\n pargs.netdata or\n pargs.proftpd or\n pargs.redis):\n pargs.nginx = True\n pargs.php = True\n pargs.mysql = True\n\n if pargs.nginx:\n if os.path.exists('{0}'.format(wo_system) + 'nginx.service'):\n services = services + ['nginx']\n else:\n Log.info(self, \"Nginx is not installed\")\n\n if pargs.php:\n if os.path.exists('{0}'.format(wo_system) + 'php7.2-fpm.service'):\n services = services + ['php7.2-fpm']\n else:\n Log.info(self, \"PHP7.2-FPM is not installed\")\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services 
= services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.php73:\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.mysql:\n if ((WOVar.wo_mysql_host == \"localhost\") or\n (WOVar.wo_mysql_host == \"127.0.0.1\")):\n if os.path.exists('/etc/systemd/system/mysql.service'):\n services = services + ['mysql']\n else:\n Log.info(self, \"MySQL is not installed\")\n else:\n Log.warn(self, \"Remote MySQL found, \"\n \"Unable to check MySQL service status\")\n\n if pargs.redis:\n if os.path.exists('{0}'.format(wo_system) +\n 'redis-server.service'):\n services = services + ['redis-server']\n else:\n Log.info(self, \"Redis server is not installed\")\n\n if pargs.fail2ban:\n if os.path.exists('{0}'.format(wo_system) + 'fail2ban.service'):\n services = services + ['fail2ban']\n else:\n Log.info(self, \"fail2ban is not installed\")\n\n # proftpd\n if pargs.proftpd:\n if os.path.exists('/etc/init.d/proftpd'):\n services = services + ['proftpd']\n else:\n Log.info(self, \"ProFTPd is not installed\")\n\n # netdata\n if pargs.netdata:\n if os.path.exists('{0}'.format(wo_system) + 'netdata.service'):\n services = services + ['netdata']\n else:\n Log.info(self, \"Netdata is not installed\")\n\n for service in services:\n Log.debug(self, \"Stopping service: {0}\".format(service))\n WOService.stop_service(self, service)\n\n @expose(help=\"Restart stack services\")\n def restart(self):\n \"\"\"Restart services\"\"\"\n services = []\n wo_system = \"/lib/systemd/system/\"\n pargs = self.app.pargs\n if not (pargs.nginx or pargs.php or\n pargs.php73 or\n pargs.mysql or\n pargs.netdata or\n pargs.proftpd or\n pargs.redis or\n pargs.fail2ban):\n pargs.nginx = True\n pargs.php = True\n pargs.mysql = True\n pargs.netdata = True\n\n if pargs.nginx:\n if os.path.exists('{0}'.format(wo_system) + 'nginx.service'):\n services = services + ['nginx']\n else:\n Log.info(self, \"Nginx is not installed\")\n\n if pargs.php:\n if os.path.exists('{0}'.format(wo_system) + 'php7.2-fpm.service'):\n services = services + ['php7.2-fpm']\n else:\n Log.info(self, \"PHP7.2-FPM is not installed\")\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.php73:\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.mysql:\n if ((WOVar.wo_mysql_host == \"localhost\") or\n (WOVar.wo_mysql_host == \"127.0.0.1\")):\n if os.path.exists('/etc/systemd/system/mysql.service'):\n services = services + ['mysql']\n else:\n Log.info(self, \"MySQL is not installed\")\n else:\n Log.warn(self, \"Remote MySQL found, \"\n \"Unable to check MySQL service status\")\n\n if pargs.redis:\n if os.path.exists('{0}'.format(wo_system) +\n 'redis-server.service'):\n services = services + ['redis-server']\n else:\n Log.info(self, \"Redis server is not installed\")\n\n if pargs.fail2ban:\n if os.path.exists('{0}'.format(wo_system) + 'fail2ban.service'):\n services = services + ['fail2ban']\n else:\n Log.info(self, \"fail2ban is not installed\")\n\n # proftpd\n if pargs.proftpd:\n if os.path.exists('/etc/init.d/proftpd'):\n services = services + ['proftpd']\n else:\n Log.info(self, \"ProFTPd is not installed\")\n\n # netdata\n if pargs.netdata:\n if 
os.path.exists('{0}'.format(wo_system) + 'netdata.service'):\n services = services + ['netdata']\n else:\n Log.info(self, \"Netdata is not installed\")\n\n for service in services:\n Log.debug(self, \"Restarting service: {0}\".format(service))\n WOService.restart_service(self, service)\n\n @expose(help=\"Get stack status\")\n def status(self):\n \"\"\"Status of services\"\"\"\n services = []\n wo_system = \"/lib/systemd/system/\"\n pargs = self.app.pargs\n if not (pargs.nginx or pargs.php or\n pargs.php73 or\n pargs.mysql or\n pargs.netdata or\n pargs.proftpd or\n pargs.redis or\n pargs.fail2ban):\n pargs.nginx = True\n pargs.php = True\n pargs.mysql = True\n pargs.fail2ban = True\n pargs.netdata = True\n\n if pargs.nginx:\n if os.path.exists('{0}'.format(wo_system) + 'nginx.service'):\n services = services + ['nginx']\n else:\n Log.info(self, \"Nginx is not installed\")\n\n if pargs.php:\n if os.path.exists('{0}'.format(wo_system) + 'php7.2-fpm.service'):\n services = services + ['php7.2-fpm']\n else:\n Log.info(self, \"PHP7.2-FPM is not installed\")\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.php73:\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.mysql:\n if ((WOVar.wo_mysql_host == \"localhost\") or\n (WOVar.wo_mysql_host == \"127.0.0.1\")):\n if os.path.exists('/etc/systemd/system/mysql.service'):\n services = services + ['mysql']\n else:\n Log.info(self, \"MySQL is not installed\")\n else:\n Log.warn(self, \"Remote MySQL found, \"\n \"Unable to check MySQL service status\")\n\n if pargs.redis:\n if os.path.exists('{0}'.format(wo_system) +\n 'redis-server.service'):\n services = services + ['redis-server']\n else:\n Log.info(self, \"Redis server is not installed\")\n\n if pargs.fail2ban:\n if os.path.exists('{0}'.format(wo_system) + 'fail2ban.service'):\n services = services + ['fail2ban']\n else:\n Log.info(self, \"fail2ban is not installed\")\n\n # proftpd\n if pargs.proftpd:\n if os.path.exists('/etc/init.d/proftpd'):\n services = services + ['proftpd']\n else:\n Log.info(self, \"ProFTPd is not installed\")\n\n # netdata\n if pargs.netdata:\n if os.path.exists('{0}'.format(wo_system) + 'netdata.service'):\n services = services + ['netdata']\n else:\n Log.info(self, \"Netdata is not installed\")\n\n for service in services:\n if WOService.get_service_status(self, service):\n Log.info(self, \"{0:10}: {1}\".format(service, \"Running\"))\n\n @expose(help=\"Reload stack services\")\n def reload(self):\n \"\"\"Reload service\"\"\"\n services = []\n wo_system = \"/lib/systemd/system/\"\n pargs = self.app.pargs\n if not (pargs.nginx or pargs.php or\n pargs.php73 or\n pargs.mysql or\n pargs.netdata or\n pargs.proftpd or\n pargs.redis or\n pargs.fail2ban):\n pargs.nginx = True\n pargs.php = True\n pargs.mysql = True\n pargs.fail2ban = True\n\n if pargs.nginx:\n if os.path.exists('{0}'.format(wo_system) + 'nginx.service'):\n services = services + ['nginx']\n else:\n Log.info(self, \"Nginx is not installed\")\n\n if pargs.php:\n if os.path.exists('{0}'.format(wo_system) + 'php7.2-fpm.service'):\n services = services + ['php7.2-fpm']\n else:\n Log.info(self, \"PHP7.2-FPM is not installed\")\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM 
is not installed\")\n\n if pargs.php73:\n if os.path.exists('{0}'.format(wo_system) + 'php7.3-fpm.service'):\n services = services + ['php7.3-fpm']\n else:\n Log.info(self, \"PHP7.3-FPM is not installed\")\n\n if pargs.mysql:\n if ((WOVar.wo_mysql_host == \"localhost\") or\n (WOVar.wo_mysql_host == \"127.0.0.1\")):\n if os.path.exists('/etc/systemd/system/mysql.service'):\n services = services + ['mysql']\n else:\n Log.info(self, \"MySQL is not installed\")\n else:\n Log.warn(self, \"Remote MySQL found, \"\n \"Unable to check MySQL service status\")\n\n if pargs.redis:\n if os.path.exists('{0}'.format(wo_system) +\n 'redis-server.service'):\n services = services + ['redis-server']\n else:\n Log.info(self, \"Redis server is not installed\")\n\n if pargs.fail2ban:\n if os.path.exists('{0}'.format(wo_system) + 'fail2ban.service'):\n services = services + ['fail2ban']\n else:\n Log.info(self, \"fail2ban is not installed\")\n\n # proftpd\n if pargs.proftpd:\n if os.path.exists('/etc/init.d/proftpd'):\n services = services + ['proftpd']\n else:\n Log.info(self, \"ProFTPd is not installed\")\n\n # netdata\n if pargs.netdata:\n if os.path.exists('{0}'.format(wo_system) + 'netdata.service'):\n services = services + ['netdata']\n else:\n Log.info(self, \"Netdata is not installed\")\n\n for service in services:\n Log.debug(self, \"Reloading service: {0}\".format(service))\n WOService.reload_service(self, service)\n", "id": "258607", "language": "Python", "matching_score": 2.7653968334198, "max_stars_count": 1, "path": "wo/cli/plugins/stack_services.py" }, { "content": "import csv\nimport os\n\nimport requests\n\nfrom wo.core.fileutils import WOFileUtils\nfrom wo.core.git import WOGit\nfrom wo.core.logging import Log\nfrom wo.core.shellexec import WOShellExec, CommandExecutionError\nfrom wo.core.variables import WOVar\n\n\nclass WOAcme:\n \"\"\"Acme.sh utilities for WordOps\"\"\"\n\n wo_acme_exec = (\"/etc/letsencrypt/acme.sh --config-home \"\n \"'/etc/letsencrypt/config'\")\n\n def export_cert(self):\n \"\"\"Export acme.sh csv certificate list\"\"\"\n if not WOShellExec.cmd_exec(\n self, \"{0} \".format(WOAcme.wo_acme_exec) +\n \"--list --listraw > /var/lib/wo/cert.csv\"):\n Log.error(self, \"Unable to export certs list\")\n WOFileUtils.chmod(self, '/var/lib/wo/cert.csv', 0o600)\n\n def setupletsencrypt(self, acme_domains, acmedata):\n \"\"\"Issue SSL certificates with acme.sh\"\"\"\n all_domains = '\\' -d \\''.join(acme_domains)\n wo_acme_dns = acmedata['acme_dns']\n keylenght = acmedata['keylength']\n if acmedata['dns'] is True:\n acme_mode = \"--dns {0}\".format(wo_acme_dns)\n validation_mode = \"DNS mode with {0}\".format(wo_acme_dns)\n if acmedata['dnsalias'] is True:\n acme_mode = acme_mode + \\\n \" --challenge-alias {0}\".format(acmedata['acme_alias'])\n else:\n acme_mode = \"-w /var/www/html\"\n validation_mode = \"Webroot challenge\"\n Log.debug(self, \"Validation : Webroot mode\")\n if not os.path.isdir('/var/www/html/.well-known/acme-challenge'):\n WOFileUtils.mkdir(\n self, '/var/www/html/.well-known/acme-challenge')\n WOFileUtils.chown(\n self, '/var/www/html/.well-known', 'www-data', 'www-data',\n recursive=True)\n WOFileUtils.chmod(self, '/var/www/html/.well-known', 0o750,\n recursive=True)\n\n Log.info(self, \"Validation mode : {0}\".format(validation_mode))\n Log.wait(self, \"Issuing SSL cert with acme.sh\")\n if not WOShellExec.cmd_exec(\n self, \"{0} \".format(WOAcme.wo_acme_exec) +\n \"--issue -d '{0}' {1} -k {2} -f\"\n .format(all_domains, acme_mode, keylenght)):\n 
Log.failed(self, \"Issuing SSL cert with acme.sh\")\n if acmedata['dns'] is True:\n Log.error(\n self, \"Please make sure your properly \"\n \"set your DNS API credentials for acme.sh\\n\"\n \"If you are using sudo, use \\\"sudo -E wo\\\"\")\n return False\n else:\n Log.error(\n self, \"Your domain is properly configured \"\n \"but acme.sh was unable to issue certificate.\\n\"\n \"You can find more informations in \"\n \"/var/log/wo/wordops.log\")\n return False\n else:\n Log.valide(self, \"Issuing SSL cert with acme.sh\")\n return True\n\n def deploycert(self, wo_domain_name):\n \"\"\"Deploy Let's Encrypt certificates with acme.sh\"\"\"\n if not os.path.isfile('/etc/letsencrypt/renewal/{0}_ecc/fullchain.cer'\n .format(wo_domain_name)):\n Log.error(self, 'Certificate not found. Deployment canceled')\n\n Log.debug(self, \"Cert deployment for domain: {0}\"\n .format(wo_domain_name))\n\n try:\n Log.wait(self, \"Deploying SSL cert\")\n if WOShellExec.cmd_exec(\n self, \"mkdir -p {0}/{1} && {2} --install-cert -d {1} --ecc \"\n \"--cert-file {0}/{1}/cert.pem --key-file {0}/{1}/key.pem \"\n \"--fullchain-file {0}/{1}/fullchain.pem \"\n \"--ca-file {0}/{1}/ca.pem --reloadcmd \\\"nginx -t && \"\n \"service nginx restart\\\" \"\n .format(WOVar.wo_ssl_live,\n wo_domain_name, WOAcme.wo_acme_exec)):\n Log.valide(self, \"Deploying SSL cert\")\n else:\n Log.failed(self, \"Deploying SSL cert\")\n Log.error(self, \"Unable to deploy certificate\")\n\n if os.path.isdir('/var/www/{0}/conf/nginx'\n .format(wo_domain_name)):\n\n sslconf = open(\"/var/www/{0}/conf/nginx/ssl.conf\"\n .format(wo_domain_name),\n encoding='utf-8', mode='w')\n sslconf.write(\n \"listen 443 ssl http2;\\n\"\n \"listen [::]:443 ssl http2;\\n\"\n \"ssl_certificate {0}/{1}/fullchain.pem;\\n\"\n \"ssl_certificate_key {0}/{1}/key.pem;\\n\"\n \"ssl_trusted_certificate {0}/{1}/ca.pem;\\n\"\n \"ssl_stapling_verify on;\\n\"\n .format(WOVar.wo_ssl_live, wo_domain_name))\n sslconf.close()\n\n if not WOFileUtils.grep(self, '/var/www/22222/conf/nginx/ssl.conf',\n '/etc/letsencrypt'):\n Log.info(self, \"Securing WordOps backend with current cert\")\n sslconf = open(\"/var/www/22222/conf/nginx/ssl.conf\",\n encoding='utf-8', mode='w')\n sslconf.write(\"ssl_certificate {0}/{1}/fullchain.pem;\\n\"\n \"ssl_certificate_key {0}/{1}/key.pem;\\n\"\n \"ssl_trusted_certificate {0}/{1}/ca.pem;\\n\"\n \"ssl_stapling_verify on;\\n\"\n .format(WOVar.wo_ssl_live, wo_domain_name))\n sslconf.close()\n\n WOGit.add(self, [\"/etc/letsencrypt\"],\n msg=\"Adding letsencrypt folder\")\n\n except IOError as e:\n Log.debug(self, str(e))\n Log.debug(self, \"Error occured while generating \"\n \"ssl.conf\")\n return 0\n\n def renew(self, domain):\n \"\"\"Renew letsencrypt certificate with acme.sh\"\"\"\n try:\n WOShellExec.cmd_exec(\n self, \"{0} \".format(WOAcme.wo_acme_exec) +\n \"--renew -d {0} --ecc --force\".format(domain))\n except CommandExecutionError as e:\n Log.debug(self, str(e))\n Log.error(self, 'Unable to renew certificate')\n return True\n\n def check_dns(self, acme_domains):\n \"\"\"Check if a list of domains point to the server IP\"\"\"\n server_ip = requests.get('https://v4.wordops.eu/').text\n for domain in acme_domains:\n url = (\n \"https://cloudflare-dns.com/dns-query?name={0}&type=A\"\n .format(domain))\n headers = {\n 'accept': 'application/dns-json'\n }\n try:\n response = requests.get(url, headers=headers).json()\n domain_ip = response[\"Answer\"][0]['data']\n except requests.RequestException:\n Log.error(self, 'Resolving domain IP failed')\n if(not 
domain_ip == server_ip):\n Log.warn(\n self, \"{0}\".format(domain) +\n \" point to the IP {0}\".format(domain_ip) +\n \" but your server IP is {0}.\".format(server_ip) +\n \"\\nUse the flag --force to bypass this check.\")\n Log.error(\n self, \"You have to set the \"\n \"proper DNS record for your domain\", False)\n return False\n Log.debug(self, \"DNS record are properly set\")\n return True\n\n def cert_check(self, wo_domain_name):\n \"\"\"Check certificate existance with acme.sh and return Boolean\"\"\"\n WOAcme.export_cert(self)\n # define new csv dialect\n csv.register_dialect('acmeconf', delimiter='|')\n # open file\n certfile = open('/var/lib/wo/cert.csv', mode='r', encoding='utf-8')\n reader = csv.reader(certfile, 'acmeconf')\n for row in reader:\n # check if domain exist\n if wo_domain_name == row[0]:\n # check if cert expiration exist\n if not row[3] == '':\n return True\n certfile.close()\n return False\n\n def removeconf(self, domain):\n sslconf = (\"/var/www/{0}/conf/nginx/ssl.conf\"\n .format(domain))\n sslforce = (\"/etc/nginx/conf.d/force-ssl-{0}.conf\"\n .format(domain))\n acmedir = [\n '{0}'.format(sslforce), '{0}'.format(sslconf),\n '{0}/{1}_ecc'.format(WOVar.wo_ssl_archive, domain),\n '{0}.disabled'.format(sslconf), '{0}.disabled'\n .format(sslforce), '{0}/{1}'\n .format(WOVar.wo_ssl_live, domain),\n '/etc/letsencrypt/shared/{0}.conf'.format(domain)]\n wo_domain = domain\n if WOAcme.cert_check(self, wo_domain):\n Log.info(self, \"Removing Acme configuration\")\n Log.debug(self, \"Removing Acme configuration\")\n try:\n WOShellExec.cmd_exec(\n self, \"{0} \".format(WOAcme.wo_acme_exec) +\n \"--remove -d {0} --ecc\".format(domain))\n except CommandExecutionError as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"Cert removal failed\")\n # remove all files and directories\n for dir in acmedir:\n if os.path.exists('{0}'.format(dir)):\n WOFileUtils.rm(self, '{0}'.format(dir))\n # find all broken symlinks\n WOFileUtils.findBrokenSymlink(self, \"/var/www\")\n else:\n if os.path.islink(\"{0}\".format(sslconf)):\n WOFileUtils.remove_symlink(self, \"{0}\".format(sslconf))\n WOFileUtils.rm(self, '{0}'.format(sslforce))\n\n if WOFileUtils.grepcheck(self, '/var/www/22222/conf/nginx/ssl.conf',\n '{0}'.format(domain)):\n Log.info(\n self, \"Setting back default certificate for WordOps backend\")\n with open(\"/var/www/22222/conf/nginx/\"\n \"ssl.conf\", \"w\") as ssl_conf_file:\n ssl_conf_file.write(\"ssl_certificate \"\n \"/var/www/22222/cert/22222.crt;\\n\"\n \"ssl_certificate_key \"\n \"/var/www/22222/cert/22222.key;\\n\")\n", "id": "10040263", "language": "Python", "matching_score": 2.588782548904419, "max_stars_count": 0, "path": "wo/core/acme.py" }, { "content": "\"\"\"WordOps GIT module\"\"\"\nimport os\n\nfrom sh import ErrorReturnCode, git\nfrom wo.core.logging import Log\n\n\nclass WOGit:\n \"\"\"Intialization of core variables\"\"\"\n def ___init__():\n # TODO method for core variables\n pass\n\n def add(self, paths, msg=\"Intializating\"):\n \"\"\"\n Initializes Directory as repository if not already git repo.\n and adds uncommited changes automatically\n \"\"\"\n for path in paths:\n global git\n git = git.bake(\"--git-dir={0}/.git\".format(path),\n \"--work-tree={0}\".format(path))\n if os.path.isdir(path):\n if not os.path.isdir(path + \"/.git\"):\n try:\n Log.debug(self, \"WOGit: git init at {0}\"\n .format(path))\n git.init(path)\n except ErrorReturnCode as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"Unable to git init at {0}\"\n 
.format(path))\n status = git.status(\"-s\")\n if len(status.splitlines()) > 0:\n try:\n Log.debug(self, \"WOGit: git commit at {0}\"\n .format(path))\n git.add(\"--all\")\n git.commit(\"-am {0}\".format(msg))\n except ErrorReturnCode as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"Unable to git commit at {0} \"\n .format(path))\n else:\n Log.debug(self, \"WOGit: Path {0} not present\".format(path))\n\n def checkfilestatus(self, repo, filepath):\n \"\"\"\n Checks status of file, If its tracked or untracked.\n \"\"\"\n global git\n git = git.bake(\"--git-dir={0}/.git\".format(repo),\n \"--work-tree={0}\".format(repo))\n status = git.status(\"-s\", \"{0}\".format(filepath))\n if len(status.splitlines()) > 0:\n return True\n else:\n return False\n\n def rollback(self, paths, msg=\"Rolling-Back\"):\n \"\"\"\n Rollback last commit to restore previous.\n configuration and commit changes automatically\n \"\"\"\n for path in paths:\n global git\n git = git.bake(\"--git-dir={0}/.git\".format(path),\n \"--work-tree={0}\".format(path))\n if os.path.isdir(path):\n if not os.path.isdir(path + \"/.git\"):\n Log.error(\n self, \"Unable to find a git repository at {0}\"\n .format(path))\n try:\n Log.debug(\n self, \"WOGit: git stash --include-untracked at {0}\"\n .format(path))\n git.stash(\"push\", \"--include-untracked\", \"-m {0}\"\n .format(msg))\n except ErrorReturnCode as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"Unable to git reset at {0} \"\n .format(path))\n else:\n Log.debug(self, \"WOGit: Path {0} not present\".format(path))\n\n def clone(self, repo, path, branch='master'):\n \"\"\"Equivalent to git clone \"\"\"\n if not os.path.exists('{0}'.format(path)):\n global git\n try:\n git.clone(\n '{0}'.format(repo),\n '{0}'.format(path),\n '--branch={0}'.format(branch),\n '--depth=1')\n except ErrorReturnCode as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"Unable to git clone at {0} \"\n .format(path))\n else:\n Log.debug(self, \"WOGit: Path {0} already exist\".format(path))\n", "id": "7400876", "language": "Python", "matching_score": 1.989518642425537, "max_stars_count": 0, "path": "wo/core/git.py" }, { "content": "\"\"\"WordOps main application entry point.\"\"\"\nimport sys\nfrom os import geteuid\n\nfrom cement.core.exc import CaughtSignal, FrameworkError\nfrom cement.core.foundation import CementApp\nfrom cement.ext.ext_argparse import ArgParseArgumentHandler\nfrom cement.utils.misc import init_defaults\n\nfrom wo.core import exc\n\n# this has to happen after you import sys, but before you import anything\n# from Cement \"source: https://github.com/datafolklabs/cement/issues/290\"\nif '--debug' in sys.argv:\n sys.argv.remove('--debug')\n TOGGLE_DEBUG = True\nelse:\n TOGGLE_DEBUG = False\n\n# Application default. 
Should update config/wo.conf to reflect any\n# changes, or additions here.\ndefaults = init_defaults('wo')\n\n# All internal/external plugin configurations are loaded from here\ndefaults['wo']['plugin_config_dir'] = '/etc/wo/plugins.d'\n\n# External plugins (generally, do not ship with application code)\ndefaults['wo']['plugin_dir'] = '/var/lib/wo/plugins'\n\n# External templates (generally, do not ship with application code)\ndefaults['wo']['template_dir'] = '/var/lib/wo/templates'\n\n\ndef encode_output(app, text):\n \"\"\" Encode the output to be suitable for the terminal\n\n :param app: The Cement App (unused)\n :param text: The rendered text\n :return: The encoded text\n \"\"\"\n\n return text.encode(\"utf-8\")\n\n\nclass WOArgHandler(ArgParseArgumentHandler):\n class Meta:\n label = 'wo_args_handler'\n\n def error(self, message):\n super(WOArgHandler, self).error(\"unknown args\")\n\n\nclass WOApp(CementApp):\n class Meta:\n label = 'wo'\n\n config_defaults = defaults\n\n # All built-in application bootstrapping (always run)\n bootstrap = 'wo.cli.bootstrap'\n\n # Internal plugins (ship with application code)\n plugin_bootstrap = 'wo.cli.plugins'\n\n # Internal templates (ship with application code)\n template_module = 'wo.cli.templates'\n\n extensions = ['mustache']\n\n hooks = [\n (\"post_render\", encode_output)\n ]\n\n output_handler = 'mustache'\n\n arg_handler = WOArgHandler\n\n debug = TOGGLE_DEBUG\n\n exit_on_close = True\n\n\nclass WOTestApp(WOApp):\n \"\"\"A test app that is better suited for testing.\"\"\"\n class Meta:\n # default argv to empty (don't use sys.argv)\n argv = []\n\n # don't look for config files (could break tests)\n config_files = []\n\n # don't call sys.exit() when app.close() is called in tests\n exit_on_close = False\n\n\n# Define the applicaiton object outside of main, as some libraries might wish\n# to import it as a global (rather than passing it into another class/func)\napp = WOApp()\n\n\ndef main():\n with app:\n try:\n global sys\n\n # if not root...kick out\n if not geteuid() == 0:\n print(\"\\nNon-privileged users cant use WordOps. 
\"\n \"Switch to root or invoke sudo.\\n\")\n app.close(1)\n app.run()\n except AssertionError as e:\n print(\"AssertionError => %s\" % e.args[0])\n app.exit_code = 1\n except exc.WOError as e:\n # Catch our application errors and exit 1 (error)\n print('WOError > %s' % e)\n app.exit_code = 1\n except CaughtSignal as e:\n # Default Cement signals are SIGINT and SIGTERM, exit 0 (non-error)\n print('CaughtSignal > %s' % e)\n app.exit_code = 0\n except FrameworkError as e:\n # Catch framework errors and exit 1 (error)\n print('FrameworkError > %s' % e)\n app.exit_code = 1\n finally:\n # Print an exception (if it occurred) and --debug was passed\n if app.debug:\n import sys\n import traceback\n\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if exc_traceback is not None:\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n main()\n", "id": "5697903", "language": "Python", "matching_score": 1.6049367189407349, "max_stars_count": 0, "path": "wo/cli/main.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseDebug(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_debug_stop(self):\n with WOTestApp(argv=['debug', '--stop']) as app:\n app.run()\n\n def test_wo_cli_debug_start(self):\n with WOTestApp(argv=['debug', '--start']) as app:\n app.run()\n\n def test_wo_cli_debug_php(self):\n with WOTestApp(argv=['debug', '--php']) as app:\n app.run()\n\n def test_wo_cli_debug_nginx(self):\n with WOTestApp(argv=['debug', '--nginx']) as app:\n app.run()\n\n def test_wo_cli_debug_rewrite(self):\n with WOTestApp(argv=['debug', '--rewrite']) as app:\n app.run()\n\n def test_wo_cli_debug_fpm(self):\n with WOTestApp(argv=['debug', '--fpm']) as app:\n app.run()\n\n def test_wo_cli_debug_mysql(self):\n with WOTestApp(argv=['debug', '--mysql']) as app:\n app.run()\n\n def test_wo_cli_debug_import_slow_log_interval(self):\n with WOTestApp(argv=['debug', '--mysql',\n '--import-slow-log-interval']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_mysql(self):\n with WOTestApp(argv=['debug', 'example3.com', '--mysql']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_wp(self):\n with WOTestApp(argv=['debug', 'example4.com', '--wp']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_nginx(self):\n with WOTestApp(argv=['debug', 'example4.com', '--nginx']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_start(self):\n with WOTestApp(argv=['debug', 'example1.com', '--start']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_stop(self):\n with WOTestApp(argv=['debug', 'example1.com', '--stop']) as app:\n app.run()\n\n def test_wo_cli_debug_site_name_rewrite(self):\n with WOTestApp(argv=['debug', 'example1.com', '--rewrite']) as app:\n app.run()\n", "id": "4708004", "language": "Python", "matching_score": 2.099668264389038, "max_stars_count": 0, "path": "tests/cli/26_test_debug.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseStackStart(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_stack_services_start_nginx(self):\n with WOTestApp(argv=['stack', 'start', '--nginx']) as app:\n app.run()\n\n def test_wo_cli_stack_services_start_php_fpm(self):\n with WOTestApp(argv=['stack', 'start', '--php']) as app:\n app.run()\n\n def test_wo_cli_stack_services_start_mysql(self):\n with WOTestApp(argv=['stack', 'start', '--mysql']) as app:\n app.run()\n\n def 
test_wo_cli_stack_services_start_all(self):\n with WOTestApp(argv=['stack', 'start']) as app:\n app.run()\n", "id": "8174399", "language": "Python", "matching_score": 1.447139024734497, "max_stars_count": 0, "path": "tests/cli/15_test_stack_services_start.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseStack(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_stack_install_nginx(self):\n with WOTestApp(argv=['stack', 'install', '--nginx']) as app:\n app.run()\n\n def test_wo_cli_stack_install_php(self):\n with WOTestApp(argv=['stack', 'install', '--php']) as app:\n app.run()\n\n def test_wo_cli_stack_install_php73(self):\n with WOTestApp(argv=['stack', 'install', '--php73']) as app:\n app.run()\n\n def test_wo_cli_stack_install_mysql(self):\n with WOTestApp(argv=['stack', 'install', '--mysql']) as app:\n app.run()\n\n def test_wo_cli_stack_install_wpcli(self):\n with WOTestApp(argv=['stack', 'install', '--wpcli']) as app:\n app.run()\n\n def test_wo_cli_stack_install_phpmyadmin(self):\n with WOTestApp(argv=['stack', 'install', '--phpmyadmin']) as app:\n app.run()\n\n def test_wo_cli_stack_install_adminer(self):\n with WOTestApp(argv=['stack', 'install', '--adminer']) as app:\n app.run()\n\n def test_wo_cli_stack_install_utils(self):\n with WOTestApp(argv=['stack', 'install', '--utils']) as app:\n app.run()\n", "id": "3102347", "language": "Python", "matching_score": 0.9904873967170715, "max_stars_count": 0, "path": "tests/cli/13_test_stack_install.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteDisable(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_site_disable(self):\n with WOTestApp(argv=['site', 'disable', 'example2.com']) as app:\n app.run()\n", "id": "6972471", "language": "Python", "matching_score": 2.4307429790496826, "max_stars_count": 0, "path": "tests/cli/19_test_site_disable.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteList(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_site_list_enable(self):\n with WOTestApp(argv=['site', 'list', '--enabled']) as app:\n app.run()\n\n def test_wo_cli_site_list_disable(self):\n with WOTestApp(argv=['site', 'list', '--disabled']) as app:\n app.run()\n", "id": "1190102", "language": "Python", "matching_score": 2.1585099697113037, "max_stars_count": 0, "path": "tests/cli/22_test_site_list.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteEnable(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_site_enable(self):\n with WOTestApp(argv=['site', 'enable', 'example2.com']) as app:\n app.run()\n", "id": "12447722", "language": "Python", "matching_score": 2.3054559230804443, "max_stars_count": 0, "path": "tests/cli/20_test_site_enable.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteCreate(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_site_create_html(self):\n with WOTestApp(argv=['site', 'create', 'example1.com',\n '--html']) as app:\n app.config.set('wo', '', True)\n app.run()\n\n def test_wo_cli_site_create_php(self):\n with WOTestApp(argv=['site', 'create', 'example2.com',\n '--php']) as app:\n 
app.run()\n\n def test_wo_cli_site_create_mysql(self):\n with WOTestApp(argv=['site', 'create', 'example3.com',\n '--mysql']) as app:\n app.run()\n\n def test_wo_cli_site_create_wp(self):\n with WOTestApp(argv=['site', 'create', 'example4.com',\n '--wp']) as app:\n app.run()\n\n def test_wo_cli_site_create_wpsubdir(self):\n with WOTestApp(argv=['site', 'create', 'example5.com',\n '--wpsubdir']) as app:\n app.run()\n\n def test_wo_cli_site_create_wpsubdomain(self):\n with WOTestApp(argv=['site', 'create', 'example6.com',\n '--wpsubdomain']) as app:\n app.run()\n\n def test_wo_cli_site_create_wpfc(self):\n with WOTestApp(argv=['site', 'create', 'example8.com',\n '--wpfc']) as app:\n app.run()\n\n def test_wo_cli_site_create_wpsc(self):\n with WOTestApp(argv=['site', 'create', 'example9.com',\n '--wpsc']) as app:\n app.run()\n", "id": "6835461", "language": "Python", "matching_score": 2.7612767219543457, "max_stars_count": 0, "path": "tests/cli/18_test_site_create.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteDelete(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_site_detele(self):\n with WOTestApp(argv=['site', 'delete', 'example1.com',\n '--no-prompt']) as app:\n app.run()\n\n def test_wo_cli_site_detele_all(self):\n with WOTestApp(argv=['site', 'delete', 'example2.com',\n '--all', '--no-prompt']) as app:\n app.run()\n\n def test_wo_cli_site_detele_db(self):\n with WOTestApp(argv=['site', 'delete', 'example3.com',\n '--db', '--no-prompt']) as app:\n app.run()\n\n def test_wo_cli_site_detele_files(self):\n with WOTestApp(argv=['site', 'delete', 'example4.com',\n '--files', '--no-prompt']) as app:\n app.run()\n", "id": "6460669", "language": "Python", "matching_score": 4.010846138000488, "max_stars_count": 0, "path": "tests/cli/29_test_site_delete.py" }, { "content": "from wo.utils import test\nfrom wo.cli.main import WOTestApp\n\n\nclass CliTestCaseSiteShow(test.WOTestCase):\n\n def test_wo_cli(self):\n with WOTestApp as app:\n app.run()\n\n def test_wo_cli_show_edit(self):\n with WOTestApp(argv=['site', 'show', 'example1.com']) as app:\n app.run()\n", "id": "1188355", "language": "Python", "matching_score": 2.226369857788086, "max_stars_count": 0, "path": "tests/cli/23_test_site_show.py" } ]
median_score: 2.22637
reponame: dextacy10-13
[ { "content": "from .initiate_database import *\nimport binascii\nimport bcrypt\nimport os\n\nclass CollectedPage(Base):\n __tablename__ = 'collected_pages'\n\n id = Column(String(100), primary_key=True)\n uri = Column(Text())\n page_html = Column(Text())\n owner_id = Column(String(100))\n timestamp = Column(Integer())\n\n def __init__( self ):\n self.generate_injection_id()\n\n def generate_injection_id( self ):\n self.id = binascii.hexlify(os.urandom(50))\n\n def to_dict( self ):\n exposed_attributes = [ \"uri\", \"id\", \"page_html\", \"timestamp\" ]\n return_dict = {}\n\n for attribute in exposed_attributes:\n return_dict[ attribute ] = getattr( self, attribute )\n\n return return_dict\n\n def __str__( self ):\n return self.id\n", "id": "4133702", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "api/models/collected_page.py" }, { "content": "#!/usr/bin/env python\nimport binascii\nimport yaml\nimport os\n\nnginx_template = \"\"\"\nserver {\n # Redirect HTTP to www\n listen 80;\n server_name fakedomain.com;\n location / {\n rewrite ^/(.*)$ https://www.fakedomain.com/$1 permanent;\n }\n}\n\nserver {\n # Redirect payloads to HTTPS\n listen 80;\n server_name *.fakedomain.com;\n proxy_set_header X-Forwarded-For $remote_addr;\n\n return 307 https://$host$request_uri;\n client_max_body_size 500M; # In case we have an extra large payload capture \n}\n\nserver {\n # Redirect HTTPS to www\n listen 443;\n ssl on;\n ssl_certificate /etc/nginx/ssl/fakedomain.com.crt; # Wildcard SSL certificate\n ssl_certificate_key /etc/nginx/ssl/fakedomain.com.key; # Wildcard SSL certificate key\n\n server_name fakedomain.com;\n location / {\n rewrite ^/(.*)$ https://www.fakedomain.com/$1 permanent;\n }\n}\n\nserver {\n # API proxy\n listen 443;\n ssl on;\n ssl_certificate /etc/nginx/ssl/fakedomain.com.crt; # Wildcard SSL certificate\n ssl_certificate_key /etc/nginx/ssl/fakedomain.com.key; # Wildcard SSL certificate key\n\n server_name *.fakedomain.com;\n access_log /var/log/nginx/fakedomain.com.vhost.access.log;\n error_log /var/log/nginx/fakedomain.com.vhost.error.log;\n\n client_max_body_size 500M;\n\n location / {\n proxy_pass http://localhost:8888;\n proxy_set_header Host $host;\n proxy_set_header X-Forwarded-For $remote_addr;\n }\n}\n\nserver {\n # Redirect api to HTTPS\n listen 80;\n server_name api.fakedomain.com; # Subdomain for API server\n proxy_set_header X-Forwarded-For $remote_addr;\n\n return 307 https://api.fakedomain.com$request_uri;\n client_max_body_size 500M; # In case we have an extra large payload capture \n}\n\nserver {\n # Redirect www to HTTPS\n listen 80;\n server_name www.fakedomain.com;\n location / {\n rewrite ^/(.*)$ https://www.fakedomain.com/$1 permanent;\n }\n}\n\nserver {\n # GUI proxy\n listen 443;\n server_name www.fakedomain.com;\n client_max_body_size 500M;\n ssl on;\n ssl_certificate /etc/nginx/ssl/fakedomain.com.crt; # Wildcard SSL certificate\n ssl_certificate_key /etc/nginx/ssl/fakedomain.com.key; # Wildcard SSL certificate key\n\n\n location / {\n proxy_pass http://localhost:1234;\n proxy_set_header Host $host;\n }\n}\n\"\"\"\n\nsettings = {\n \"email_from\":\"\",\n \"mailgun_api_key\":\"\",\n \"mailgun_sending_domain\":\"\",\n \"domain\": \"\",\n \"abuse_email\": \"\",\n \"cookie_secret\": \"\",\n}\n\nprint(\"\"\"\n __ __ _____ _____ _ _ _ \n \\ \\ / // ____/ ____| | | | | | | \n \\ V /| (___| (___ | |__| |_ _ _ __ | |_ ___ _ __ \n > < \\___ \\\\\\\\___ \\ | __ | | | | '_ \\| __/ _ \\ '__|\n / . 
\\ ____) |___) | | | | | |_| | | | | || __/ | \n /_/ \\_\\_____/_____/ |_| |_|\\__,_|_| |_|\\__\\___|_| \n \n \n Setup Utility\n \"\"\")\n\nprint(\"What is the base domain name you will be using? \")\nprint(\"(ex. localhost, www.example.com)\")\nhostname = input( \"Domain? \")\nif hostname != \"\":\n\tsettings[\"domain\"] = hostname\nnginx_template = nginx_template.replace( \"fakedomain.com\", settings[\"domain\"] )\n\nprint(\"Great! Now let's setup your Mailgun account to send XSS alerts to.\")\nprint(\"\")\nprint(\"Enter your API key: \")\nprint(\"(ex. key-8da843ff65205a61374b09b81ed0fa35)\")\nsettings[\"mailgun_api_key\"] = input( \"Mailgun API key: \")\nprint(\"\")\nprint(\"What is your Mailgun domain? \")\nprint(\"(ex. example.com)\")\nsettings[\"mailgun_sending_domain\"] = input( \"Mailgun domain: \")\nprint(\"\")\nprint(\"What email address is sending the payload fire emails?: \")\nprint(\"(ex. <EMAIL>)\")\nsettings[\"email_from\"] = input( \"Sending email address: \")\nprint(\"\")\nprint(\"Where should abuse/contact emails go?: \")\nprint(\"(ex. <EMAIL>)\")\nsettings[\"abuse_email\"] = input( \"Abuse/Contact email: \")\nprint(\"\")\nprint(\"\")\nprint(\"What postgres user is this service using? \")\nprint(\"(ex. xsshunter)\")\nsettings[\"postgreql_username\"] = input( \"Postgres username: \")\nprint(\"\")\nprint(\"What is the postgres user's password? \")\nprint(\"(ex. @!$%@^%UOFGJOEJG$)\")\nsettings[\"postgreql_password\"] = input( \"Postgres password: \")\nprint(\"\")\nprint(\"What is the postgres user's DB? \")\nprint(\"(ex. xsshunter)\")\nsettings[\"postgres_db\"] = input( \"Postgres DB: \")\nprint(\"\")\nprint(\"Generating cookie secret...\")\nsettings[\"cookie_secret\"] = binascii.hexlify( os.urandom(50) )\n\nyaml_config = yaml.dump( settings, default_flow_style=False)\nfile_handler = open( \"config.yaml\", \"w\" )\nfile_handler.write( yaml_config )\nfile_handler.close()\n\nprint(\"Minting new nginx configuration file...\")\nfile_handler = open( \"default\", \"w\" )\nfile_handler.write( nginx_template )\nfile_handler.close()\n\nprint(\"\"\"\nSetup complete! Please now copy the 'default' file to /etc/nginx/sites-enabled/default\nThis can be done by running the following:\nsudo cp default /etc/nginx/sites-enabled/default\n\nAlso, please ensure your wildcard SSL certificate and key are available at the following locations:\n/etc/nginx/ssl/\"\"\" + hostname + \"\"\".crt; # Wildcard SSL certificate\n/etc/nginx/ssl/\"\"\" + hostname + \"\"\".key; # Wildcard SSL key\n\nGood luck hunting for XSS!\n\t\t\t\t\t\t\t-mandatory\n\"\"\")\n", "id": "1107550", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "generate_config.py" } ]
median_score: 0
reponame: SvyatoslavArtymovych
[ { "content": "\"\"\"\nUtilities to encode and decode zlib and base64 data.\n\"\"\"\n# Portions borrowed from:\n# http://stackoverflow.com/questions/1089662/python-inflate-and-deflate-implementations\nimport base64\nimport zlib\nfrom typing import Union\n\n\ndef decode_base64_and_inflate(b64string: Union[str, bytes]) -> bytes:\n \"\"\"Turn a base64-encoded zlib-compressed blob\n back in to the original bytes.\n The opposite of :func:`deflate_and_base64_encode`.\n \"\"\"\n if type(b64string) is bytes:\n b64string = b64string.decode('utf-8')\n decoded_data = base64.b64decode(b64string)\n return zlib.decompress(decoded_data, -15)\n\n\ndef deflate_and_base64_encode(string_val: Union[str, bytes]) -> bytes:\n \"\"\"zlib-compress and base64-encode some data.\n The opposite of :func:`decode_base64_and_inflate`.\n \"\"\"\n if type(string_val) is str:\n string_val = string_val.encode('utf-8')\n zlibbed_str = zlib.compress(string_val)\n compressed_string = zlibbed_str[2:-4]\n return base64.b64encode(compressed_string)\n\n\ndef decode_saml_xml(data: bytes) -> bytes:\n \"\"\"Decodes some base64-encoded and possibly zipped string\n into an XML string.\n \"\"\"\n decoded = base64.b64decode(data)\n # Is it XML yet?\n if decoded.strip().startswith(b'<'):\n return decoded\n\n # Try decode and inflate\n decoded = zlib.decompress(decoded, -15)\n # Is it XML yet?\n if decoded.strip().startswith(b'<'):\n return decoded\n\n raise ValueError(\"Does not look like an XML string!\")\n", "id": "6675616", "language": "Python", "matching_score": 0.37769678235054016, "max_stars_count": 61, "path": "flask_saml2/codex.py" }, { "content": "\"\"\"\nAll the SAML-specific exceptions this library can throw.\n\"\"\"\n\n\nclass SAML2Exception(Exception):\n \"\"\"Base exception for all flask_saml2 exceptions.\"\"\"\n pass\n\n\nclass MessageException(SAML2Exception):\n \"\"\"An exception with a nicely formatted error message.\"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<{}: {}>'.format(type(self).__name__, str(self))\n\n\nclass CannotHandleAssertion(MessageException):\n \"\"\"\n This SP or IdP handler can not handle this assertion.\n \"\"\"\n\n\nclass UserNotAuthorized(MessageException):\n \"\"\"\n User not authorized for SAML 2.0 authentication.\n \"\"\"\n\n\nclass ImproperlyConfigured(MessageException):\n \"\"\"\n Someone done goofed when configuring this application.\n \"\"\"\n", "id": "11852327", "language": "Python", "matching_score": 0.7951803207397461, "max_stars_count": 61, "path": "flask_saml2/exceptions.py" }, { "content": "\"\"\"\nTests for the Base SPHandler class.\n\"\"\"\nimport urllib.parse\nfrom pathlib import Path\n\nimport attr\nimport flask\nimport lxml.etree\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, abort, redirect, url_for\n\nfrom flask_saml2 import codex\nfrom flask_saml2.idp import IdentityProvider, SPHandler\nfrom flask_saml2.utils import certificate_from_file, private_key_from_file\n\n\ndef c14n(xml):\n \"\"\"Get the canonical bytes representation of an lxml XML tree.\"\"\"\n return lxml.etree.tostring(xml, method='c14n', exclusive=True)\n\n\n@attr.s\nclass User:\n username = attr.ib()\n email = attr.ib()\n\n\n@attr.s\nclass SamlView:\n html = attr.ib()\n html_soup = attr.ib()\n saml = attr.ib()\n saml_soup = attr.ib()\n form_action = attr.ib()\n\n\nKEY_DIR = Path(__file__).parent.parent / 'keys' / 'sample'\nCERTIFICATE_FILE = KEY_DIR / 'idp-certificate.pem'\nPRIVATE_KEY_FILE = KEY_DIR / 
'idp-private-key.pem'\n\nCERTIFICATE = certificate_from_file(CERTIFICATE_FILE)\nPRIVATE_KEY = private_key_from_file(PRIVATE_KEY_FILE)\n\n\nclass IdentityProvider(IdentityProvider):\n\n def __init__(self, service_providers, users=None, **kwargs):\n super().__init__(**kwargs)\n self.service_providers = service_providers\n self.users = {}\n if users is not None:\n for user in users:\n self.add_user(user)\n\n def get_idp_config(self):\n return {\n 'autosubmit': True,\n 'certificate': CERTIFICATE,\n 'private_key': PRIVATE_KEY,\n }\n\n def add_user(self, user):\n self.users[user.username] = user\n\n def get_service_providers(self):\n return self.service_providers\n\n def login_required(self):\n if not self.is_user_logged_in():\n abort(redirect('http://example.com/login/'))\n\n def is_user_logged_in(self):\n if 'user' not in flask.session:\n return False\n\n if flask.session['user'] not in self.users:\n return False\n\n return True\n\n def logout(self):\n del flask.session['user']\n\n def get_current_user(self):\n return self.users[flask.session['user']]\n\n def is_valid_redirect(self, url):\n url = urllib.parse.urlparse(url)\n return url.scheme == 'https' and url.netloc == 'saml.serviceprovid.er'\n\n\ndef create_test_app(idp: IdentityProvider):\n app = Flask(__name__)\n\n app.config['SERVER_NAME'] = 'idp.example.com'\n app.debug = True\n app.testing = True\n\n app.secret_key = 'not a secret'\n\n app.register_blueprint(idp.create_blueprint())\n\n return app\n\n\nclass AttributeSPHandler(SPHandler):\n def build_assertion(self, request, *args, **kwargs):\n return {\n **super().build_assertion(request, *args, **kwargs),\n 'ATTRIBUTES': {\n 'foo': 'bar',\n },\n }\n\n\nclass SamlTestCase:\n \"\"\"\n Sub-classes must provide these class properties:\n SP_CONFIG = ServicePoint metadata settings to use.\n \"\"\"\n BAD_VALUE = '!BAD VALUE!'\n USERNAME = 'fred'\n PASSWORD = '<PASSWORD>'\n EMAIL = '<EMAIL>'\n\n SP_CONFIG = [\n {\n 'CLASS': 'flask_saml2.idp.SPHandler',\n 'OPTIONS': {\n 'entity_id': 'http://example.com/',\n 'acs_url': 'http://127.0.0.1:9000/sp/acs/',\n },\n },\n {\n 'CLASS': 'tests.idp.base.AttributeSPHandler',\n 'OPTIONS': {\n 'entity_id': 'http://example.com/',\n 'acs_url': 'http://127.0.0.1:9000/sp/acs/',\n },\n },\n ]\n\n def setup_method(self, method):\n self.idp = IdentityProvider(self.SP_CONFIG)\n self.app = create_test_app(self.idp)\n self.client = self.app.test_client()\n self.context = self.app.app_context()\n self.context.push()\n\n def teardown_method(self, method):\n self.context.pop()\n\n def add_user(self, user):\n self.idp.users.append(user)\n\n def login(self, user):\n self.idp.add_user(user)\n with self.client.session_transaction() as session:\n session['user'] = user.username\n\n def hit_saml_view(self, url, **kwargs):\n \"\"\"\n Logs in the test user, then hits a view. 
Returns a :class:`SamlView`.\n \"\"\"\n self.login(self.user)\n response = self.client.get(url, **kwargs, follow_redirects=True)\n\n assert response.status_code == 200\n\n html = response.data.decode('utf-8')\n soup = BeautifulSoup(html, \"html5lib\")\n\n form = soup.find('form')\n form_action = form['action']\n\n inputtag = form.find('input', {'name': 'SAMLResponse'})\n encoded_response = inputtag['value']\n saml = codex.base64.b64decode(encoded_response)\n saml_soup = BeautifulSoup(saml, \"lxml-xml\")\n\n return SamlView(\n html=html, html_soup=soup,\n saml=saml, saml_soup=saml_soup,\n form_action=form_action)\n\n\nclass BaseSPHandlerTests(SamlTestCase):\n \"\"\"\n Sub-classes must provide these class properties:\n SP_CONFIG = ServicePoint metadata settings to use.\n REQUEST_DATA = dictionary containing 'SAMLRequest' and 'RelayState' keys.\n \"\"\"\n\n user = User('fred', '<EMAIL>')\n\n def setup_method(self, method):\n super().setup_method(method)\n self.login_begin_url = url_for('flask_saml2_idp.login_begin')\n self.login_process_url = url_for('flask_saml2_idp.login_process')\n\n def test_redirected(self):\n response = self.client.get(\n self.login_begin_url, query_string=self.REQUEST_DATA)\n assert response.status_code == 302\n assert response.headers['Location'] == self.login_process_url\n\n def test_authnrequest_handled(self):\n self.login(self.user)\n with self.client.session_transaction() as sess:\n sess.update(self.REQUEST_DATA)\n response = self.hit_saml_view(self.login_process_url)\n\n assert response.form_action == self.ACS_URL\n\n def test_user_logged_in(self):\n response = self.hit_saml_view(\n self.login_begin_url, query_string=self.REQUEST_DATA)\n assert self.EMAIL in response.saml.decode('utf-8')\n", "id": "12171254", "language": "Python", "matching_score": 7.5033135414123535, "max_stars_count": 61, "path": "tests/idp/base.py" }, { "content": "from pathlib import Path\n\nimport attr\nfrom flask import Flask\n\nfrom flask_saml2.sp import ServiceProvider\nfrom flask_saml2.utils import certificate_from_file, private_key_from_file\n\nKEY_DIR = Path(__file__).parent.parent / 'keys' / 'sample'\nCERTIFICATE_FILE = KEY_DIR / 'sp-certificate.pem'\nPRIVATE_KEY_FILE = KEY_DIR / 'sp-private-key.pem'\n\nCERTIFICATE = certificate_from_file(CERTIFICATE_FILE)\nPRIVATE_KEY = private_key_from_file(PRIVATE_KEY_FILE)\n\n\n@attr.s\nclass User:\n username = attr.ib()\n email = attr.ib()\n\n\ndef create_test_app(sp: ServiceProvider):\n app = Flask(__name__)\n\n app.config['SERVER_NAME'] = 'sp.example.com'\n app.debug = True\n app.testing = True\n\n app.secret_key = 'not a secret'\n\n app.register_blueprint(sp.create_blueprint())\n\n return app\n\n\nclass ServiceProvider(ServiceProvider):\n\n def __init__(self, identity_providers, **kwargs):\n super().__init__(**kwargs)\n self.identity_providers = identity_providers\n\n def get_sp_config(self):\n return {\n 'certificate': CERTIFICATE,\n 'private_key': PRIVATE_KEY,\n }\n\n def get_identity_providers(self):\n return self.identity_providers\n\n\nclass SamlTestCase:\n \"\"\"\n Sub-classes must provide these class properties:\n IDP_CONFIG = IdentityProvider settings to use.\n \"\"\"\n BAD_VALUE = '!BAD VALUE!'\n USERNAME = 'fred'\n PASSWORD = '<PASSWORD>'\n EMAIL = '<EMAIL>'\n\n IDP_CONFIG = [\n {\n 'CLASS': 'flask_saml2.sp.idphandler.IdPHandler',\n 'OPTIONS': {\n 'display_name': 'My Identity Provider',\n 'entity_id': 'https://idp.example.com/saml/metadata.xml',\n 'sso_url': 'https://idp.example.com/saml/login/',\n 'slo_url': 
'https://idp.example.com/saml/logout/',\n 'certificate': CERTIFICATE,\n },\n },\n ]\n\n def setup_method(self, method):\n self.sp = ServiceProvider(self.IDP_CONFIG)\n self.app = create_test_app(self.sp)\n self.client = self.app.test_client()\n self.context = self.app.app_context()\n self.context.push()\n\n def teardown_method(self, method):\n self.context.pop()\n", "id": "9522560", "language": "Python", "matching_score": 6.0771803855896, "max_stars_count": 61, "path": "tests/sp/base.py" }, { "content": "import urllib.parse\nfrom pathlib import Path\n\nimport attr\nimport flask\nfrom flask import Flask, abort, redirect\n\nimport flask_saml2.idp\nimport flask_saml2.sp\nfrom flask_saml2.utils import certificate_from_file, private_key_from_file\n\nKEY_DIR = Path(__file__).parent.parent / 'keys' / 'sample'\n\nIDP_CERTIFICATE = certificate_from_file(KEY_DIR / 'idp-certificate.pem')\nIDP_PRIVATE_KEY = private_key_from_file(KEY_DIR / 'idp-private-key.pem')\nSP_CERTIFICATE = certificate_from_file(KEY_DIR / 'sp-certificate.pem')\nSP_PRIVATE_KEY = private_key_from_file(KEY_DIR / 'sp-private-key.pem')\n\n\n@attr.s\nclass User:\n username = attr.ib()\n email = attr.ib()\n\n\nclass ServiceProvider(flask_saml2.sp.ServiceProvider):\n\n def __init__(self, identity_providers, **kwargs):\n super().__init__(**kwargs)\n self.identity_providers = identity_providers\n\n def get_sp_config(self):\n return {\n 'certificate': SP_CERTIFICATE,\n 'private_key': SP_PRIVATE_KEY,\n }\n\n def get_identity_providers(self):\n return self.identity_providers\n\n\nclass IdentityProvider(flask_saml2.idp.IdentityProvider):\n\n login_url = 'http://idp.example.com/login/'\n\n def __init__(self, service_providers, users=None, **kwargs):\n super().__init__(**kwargs)\n self.service_providers = service_providers\n self.users = {}\n if users is not None:\n for user in users:\n self.add_user(user)\n\n def get_idp_config(self):\n return {\n 'autosubmit': True,\n 'certificate': IDP_CERTIFICATE,\n 'private_key': IDP_PRIVATE_KEY,\n }\n\n def add_user(self, user):\n self.users[user.username] = user\n\n def get_service_providers(self):\n return self.service_providers\n\n def login_required(self):\n if not self.is_user_logged_in():\n next = urllib.parse.urlencode({'next': flask.request.url})\n abort(redirect(self.login_url + '?' 
+ next))\n\n def is_user_logged_in(self):\n if 'user' not in flask.session:\n return False\n\n if flask.session['user'] not in self.users:\n return False\n\n return True\n\n def logout(self):\n del flask.session['user']\n\n def get_current_user(self):\n return self.users[flask.session['user']]\n\n def is_valid_redirect(self, url):\n url = urllib.parse.urlparse(url)\n return url.scheme == 'http' and url.netloc == 'saml.serviceprovid.er'\n\n\nclass SamlTestCase:\n \"\"\"\n Sub-classes must provide these class properties:\n IDP_CONFIG = IdentityProvider settings to use.\n \"\"\"\n IDP_CONFIG = [\n {\n 'CLASS': 'flask_saml2.sp.idphandler.IdPHandler',\n 'OPTIONS': {\n 'display_name': 'My Identity Provider',\n 'entity_id': 'http://idp.example.com/saml/metadata.xml',\n 'sso_url': 'http://idp.example.com/saml/login/',\n 'slo_url': 'http://idp.example.com/saml/logout/',\n 'certificate': IDP_CERTIFICATE,\n },\n }\n ]\n\n SP_CONFIG = [\n {\n 'CLASS': 'flask_saml2.idp.SPHandler',\n 'OPTIONS': {\n 'entity_id': 'http://sp.example.com/saml/metadata.xml',\n 'acs_url': 'http://sp.example.com/saml/acs/',\n 'certificate': SP_CERTIFICATE,\n },\n }\n ]\n\n def create_sp_app(self, sp: flask_saml2.sp.ServiceProvider):\n app = Flask(__name__)\n\n app.config['SERVER_NAME'] = 'sp.example.com'\n app.debug = True\n app.testing = True\n\n app.secret_key = 'not a secret'\n\n app.register_blueprint(sp.create_blueprint(), url_prefix='/saml/')\n\n return app\n\n def create_idp_app(self, idp: flask_saml2.idp.IdentityProvider):\n app = Flask(__name__)\n\n app.config['SERVER_NAME'] = 'idp.example.com'\n app.debug = True\n app.testing = True\n\n app.secret_key = 'not a secret'\n\n app.register_blueprint(idp.create_blueprint(), url_prefix='/saml/')\n\n return app\n\n def setup_method(self, method):\n self.sp = ServiceProvider(self.IDP_CONFIG)\n self.sp_app = self.create_sp_app(self.sp)\n\n self.idp = IdentityProvider(self.SP_CONFIG)\n self.idp_app = self.create_idp_app(self.idp)\n\n self.sp_client = self.sp_app.test_client()\n self.idp_client = self.idp_app.test_client()\n\n def login(self, user: User):\n if user.username not in self.idp.users:\n self.idp.add_user(user)\n with self.idp_client.session_transaction() as sess:\n sess['user'] = user.username\n", "id": "1385010", "language": "Python", "matching_score": 4.590262413024902, "max_stars_count": 61, "path": "tests/combined/base.py" }, { "content": "#!/usr/bin/env python3\nimport logging\n\nfrom flask import Flask, abort, redirect, request, session, url_for\nfrom flask.views import MethodView\n\nfrom flask_saml2.idp import IdentityProvider\nfrom tests.idp.base import CERTIFICATE, PRIVATE_KEY, User\nfrom tests.sp.base import CERTIFICATE as SP_CERTIFICATE\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExampleIdentityProvider(IdentityProvider):\n def login_required(self):\n if not self.is_user_logged_in():\n next = url_for('login', next=request.url)\n\n abort(redirect(next))\n\n def is_user_logged_in(self):\n return 'user' in session and session['user'] in users\n\n def logout(self):\n del session['user']\n\n def get_current_user(self):\n return users[session['user']]\n\n\nusers = {user.username: user for user in [\n User('alex', '<EMAIL>'),\n User('jordan', '<EMAIL>'),\n]}\n\n\nidp = ExampleIdentityProvider()\n\n\nclass Login(MethodView):\n def get(self):\n options = ''.join(f'<option value=\"{user.username}\">{user.email}</option>'\n for user in users.values())\n select = f'<div><label>Select a user: <select name=\"user\">{options}</select></label></div>'\n\n next_url = 
request.args.get('next')\n next = f'<input type=\"hidden\" name=\"next\" value=\"{next_url}\">'\n\n submit = '<div><input type=\"submit\" value=\"Login\"></div>'\n\n form = f'<form action=\".\" method=\"post\">{select}{next}{submit}</form>'\n header = '<title>Login</title><p>Please log in to continue.</p>'\n\n return header + form\n\n def post(self):\n user = request.form['user']\n next = request.form['next']\n\n session['user'] = user\n logging.info(\"Logged user %s in\", user)\n logging.info(\"Redirecting to %s\", next)\n\n return redirect(next)\n\n\napp = Flask(__name__)\napp.debug = True\napp.secret_key = 'not a secret'\napp.config['SERVER_NAME'] = 'localhost:8000'\napp.config['SAML2_IDP'] = {\n 'autosubmit': True,\n 'certificate': CERTIFICATE,\n 'private_key': PRIVATE_KEY,\n}\napp.config['SAML2_SERVICE_PROVIDERS'] = [\n {\n 'CLASS': 'tests.idp.base.AttributeSPHandler',\n 'OPTIONS': {\n 'display_name': 'Example Service Provider',\n 'entity_id': 'http://localhost:9000/saml/metadata.xml',\n 'acs_url': 'http://localhost:9000/saml/acs/',\n 'certificate': SP_CERTIFICATE,\n },\n }\n]\n\napp.add_url_rule('/login/', view_func=Login.as_view('login'))\napp.register_blueprint(idp.create_blueprint(), url_prefix='/saml/')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n", "id": "4535540", "language": "Python", "matching_score": 3.4696598052978516, "max_stars_count": 61, "path": "examples/idp.py" }, { "content": "from flask import abort, current_app\nfrom flask_login import current_user, logout_user\n\nfrom flask_saml2.idp import IdentityProvider\n\n\nclass FlaskLoginIdentityProvider(IdentityProvider):\n def login_required(self):\n if not current_user.is_authenticated:\n raise abort(current_app.login_manager.unauthorized())\n\n def logout(self):\n logout_user()\n\n def get_current_user(self):\n return current_user\n", "id": "11997663", "language": "Python", "matching_score": 0.6841800212860107, "max_stars_count": 61, "path": "flask_saml2/idp/ext/flask_login.py" }, { "content": "\"\"\"\nTests for basic view functionality only.\n\nNOTE: These classes do not test anything SAML-related.\nTesting actual SAML functionality requires implementation-specific details,\nwhich should be put in another test module.\n\"\"\"\nimport pytest\nfrom flask import url_for\nfrom lxml import etree\nfrom werkzeug.exceptions import BadRequestKeyError\n\nfrom flask_saml2.utils import certificate_to_string\nfrom flask_saml2.xml_templates import NAMESPACE_MAP\n\nfrom .base import CERTIFICATE, SamlTestCase, User\n\nSAML_REQUEST = 'this is not a real SAML Request'\nRELAY_STATE = 'abcdefghi0123456789'\nREQUEST_DATA = {\n 'SAMLRequest': SAML_REQUEST,\n 'RelayState': RELAY_STATE,\n}\n\n\nclass TestLoginView(SamlTestCase):\n def setup_method(self, method):\n super().setup_method(method)\n self.login_begin_url = url_for('flask_saml2_idp.login_begin', _external=True)\n self.login_process_url = url_for('flask_saml2_idp.login_process', _external=True)\n\n def test_empty_get(self):\n \"\"\"GET request without SAMLResponse data should fail.\"\"\"\n with pytest.raises(BadRequestKeyError):\n self.client.get(self.login_begin_url)\n\n def test_empty_post(self):\n \"\"\"POST request without SAMLResponse data should fail.\"\"\"\n with pytest.raises(BadRequestKeyError):\n self.client.post(self.login_begin_url)\n\n def _test_pre_redirect(self):\n with self.client.session_transaction() as session:\n assert 'SAMLRequest' not in session\n assert 'RelayState' not in session\n\n def _test_redirect(self, response, 
status_code=302):\n assert response.status_code == status_code\n assert response.headers['location'] == self.login_process_url\n\n with self.client.session_transaction() as session:\n assert session['SAMLRequest'] == SAML_REQUEST\n assert session['RelayState'] == RELAY_STATE\n\n def test_get(self):\n \"\"\"\n GET did not redirect to process URL.\n \"\"\"\n self._test_pre_redirect()\n response = self.client.get(self.login_begin_url, query_string=REQUEST_DATA)\n self._test_redirect(response)\n\n def test_post(self):\n \"\"\"\n POST did not redirect to process URL.\n \"\"\"\n self._test_pre_redirect()\n response = self.client.post(self.login_begin_url, data=REQUEST_DATA)\n self._test_redirect(response)\n\n\nclass TestLoginProcessView(SamlTestCase):\n def test_process_request_not_authorized(self):\n \"\"\"Bogus request should have triggered exception.\"\"\"\n self.login(User('jordan', '<EMAIL>'))\n\n with self.client.session_transaction() as session:\n session['RelayState'] = RELAY_STATE\n session['SAMLRequest'] = SAML_REQUEST\n\n response = self.client.get(url_for('flask_saml2_idp.login_process'))\n assert response.status_code == 400\n\n\nclass TestLogoutView(SamlTestCase):\n def test_logout(self):\n \"\"\"\n Response did not say logged out.\n \"\"\"\n self.login(User(username='alex', email='<EMAIL>'))\n\n with self.client.session_transaction() as session:\n assert 'user' in session\n\n response = self.client.get(url_for('flask_saml2_idp.logout'))\n\n assert response.status_code == 200\n assert 'logged out' in response.data.decode('utf-8')\n\n with self.client.session_transaction() as session:\n assert 'user' not in session\n\n def test_logout_redirect(self):\n self.login(User(username='alex', email='<EMAIL>'))\n\n redirect_url = 'https://saml.serviceprovid.er/somewhere/'\n response = self.client.get(\n url_for('flask_saml2_idp.logout'),\n query_string={'redirect_to': redirect_url})\n\n assert response.status_code == 302\n assert response.headers['Location'] == redirect_url\n\n def test_logout_redirect_with_invalid_url_fails(self):\n self.login(User(username='alex', email='<EMAIL>'))\n\n redirect_url = '://saml.serviceprovid.er/somewhere/'\n response = self.client.get(\n url_for('flask_saml2_idp.logout'),\n query_string={'redirect_to': redirect_url})\n\n assert response.status_code == 200\n assert 'logged out' in response.data.decode('utf-8')\n\n\nclass TestMetadataView(SamlTestCase):\n def test_rendering_metadata_view(self):\n xpath = lambda el, path: el.xpath(path, namespaces=NAMESPACE_MAP)[0]\n\n response = self.client.get(url_for('flask_saml2_idp.metadata'))\n response_xml = etree.fromstring(response.data.decode('utf-8'))\n\n certificate = certificate_to_string(CERTIFICATE)\n login_url = url_for('flask_saml2_idp.login_begin', _external=True)\n logout_url = url_for('flask_saml2_idp.logout', _external=True)\n\n idp = xpath(response_xml, '/md:EntityDescriptor/md:IDPSSODescriptor')\n enc_key = xpath(idp, 'md:KeyDescriptor[@use=\"encryption\"]')\n sign_key = xpath(idp, 'md:KeyDescriptor[@use=\"signing\"]')\n\n assert certificate == xpath(enc_key, './/ds:X509Certificate').text\n assert certificate == xpath(sign_key, './/ds:X509Certificate').text\n\n assert login_url == xpath(idp, 'md:SingleSignOnService').get('Location')\n assert logout_url == xpath(idp, 'md:SingleLogoutService').get('Location')\n", "id": "6517345", "language": "Python", "matching_score": 5.917322158813477, "max_stars_count": 61, "path": "tests/idp/test_views.py" }, { "content": "\"\"\"\nTests for basic view functionality 
only.\n\nNOTE: These classes do not test anything SAML-related.\nTesting actual SAML functionality requires implementation-specific details,\nwhich should be put in another test module.\n\"\"\"\nfrom flask import url_for\nfrom lxml import etree\n\nfrom flask_saml2.utils import certificate_to_string\nfrom flask_saml2.xml_templates import NAMESPACE_MAP\n\nfrom .base import CERTIFICATE, SamlTestCase\n\nSAML_REQUEST = 'this is not a real SAML Request'\nRELAY_STATE = 'abcdefghi0123456789'\nREQUEST_DATA = {\n 'SAMLRequest': SAML_REQUEST,\n 'RelayState': RELAY_STATE,\n}\n\nxpath = lambda el, path: el.xpath(path, namespaces=NAMESPACE_MAP)[0]\n\n\nclass TestLogin(SamlTestCase):\n IDP_CONFIG = [\n {\n 'CLASS': 'flask_saml2.sp.idphandler.IdPHandler',\n 'OPTIONS': {\n 'display_name': 'My Identity Provider',\n 'entity_id': 'https://foo.idp.example.com/saml/metadata.xml',\n 'sso_url': 'https://foo.idp.example.com/saml/login/',\n 'slo_url': 'https://foo.idp.example.com/saml/logout/',\n 'certificate': CERTIFICATE,\n },\n },\n {\n 'CLASS': 'flask_saml2.sp.idphandler.IdPHandler',\n 'OPTIONS': {\n 'display_name': 'My Identity Provider',\n 'entity_id': 'https://bar.idp.example.com/saml/metadata.xml',\n 'sso_url': 'https://bar.idp.example.com/saml/login/',\n 'slo_url': 'https://bar.idp.example.com/saml/logout/',\n 'certificate': CERTIFICATE,\n },\n },\n ]\n\n def test_login(self):\n response = self.client.get(url_for('flask_saml2_sp.login'))\n foo_url = url_for(\n 'flask_saml2_sp.login_idp',\n entity_id='https://foo.idp.example.com/saml/metadata.xml',\n _external=False)\n bar_url = url_for(\n 'flask_saml2_sp.login_idp',\n entity_id='https://bar.idp.example.com/saml/metadata.xml',\n _external=False)\n\n body = response.data.decode('utf-8')\n assert foo_url in body\n assert bar_url in body\n\n def test_login_idp(self):\n response = self.client.get(url_for(\n 'flask_saml2_sp.login_idp',\n entity_id='https://foo.idp.example.com/saml/metadata.xml'))\n assert response.status_code == 302\n assert response.headers['Location'].startswith('https://foo.idp.example.com/saml/login/')\n\n\nclass TestLoginSingleIdP(SamlTestCase):\n def test_login(self):\n response = self.client.get(url_for('flask_saml2_sp.login'))\n login_url = url_for(\n 'flask_saml2_sp.login_idp',\n entity_id='https://idp.example.com/saml/metadata.xml',\n _external=True)\n assert response.status_code == 302\n assert response.headers['Location'] == login_url\n\n\nclass TestMetadataView(SamlTestCase):\n def test_rendering_metadata_view(self):\n xpath = lambda el, path: el.xpath(path, namespaces=NAMESPACE_MAP)[0]\n\n response = self.client.get(url_for('flask_saml2_sp.metadata'))\n response_xml = etree.fromstring(response.data.decode('utf-8'))\n\n certificate = certificate_to_string(CERTIFICATE)\n\n sp = xpath(response_xml, '/md:EntityDescriptor/md:SPSSODescriptor')\n enc_key = xpath(sp, './md:KeyDescriptor[@use=\"encryption\"]')\n sign_key = xpath(sp, './md:KeyDescriptor[@use=\"signing\"]')\n\n assert certificate == xpath(enc_key, './/ds:X509Certificate').text\n assert certificate == xpath(sign_key, './/ds:X509Certificate').text\n\n acs_url = url_for('flask_saml2_sp.acs', _external=True)\n slo_url = url_for('flask_saml2_sp.sls', _external=True)\n assert acs_url == xpath(sp, './md:AssertionConsumerService').attrib['Location']\n assert slo_url == xpath(sp, './md:SingleLogoutService').attrib['Location']\n", "id": "2466028", "language": "Python", "matching_score": 2.89349365234375, "max_stars_count": 61, "path": "tests/sp/test_views.py" }, { "content": 
"\"\"\"\nTests for the Google Apps service provider.\n\"\"\"\nimport lxml.etree\n\nfrom flask_saml2 import codex\n\nfrom . import base\n\nSAML_REQUEST = codex.deflate_and_base64_encode(\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n '<samlp:AuthnRequest xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\" '\n 'ID=\"doljiidhacjcjifebimhedigpeejhpifpdmlbjai\" Version=\"2.0\" '\n 'IssueInstant=\"2011-10-05T17:49:29Z\" '\n 'ProtocolBinding=\"urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST\" '\n 'ProviderName=\"google.com\" IsPassive=\"false\" '\n 'AssertionConsumerServiceURL=\"https://www.google.com/a/example.com/acs\">'\n '<saml:Issuer xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\">'\n 'google.com</saml:Issuer>'\n '<samlp:NameIDPolicy AllowCreate=\"true\" '\n 'Format=\"urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified\" />'\n '</samlp:AuthnRequest>'\n)\nRELAY_STATE = (\n 'https://www.google.com/a/example.com/ServiceLogin'\n '?service=writely&passive=true'\n '&continue=https%3A%2F%2Fdocs.google.com%2Fa%2Fexample.com%2F'\n '&followup=https%3A%2F%2Fdocs.google.com%2Fa%2Fexample.com%2F'\n '&ltmpl=homepage'\n)\nGOOGLE_APPS_ACS = 'https://www.google.com/a/example.com/acs'\n\n\nclass TestGoogleAppsSPHandler(base.BaseSPHandlerTests):\n ACS_URL = GOOGLE_APPS_ACS\n\n SP_CONFIG = [{\n 'CLASS': 'flask_saml2.idp.sp.google_apps.GoogleAppsSPHandler',\n 'OPTIONS': {\n 'entity_id': 'google.com',\n 'acs_url': GOOGLE_APPS_ACS,\n },\n }]\n\n REQUEST_DATA = {\n 'SAMLRequest': SAML_REQUEST.decode('utf-8'),\n 'RelayState': RELAY_STATE,\n }\n\n BAD_ACS_URLS = [\n 'https://example.com/',\n 'https://malicious.com/www.google.com/a/example.com/acs/',\n ]\n\n def test_authnrequest_bad_acs_url(self):\n for new_acs_url in self.BAD_ACS_URLS:\n self.login(self.user)\n\n original_request = self.REQUEST_DATA['SAMLRequest']\n xml = lxml.etree.fromstring(codex.decode_saml_xml(original_request))\n xml.set('AssertionConsumerServiceURL', new_acs_url)\n new_request = codex.deflate_and_base64_encode(base.c14n(xml)).decode('utf-8')\n\n with self.client.session_transaction() as sess:\n sess.update({\n **self.REQUEST_DATA,\n 'SAMLRequest': new_request,\n })\n\n response = self.client.get(self.login_process_url)\n assert response.status_code == 400\n", "id": "885880", "language": "Python", "matching_score": 4.120004177093506, "max_stars_count": 61, "path": "tests/idp/test_google_apps.py" }, { "content": "\"\"\"\nXML templates for SAML 2.0 SP\n\"\"\"\nfrom flask_saml2.xml_templates import NameIDTemplate, XmlTemplate\n\n\nclass AuthnRequest(XmlTemplate):\n namespace = 'samlp'\n\n def get_issuer(self):\n namespace = self.get_namespace_map()['saml']\n return self.element('Issuer', namespace=namespace, text=self.params['ISSUER'])\n\n def generate_xml(self):\n return self.element('AuthnRequest', attrs={\n 'ID': self.params['REQUEST_ID'],\n 'Version': '2.0',\n 'IssueInstant': self.params['ISSUE_INSTANT'],\n 'Destination': self.params['DESTINATION'],\n 'ProtocolBinding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',\n 'AssertionConsumerServiceURL': self.params['ACS_URL'],\n }, children=[\n self.get_issuer(),\n ])\n\n \"\"\"\n <samlp:AuthnRequest\n xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\"\n xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\"\n ID=\"${ID}\"\n Version=\"2.0\"${PROVIDER_NAME}\n IssueInstant=\"${ISSUE_INSTANT}\"\n Destination=\"${DESTINATION}\"\n ProtocolBinding=\"urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST\"\n AssertionConsumerServiceURL=\"${ACS_URL}\">\n <saml:Issuer>${ENTITY_ID}</saml:Issuer>\n 
</samlp:AuthnRequest>\n \"\"\"\n\n\nclass LogoutRequest(XmlTemplate):\n namespace = 'samlp'\n\n def get_issuer(self):\n namespace = self.get_namespace_map()['saml']\n return self.element('Issuer', namespace=namespace, text=self.params['ISSUER'])\n\n def get_nameid(self):\n return NameIDTemplate(self.params).xml\n\n def get_session_index(self):\n return None\n\n def generate_xml(self):\n return self.element('LogoutRequest', attrs={\n 'ID': self.params['REQUEST_ID'],\n 'Version': '2.0',\n 'IssueInstant': self.params['ISSUE_INSTANT'],\n 'Destination': self.params['DESTINATION'],\n }, children=[\n self.get_issuer(),\n self.get_nameid(),\n self.get_session_index(),\n ])\n\n \"\"\"\n <samlp:LogoutRequest\n xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\"\n xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\"\n ID=\"${ID}\"\n Version=\"2.0\"\n IssueInstant=\"${ISSUE_INSTANT}\"\n Destination=\"${SINGLE_LOGOUT_URL}\">\n <saml:Issuer>${ENTITY_ID}</saml:Issuer>\n ${NAME_ID}\n ${SESSION_INDEX}\n </samlp:LogoutRequest>\n \"\"\"\n", "id": "8728544", "language": "Python", "matching_score": 2.846967935562134, "max_stars_count": 61, "path": "flask_saml2/sp/xml_templates.py" }, { "content": "from typing import Iterable, Mapping, Optional\n\nfrom lxml import etree\n\nfrom flask_saml2.types import XmlNode\nfrom flask_saml2.utils import cached_property\n\nNAMESPACE_MAP: Mapping[str, str] = { # Namespace map\n 'samlp': 'urn:oasis:names:tc:SAML:2.0:protocol',\n 'saml': 'urn:oasis:names:tc:SAML:2.0:assertion',\n 'md': 'urn:oasis:names:tc:SAML:2.0:metadata',\n 'ds': 'http://www.w3.org/2000/09/xmldsig#',\n}\n\n\nclass XmlTemplate:\n \"\"\"Base XML template class.\n A template can represent a single node, a tree, or a whole XML document.\n \"\"\"\n\n #: XML namespace for this node or document\n namespace = None\n\n def __init__(self, params: dict = {}):\n \"\"\"Initialize this template using the supplied parameters dict.\n The parameters will be used in :meth:`generate_xml`.\n \"\"\"\n self.params = params.copy()\n\n @cached_property\n def xml(self) -> XmlNode:\n \"\"\"The XML node this template constructed.\n Generated using :meth:`generate_xml`.\n \"\"\"\n return self.generate_xml()\n\n def generate_xml(self) -> XmlNode:\n \"\"\"Generate the XML node for this template.\n Generally accessed through :attr:`xml`.\n \"\"\"\n raise NotImplementedError\n\n def get_xml_string(self) -> str:\n \"\"\"Render the XML node to a string.\n The string representation is rendered as canonical c14n XML,\n to make verification and signing possible.\n \"\"\"\n return etree.tostring(self.xml, method='c14n', exclusive=True).decode('utf-8')\n\n def element(\n self,\n tag: str,\n *,\n namespace: Optional[str] = None,\n attrs: Optional[Mapping[str, Optional[str]]] = None,\n children: Optional[Iterable[Optional[XmlNode]]] = None,\n text: Optional[str] = None\n ) -> XmlNode:\n \"\"\"\n Shortcut for creating an ElementTree Element, with optional attributes,\n children, and text.\n\n :param tag str: tag to give XML element\n :param namespace str: Namespace to use for the element. Defaults to\n :meth:`get_namespace()` if None.\n :param attrs dict: Element attributes. If an attribute value is None,\n the attribute is ignored.\n :param children list: Element children. 
If an item in children is None,\n the item is ignored.\n :param text str: Element text content, if any.\n :return: xml.etree.ElementTree.Element\n \"\"\"\n if namespace is None:\n namespace = self.get_namespace()\n\n tag_name = f'{{{namespace}}}{tag}'\n element = etree.Element(tag_name, nsmap=self.get_namespace_map())\n\n if attrs is not None:\n for k, v in attrs.items():\n if v is not None:\n element.set(k, v)\n\n if children is not None:\n for child in children:\n if child is not None:\n element.append(child)\n\n if text is not None:\n element.text = text\n\n return element\n\n def get_namespace_map(self) -> Mapping[str, str]:\n \"\"\"Get all the namespaces potentially used by this node, as a etree nsmap.\n \"\"\"\n return NAMESPACE_MAP\n\n def get_namespace(self) -> str:\n \"\"\"Get the namespace URI for this node.\n Looks up the namespace alias :attr:`namespace`\n in :meth:`get_namespace_map`.\n \"\"\"\n return self.get_namespace_map()[self.namespace]\n\n\nclass NameIDTemplate(XmlTemplate):\n \"\"\"\n A ``<NameID>`` node, such as:\n\n .. code-block:: xml\n\n <NameID Format=\"${SUBJECT_FORMAT}\" SPNameQualifier=\"${SP_NAME_QUALIFIER}\">\n ${SUBJECT}\n </NameID>\n \"\"\"\n namespace = 'saml'\n\n def generate_xml(self):\n return self.element('NameID', attrs={\n 'Format': self.params['SUBJECT_FORMAT'],\n 'SPNameQualifier': self.params.get('SP_NAME_QUALIFIER'),\n }, text=self.params['SUBJECT'])\n", "id": "9605193", "language": "Python", "matching_score": 2.6867876052856445, "max_stars_count": 61, "path": "flask_saml2/xml_templates.py" }, { "content": "from typing import Optional\n\nfrom flask_saml2.types import XmlNode\nfrom flask_saml2.utils import cached_property\nfrom flask_saml2.xml_parser import XmlParser\n\n\nclass AuthnRequestParser(XmlParser):\n \"\"\"Parses an incoming ``<AuthnRequest>``\n and provides shortcuts to access common attributes.\"\"\"\n\n def is_signed(self) -> bool:\n \"\"\"Is the ``<AuthnRequest>`` signed?\"\"\"\n return bool(self._xpath_xml_tree('/samlp:AuthnRequest/ds:Signature'))\n\n @cached_property\n def issuer(self) -> str:\n \"\"\"The content of the ``<Issuer>`` element.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/saml:Issuer')[0].text\n\n @cached_property\n def request_id(self) -> str:\n \"\"\"The ``<AuthnRequest>`` ID attribute.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/@ID')[0]\n\n @cached_property\n def destination(self) -> str:\n \"\"\"The ``<AuthnRequest>`` Destination attribute, if it has one.\"\"\"\n try:\n return self._xpath_xml_tree('/samlp:AuthnRequest/@Destination')[0]\n except IndexError:\n return ''\n\n @cached_property\n def acs_url(self) -> str:\n \"\"\"The AssertionConsumerServiceURL attribute.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/@AssertionConsumerServiceURL')[0]\n\n @cached_property\n def provider_name(self) -> str:\n \"\"\"The ProviderName attribute, if it exists.\"\"\"\n try:\n return self._xpath_xml_tree('/samlp:AuthnRequest/@ProviderName')[0]\n except IndexError:\n return ''\n\n @cached_property\n def version(self) -> str:\n \"\"\"The Version attribute.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/@Version')[0]\n\n @cached_property\n def issue_instant(self) -> str:\n \"\"\"The IssueInstant attribute.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/@IssueInstant')[0]\n\n @cached_property\n def protocol_binding(self) -> str:\n \"\"\"The ProtocolBinding attribute.\"\"\"\n return self._xpath_xml_tree('/samlp:AuthnRequest/@ProtocolBinding')[0]\n\n\nclass 
LogoutRequestParser(XmlParser):\n\n def is_signed(self):\n return bool(self._xpath_xml_tree('/samlp:LogoutRequest/ds:Signature'))\n\n @cached_property\n def issuer(self) -> str:\n return self._xpath_xml_tree('/samlp:LogoutRequest/saml:Issuer')[0].text\n\n @cached_property\n def request_id(self) -> str:\n return self._xpath_xml_tree('/samlp:LogoutRequest/@ID')[0]\n\n @cached_property\n def destination(self) -> Optional[str]:\n try:\n return self._xpath_xml_tree('/samlp:LogoutRequest/@Destination')[0]\n except IndexError:\n return None\n\n @cached_property\n def version(self) -> str:\n return self._xpath_xml_tree('/samlp:LogoutRequest/@Version')[0]\n\n @cached_property\n def issue_instant(self) -> str:\n return self._xpath_xml_tree('/samlp:LogoutRequest/@IssueInstant')[0]\n\n @cached_property\n def nameid_el(self) -> XmlNode:\n return self._xpath_xml_tree('/samlp:LogoutRequest/saml:NameID')[0]\n\n @cached_property\n def nameid(self) -> XmlNode:\n return self.nameid_el.text\n\n @cached_property\n def nameid_format(self) -> XmlNode:\n return self._xpath(self.nameid_el, '@Format')[0]\n", "id": "5303622", "language": "Python", "matching_score": 1.1019365787506104, "max_stars_count": 61, "path": "flask_saml2/idp/parser.py" }, { "content": "import datetime\n\nimport bs4\nimport flask\nimport freezegun\nimport pytest\nfrom flask import url_for\n\nfrom flask_saml2.exceptions import CannotHandleAssertion\nfrom flask_saml2.utils import utcnow\n\nfrom .base import SamlTestCase, User\n\n\nclass TestEndToEnd(SamlTestCase):\n \"\"\"\n Test the SP and IdP as a user/browser, going through the whole login\n process, following the redirects, submitting the forms, etc.\n \"\"\"\n def test_end_to_end(self):\n # Pretend we want to access this protected page\n login_next = 'http://sp.example.com/dashboard'\n\n with self.sp_app.app_context():\n # We go here to log in\n sp_login_url = url_for('flask_saml2_sp.login', next=login_next)\n response = self.sp_client.get(sp_login_url)\n\n # We should be redirected to the specific IdP login URL\n sp_login_idp_url = url_for(\n 'flask_saml2_sp.login_idp',\n entity_id='http://idp.example.com/saml/metadata.xml',\n next=login_next, _external=True)\n assert response.status_code == 302\n assert response.headers['Location'] == sp_login_idp_url\n\n # Lets fetch that...\n response = self.sp_client.get(sp_login_idp_url)\n\n with self.idp_app.app_context():\n # Which should send us to the IdP\n idp_login_url = response.headers['Location']\n assert idp_login_url.startswith(\n url_for('flask_saml2_idp.login_begin', _external=True))\n\n # Which bounces us through the hoops\n response = self.idp_client.get(idp_login_url)\n assert response.status_code == 302\n assert response.headers['Location'] \\\n == url_for('flask_saml2_idp.login_process', _external=True)\n\n process_url = response.headers['Location']\n response = self.idp_client.get(process_url)\n\n # Seems we need to log in!\n assert response.status_code == 302\n assert response.headers['Location'].startswith(self.idp.login_url)\n\n # Lets create a user and login as them\n user = User('alex', '<EMAIL>')\n self.login(user)\n\n # And try the process url again\n response = self.idp_client.get(process_url)\n assert response.status_code == 200\n\n # It returns an HTML form that gets POSTed to the SP\n doc = bs4.BeautifulSoup(response.data, 'html.parser')\n form = doc.find(id='logged_in_post_form')\n assert form.get('method') == 'post'\n\n # Collect the form details...\n target = form.get('action')\n inputs = form.find_all('input')\n data 
= {el.get('name'): el.get('value') for el in inputs if el.get('name')}\n\n with self.sp_app.app_context():\n # And hit the SP as if the form was posted\n assert target == url_for('flask_saml2_sp.acs', _external=True)\n response = self.sp_client.post(target, data=data)\n\n # This should send us onwards to the protected page\n assert response.status_code == 302\n assert response.headers['Location'] == login_next\n\n ctx = self.sp_app.test_request_context('/dashboard/', environ_base={\n 'HTTP_COOKIE': response.headers['Set-Cookie']})\n with ctx:\n # We should also have been logged in, horray!\n auth_data = self.sp.get_auth_data_in_session()\n assert auth_data.nameid == user.email\n\n\nclass TestInvalidConditions(SamlTestCase):\n user = User('alex', '<EMAIL>')\n\n def _make_authn_request(self):\n # Make an AuthnRequest\n idp_handler = self.sp.get_idp_handler_by_entity_id('http://idp.example.com/saml/metadata.xml')\n with self.sp_app.app_context():\n authn_request = idp_handler.get_authn_request()\n return idp_handler.encode_saml_string(authn_request.get_xml_string())\n\n def _process_authn_request(self, authn_request):\n with self.idp_app.app_context():\n sp_handler = next(self.idp.get_sp_handlers())\n\n request_handler = sp_handler.parse_authn_request(authn_request)\n with self.idp_app.test_request_context('/saml/'):\n flask.session['user'] = 'alex'\n response_xml = sp_handler.make_response(request_handler)\n return sp_handler.encode_response(response_xml)\n\n def _process_authn_response(self, authn_response):\n idp_handler = self.sp.get_idp_handler_by_entity_id('http://idp.example.com/saml/metadata.xml')\n with self.sp_app.app_context():\n response_handler = idp_handler.get_response_parser(authn_response)\n return idp_handler.get_auth_data(response_handler)\n\n def test_too_early(self):\n now = utcnow()\n self.login(self.user)\n\n with freezegun.freeze_time(now) as frozen:\n authn_request = self._make_authn_request()\n\n # step forwards a bit for transmission time\n frozen.tick(delta=datetime.timedelta(seconds=30))\n\n authn_response = self._process_authn_request(authn_request)\n\n # step backwards a bunch\n frozen.tick(delta=datetime.timedelta(minutes=-5))\n\n with pytest.raises(CannotHandleAssertion, match='NotBefore'):\n self._process_authn_response(authn_response)\n\n def test_too_late(self):\n now = utcnow()\n self.login(self.user)\n\n with freezegun.freeze_time(now) as frozen:\n authn_request = self._make_authn_request()\n\n # step forwards a bit for transmission time\n frozen.tick(delta=datetime.timedelta(seconds=30))\n\n authn_response = self._process_authn_request(authn_request)\n\n # step backwards a bunch\n frozen.tick(delta=datetime.timedelta(minutes=25))\n\n with pytest.raises(CannotHandleAssertion, match='NotOnOrAfter'):\n self._process_authn_response(authn_response)\n\n def test_just_right(self):\n now = utcnow()\n self.login(self.user)\n\n with freezegun.freeze_time(now) as frozen:\n authn_request = self._make_authn_request()\n\n # step forwards a bit for transmission time\n frozen.tick(delta=datetime.timedelta(seconds=30))\n\n authn_response = self._process_authn_request(authn_request)\n\n # step forwards a bit for transmission times\n frozen.tick(delta=datetime.timedelta(seconds=30))\n\n auth_data = self._process_authn_response(authn_response)\n assert auth_data.nameid == self.user.email\n\n def test_bad_audience(self):\n self.login(self.user)\n\n authn_request = self._make_authn_request()\n authn_response = self._process_authn_request(authn_request)\n\n # Change the server 
name, which will change the EntityID, which\n # will cause a mismatch in the audience.\n self.sp_app.config['SERVER_NAME'] = 'sp.sample.net'\n\n with pytest.raises(CannotHandleAssertion, match='AudienceRestriction'):\n self._process_authn_response(authn_response)\n", "id": "11443576", "language": "Python", "matching_score": 5.056057453155518, "max_stars_count": 61, "path": "tests/combined/test_thing.py" }, { "content": "import datetime\nfrom typing import Mapping, Optional\nfrom urllib.parse import urlencode\n\nimport attr\nimport iso8601\n\nfrom flask_saml2 import codex\nfrom flask_saml2.exceptions import CannotHandleAssertion\nfrom flask_saml2.signing import sign_query_parameters\nfrom flask_saml2.types import X509\nfrom flask_saml2.utils import get_random_id, utcnow\nfrom flask_saml2.xml_templates import XmlTemplate\n\nfrom .parser import ResponseParser\nfrom .xml_templates import AuthnRequest, LogoutRequest\n\n\n@attr.s(auto_attribs=True)\nclass AuthData:\n handler: 'IdPHandler'\n nameid: str\n nameid_format: str\n attributes: Mapping[str, str]\n session_id: str = None\n\n def to_dict(self) -> dict:\n \"\"\"\n Return a dict of all attributes. You can store this dict in a session\n store, and recreate this instance using :meth:`from_dict`.\n \"\"\"\n data = attr.asdict(self, filter=lambda a, v: a.name != 'handler')\n return {\n 'data': data,\n 'handler': self.handler.entity_id,\n }\n\n @classmethod\n def from_dict(cls, sp, data: dict):\n \"\"\"\n Construct an :class:`AuthData` instance from a dict such as\n :meth:`to_dict` produces.\n \"\"\"\n return cls(**{\n **data['data'],\n 'handler': sp.get_idp_handler_by_entity_id(data['handler']),\n })\n\n @classmethod\n def is_valid(cls, sp, data: dict):\n if set(data.keys()) != {'data', 'handler'}:\n return False\n try:\n sp.get_idp_handler_by_entity_id(data['handler'])\n except ValueError:\n return False\n return True\n\n\nclass IdPHandler:\n \"\"\"\n Represents an Identity Provider that the running Service Provider knows\n about. This class should be subclassed for Identity Providers that need\n specific configurations.\n \"\"\"\n entity_id: str\n display_name: Optional[str] = None\n certificate: Optional[X509] = None\n\n def __init__(\n self,\n sp,\n *,\n entity_id: str,\n display_name: Optional[str] = None,\n sso_url: Optional[str] = None,\n slo_url: Optional[str] = None,\n certificate: Optional[X509] = None,\n **kwargs,\n ):\n \"\"\"\n Construct a new IdPHandler.\n\n ``sp`` is the :class:`~.sp.ServiceProvider` instance that is running\n this Service Provider.\n\n ``entity_id`` is the unique identifier for the IdP, as found in the IdP\n metadata.\n\n ``display_name`` will be shown to users when they have a choice of\n IdP's to authenticate against, falling back to the ``entity_id`` if\n this is not provided.\n\n ``sso_url`` and ``slo_url`` are the SSO and SLO URLs on the IdP. 
These\n are optional if you override :meth:`get_idp_sso_url` and\n :meth:`get_idp_slo_url`.\n\n ``certificate`` is the public X509 certificate of the IdP.\n\n The ``sso_url``, ``slo_url``, and ``certificate`` can all be found in\n the IdP's metadata.\n \"\"\"\n super().__init__(**kwargs)\n\n self.sp = sp\n self.entity_id = entity_id\n\n if display_name is not None:\n self.display_name = display_name\n if sso_url is not None:\n self.sso_url = sso_url\n if slo_url is not None:\n self.slo_url = slo_url\n if certificate is not None:\n self.certificate = certificate\n\n def get_idp_sso_url(self):\n \"\"\"Get the Single Sign On URL for this IdP.\"\"\"\n return self.sso_url\n\n def get_idp_slo_url(self):\n \"\"\"Get the Single Log Out URL for this IdP.\"\"\"\n return self.slo_url\n\n def get_sp_acs_url(self):\n \"\"\"\n Get the Attribute Consumer Service URL on the current SP this IdP\n should send responses to.\n \"\"\"\n return self.sp.get_acs_url()\n\n def get_authn_request(\n self,\n template=AuthnRequest,\n **parameters,\n ):\n \"\"\"\n Make a AuthnRequest to send to this IdP.\n \"\"\"\n return template({\n 'REQUEST_ID': get_random_id(),\n 'ISSUE_INSTANT': self.format_datetime(utcnow()),\n 'DESTINATION': self.get_idp_sso_url(),\n 'ISSUER': self.sp.get_sp_entity_id(),\n 'ACS_URL': self.get_sp_acs_url(),\n **parameters,\n })\n\n def get_logout_request(\n self,\n auth_data: AuthData,\n template: XmlTemplate = LogoutRequest,\n **parameters,\n ):\n \"\"\"\n Make a LogoutRequest for the authenticated user to send to this IdP.\n \"\"\"\n return template({\n 'REQUEST_ID': get_random_id(),\n 'ISSUE_INSTANT': self.format_datetime(utcnow()),\n 'DESTINATION': self.get_idp_slo_url(),\n 'ISSUER': self.sp.get_sp_entity_id(),\n 'SUBJECT': auth_data.nameid,\n 'SUBJECT_FORMAT': auth_data.nameid_format,\n **parameters,\n })\n\n def make_login_request_url(\n self,\n relay_state: Optional[str] = None,\n ) -> str:\n \"\"\"Make a LoginRequest url and query string for this IdP.\"\"\"\n authn_request = self.get_authn_request()\n saml_request = self.encode_saml_string(authn_request.get_xml_string())\n\n parameters = [('SAMLRequest', saml_request)]\n if relay_state is not None:\n parameters.append(('RelayState', relay_state))\n\n return self._make_idp_request_url(self.get_idp_sso_url(), parameters)\n\n def make_logout_request_url(\n self,\n auth_data: AuthData,\n relay_state: Optional[str] = None,\n ) -> str:\n logout_request = self.get_logout_request(auth_data)\n saml_request = self.encode_saml_string(logout_request.get_xml_string())\n\n parameters = [('SAMLRequest', saml_request)]\n if relay_state is not None:\n parameters.append(('RelayState', relay_state))\n\n return self._make_idp_request_url(self.get_idp_slo_url(), parameters)\n\n def _make_idp_request_url(self, url, parameters):\n \"\"\"\n Make a URL to the SAML IdP, signing the query parameters if required.\n \"\"\"\n if self.sp.should_sign_requests():\n query = sign_query_parameters(self.sp.get_sp_signer(), parameters)\n else:\n query = urlencode(parameters)\n\n if \"?\" in url.split(\"/\")[-1]:\n return f'{url}&{query}'\n return f'{url}?{query}'\n\n def decode_saml_string(self, saml_string: str) -> bytes:\n \"\"\"Decode an incoming SAMLResponse into an XML string.\"\"\"\n return codex.decode_saml_xml(saml_string)\n\n def encode_saml_string(self, saml_string: str) -> str:\n \"\"\"Encoding an XML string into a SAMLRequest.\"\"\"\n return codex.deflate_and_base64_encode(saml_string)\n\n def get_response_parser(self, saml_response):\n \"\"\"\n Make a 
:class:`~.parser.ResponseParser` instance to handle this\n response.\n \"\"\"\n return ResponseParser(\n self.decode_saml_string(saml_response),\n certificate=self.certificate)\n\n def get_auth_data(self, response: ResponseParser) -> AuthData:\n \"\"\"\n Create an :class:`AuthData` instance from a SAML Response. The response\n is validated first.\n \"\"\"\n # self.validate_response(response)\n\n return AuthData(\n handler=self,\n nameid=response.nameid,\n nameid_format=response.nameid_format,\n attributes=response.attributes,\n )\n\n def validate_response(self, response: ResponseParser):\n # Check it came from the right place\n if self.entity_id != response.issuer:\n raise CannotHandleAssertion(\n f'Entity ID mismatch {self.entity_id} != {response.issuer}')\n\n if response.conditions is not None:\n # Validate the NotBefore/NotOnOrAfter tags\n now = utcnow()\n not_before = response.conditions.get('NotBefore')\n not_on_or_after = response.conditions.get('NotOnOrAfter')\n try:\n if not_before is not None and now < iso8601.parse_date(not_before):\n raise CannotHandleAssertion(f'NotBefore={not_before} check failed')\n if not_on_or_after is not None and now >= iso8601.parse_date(not_on_or_after):\n raise CannotHandleAssertion(f'NotOnOrAfter={not_on_or_after} check failed')\n except ValueError as err:\n raise CannotHandleAssertion(\"Could not parse date\") from err\n\n # Validate the AudienceRestriction elements, if they exist\n audiences = response._xpath(response.conditions, './saml:AudienceRestriction/saml:Audience')\n entity_id = self.sp.get_sp_entity_id()\n if len(audiences) and not any(el.text == entity_id for el in audiences):\n raise CannotHandleAssertion(\"No valid AudienceRestriction found\")\n\n def format_datetime(self, value: datetime.datetime) -> str:\n \"\"\"\n Format a datetime for this IdP. Some IdPs are picky about their date\n formatting, and don't support the format produced by\n :meth:`datetime.datetime.isoformat`.\n \"\"\"\n return value.isoformat()\n\n def __str__(self):\n if self.display_name:\n return self.display_name\n return self.entity_id\n\n def __repr__(self):\n return f'<{type(self).__name__}: {self.entity_id}>'\n", "id": "8557053", "language": "Python", "matching_score": 4.715257167816162, "max_stars_count": 0, "path": "flask_saml2/sp/idphandler.py" }, { "content": "\"\"\"\nXML templates for SAML 2.0 IdP\n\"\"\"\nfrom flask_saml2.signing import SignableTemplate\nfrom flask_saml2.types import XmlNode\nfrom flask_saml2.xml_templates import NameIDTemplate, XmlTemplate\n\n\nclass AttributeTemplate(XmlTemplate):\n \"\"\"\n .. code-block:: xml\n\n <saml:Attribute Name=\"${ATTRIBUTE_NAME}\" NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:basic\">\n <saml:AttributeValue>\n ${ATTRIBUTE_VALUE}\n </saml:AttributeValue>\n </saml:Attribute>\n \"\"\"\n namespace = 'saml'\n\n def generate_xml(self):\n return self.element('Attribute', attrs={\n 'Name': self.params['ATTRIBUTE_NAME'],\n 'NameFormat': 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic',\n }, children=[\n self.element('AttributeValue', text=self.params['ATTRIBUTE_VALUE']),\n ])\n\n\nclass AttributeStatementTemplate(XmlTemplate):\n \"\"\"\n .. 
code-block:: xml\n\n <saml:AttributeStatement>\n ${ATTRIBUTES}\n </saml:AttributeStatement>\n \"\"\"\n namespace = 'saml'\n\n def generate_xml(self):\n attributes = self.params.get('ATTRIBUTES', {})\n if not attributes:\n return None\n\n return self.element('AttributeStatement', children=[\n AttributeTemplate({'ATTRIBUTE_NAME': name, 'ATTRIBUTE_VALUE': value}).xml\n for name, value in attributes.items()\n ])\n\n\nclass SubjectTemplate(XmlTemplate):\n \"\"\"\n .. code-block:: xml\n\n <saml:Subject>\n <saml:NameID Format=\"${SUBJECT_FORMAT}\" SPNameQualifier=\"${SP_NAME_QUALIFIER}\">\n ${SUBJECT}\n </saml:NameID>\n <saml:SubjectConfirmation Method=\"urn:oasis:names:tc:SAML:2.0:cm:bearer\">\n <saml:SubjectConfirmationData\n InResponseTo=\"${IN_RESPONSE_TO}\"\n NotOnOrAfter=\"${NOT_ON_OR_AFTER}\"\n Recipient=\"${ACS_URL}\">\n </saml:SubjectConfirmationData>\n </saml:SubjectConfirmation>\n </saml:Subject>\n \"\"\"\n namespace = 'saml'\n\n def generate_xml(self):\n return self.element('Subject', children=[\n self._get_name_id_xml(),\n self._get_subject_conf_xml(),\n ])\n\n def _get_name_id_xml(self):\n return NameIDTemplate(self.params).xml\n\n def _get_subject_conf_xml(self):\n scd_attributes = {\n 'NotOnOrAfter': self.params['NOT_ON_OR_AFTER'],\n 'Recipient': self.params['ACS_URL'],\n 'InResponseTo': self.params['IN_RESPONSE_TO'],\n }\n\n return self.element('SubjectConfirmation', attrs={\n 'Method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer',\n }, children=[\n self.element('SubjectConfirmationData', attrs=scd_attributes),\n ])\n\n\nclass AssertionTemplate(SignableTemplate):\n \"\"\"\n .. code-block:: xml\n\n <saml:Assertion xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\"\n ID=\"${ASSERTION_ID}\"\n IssueInstant=\"${ISSUE_INSTANT}\"\n Version=\"2.0\">\n <saml:Issuer>${ISSUER}</saml:Issuer>\n ${ASSERTION_SIGNATURE}\n ${SUBJECT_STATEMENT}\n <saml:Conditions NotBefore=\"${NOT_BEFORE}\" NotOnOrAfter=\"${NOT_ON_OR_AFTER}\">\n <saml:AudienceRestriction>\n <saml:Audience>${AUDIENCE}</saml:Audience>\n </saml:AudienceRestriction>\n </saml:Conditions>\n <saml:AuthnStatement AuthnInstant=\"${AUTH_INSTANT}\">\n <saml:AuthnContext>\n <saml:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:Password</saml:AuthnContextClassRef>\n </saml:AuthnContext>\n </saml:AuthnStatement>\n ${ATTRIBUTE_STATEMENT}\n </saml:Assertion>\n \"\"\"\n namespace = 'saml'\n id_parameter = 'ASSERTION_ID'\n\n def generate_xml(self):\n return self.element('Assertion', attrs={\n 'ID': self.params['ASSERTION_ID'],\n 'IssueInstant': self.params['ISSUE_INSTANT'],\n 'Version': '2.0',\n }, children=[\n self.element('Issuer', text=self.params['ISSUER']),\n self._get_subject_statement(),\n self._get_conditions(),\n self._get_authn_context(),\n self._get_attribute_statement(),\n ])\n\n def _get_subject_statement(self) -> XmlNode:\n return SubjectTemplate(self.params).xml\n\n def _get_conditions(self) -> XmlNode:\n return self.element('Conditions', attrs={\n 'NotBefore': self.params['NOT_BEFORE'],\n 'NotOnOrAfter': self.params['NOT_ON_OR_AFTER'],\n }, children=[\n self.element('AudienceRestriction', children=[\n self.element('Audience', text=self.params['AUDIENCE']),\n ]),\n ])\n\n def _get_authn_context(self) -> XmlNode:\n return self.element('AuthnStatement', attrs={\n 'AuthnInstant': self.params['AUTH_INSTANT'],\n }, children=[\n self.element('AuthnContext', children=[\n self.element('AuthnContextClassRef', text='urn:oasis:names:tc:SAML:2.0:ac:classes:Password'),\n ])\n ])\n\n def _get_attribute_statement(self) -> XmlNode:\n return 
AttributeStatementTemplate(self.params).xml\n\n\nclass ResponseTemplate(SignableTemplate):\n \"\"\"\n .. code-block:: xml\n\n <samlp:Response xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\"\n Destination=\"${ACS_URL}\"\n ID=\"${RESPONSE_ID}\"\n InResponseTo=\"${IN_RESPONSE_TO}\"\n IssueInstant=\"${ISSUE_INSTANT}\"\n Version=\"2.0\">\n <saml:Issuer xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\">${ISSUER}</saml:Issuer>\n <samlp:Status>\n <samlp:StatusCode Value=\"urn:oasis:names:tc:SAML:2.0:status:Success\"></samlp:StatusCode>\n </samlp:Status>\n ${ASSERTION}\n </samlp:Response>\n \"\"\"\n namespace = 'samlp'\n id_parameter = 'RESPONSE_ID'\n\n def __init__(self, params, assertion: AssertionTemplate):\n super().__init__(params)\n self.assertion = assertion\n\n def generate_xml(self):\n return self.element('Response', attrs={\n 'Destination': self.params['ACS_URL'],\n 'ID': self.params['RESPONSE_ID'],\n 'InResponseTo': self.params['IN_RESPONSE_TO'],\n 'IssueInstant': self.params['ISSUE_INSTANT'],\n 'Version': '2.0',\n }, children=[\n self._get_issuer(),\n self._get_status(),\n self.assertion.xml,\n ])\n\n def _get_issuer(self) -> XmlNode:\n namespace = self.get_namespace_map()['saml']\n return self.element('Issuer', namespace=namespace, text=self.params['ISSUER'])\n\n def _get_status(self):\n return self.element('Status', children=[\n self.element('StatusCode', attrs={\n 'Value': 'urn:oasis:names:tc:SAML:2.0:status:Success',\n }),\n ])\n", "id": "8579606", "language": "Python", "matching_score": 6.752039909362793, "max_stars_count": 61, "path": "flask_saml2/idp/xml_templates.py" }, { "content": "import urllib.parse\n\nfrom flask_saml2 import exceptions\nfrom flask_saml2.idp import SPHandler, xml_templates\n\n\nclass GoogleAppsAssertionTemplate(xml_templates.AssertionTemplate):\n \"\"\"\n .. 
code-block:: xml\n\n <saml:Assertion xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\"\n ID=\"${ASSERTION_ID}\"\n IssueInstant=\"${ISSUE_INSTANT}\"\n Version=\"2.0\">\n <saml:Issuer>${ISSUER}</saml:Issuer>\n ${ASSERTION_SIGNATURE}\n ${SUBJECT_STATEMENT}\n <saml:Conditions NotBefore=\"${NOT_BEFORE}\" NotOnOrAfter=\"${NOT_ON_OR_AFTER}\"></saml:Conditions>\n <saml:AuthnStatement AuthnInstant=\"${AUTH_INSTANT}\">\n <saml:AuthnContext>\n <saml:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:Password</saml:AuthnContextClassRef>\n </saml:AuthnContext>\n </saml:AuthnStatement>\n ${ATTRIBUTE_STATEMENT}\n </saml:Assertion>\n \"\"\"\n\n namespace = 'saml'\n\n def _get_conditions(self):\n return self.element('Conditions', attrs={\n 'NotBefore': self.params['NOT_BEFORE'],\n 'NotOnOrAfter': self.params['NOT_ON_OR_AFTER'],\n })\n\n\nclass GoogleAppsSPHandler(SPHandler):\n \"\"\"\n Google Apps :class:`SPHandler` implementation.\n \"\"\"\n assertion_template = GoogleAppsAssertionTemplate\n\n def validate_request(self, request):\n url = urllib.parse.urlparse(request.acs_url)\n is_valid = url.netloc.endswith('.google.com') \\\n and url.path.startswith('/a/') \\\n and url.scheme in ('http', 'https')\n\n if not is_valid:\n raise exceptions.CannotHandleAssertion('AssertionConsumerService is not a Google Apps URL.')\n", "id": "7447452", "language": "Python", "matching_score": 2.0468950271606445, "max_stars_count": 61, "path": "flask_saml2/idp/sp/google_apps.py" }, { "content": "import datetime\n\nimport pytz\n\nfrom flask_saml2.exceptions import CannotHandleAssertion\nfrom flask_saml2.idp import SPHandler\nfrom flask_saml2.idp.parser import AuthnRequestParser\nfrom flask_saml2.signing import RsaSha256Signer, Sha256Digester\n\n\nclass DropboxSPHandler(SPHandler):\n \"\"\"\n Dropbox :class:`SPHandler` implementation.\n \"\"\"\n def get_sp_digester(self):\n return Sha256Digester()\n\n def get_sp_signer(self):\n private_key = self.idp.get_idp_private_key()\n return RsaSha256Signer(private_key)\n\n def format_datetime(self, value: datetime.datetime) -> str:\n \"\"\"\n Dropbox does not like too much precision in its seconds, and only\n supports UTC as Z, not an hourly offset.\n \"\"\"\n return value.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')\n\n def validate_destination(self, request: AuthnRequestParser):\n \"\"\"\n Dropbox sets the ``<AuthnRequest>`` Destination attribute to the empty\n string. 
This is not valid according to the spec, so must be handled as\n a special case.\n \"\"\"\n if request.destination != '':\n raise CannotHandleAssertion(\n f'Destination expected to be the empty string for Dropbox Service Provider')\n", "id": "5338082", "language": "Python", "matching_score": 3.1830196380615234, "max_stars_count": 61, "path": "flask_saml2/idp/sp/dropbox.py" }, { "content": "import urllib.parse\n\nfrom flask_saml2 import exceptions\nfrom flask_saml2.idp import SPHandler\n\n\nclass SalesforceSPHandler(SPHandler):\n \"\"\"\n Salesforce.com :class:`SPHandler` implementation.\n \"\"\"\n\n def validate_request(self, request):\n url = urllib.parse.urlparse(request.acs_url)\n\n is_valid = url.netloc.endswith('.salesforce.com') and \\\n url.scheme in ('http', 'https')\n\n if not is_valid:\n raise exceptions.CannotHandleAssertion('AssertionConsumerService is not a SalesForce URL.')\n\n def get_audience(self, request):\n return 'https://saml.salesforce.com'\n", "id": "6976753", "language": "Python", "matching_score": 0.037763673812150955, "max_stars_count": 61, "path": "flask_saml2/idp/sp/salesforce.py" }, { "content": "#!/usr/bin/env python3\n\"\"\"\nWatch and rebuild the docs when changes are made.\n\"\"\"\nimport http.server\nimport itertools\nimport os\nimport pathlib\nimport subprocess\nimport sys\nimport threading\nimport time\n\nimport inotify.adapters\nimport inotify.constants\n\nHERE = pathlib.Path(__file__).absolute().parent\n\n\ndef drop_while_not_none(gen):\n for x in itertools.takewhile(lambda x: x is not None, gen):\n pass\n\n\ndef sensible_dir_watch(\n directory: pathlib.Path,\n sleep: float = 0.5,\n **kwargs,\n):\n i = inotify.adapters.InotifyTree(directory.as_posix(), **kwargs)\n gen = i.event_gen()\n\n for event in gen:\n # inotify will yield a None every second if nothing else happens. Just\n # wait for a real event if that happens.\n if event is None:\n continue\n\n # Consume any extra events that just happened, waiting for another None\n # indicating that nothing has happened for a while. Sometimes saving\n # a file in e.g. vim causes multiple inotify events to happen.\n drop_while_not_none(gen)\n\n # Let the caller do their thing\n yield\n\n # Wait for a bit, so inotify events from the build process can catch up\n time.sleep(sleep)\n\n # Consume any extra events that just happened. 
Sometimes the build\n # process generates some extra events.\n drop_while_not_none(gen)\n\n\nbuild_lock = threading.Lock()\n\n\ndef compile_docs():\n subprocess.run([\"make\", \"html\"], check=True, cwd=HERE)\n\n\ndef watch_dir(directory, mask):\n for _ in sensible_dir_watch(directory, mask=mask):\n if build_lock.acquire(blocking=False):\n try:\n compile_docs()\n finally:\n build_lock.release()\n\n\ndef serve_dir(directory, port):\n os.chdir(directory)\n http.server.test(http.server.SimpleHTTPRequestHandler, port=port)\n\n\ndef main():\n compile_docs()\n\n # Don't listen to all events, such as accessing and opening files, only\n # those that modify the source files\n mask = (\n inotify.constants.IN_MODIFY |\n inotify.constants.IN_CLOSE_WRITE |\n inotify.constants.IN_MOVED_FROM |\n inotify.constants.IN_MOVED_TO |\n inotify.constants.IN_CREATE |\n inotify.constants.IN_DELETE |\n inotify.constants.IN_DELETE_SELF |\n inotify.constants.IN_MOVE_SELF |\n 0\n )\n\n dirs = [\n HERE / 'source',\n HERE.parent / 'flask_saml2'\n ]\n\n threads = []\n\n port = int(sys.argv[1]) if len(sys.argv) > 1 else 8000\n server = threading.Thread(target=serve_dir, args=(HERE / 'build' / 'html', port))\n server.start()\n\n threads.append(server)\n\n for directory in dirs:\n thread = threading.Thread(target=watch_dir, args=(directory, mask))\n thread.start()\n threads.append(thread)\n\n try:\n all(thread.join() for thread in threads)\n except KeyboardInterrupt:\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n", "id": "10331517", "language": "Python", "matching_score": 1.8226088285446167, "max_stars_count": 61, "path": "docs/watch.py" }, { "content": "import sphinx_rtd_theme\n\nimport flask_saml2.version\n\nproject = 'Flask SAML2 IdP and SP'\ncopyright = '2019, <NAME>'\nauthor = '<NAME>'\n\n# The short X.Y version.\nversion = '{}.{}'.format(*flask_saml2.version.version_info[:2])\n# The full version, including alpha/beta/rc tags.\nrelease = flask_saml2.version.version_str\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx_autodoc_typehints',\n 'sphinx.ext.intersphinx',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\nadd_module_names = True\nautodoc_member_order = 'bysource'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\npygments_style = 'sphinx'\n\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.7\", None),\n \"flask\": (\"https://flask.palletsprojects.com/en/1.1.x\", None),\n \"OpenSSL\": (\"https://www.pyopenssl.org/en/stable/\", None),\n}\n\nnitpick_ignore = [\n ('py:class', 'typing.Tuple'),\n]\n\n# -- Options for HTML output ----------------------------------------------\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_static_path = []\nhtmlhelp_basename = 'flask_saml2_doc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {}\nlatex_documents = [\n (master_doc, 'flask_saml2.tex', 'Flask SAML2 Documentation', 'Tidetech', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'flask_saml2', 'Flask SAML2 Documentation', [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc, 'flask_saml2', 'Flask SAML2 Documentation',\n author, 'flask_saml2', 'One line description of project.',\n 'Miscellaneous'),\n]\n", "id": "7580561", "language": "Python", "matching_score": 1.5135364532470703, "max_stars_count": 61, "path": "docs/source/conf.py" }, { "content": "from .version import version_info as VERSION\nfrom .version import version_str as __version__\n\n__all__ = ['VERSION', '__version__']\n", "id": "4909252", "language": "Python", "matching_score": 0.48398056626319885, "max_stars_count": 61, "path": "flask_saml2/__init__.py" }, { "content": "from lxml.etree import ElementBase\nfrom OpenSSL.crypto import X509, PKey\n\n__all__ = ['X509', 'PKey']\n\nXmlNode = ElementBase # An easier to type, and easier to import, alias\n", "id": "12509399", "language": "Python", "matching_score": 0.5437362194061279, "max_stars_count": 61, "path": "flask_saml2/types.py" }, { "content": "from .idphandler import AuthData, IdPHandler\nfrom .sp import ServiceProvider\n\n__all__ = ['ServiceProvider', 'AuthData', 'IdPHandler']\n", "id": "7821308", "language": "Python", "matching_score": 1.7551597356796265, "max_stars_count": 61, "path": "flask_saml2/sp/__init__.py" }, { "content": "from .idp import IdentityProvider\nfrom .sphandler import SPHandler\n\n__all__ = ['IdentityProvider', 'SPHandler']\n", "id": "11924298", "language": "Python", "matching_score": 1.0725125074386597, "max_stars_count": 61, "path": "flask_saml2/idp/__init__.py" } ]
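# The files in this record show the IdP side of flask_saml2 in several small
# pieces (an IdentityProvider subclass, the SAML2_IDP / SAML2_SERVICE_PROVIDERS
# config keys, and create_blueprint()). The sketch below pulls those pieces into
# one place. It is a hedged illustration, not part of the original sources: the
# PEM paths, hostnames and the assumption that a 'login' view exists are
# placeholders, while the class hooks and config keys are the ones used in
# examples/idp.py and tests/idp/base.py.
from flask import Flask, abort, redirect, request, session, url_for

from flask_saml2.idp import IdentityProvider
from flask_saml2.utils import certificate_from_file, private_key_from_file


class SessionIdentityProvider(IdentityProvider):
    """Session-backed IdP, mirroring ExampleIdentityProvider from examples/idp.py."""

    def login_required(self):
        if not self.is_user_logged_in():
            # A 'login' view is assumed to exist, as in the example app.
            abort(redirect(url_for('login', next=request.url)))

    def is_user_logged_in(self):
        return 'user' in session

    def get_current_user(self):
        # Whatever object your login view stored; the examples use a small
        # attrs class with `username` and `email` fields.
        return session['user']

    def logout(self):
        session.pop('user', None)


def create_idp_app() -> Flask:
    app = Flask(__name__)
    app.secret_key = 'not a secret'                 # replace outside of demos
    app.config['SERVER_NAME'] = 'idp.example.com'   # placeholder hostname
    app.config['SAML2_IDP'] = {
        'autosubmit': True,
        # Placeholder PEM paths; supply your own key pair.
        'certificate': certificate_from_file('keys/idp-certificate.pem'),
        'private_key': private_key_from_file('keys/idp-private-key.pem'),
    }
    app.config['SAML2_SERVICE_PROVIDERS'] = [
        {
            'CLASS': 'flask_saml2.idp.SPHandler',
            'OPTIONS': {
                'display_name': 'Example Service Provider',
                'entity_id': 'http://sp.example.com/saml/metadata.xml',
                'acs_url': 'http://sp.example.com/saml/acs/',
                'certificate': certificate_from_file('keys/sp-certificate.pem'),
            },
        },
    ]
    app.register_blueprint(SessionIdentityProvider().create_blueprint(), url_prefix='/saml/')
    return app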
2.686788
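# A matching sketch for the Service Provider side of the same record, condensed
# from tests/sp/base.py, tests/combined/base.py and tests/combined/test_thing.py.
# Again only the structure is taken from the sources; the metadata URLs, PEM
# paths and the '/dashboard/' route are illustrative placeholders.
from flask import Flask

from flask_saml2.sp import ServiceProvider
from flask_saml2.utils import certificate_from_file, private_key_from_file


class ExampleServiceProvider(ServiceProvider):
    def get_sp_config(self):
        return {
            # Placeholder PEM paths for the SP's own key pair.
            'certificate': certificate_from_file('keys/sp-certificate.pem'),
            'private_key': private_key_from_file('keys/sp-private-key.pem'),
        }

    def get_identity_providers(self):
        # Same CLASS/OPTIONS shape as IDP_CONFIG in tests/sp/base.py.
        return [
            {
                'CLASS': 'flask_saml2.sp.idphandler.IdPHandler',
                'OPTIONS': {
                    'display_name': 'My Identity Provider',
                    'entity_id': 'http://idp.example.com/saml/metadata.xml',
                    'sso_url': 'http://idp.example.com/saml/login/',
                    'slo_url': 'http://idp.example.com/saml/logout/',
                    'certificate': certificate_from_file('keys/idp-certificate.pem'),
                },
            },
        ]


sp = ExampleServiceProvider()

app = Flask(__name__)
app.secret_key = 'not a secret'
app.config['SERVER_NAME'] = 'sp.example.com'   # placeholder hostname
app.register_blueprint(sp.create_blueprint(), url_prefix='/saml/')


@app.route('/dashboard/')
def dashboard():
    # After the ACS POST completes, the combined tests read the result back
    # with get_auth_data_in_session(); nameid is the authenticated subject.
    auth_data = sp.get_auth_data_in_session()
    return f'Logged in as {auth_data.nameid}'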
nikhil-mathews
[ { "content": "#Usage\r\n# import sys\r\n# sys.path.insert(0,'path to this file')\r\n# import functions as f\r\n\r\n\r\nimport pickle\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Dense, Input, GlobalMaxPooling1D, Flatten\r\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, Attention,Concatenate\r\nfrom keras.models import Model\r\nfrom sklearn.metrics import roc_auc_score,roc_curve, auc\r\nfrom numpy import random\r\nfrom keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GlobalAveragePooling1D\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils.vis_utils import plot_model\r\nimport seaborn as sns\r\n\r\ndirectory = '/content/drive/MyDrive/ML_Data/'\r\n\r\n#Use this to create nD format input.\r\n#For eg, to create 4D input, combine_AC(df,4)\r\ndef combine_AC(df,chunksize=3,seperate_chunks=False):\r\n if not seperate_chunks:\r\n df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n try:\r\n df.Joined = [df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]\r\n except:\r\n df.Joined = df.Joined.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n return df\r\n #print(\"JHGVBJGHGHKHGKG\")\r\n df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])\r\n df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])\r\n df.Joined = [df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]\r\n return df\r\n\r\ndef shuff_together(df1,df2):\r\n joined = pd.concat([df1,df2], axis=0)\r\n joined = joined.iloc[np.random.permutation(len(joined))].reset_index(drop=True)\r\n return joined.iloc[:df1.shape[0],:],joined.iloc[df1.shape[0]:,:].reset_index(drop=True)\r\ndef load_data(D=1,randomize=False):\r\n try:\r\n with open(directory+'df_train_'+str(D)+'D.pickle', 'rb') as handle:\r\n df_train = pickle.load(handle)\r\n except:\r\n df_train = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-train.pkl\")\r\n try:\r\n with open(directory+'df_test_'+str(D)+'D.pickle', 'rb') as handle:\r\n df_test = pickle.load(handle)\r\n except:\r\n df_test = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-independent.pkl\")\r\n if randomize:\r\n return shuff_together(df_train,df_test)\r\n else:\r\n return df_train,df_test\r\n\r\n#Creates tokenizers and inputs for doubleip configuration\r\ndef get_seq_data_doubleip(MAX_VOCAB_SIZE, MAX_SEQUENCE_LENGTH,df_train,df_test, pad = 'center',show =False, saveTokrs = False):\r\n print(\"MAX_VOCAB_SIZE is\",MAX_VOCAB_SIZE)\r\n print(\"MAX_SEQUENCE_LENGTH is\",MAX_SEQUENCE_LENGTH)\r\n ip_train_Human = df_train[['Human']]\r\n ip_train_Yersinia = df_train[['Yersinia']]\r\n sentences_train_Human = pd.DataFrame(' '.join(ip_train_Human.loc[i]['Human']) for i in range(ip_train_Human.shape[0])).values.flatten()\r\n sentences_train_Yersinia = pd.DataFrame(' '.join(ip_train_Yersinia.loc[i]['Yersinia']) for i in range(ip_train_Yersinia.shape[0])).values.flatten()\r\n tokenizer1 = 
Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n tokenizer1.fit_on_texts(sentences_train_Human)\r\n tokenizer2 = Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n tokenizer2.fit_on_texts(sentences_train_Yersinia)\r\n sequences1_train = tokenizer1.texts_to_sequences(sentences_train_Human)\r\n sequences2_train = tokenizer2.texts_to_sequences(sentences_train_Yersinia)\r\n print(\"max sequences1_train length:\", max(len(s) for s in sequences1_train))\r\n print(\"min sequences1_train length:\", min(len(s) for s in sequences1_train))\r\n s = sorted(len(s) for s in sequences1_train)\r\n print(\"median sequences1_train length:\", s[len(s) // 2])\r\n if show : show_stats(sequences1_train,MAX_SEQUENCE_LENGTH,'Human_train') \r\n print(\"max word index sequences1_train:\", max(max(seq) for seq in sequences1_train if len(seq) > 0))\r\n print(\"max sequences2_train length:\", max(len(s) for s in sequences2_train))\r\n print(\"min sequences2_train length:\", min(len(s) for s in sequences2_train))\r\n s = sorted(len(s) for s in sequences2_train)\r\n print(\"median sequences2_train length:\", s[len(s) // 2])\r\n if show : show_stats(sequences2_train,MAX_SEQUENCE_LENGTH,'Yersinia_train')\r\n print(\"max word index sequences2_train:\", max(max(seq) for seq in sequences2_train if len(seq) > 0))\r\n word2idx = tokenizer1.word_index\r\n print('Found %s unique tokens in tokenizer1.' % len(word2idx))\r\n word2idx = tokenizer2.word_index\r\n print('Found %s unique tokens in tokenizer2.' % len(word2idx))\r\n if pad is 'center':\r\n print(\"Center padding\")\r\n data1 = pad_centered(sequences1_train, MAX_SEQUENCE_LENGTH)\r\n data2 = pad_centered(sequences2_train, MAX_SEQUENCE_LENGTH)\r\n else:\r\n print(pad+\" padding\")\r\n data1 = pad_sequences(sequences1_train, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n data2 = pad_sequences(sequences2_train, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n print('Shape of data1 tensor:', data1.shape)\r\n print('Shape of data2 tensor:', data2.shape)\r\n\r\n ip_test_Human = df_test[['Human']]\r\n ip_test_Yersinia = df_test[['Yersinia']]\r\n sentences1_test = pd.DataFrame(' '.join(ip_test_Human.loc[i]['Human']) for i in range(ip_test_Human.shape[0])).values.flatten()\r\n sentences2_test = pd.DataFrame(' '.join(ip_test_Yersinia.loc[i]['Yersinia']) for i in range(ip_test_Yersinia.shape[0])).values.flatten()\r\n test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n print(\"max test_sequences1 length:\", max(len(s) for s in test_sequences1))\r\n print(\"min test_sequences1 length:\", min(len(s) for s in test_sequences1))\r\n s = sorted(len(s) for s in test_sequences1)\r\n print(\"median test_sequences1 length:\", s[len(s) // 2])\r\n if show : show_stats(test_sequences1,MAX_SEQUENCE_LENGTH,'Human_test')\r\n print(\"max test_sequences2 length:\", max(len(s) for s in test_sequences2))\r\n print(\"min test_sequences2 length:\", min(len(s) for s in test_sequences2))\r\n s = sorted(len(s) for s in test_sequences2)\r\n print(\"median test_sequences2 length:\", s[len(s) // 2])\r\n if show : show_stats(test_sequences2,MAX_SEQUENCE_LENGTH,'Yersinia_test')\r\n if pad is 'center':\r\n print(\"Center padding for test seq.\")\r\n test_data1 = pad_centered(test_sequences1, MAX_SEQUENCE_LENGTH)\r\n test_data2 = pad_centered(test_sequences2, MAX_SEQUENCE_LENGTH)\r\n else:\r\n print(pad+\" padding for test seq.\")\r\n test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n test_data2 = 
pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n print('Shape of test_data1 tensor:', test_data1.shape)\r\n print('Shape of test_data2 tensor:', test_data2.shape)\r\n\r\n num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)\r\n print(\"num_words is\",num_words)\r\n if saveTokrs:\r\n save((tokenizer1,tokenizer2),'doubleip_tkrs')\r\n print('Saved tokenizers as doubleip_tkrs')\r\n return data1,data2,test_data1,test_data2,num_words,MAX_SEQUENCE_LENGTH,MAX_VOCAB_SIZE\r\n \r\n \r\n#Creates tokenizers and inputs for join configuration\r\ndef get_seq_data_join(MAX_VOCAB_SIZE, MAX_SEQUENCE_LENGTH,df_train,df_test, pad = 'center',show =False, saveTokrs = False):\r\n print(\"MAX_VOCAB_SIZE is\",MAX_VOCAB_SIZE)\r\n print(\"MAX_SEQUENCE_LENGTH is\",MAX_SEQUENCE_LENGTH)\r\n sentences = pd.DataFrame(' '.join(df_train.loc[i]['Joined']) for i in range(df_train.shape[0])).values.flatten()\r\n tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n tokenizer.fit_on_texts(sentences)\r\n sequences = tokenizer.texts_to_sequences(sentences)\r\n print(\"max sequence_data length:\", max(len(s) for s in sequences))\r\n print(\"min sequence_data length:\", min(len(s) for s in sequences))\r\n s = sorted(len(s) for s in sequences)\r\n print(\"median sequence_data length:\", s[len(s) // 2])\r\n if show : show_stats(sequences,MAX_SEQUENCE_LENGTH,'Joined_train')\r\n print(\"max word index:\", max(max(seq) for seq in sequences if len(seq) > 0))\r\n word2idx = tokenizer.word_index\r\n print('Found %s unique tokens.' % len(word2idx))\r\n \r\n if pad is 'center':\r\n print(\"Center padding.\")\r\n data = pad_centered(sequences, MAX_SEQUENCE_LENGTH)\r\n else:\r\n print(pad+\" padding.\")\r\n data = pad_sequences(sequences, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n print('Shape of data tensor:', data.shape)\r\n sentences_test = pd.DataFrame(' '.join(df_test.loc[i]['Joined']) for i in range(df_test.shape[0])).values.flatten()\r\n sequences_test = tokenizer.texts_to_sequences(sentences_test)\r\n print(\"max sequences_test length:\", max(len(s) for s in sequences_test))\r\n print(\"min sequences_test length:\", min(len(s) for s in sequences_test))\r\n s = sorted(len(s) for s in sequences_test)\r\n print(\"median sequences_test length:\", s[len(s) // 2])\r\n if show : show_stats(sequences_test,MAX_SEQUENCE_LENGTH,'Joined_test') \r\n if pad is 'center':\r\n print(\"Center padding for test seq.\")\r\n data_test = pad_centered(sequences_test, MAX_SEQUENCE_LENGTH)\r\n else:\r\n print(pad+\" padding for test seq.\")\r\n data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n print('Shape of data_test tensor:', data_test.shape)\r\n num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)\r\n print(\"num_words is\",num_words)\r\n if saveTokrs:\r\n save(tokenizer,'join_tkr')\r\n print('Saved tokenizer as join_tkr')\r\n return data,data_test,num_words,MAX_SEQUENCE_LENGTH,MAX_VOCAB_SIZE\r\n\r\ndef test_functions():\r\n print (\"Access to functions.py verified\")\r\n print (\"Access to functions.py verified\")\r\n\r\nimport tensorflow as tf\r\ndef pad_centered(l,max_len):\r\n padded = []\r\n for item in l:\r\n #print(item)\r\n if len(item)<=max_len :\r\n left_zeros = (max_len - len(item))//2\r\n right_zeros = (max_len - len(item))//2 + (max_len - len(item))%2\r\n padded.append([0] * left_zeros + item + [0] * right_zeros)\r\n else:\r\n left_idx = (len(item) - max_len)//2 #- (len(item) - max_len)%2\r\n right_idx = left_idx + max_len\r\n 
padded.append(item[left_idx:right_idx])\r\n assert(np.array(padded).shape == (len(l),max_len))\r\n return tf.convert_to_tensor(padded)\r\n\r\ndef embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM):\r\n embedding_matrix = random.uniform(-1, 1,(num_words,EMBEDDING_DIM))\r\n embedding_layer = Embedding(\r\n num_words,\r\n EMBEDDING_DIM,\r\n weights=[embedding_matrix],\r\n input_length=MAX_SEQUENCE_LENGTH,\r\n trainable=True)\r\n return embedding_layer\r\n \r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\ndef show_stats(sequence,MAX_SEQUENCE_LENGTH,title):\r\n lengths = [len(l) for l in sequence]\r\n sss = sorted(lengths)\r\n median = sss[len(sss)//2]\r\n y_pos = np.arange(len(lengths))\r\n plt.bar(y_pos,lengths)\r\n plt.plot([0,len(lengths)], [MAX_SEQUENCE_LENGTH,MAX_SEQUENCE_LENGTH],color='red',linestyle='-',label = \"MAX length cutoff\")\r\n plt.plot([0,len(lengths)], [median,median],color='purple',linestyle='--',label = \"Median = \"+str(median)+\"\")#, ms=558,label = \"Median\")\r\n #plt.figure(figsize=(3, 3))\r\n plt.title(title+\" seq lengths with max length = \"+str(sss[-1])+\"\")\r\n plt.xlabel(\"seq[i]\")\r\n plt.ylabel(\"seq length\")\r\n plt.legend()\r\n plt.show()\r\n \r\ndef conv_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,DROP=0.2, Flatt = True,filters = 32, kernel_size = 3, MAXpool_size=3):\r\n inputA = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n x1 = Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH,trainable=True)(inputA)\r\n x1 = Conv1D(filters, kernel_size, activation='relu')(x1)\r\n x1= Dropout(DROP)(x1)\r\n x1 = MaxPooling1D(MAXpool_size)(x1)\r\n if Flatt: x1= Flatten()(x1)\r\n x1 = Dropout(DROP)(x1)\r\n x1 = Dense(128, activation='relu')(x1)\r\n return Model(inputs=inputA, outputs=x1)\r\n \r\ndef BiLSTM_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,M,DROP=0.2):\r\n ip = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n x = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(ip)\r\n #x = Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH,trainable=True)(ip)\r\n x = Bidirectional(LSTM(M, return_sequences=True))(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(128, activation='relu')(x)\r\n x = GlobalMaxPool1D()(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(128, activation='relu')(x)\r\n return Model(inputs=ip, outputs=x)\r\n \r\n\r\n# from https://keras.io/api/layers/attention_layers/attention/\r\ndef att_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,DROP=0.2, BiLSTM = False):\r\n \r\n inputA = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n query_embeddings = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(inputA)\r\n \r\n inputB = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n value_embeddings = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(inputB)\r\n \r\n \r\n cnn_layer = Conv1D(32, 3)\r\n if BiLSTM: cnn_layer = Bidirectional(LSTM(15, return_sequences=True))\r\n \r\n # Query encoding of shape [batch_size, Tq, filters].\r\n query_seq_encoding = cnn_layer(query_embeddings)\r\n # Value encoding of shape [batch_size, Tv, filters].\r\n value_seq_encoding = cnn_layer(value_embeddings)\r\n \r\n # Query-value attention of shape [batch_size, Tq, filters].\r\n query_value_attention_seq = Attention()(\r\n [query_seq_encoding, value_seq_encoding])\r\n \r\n query_value_attention_seq = Dropout(DROP)(query_value_attention_seq)\r\n query_value_attention_seq = Dense(128, activation='relu')(query_value_attention_seq)\r\n \r\n query_seq_encoding = Dropout(DROP)(query_seq_encoding)\r\n query_seq_encoding = Dense(128, 
activation='relu')(query_seq_encoding)\r\n \r\n # Reduce over the sequence axis to produce encodings of shape\r\n # [batch_size, filters].\r\n query_encoding = GlobalAveragePooling1D()(\r\n query_seq_encoding)\r\n query_value_attention = GlobalAveragePooling1D()(\r\n query_value_attention_seq)\r\n \r\n query_encoding = Dropout(DROP)(query_encoding)\r\n query_encoding = Dense(128, activation='relu')(query_encoding)\r\n \r\n query_value_attention = Dropout(DROP)(query_value_attention)\r\n query_value_attention = Dense(128, activation='relu')(query_value_attention)\r\n \r\n \r\n # Concatenate query and document encodings to produce a DNN input layer.\r\n input_layer = Concatenate()([query_encoding, query_value_attention])\r\n\r\n return Model(inputs=[inputA, inputB], outputs=input_layer) \r\n # x = Dense(128, activation='relu')(input_layer)\r\n # x = Dropout(DROP)(x)\r\n # output = Dense(1, activation=\"sigmoid\",name=\"Final\")(x)\r\n # return Model(inputs=[inputA, inputB], outputs=output)\r\n\r\n\r\n# from https://keras.io/examples/nlp/text_classification_with_transformer/\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nclass TransformerBlock(layers.Layer):\r\n def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\r\n super(TransformerBlock, self).__init__()\r\n self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)\r\n self.ffn = keras.Sequential(\r\n [layers.Dense(ff_dim, activation=\"relu\"), layers.Dense(embed_dim),]\r\n )\r\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\r\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\r\n self.dropout1 = layers.Dropout(rate)\r\n self.dropout2 = layers.Dropout(rate)\r\n\r\n def call(self, inputs, training):\r\n attn_output = self.att(inputs, inputs)\r\n attn_output = self.dropout1(attn_output, training=training)\r\n out1 = self.layernorm1(inputs + attn_output)\r\n ffn_output = self.ffn(out1)\r\n ffn_output = self.dropout2(ffn_output, training=training)\r\n return self.layernorm2(out1 + ffn_output)\r\n\r\n\r\nclass TokenAndPositionEmbedding(layers.Layer):\r\n def __init__(self, maxlen, vocab_size, embed_dim):\r\n super(TokenAndPositionEmbedding, self).__init__()\r\n self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)\r\n self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)\r\n\r\n def call(self, x):\r\n maxlen = tf.shape(x)[-1]\r\n positions = tf.range(start=0, limit=maxlen, delta=1)\r\n positions = self.pos_emb(positions)\r\n x = self.token_emb(x)\r\n return x + positions\r\n\r\ndef transf_model(MAX_SEQUENCE_LENGTH,num_words, EMBEDDING_DIM, DROP = 0.3, num_heads = 2, ff_dim = 64):\r\n inputs=Input((MAX_SEQUENCE_LENGTH,))\r\n embedding_layer = TokenAndPositionEmbedding(MAX_SEQUENCE_LENGTH, num_words, EMBEDDING_DIM)\r\n x = embedding_layer(inputs)\r\n transformer_block = TransformerBlock(EMBEDDING_DIM, num_heads, ff_dim)\r\n x = transformer_block(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(256, activation=\"relu\")(x)\r\n x = GlobalAveragePooling1D()(x)\r\n return Model(inputs,x)\r\n # ip = transf_model(MAX_SEQUENCE_LENGTH_,num_words_5D_join,5)\r\n # x = Dropout(DROP)(ip.output)\r\n # x = Dense(128, activation=\"relu\")(x)\r\n # x = Dropout(DROP)(x)\r\n # outputs = Dense(1, activation=\"sigmoid\")(x)\r\n # model1D_CNN_join=Model(ip.input,outputs)\r\n \r\n\r\ndef save(data,name):\r\n with open(directory+''+name+'.pickle', 'wb') as handle:\r\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\ndef load(name):\r\n with 
open(directory+''+name+'.pickle', 'rb') as handle:\r\n return pickle.load(handle)\r\n \r\n#Creates and saves Tokenizers for combine config\r\ndef create_tokenizers(df_train):\r\n ip_train_Human = df_train[['Human']]\r\n ip_train_Yersinia = df_train[['Yersinia']]\r\n sentences_train_Human = pd.DataFrame(' '.join(ip_train_Human.loc[i]['Human']) for i in range(ip_train_Human.shape[0])).values.flatten()\r\n sentences_train_Yersinia = pd.DataFrame(' '.join(ip_train_Yersinia.loc[i]['Yersinia']) for i in range(ip_train_Yersinia.shape[0])).values.flatten()\r\n tokenizer1 = Tokenizer(num_words=500000)\r\n tokenizer1.fit_on_texts(sentences_train_Human)\r\n tokenizer2 = Tokenizer(num_words=500000)\r\n tokenizer2.fit_on_texts(sentences_train_Yersinia)\r\n save((tokenizer1,tokenizer2),'doubleip_tkrs')\r\n print('Saved tokenizers as doubleip_tkrs')\r\n sentences = pd.DataFrame(' '.join(df_train.loc[i]['Joined']) for i in range(df_train.shape[0])).values.flatten()\r\n tokenizer = Tokenizer(num_words=1000000)\r\n tokenizer.fit_on_texts(sentences)\r\n save(tokenizer,'join_tkr')\r\n print('Saved tokenizer as join_tkr')\r\n \r\n\r\n#Meant for final model\r\ndef preprocess(df_test, show =False, saveTokrs = True):\r\n D = len(df_test[['Human']].iloc[0][0][0])\r\n if D==1:\r\n print(\"Converting to 5D. This will take a few minutes\")\r\n combine_AC(df_test,5)\r\n elif D!=5:\r\n print(\"Data should be in 1D format\")\r\n sys.exit()\r\n else: pass\r\n \r\n if saveTokrs:\r\n if input(\"Create tokenizers? Enter y if this is new training data. y/n: \") is 'y': create_tokenizers(df_test)\r\n \r\n inputs = []\r\n MAX_SEQUENCE_LENGTH = 2000 #for joined\r\n print('Preprocessing...')\r\n #print(\"Seq length for joined is\",MAX_SEQUENCE_LENGTH)\r\n tokenizer = load('join_tkr')\r\n sentences_test_J = pd.DataFrame(' '.join(df_test.loc[i]['Joined']) for i in range(df_test.shape[0])).values.flatten()\r\n sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n s = sorted(len(s) for s in sequences_test)\r\n if show : show_stats(sequences_test,MAX_SEQUENCE_LENGTH,'Joined_seq')\r\n data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n inputs.append(data_test)\r\n sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n data_test = pad_centered(sequences_test, MAX_SEQUENCE_LENGTH)\r\n inputs.append(data_test)\r\n sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n inputs.append(data_test)\r\n MAX_SEQUENCE_LENGTH = 1000 #for doubleip\r\n #print(\"Seq length for doubleip is\",MAX_SEQUENCE_LENGTH)\r\n ip_test_Human = df_test[['Human']]\r\n ip_test_Yersinia = df_test[['Yersinia']]\r\n sentences1_test = pd.DataFrame(' '.join(ip_test_Human.loc[i]['Human']) for i in range(ip_test_Human.shape[0])).values.flatten()\r\n sentences2_test = pd.DataFrame(' '.join(ip_test_Yersinia.loc[i]['Yersinia']) for i in range(ip_test_Yersinia.shape[0])).values.flatten()\r\n tokenizer1,tokenizer2 = load('doubleip_tkrs')\r\n test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n if show : show_stats(test_sequences1,MAX_SEQUENCE_LENGTH,'doubleip seq')\r\n test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n inputs.append(test_data1)\r\n test_data2 = pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n 
inputs.append(test_data2)\r\n test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n test_data1 = pad_centered(test_sequences1, MAX_SEQUENCE_LENGTH)\r\n inputs.append(test_data1)\r\n test_data2 = pad_centered(test_sequences2, MAX_SEQUENCE_LENGTH)\r\n inputs.append(test_data2)\r\n test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n inputs.append(test_data1)\r\n test_data2 = pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n inputs.append(test_data2)\r\n return inputs\r\n\r\n\r\n\r\n\r\n", "id": "12258276", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "functions.py" } ]
0
ajayyadukrishnan
[ { "content": "import numpy as np\nimport matplotlib.pyplot as plt\n\n# Create 1000 samples from a normal distribution\nrng = np.random.default_rng()\nvals = rng.standard_normal(1000)\n\n# Set the chart color blue/red/green..etc\nchartColor = \"blue\"\n#And make a histogram\nplt.hist(vals, color=chartColor)\nplt.ylabel('counts')\nplt.title('1000 random numbers generated from a standard normal distribution')\nplt.savefig('standard normal historgram.png')\n", "id": "5364022", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "random_normal.py" } ]
0
tjiang31
[ { "content": "import numpy as np\nimport torch\nimport torch.nn as nn\n\n# Calculate the range of values for uniform distributions\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. / np.sqrt(fan_in)\n return (-lim, lim)\n\nactor_net = {'fc1_units': 200, 'fc2_units': 150}\ncritic_net = {'fc1_units': 200, 'fc2_units': 150}\n\nclass Actor(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, \n fc1_units = actor_net['fc1_units'], \n fc2_units = actor_net['fc2_units']): \n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(Actor, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.layer1 = nn.Sequential(nn.Linear(state_size, fc1_units),\n nn.ReLU()) \n \n self.layer2 = nn.Sequential(nn.Linear(fc1_units, fc2_units), \n nn.ReLU())\n \n self.layer3 = nn.Linear(fc2_units, action_size)\n self.reset_parameters()\n \n def reset_parameters(self):\n # Apply to layers the specified weight initialization\n self.layer1[0].weight.data.uniform_(*hidden_init(self.layer1[0]))\n self.layer2[0].weight.data.uniform_(*hidden_init(self.layer2[0]))\n self.layer3.weight.data.uniform_(-3e-3, 3e-3)\n \n def forward(self, state):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\" \n x = self.layer1(state)\n x = self.layer2(x)\n x = self.layer3(x)\n return torch.tanh(x)\n\nclass Critic(nn.Module):\n \"\"\"Critic (Value) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed,\n fc1_units=critic_net['fc1_units'],\n fc2_units=critic_net['fc2_units']): \n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): seed\n num_agents (int): Total number of agents\n fc1_units (int): Number of nodes in the first hidden layer\n fc2_units (int): Number of nodes in the second hidden layer\n \"\"\"\n super(Critic, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.layer1 = nn.Sequential(nn.Linear(state_size * 2 + action_size * 2, fc1_units),\n nn.ReLU())\n \n self.layer2 = nn.Sequential(nn.Linear(fc1_units, fc2_units),\n nn.ReLU())\n \n self.layer3 = nn.Linear(fc2_units, 1)\n self.reset_parameters() \n \n def reset_parameters(self):\n # Apply to layers the specified weight initialization\n self.layer1[0].weight.data.uniform_(*hidden_init(self.layer1[0]))\n self.layer2[0].weight.data.uniform_(*hidden_init(self.layer2[0]))\n self.layer3.weight.data.uniform_(-3e-3, 3e-3) \n \n def forward(self, state, action):\n \"\"\"Build a critic (value) network that maps (state, action) pairs -> Q-value.\"\"\"\n xs = torch.cat((state, action), dim = 1)\n x = self.layer1(xs)\n x = self.layer2(x)\n output = self.layer3(x)\n return output", "id": "634545", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "p3_collab-compet/model.py" } ]
0
alexandergmzx
[ { "content": "#!/usr/bin/env python3\nimport shutil\nimport psutil\nimport os\nimport emails\nimport socket\nimport sys\n\ndef check_cpu_usage():\n\tusage = psutil.cpu_percent(1)\n\treturn usage < 80\n\ndef check_disk_usage(disk):\n\tdu = shutil.disk_usage(disk)\n\tfree = du.free / du.total * 100\n\treturn free > 20\n\ndef check_memory_usage():\n available = psutil.virtual_memory()[1]/2.**20\n return available > 524.288\n\ndef main(argv):\t\n\t\"\"\"Process the JSON data and generate a full report out of it.\"\"\"\n\tprint(socket.gethostbyname(socket.gethostname()))\n\tsubject_line = \"\"\n\tif not check_cpu_usage() :\n\t\tsubject_line = \"Error - CPU usage is over 80%\"\n\telif not check_disk_usage(\"/\") :\n\t\tsubject_line = \"Error - Available disk space is less than 20%\"\n\telif not check_memory_usage() :\n\t\tsubject_line = \"Error - Available memory is less than 500MB\"\n\t#elif not (socket.gethostbyname(socket.gethostname()) == \"127.0.0.1\"):\n\t#\tsubject_line = \"Error - localhost cannot be resolved to 127.0.0.1\"\n\telse:\n\t\tprint(\"Everything is OK :)\")\n\n\tif not (subject_line == \"\"):\n\t\tprint(subject_line)\n\t\tsender = \"<EMAIL>\"\n\t\treceiver = <EMAIL>\".<EMAIL>(os.environ.get('USER'))\n\t\tsubject = subject_line\n\t\tbody = \"Please check your system and resolve the issue as soon as possible.\"\n\n\t\tmessage = emails.generate2(sender, receiver, subject, body)\n\t\temails.send(message)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "id": "8672708", "language": "Python", "matching_score": 2.965524673461914, "max_stars_count": 1, "path": "health_check.py" }, { "content": "#!/usr/bin/env python3\n\nimport json\nimport locale\nimport sys\nimport operator\nimport emails\nimport os\nimport reports\nimport time\nimport datetime\n\nfrom reportlab.platypus import SimpleDocTemplate\nfrom reportlab.platypus import Paragraph, Spacer, Table, Image\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib import colors\n\ndef generate(filename, title, additional_info):\n styles = getSampleStyleSheet()\n report = SimpleDocTemplate(filename)\n report_title = Paragraph(title, styles[\"h1\"])\n report_info = Paragraph(additional_info, styles[\"BodyText\"])\n empty_line = Spacer(1,20)\n report.build([report_title, report_info])\n\ndef load_data(filename):\n \"\"\"Loads the contents of filename as a JSON file.\"\"\"\n with open(filename) as json_file:\n data = json.load(json_file)\n return data\n\ndef process_data(data):\n '''\n Takes the json dictionary and turns it into a string list\n that is rady and available to process into the report.\n Like : \"name: *Fruit name*\n weigth: *Fruit weigth* lbs\"\n '''\n #data_list = []\n data_string = \"\"\n for value in data.values():\n data_string += \"<br/>\" + \"<br/>\" + \"name: \" + value[\"name\"] + \\\n \"<br/>\" + \"weight: \" + str(value[\"weight\"]) + \" lbs\"\n #data_list.append(data_string)\n return data_string\n\ndef main(argv):\n \"\"\"Process the JSON data and generate a full report out of it.\"\"\"\n data = load_data(\"../dict_o_dicts.json\")\n processed = process_data(data) \n print(processed)\n\n titulo = \"Processed Update on \" + datetime.date.today().strftime(\"%B %d, %Y\") # Add the date in international format\n # Like: March 11, 2020\n\n reports.generate(\"/tmp/processed.pdf\", \n titulo, processed)\n\n sender = \"<EMAIL>\"\n receiver = <EMAIL>(os.<EMAIL>.get('USER'))\n subject = \"Upload Completed - Online Fruit Store\"\n body = \"All fruits are uploaded to our website successfully. 
A detailed list is attached to this email.\"\n\n message = emails.generate(sender, receiver, subject, body,\"/tmp/processed.pdf\")\n emails.send(message)\n\nif __name__ == \"__main__\":\n main(sys.argv)", "id": "12582028", "language": "Python", "matching_score": 1.6484047174453735, "max_stars_count": 1, "path": "reports.py" }, { "content": "#! /usr/bin/env python3\nimport os\nimport requests\nimport json\nimport re\n\nurl = \"http://34.122.175.55/fruits/\"\ndict_o_dicts = {}\n\nfor feed in os.listdir(\"../supplier-data/descriptions/\"):\n\tdict_o_dicts[feed] = {}\n\timg_num = int(re.search(r'\\d+', feed).group())\n\twith open((\"../supplier-data/descriptions/\"+feed), mode='r', encoding='UTF-8') as file:\n\t\tdict_o_dicts[feed][\"name\"] = file.readline().rstrip()\n\t\tdict_o_dicts[feed][\"weight\"] = int(re.search(r'\\d+', file.readline().rstrip()).group())\n\t\tdict_o_dicts[feed][\"description\"] = file.readline().rstrip()\n\t\tif img_num <= 9:\n\t\t\tdict_o_dicts[feed][\"image_name\"] = \"00\"+str(int(re.search(r'\\d+', feed).group()))+\".jpeg\"\n\t\telse:\n\t\t\tdict_o_dicts[feed][\"image_name\"] = \"0\"+str(int(re.search(r'\\d+', feed).group()))+\".jpeg\"\n\tprint(dict_o_dicts[feed])\n\tresponse = requests.post(url, data=dict_o_dicts[feed])\n\tresponse.raise_for_status()\n\nwith open('../dict_o_dicts.json', 'w') as outfile:\n json.dump(dict_o_dicts, outfile)", "id": "1035758", "language": "Python", "matching_score": 1.5355452299118042, "max_stars_count": 1, "path": "run.py" }, { "content": "#! /usr/bin/env python3\nimport os\nimport requests\n\nurl = \"http://localhost/upload/\"\nfor jpg_image in os.listdir(\"../supplier-data/images/\"):\n\tif \".jpeg\" in jpg_image:\n\t\twith open('../supplier-data/images/'+jpg_image,'rb') as opened:\n\t\t\tr = requests.post(url, files={'file': opened})\n", "id": "11276903", "language": "Python", "matching_score": 1.4755685329437256, "max_stars_count": 1, "path": "supplier_image_upload.py" }, { "content": "#! /usr/bin/env python3\n\nimport re\nimport os\nfrom PIL import Image\n\nfor tif in os.listdir(\"../supplier-data/images/\"):\n\ttry:\n\t\timg = Image.open(\"../supplier-data/images/\"+tif)\n\t\tprint(tif,\" :\",img.format,img.size)\n\t\timg.resize((600,400)).convert('RGB').save(\"../supplier-data/images/\"+str(re.search(r'\\d+', tif).group())+\".jpeg\",\"JPEG\")\n\texcept OSError:\n\t\tpass\n", "id": "6723984", "language": "Python", "matching_score": 1.495023250579834, "max_stars_count": 1, "path": "changeImage.py" } ]
1.535545
longjj
[ { "content": "from pycocotools.coco import COCO\r\nfrom pycocoevalcap.eval import COCOEvalCap\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nfrom json import encoder\r\nimport argparse\r\nimport os\r\n\r\nif __name__=='__main__':\r\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n parser.add_argument('--folder', type=str, default='', help=\"result_folders\")\r\n args = parser.parse_args()\r\n\r\n annFile='captions_test5k.json'\r\n coco = COCO(annFile)\r\n\r\n if os.path.isfile(os.path.join(args.folder, 'performance.txt')):\r\n with open(os.path.join(args.folder, 'performance.txt')) as f:\r\n content = f.readlines()\r\n evaledFile = [x.split(' ')[0].strip() for x in content]\r\n else:\r\n evaledFile = []\r\n\r\n resFiles = [resFile for resFile in os.listdir(args.folder) if (('.json' in resFile) and (resFile not in evaledFile))]\r\n\t\r\n # Eval all result in the args.folder\r\n for resFile in resFiles:\r\n try:\r\n print('Evaluate: ' + resFile)\r\n cocoRes = coco.loadRes(os.path.join(args.folder, resFile))\r\n cocoEval = COCOEvalCap(coco, cocoRes)\r\n cocoEval.evaluate()\r\n with open(os.path.join(args.folder, 'performance.txt'), 'a') as fid:\r\n fid.write(resFile + ' ' + str(cocoEval.eval) + '\\n')\r\n except Exception as e:\r\n print(e)\r\n with open(os.path.join(args.folder, 'error.txt'), 'a') as f:\r\n f.write('============================\\n' + resFile + '\\n' + str(e) + '\\n')\r\n", "id": "11254333", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "eval_res.py" } ]
0
sdaityari
[ { "content": "import sys\nimport os\n\ncwd = os.path.dirname(os.path.realpath(__file__))\nmain_dir = os.path.normpath(cwd + '/../')\nsys.path.append(main_dir)\n\n#print sys.path\n\nfrom config.all import *\n\nlanguage = 'en'\n#html_logo = '../images/logos/logo-en.png'\nlatex_logo = '../images/logos/logo-en.png'\nlatex_documents = [\n ('index', 'e-cidadania.tex', u'Documentation',\n u'Cidadania S. Coop. Galega', 'manual'),\n]", "id": "12844300", "language": "Python", "matching_score": 1.2492152452468872, "max_stars_count": 40, "path": "docs/en/conf.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" This script download the latest git version of e-cidadania, compiles\nthe documentation and places it in the documentation website\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nimport argparse\n\n\n__author__ = \"<NAME>\"\n__license__ = \"GPLv3\"\n__version__ = \"0.2\"\n__email__ = \"<EMAIL>\"\n__status__ = \"Stable/Production\"\n\nclass Documents():\n\n \"\"\"\n Document class.\n \"\"\"\n def __init__(self):\n\n \"\"\"\n Declare variables.\n \"\"\"\n self.cwd = os.getcwd()\n self.langs = [\"es\", \"en\", \"gl\"]\n self.formats = [\"html\", \"latex\", \"latexpdf\"]\n\n # We don't include cidadania's server repository because it\n # needs authentication and some specific settings.\n self.repos = [\n \"git://github.com/cidadania/e-cidadania.git\",\n \"git://github.com/oscarcp/e-cidadania.git\",\n \"git://gitorious.org/e-cidadania/mainline.git\",\n \"git://repo.or.cz/e_cidadania.git\",\n ]\n\n def download_code(self, branch='master'):\n\n \"\"\"\n Download the latest code from the e-cidadania repositories. It the\n clone fails it will try with the next repository until it finds\n a working one.\n \"\"\"\n i = 0\n print \"\\n >> Getting e-cidadania codebase from %s...\" % self.repos[i].split('/')[2]\n print \"DEBUG: BRANCH=%s\" % branch\n done = False\n while not done:\n if i <= (len(self.repos) - 1):\n try:\n get_code = subprocess.check_call('git clone -b ' + branch + ' ' + self.repos[i] + ' ../ecidadania > /dev/null 2>&1', shell=True)\n done = True\n except:\n print \" -- Couldn't get the code from %s\" % self.repos[i].split('/')[2]\n i += 1\n else:\n import shutil\n print \"\\n EE Couldn't get the e-cidadania codebase. This can be caused by an old copy of the codebase.\"\n print \" -- Trying to delete the old codebase...\"\n try:\n os.chdir('../')\n shutil.rmtree('ecidadania/')\n print \" -- Code succesfully deleted. Please run the application again.\\n\"\n os.chdir('scripts/')\n except:\n print \" -- There was some error trying to delete the old codebase. Exiting.\\n\"\n sys.exit()\n\n def compile_docs(self):\n\n \"\"\"\n Compile all the documentation and languages at once.\n \"\"\"\n os.chdir(self.cwd + '/../ecidadania/docs/')\n sys.stdout.write(\"\\n >> Compiling documentation... 
\")\n sys.stdout.flush()\n\n i = 0\n done = False\n while not done:\n if i < (len(self.formats) - 1):\n try:\n sys.stdout.write('(%s) ' % self.formats[i])\n sys.stdout.flush()\n gen_docs = subprocess.check_call('make ' + self.formats[i] + ' > /dev/null 2>&1', shell=True)\n if gen_docs == 0:\n i += 1\n except:\n print \" -- Couldn't compile the %s documentation.\" % self.formats[i]\n i += 1\n elif i == (len(self.formats) - 1):\n try:\n sys.stdout.write('(%s) ' % self.formats[i])\n sys.stdout.flush()\n gen_docs = subprocess.check_call('make ' + self.formats[i] + ' > /dev/null 2>&1', shell=True)\n if gen_docs == 0:\n i += 1\n done = True\n except:\n print \" -- Couldn't compile the %s documentation.\" % self.formats[i]\n i += 1\n else:\n sys.exit(\"\\n EE Couldn't generate documentation. Exiting.\\n\")\n print \"\\n\"\n\n def pack_latex(self):\n\n \"\"\"\n Package the LaTeX documentation into a tar.gz\n \"\"\"\n print \" >> Packaging the LaTeX files...\"\n import tarfile\n \n os.chdir(os.getcwd() + '/build/latex/')\n i = 0\n while i <= (len(self.langs) - 1):\n tar = tarfile.open(os.getcwd() + \"/../../%s/latest-%s.tar.gz\" % (self.langs[i], self.langs[i]), \"w:gz\")\n tar.add(self.langs[i])\n tar.close()\n i += 1\n \n\n def copy_docs(self):\n\n \"\"\"\n Copy the generated documentation into their respective directories.\n \"\"\"\n os.chdir(\"../../\")\n\n c = 0\n while c <= (len(self.formats) - 1):\n print \" >> Copying the %s documentation...\" % self.formats[c]\n sys.stdout.write(\" >> done \")\n sys.stdout.flush()\n \n i = 0\n while i <= (len(self.langs) - 1):\n if self.formats[c] == 'latexpdf':\n try:\n copy_latexpdf = subprocess.check_call('cp -R build/latex/' + self.langs[i] + '/e-cidadania.pdf ../../' + self.langs[i] + '/latest-' + self.langs[i] + '.pdf', shell=True)\n except:\n print \" -- Couldn't copy the \" + self.langs[i] + \" documentation.\"\n pass\n sys.stdout.write(\"(%s) \" % self.langs[i])\n sys.stdout.flush()\n i += 1\n elif self.formats[c] == 'html':\n try:\n copy_html = subprocess.check_call('cp -R build/' + self.formats[c] + '/' + self.langs[i] + '/* ../../' + self.langs[i] + '/latest', shell=True)\n except:\n print \" -- Couldn't copy the \" + self.langs[i] + \" documentation.\"\n pass\n sys.stdout.write(\"(%s) \" % self.langs[i])\n sys.stdout.flush()\n i += 1\n elif self.formats[c] == 'latex':\n try:\n copy_latex = subprocess.check_call('cp -R ' + self.langs[i] + '/latest-' + self.langs[i] + '.tar.gz' + ' ../../' + self.langs[i], shell=True)\n except:\n print \" -- Couldn't copy the \" + self.langs[i] + \" documentation.\"\n print \" EE Couldn't copy one or all the documentation! Exiting.\"\n sys.exit(1)\n sys.stdout.write(\"(%s) \" % self.langs[i])\n sys.stdout.flush()\n i += 1\n print \"\\n\"\n c += 1\n\n def make_all(self, branch):\n if len(sys.argv) == 1:\n self.download_code(branch)\n else:\n self.download_code(sys.argv[1])\n self.compile_docs()\n self.pack_latex()\n self.copy_docs()\n\ndoc = Documents()\nif len(sys.argv) == 1:\n doc.make_all('master')\nelse:\n doc.make_all(sys.argv[1])\n", "id": "3079740", "language": "Python", "matching_score": 3.24277663230896, "max_stars_count": 40, "path": "docs/scripts/generate-docs.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 Cidadania S. Coop. 
Galega\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis script automates the creation of all the language catalogs for a django\nproject, creating both standard and JS catalogs.\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nimport argparse\n\n__author__ = \"<NAME> <<EMAIL>>\"\n__license__ = \"GPLv3\"\n__version__ = \"0.6\"\n\n\nclass Language():\n\n \"\"\"\n Language class.\n \"\"\"\n def __init__(self):\n\n \"\"\"\n Store all the applications and languages installed in the platfom\n \"\"\"\n # Get current work directory and add it to sys.path so we can import\n # the project settings.\n self.cwd = os.getcwd() # Remove scripts directory just in case\n sys.path.append(self.cwd)\n\n # Get the languages configured in settings.py and the installed\n # e-cidadania modules. If we can't get the settings module, abort execution.\n try:\n import e_cidadania.settings as settings\n except:\n sys.exit(\"\\nCould not import the settings module. Aborting execution.\\n\\\nProbable cause: the script is not being executed from the project root (usually src/).\\n\")\n\n # You must put here the name off you applications variable in the form\n # \"settings.YOURVARNAME\"\n APPLICATIONS = settings.ECIDADANIA_MODULES\n\n self.applications = APPLICATIONS\n self.languages = settings.LANGUAGES\n self.appnames = []\n self.appdirs = []\n\n # We are going to add all the applications of the project, and create\n # a dictionary with appname:appdir values\n print \"\\n >> Populating variables with applications...\\n\"\n for app in self.applications:\n appdata = app.split('.') # Separate all components\n # appdata.pop(0) # Remove project name, it's useless. This was for django <= 1.3\n app_path_list = appdata # This will leave us with an useful route to the application\n app_path = '/'.join(app_path_list)\n appname = app_path_list[-1] # Get the application name (last value)\n self.appnames.append(appname)\n self.appdirs.append(app_path)\n\n # When we exit the for loop, create a dictionary with appname:app_path\n self.appDict = dict(zip(self.appnames, self.appdirs))\n print self.appDict\n\n def _iterator(self, command, action):\n\n \"\"\"\n This method iterates over the applications and languages executing the\n command specified in the call.\n \"\"\"\n for app, appdir in self.appDict.items():\n os.chdir(self.cwd + '/' + appdir)\n print '\\n>> %s language catalog: %s' % (action, app)\n for lang in self.languages:\n a = subprocess.Popen(command + '-l %s' % (lang[0]), shell=True)\n subprocess.Popen.wait(a)\n\n print '\\n>> %s site root language catalog.' 
% (action)\n os.chdir(self.cwd + '/e_cidadania')\n for lang in self.languages:\n if action is not \"Compiling\":\n a = subprocess.Popen(command + \"-i 'apps/*' -l %s\" % (lang[0]), shell=True)\n else:\n a = subprocess.Popen(command + \"-l %s\" % (lang[0]), shell=True)\n subprocess.Popen.wait(a)\n\n def make(self):\n\n \"\"\"\n Generate the language catalogs for the application and site root.\n \"\"\"\n # Spit out the information\n print \"\\n>> Languages to generate:\"\n for lang in self.languages:\n print ' - ' + lang[1]\n\n print \"\\n>> Installed applications:\"\n for app in self.appDict.keys():\n print ' - ' + app\n\n self._iterator('django-admin.py makemessages ', 'Generating')\n self._iterator('django-admin.py makemessages -d djangojs ', 'Generating JavaScript')\n\n def compile(self):\n\n \"\"\"\n Compile all the language catalogs.\n \"\"\"\n # Spit out the information\n print \"\\n>> Languages to generate:\"\n for lang in self.languages:\n print ' - ' + lang[1]\n\n print \"\\n>> Installed applications:\"\n for app in self.appDict.keys():\n print ' - ' + app\n\n self._iterator('django-admin.py compilemessages ', 'Compiling')\n\n def clean(self):\n\n \"\"\"\n Removes the language installed catalogs in the platform, leaving the\n locale directories clean for new catalogs.\n \"\"\"\n print '\\n>> WARNING: This command will remove ALL the language catalogs, having to rebuild and translate them all.'\n raw_input('\\n Continue? (Ctrl-C to quit)')\n for app, appdir in self.appDict.items():\n os.chdir(self.cwd + '/' + appdir)\n print '\\n>> Cleaning language catalogs for %s' % (app)\n for lang in self.languages:\n a = subprocess.Popen('rm -rf locale/%s' % (lang[0]), shell=True)\n subprocess.Popen.wait(a)\n\n print '\\n>> Cleaning site root language catalogs'\n os.chdir(self.cwd)\n for lang in self.languages:\n a = subprocess.Popen('rm -rf locale/%s' % (lang[0]), shell=True)\n subprocess.Popen.wait(a)\n\nlang = Language()\nparser = argparse.ArgumentParser(description='e-cidadania language catalog generator. This script manages all the .po and .mo files from templates, python code and javascript i18n (if used).')\nsubparser = parser.add_subparsers()\nparser_make = subparser.add_parser('make', help='Create all the language catalogs for translation including JavaScript.')\nparser_make.set_defaults(func=lang.make)\n\nparser_compile = subparser.add_parser('compile', help='Compile all the language catalogs for use.')\nparser_compile.set_defaults(func=lang.compile)\n\nparser_clean = subparser.add_parser('clean', help='Delete all the language catalogs. 
After this you will have to rebuild the catalogs and translate them.')\nparser_clean.set_defaults(func=lang.clean)\n\n\nargs = parser.parse_args()\nargs.func()\n", "id": "742068", "language": "Python", "matching_score": 2.8766403198242188, "max_stars_count": 40, "path": "src/e_cidadania/scripts/generate_languages.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport os\nimport subprocess\n\n\"\"\"\nThis script installs a development environment in an easy way, instead of\nhaving to execute all the bootstrapping commands.\n\"\"\"\n\n__version__ = '0.2'\nprint \"e-cidadania install script %s\\n\" % __version__\n\n# Detect where is this file\ncwd = os.path.dirname(os.path.realpath(__file__))\n# Change the working dir\nos.chdir(cwd)\n\n# Execute the bootstrap\nprint \" * Bootstrapping...\"\na = subprocess.Popen('python bootstrap.py', shell=True)\nsubprocess.Popen.wait(a)\n\nprint \" * Making buildout...\"\nb = subprocess.Popen('bin/buildout')\nsubprocess.Popen.wait(b)\n\nd = raw_input(' * Do you want to create the database? (y/n) ')\n\nif d == 'y':\n\tos.chdir(cwd + '/src/')\n\tc = subprocess.Popen('../bin/django syncdb', shell=True)\n\tsubprocess.Popen.wait(c)\n\tsys.exit(0)\nelif d == 'n':\n\tprint 'Process finished'\n\tprint \"\"\"You should follow this instructions blablabla\"\"\"\n\tsys.exit(0)\nelse:\n\tsys.exit(0)\n", "id": "5247502", "language": "Python", "matching_score": 0.6927675604820251, "max_stars_count": 40, "path": "install.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\nfrom mockups import Mockup\nfrom mockups.helpers import register\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Error(Exception):\n \"\"\"Base class for all exceptions raised by the seeder.\n \"\"\"\n pass\n\n\nclass InvalidModelError(Error):\n \"\"\"Raised when a model is not a valid Django model.\n \"\"\"\n pass\n\n\nclass DataSeeder(object):\n \"\"\"Class which contains the methods to seed data or fixtures for testing.\n \n We presently wrap our methods around Mockup. 
\n \"\"\"\n \n def __init__(self):\n #Register User with Mockup\n register(User, Mockup)\n \n def seed(self, model, constraints=None, follow_fk=None, generate_fk=None,\n follow_m2m=None, factory=None, model_properties=None, commit=True):\n \"\"\"Creates and saves an instance of 'model' in the database.\n \n The values generated by Mockup class for the fields may not be \n acceptable. Custom values for the fields can be provided in \n model_properties.\n \n Parameters:\n ``model``: A model class which is used to create the test data.\n\n ``constraints``: A list of callables. The constraints are used to\n verify if the created model instance may be used. The callable\n gets the actual model as first and the instance as second\n parameter. The instance is not populated yet at this moment. The\n callable may raise an :exc:`InvalidConstraint` exception to\n indicate which fields violate the constraint.\n\n ``follow_fk``: A boolean value indicating if foreign keys should be\n set to random, already existing, instances of the related model.\n\n ``generate_fk``: A boolean which indicates if related models should\n also be created with random values. The *follow_fk* parameter will\n be ignored if *generate_fk* is set to ``True``.\n\n ``follow_m2m``: A tuple containing minium and maximum of model\n instances that are assigned to ``ManyToManyField``. No new\n instances will be created. Default is (1, 5). You can ignore\n ``ManyToManyField`` fields by setting this parameter to ``False``.\n\n ``generate_m2m``: A tuple containing minimum and maximum number of\n model instance that are newly created and assigned to the\n ``ManyToManyField``. Default is ``False`` which disables the\n generation of new related instances. The value of ``follow_m2m``\n will be ignored if this parameter is set.\n\n ``factory``: A Factory *instance*, overriding the one defined in the\n Mockup class.\n \n ``model_properties``: A dict containing the custom properties \n for the ``model``\n \n ``commit``: A boolean which is set to True by default and indicates\n whether the model should be saved to the database or not.\n \n \"\"\"\n \n #if not isinstance(model, models.Model):\n # raise InvalidModelError(\"%s is not a valid Django model.\" % model)\n if model_properties is None:\n model_properties = {}\n \n # Creates and randomly populates the data\n mockup = Mockup(model, constraints=constraints, follow_fk=follow_fk,\n generate_fk=generate_fk, follow_m2m=follow_m2m, \n factory=factory)\n created_model = mockup.create_one(commit=commit)\n \n # set the attributes of the model as provided in model_properties\n for key in model_properties.iterkeys():\n setattr(created_model, key, model_properties[key])\n if commit:\n created_model.save()\n return created_model\n\n def seedn(self, count, model, constraints=None, follow_fk=None, \n generate_fk=None, follow_m2m=None, factory=None,\n model_properties=None, commit=True ):\n \"\"\"Creates and saves n instances of 'model' in the database and returns\n a list of all those saved instances.\n \n The method uses self.seed to generate a list of instances of ``model``\n ``count`` number of times.\n \"\"\"\n\n obj_list = []\n for _ in xrange(count):\n obj = self.seed(model=model, constraints=constraints, \n follow_fk=follow_fk, generate_fk=generate_fk,\n follow_m2m=follow_m2m, factory=factory,\n model_properties=model_properties, commit=commit)\n obj_list.append(obj)\n \n return obj_list\n \nseeder = DataSeeder() ", "id": "2095412", "language": "Python", "matching_score": 3.176345109939575, 
"max_stars_count": 40, "path": "tests/data_seeder.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\nimport unittest\n\nfrom tests.data_seeder import seeder\n\nfrom src.core.spaces.models import Space\n\nfrom django.contrib.auth.models import User\n\n\nclass TestDataSeeder(unittest.TestCase):\n \"\"\"Tests the DataSeeder class methods.\n \"\"\"\n \n def testInstanceIsCreated(self):\n \"\"\"Tests if the correct instance of a model is generated.\n \"\"\"\n \n created_model = seeder.seed(Space)\n self.assertTrue(isinstance(created_model, Space))\n \n def testCorrectNumberOfInstancesAreGenerated(self):\n \"\"\"Tests if correct number of model instances are generated.\n \"\"\"\n \n count = 5\n actual_list = seeder.seedn(count, Space)\n self.assertEqual(len(actual_list), count)\n \n def testIfInstanceIsGeneratedWithRequiredAttributes(self):\n \"\"\"Tests if the generated instance has the desired properties.\n \"\"\"\n \n properties = {\n 'name': 'Test Space',\n 'description': 'Temporary Description',\n 'public': 'False',\n }\n instance = seeder.seed(Space, model_properties=properties)\n self.assertEqual(instance.name, properties['name'])\n self.assertEqual(instance.description, properties['description'])\n self.assertEqual(instance.public, properties['public'])\n #Space.author is a Foreign Key. Since generate_fk is False by default,\n #Space.author should be None as it will not be populated.\n self.assertEqual(instance.author, None)\n self.assertFalse(isinstance(instance.author, User))\n \n def testIfForeignKeyFieldsOfaModelIsPopulated(self):\n \"\"\"Tests if the foreign key fields of a model is populated if\n generate_fk is set to True\n \"\"\"\n \n instance = seeder.seed(Space)\n self.assertEqual(instance.author, None)\n \n instance = seeder.seed(Space, generate_fk=True)\n self.assertTrue(isinstance(instance.author, User))\n User.objects.all().delete()", "id": "9107062", "language": "Python", "matching_score": 1.9469695091247559, "max_stars_count": 40, "path": "tests/unit_tests/src/test_data_seeder.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 Cidadania S. Coop. Galega\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom src.core.spaces.models import Space\nfrom src.apps.ecidadania.proposals.models import Proposal, ProposalSet\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass ListProposalViewsTest(ECDTestCase):\n \"\"\"\n Tests the views of proposals app.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testListProposalsView(self):\n \"\"\"\n Tests ListProposal view.\n \"\"\"\n user = self.create_user('test_user', 'abcde')\n other_user = self.create_user('other_test_user', 'acsrsd')\n space_properties = {'name': 'test_space', 'url': 'test_space_url',\n 'author': user, 'public': True}\n space1 = self.seed(Space, properties=space_properties)\n \n space_properties.update({'name': 'other_space', 'url': 'other_test_url',\n 'author': other_user, 'public': True})\n space2 = self.seed(Space, space_properties)\n \n proposal_properties = {'space': space1, 'author': user}\n proposal1 = self.seed(Proposal, properties=proposal_properties)\n proposal2 = self.seed(Proposal, properties=proposal_properties)\n proposals_list = [proposal1, proposal2]\n \n proposal_properties.update({'space': space2, 'author': other_user})\n proposal3 = self.seed(Proposal, properties=proposal_properties)\n proposal4 = self.seed(Proposal, properties=proposal_properties)\n proposal5 = self.seed(Proposal, properties=proposal_properties)\n other_proposals_list = [proposal3, proposal4, proposal5]\n url = self.getURL('list-proposals', kwargs={'space_url':space1.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertEqual(len(response.context[0].dicts[0]['proposal']), \n len(proposals_list))\n url = self.getURL('list-proposals', kwargs={'space_url': space2.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertEqual(len(response.context[0].dicts[0]['proposal']), \n len(other_proposals_list))\n \nclass ListProposalSetViewsTest(ECDTestCase):\n \"\"\"\n Tests the views of proposalsets app.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testListProposalSetView(self):\n \"\"\"\n Tests ListProposalSet view.\n \"\"\"\n user = self.create_user('test_user', 'abcde')\n other_user = self.create_user('other_test_user', 'acsrsd')\n space_properties = {'name': 'test_space', 'url': 'test_space_url',\n 'author': user, 'public': True}\n space1 = self.seed(Space, properties=space_properties)\n \n space_properties.update({'name': 'other_space', 'url': 'other_test_url',\n 'author': other_user, 'public': True})\n space2 = self.seed(Space, space_properties)\n \n proposalset_properties = {'space': space1, 'author': user}\n proposalset1 = self.seed(ProposalSet, properties=proposalset_properties)\n proposalset2 = self.seed(ProposalSet, properties=proposalset_properties)\n proposalsets_list = [proposalset1, proposalset2]\n \n proposalset_properties.update({'space': space2, 'author': other_user})\n proposalset3 = self.seed(ProposalSet, properties=proposalset_properties)\n proposalset4 = self.seed(ProposalSet, properties=proposalset_properties)\n proposalset5 = self.seed(ProposalSet, properties=proposalset_properties)\n other_proposalsets_list = [proposalset3, proposalset4, proposalset5]\n url = self.getURL('list-proposalset', kwargs={'space_url':space1.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertEqual(len(response.context[0].dicts[0]['setlist']), \n len(proposalsets_list))\n url = self.getURL('list-proposalset', kwargs={'space_url': space2.url})\n response = self.get(url)\n self.assertResponseOK(response)\n 
self.assertEqual(len(response.context[0].dicts[0]['setlist']), \n len(other_proposalsets_list))\n \n", "id": "1198860", "language": "Python", "matching_score": 3.3779661655426025, "max_stars_count": 40, "path": "tests/unit_tests/src/apps/ecidadania/proposals/test_views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>. Galega\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom django.core.cache import cache\n\nfrom core.spaces.models import Space\n\nfrom src.helpers import cache as cache_helper\n\nfrom tests.test_utils import ECDTestCase\n\n\n\nclass CacheHelperTest(ECDTestCase):\n \"\"\"Tests the cache helper functions.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testGetOrInsertObjectInCache(self):\n \"\"\"\n Tests the get_or_insert_object_in_helpers.cache.\n \"\"\"\n \n \n space_props = {'url': 'test_space', 'name': 'some_name'}\n #print Space.__class__.__name__\n space_key = cache_helper._get_cache_key_for_model(Space, 'test_space')\n expected = None\n actual = cache.get(space_key)\n self.assertEqual(expected, actual)\n \n space = Space(**space_props)\n space.save()\n expected = space\n actual = cache_helper.get_or_insert_object_in_cache(Space, \n space.url, url=space.url)\n self.assertEqual(expected, actual)\n \n cache.delete(space_key)\n self.assertEqual(cache.get(space_key), None)\n expected = space\n actual = cache_helper.get_or_insert_object_in_cache(Space, \n space.url, url=space.url)\n self.assertEqual(expected, actual)\n ", "id": "6584862", "language": "Python", "matching_score": 2.1267521381378174, "max_stars_count": 40, "path": "tests/unit_tests/helpers/test_cache.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains functions to help with caching.\n\"\"\"\n\n# Django's cache module\nfrom django.core.cache import cache\n\n# Cached models\nfrom core.spaces.models import Space\n\n# Response types\nfrom django.shortcuts import get_object_or_404\n\n# Tries to get the object from cache\n# Else queries the database\n# Else returns a 404 error\n\n\ndef _get_cache_key_for_model(model, key):\n \"\"\"\n Returns a unique key for the given model.\n\n We prefix the given `key` with the name of the `model` to provide a further\n degree of uniqueness of keys across the cache.\n 
\"\"\"\n\n if not isinstance(key, basestring):\n raise TypeError('key must be str or a unicode string')\n\n return model.__name__ + '_' + key\n\n\ndef get_or_insert_object_in_cache(model, key, *args, **kwargs):\n \"\"\"\n Returns an instance of the `model` stored in the cache with the given key.\n If the object is not found in the cache, it is retrieved from the database\n and set in the cache.\n \"\"\"\n\n actual_key = _get_cache_key_for_model(model, key)\n return_object = cache.get(actual_key)\n\n if not return_object:\n return_object = get_object_or_404(model, *args, **kwargs)\n cache.set(actual_key, return_object)\n\n return return_object\n", "id": "1138351", "language": "Python", "matching_score": 0.6426374316215515, "max_stars_count": 40, "path": "src/helpers/cache.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2013 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\nfrom nose.plugins import Plugin\n\nfrom django.core import management\n\n\ndef flush_database():\n \"\"\"Flushes the default test database.\n \"\"\"\n management.call_command('flush', verbosity=0, interactive=False)\n \n\nclass DatabaseFlushPlugin(Plugin):\n \"\"\"Nose plugin to flush the database after every test.\n \n The instances of models generated in one test may cause other tests to fail.\n So it is necessary to clear the test database after every test.\n \"\"\"\n \n name = 'DatabaseFlushPlugin'\n enabled = True\n \n def options(self, parser, env):\n return Plugin.options(self, parser, env)\n \n def configure(self, parser, env):\n Plugin.configure(self, parser, env)\n self.enabled = True\n \n def afterTest(self, test):\n flush_database()\n", "id": "10625946", "language": "Python", "matching_score": 2.3254342079162598, "max_stars_count": 40, "path": "tests/nose_plugins.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom pylint import lint\n\nimport sys\n\ndef run_pylint():\n \"\"\"Runs pylint on the module supplied via command line arguments.\n \n Usage:\n \n >>> bin/python tests/pylint.py path_to_module_or_package\n \n where path_to_module is the relative or absolute path to the module\n or package which you want to test with pylint.\n \n The format of the message output by pylint is:\n MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE\n where MESSAGE_TYPE can be C(convention), R(refactor), W(warning),\n E(Error), F(Fatal)\n \n Reports generation is disabled by default. \n Ids are included with message types by default.\n These settings can be changed in the args variable below.\n \n For a full list of command line options pass --help .\n \n For more information please refer to the pylint manual at \n http://www.logilab.org/card/pylint_manual\n \"\"\"\n args = [\n '--reports=n',\n '--include-ids=y']\n sys.argv.extend(args)\n lint.Run(sys.argv[1:])\n\nif __name__=='__main__':\n run_pylint()", "id": "4058999", "language": "Python", "matching_score": 0.9958509206771851, "max_stars_count": 40, "path": "tests/pylint.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nos.environ.setdefault('LANG', 'en_US')\n\nif __name__ == \"__main__\":\n manage_cwd = os.getcwd()\n sys.path.insert(0, manage_cwd + '/e_cidadania')\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"e_cidadania.settings\")\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n", "id": "8879724", "language": "Python", "matching_score": 0.9552477598190308, "max_stars_count": 40, "path": "src/manage.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nChange environment according to the parameters.\n\"\"\"\n\nimport os\nimport sys\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core import management\n\n\nclass Command(BaseCommand):\n\n \"\"\"\n \"\"\"\n args = \"<settings_file> [development, production]\"\n help = \"This command will run the django development server with the \\\n specified configuration file, which can be 'production' or 'development'.\"\n\n def handle(self, *args, **options):\n\n \"\"\"\n \"\"\"\n if args[0] == 'development':\n self.stdout.write('Running development settings...\\n')\n management.call_command('runserver', settings=\"e_cidadania.settings.development\", verbosity=0)\n elif args[0] == 'production':\n self.stdout.write('Running production settings...\\n')\n management.call_command('runserver', settings=\"e_cidadania.settings.production\", verbosity=0)\n else:\n self.stdout.write(\"You didn't select a valid option. 
Valid options are: development, production.\\n\")\n sys.exit(0)\n", "id": "6043517", "language": "Python", "matching_score": 0.9597552418708801, "max_stars_count": 40, "path": "src/extras/custom_stuff/management/commands/runsettings.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef unicode_sorter(input):\n \"\"\" This function implements sort keys for the german language according to\n DIN 5007.\"\"\"\n\n # key1: compare words lowercase and replace umlauts according to DIN 5007\n key1=input.lower()\n key1=key1.replace(u\"ä\", u\"a\")\n key1=key1.replace(u\"ö\", u\"o\")\n key1=key1.replace(u\"ü\", u\"u\")\n key1=key1.replace(u\"ß\", u\"ss\")\n\n # key2: sort the lowercase word before the uppercase word and sort\n # the word with umlaut after the word without umlaut\n # key2=input.swapcase()\n\n # in case two words are the same according to key1, sort the words\n # according to key2.\n return key1\n", "id": "6709381", "language": "Python", "matching_score": 1.2247593402862549, "max_stars_count": 40, "path": "src/apps/thirdparty/smart_selects/utils.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db.models import get_model\nfrom django.http import HttpResponse\nfrom django.utils import simplejson\nimport locale\nfrom apps.thirdparty.smart_selects.utils import unicode_sorter\n\n\ndef filterchain(request, app, model, field, value, manager=None):\n Model = get_model(app, model)\n if value == '0':\n keywords = {str(\"%s__isnull\" % field): True}\n else:\n keywords = {str(field): str(value)}\n if manager is not None and hasattr(Model, manager):\n queryset = getattr(Model, manager).all()\n else:\n queryset = Model.objects\n results = list(queryset.filter(**keywords))\n results.sort(cmp=locale.strcoll, key=lambda x: unicode_sorter(unicode(x)))\n result = []\n for item in results:\n result.append({'value': item.pk, 'display': unicode(item)})\n json = simplejson.dumps(result)\n return HttpResponse(json, mimetype='application/json')\n\n\ndef filterchain_all(request, app, model, field, value):\n Model = get_model(app, model)\n if value == '0':\n keywords = {str(\"%s__isnull\" % field): True}\n else:\n keywords = {str(field): str(value)}\n results = list(Model.objects.filter(**keywords))\n results.sort(cmp=locale.strcoll, key=lambda x: 
unicode_sorter(unicode(x)))\n final = []\n for item in results:\n final.append({'value': item.pk, 'display': unicode(item)})\n results = list(Model.objects.exclude(**keywords))\n results.sort(cmp=locale.strcoll, key=lambda x: unicode_sorter(unicode(x)))\n final.append({'value': \"\", 'display': \"---------\"})\n\n for item in results:\n final.append({'value': item.pk, 'display': unicode(item)})\n json = simplejson.dumps(final)\n return HttpResponse(json, mimetype='application/json')\n", "id": "9615576", "language": "Python", "matching_score": 1.0413233041763306, "max_stars_count": 40, "path": "src/apps/thirdparty/smart_selects/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nALLOWED_CONTENT_TYPES = [\n 'application/vnd.openofficeorg.extension',\n # PDF\n 'application/pdf',\n 'application/x-pdf',\n 'application/acrobat',\n 'applications/vnd.pdf',\n 'text/pdf',\n 'text/x-pdf',\n # DOC\n 'application/doc',\n 'appl/text',\n 'application/vnd.msword',\n 'application/vnd.ms-word',\n 'application/winword',\n 'application/word',\n 'application/x-msw6',\n 'application/x-msword',\n 'application/msword',\n # DOCX and template\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.template',\n # PPT\n 'application/vnd.ms-powerpoint',\n 'application/mspowerpoint',\n 'application/ms-powerpoint',\n 'application/mspowerpnt',\n 'application/vnd-mspowerpoint',\n 'application/powerpoint',\n 'application/x-powerpoint',\n 'application/x-m',\n # PPTX and template\n 'application/vnd.openxmlformats-officedocument.presentationml.presentation',\n 'application/vnd.openxmlformats-officedocument.presentationml.template',\n # XLS\n 'application/vnd.ms-excel',\n 'application/msexcel',\n 'application/x-msexcel',\n 'application/x-ms-excel',\n 'application/vnd.ms-excel',\n 'application/x-excel',\n 'application/x-dos_ms_excel',\n 'application/xls',\n # XLSX\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n # ODT\n 'application/vnd.oasis.opendocument.text',\n 'application/x-vnd.oasis.opendocument.text',\n # ODS\n 'application/vnd.oasis.opendocument.spreadsheet',\n 'application/x-vnd.oasis.opendocument.spreadsheet',\n # ODP\n 'application/vnd.oasis.opendocument.presentation',\n 'application/x-vnd.oasis.opendocument.presentation',\n # TXT\n 'text/plain',\n 'application/txt',\n 'browser/internal',\n 'text/anytext',\n 'widetext/plain',\n 'widetext/paragraph',\n # RTF\n 'application/rtf',\n 'application/x-rtf',\n 'text/rtf',\n 'text/richtext',\n 'application/x-soffice',\n # ODF\n 'application/vnd.oasis.opendocument.formula',\n 'application/x-vnd.oasis.opendocument.formula',\n]\n", "id": "11310131", "language": "Python", "matching_score": 0.9272564053535461, "max_stars_count": 40, "path": "src/core/spaces/allowed_types.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione 
Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This code was downloaded from http://djangosnippets.org/snippets/1357/\n\nfrom django.template import Library\n\nregister = Library()\n\n\n@register.filter\ndef get_range(value):\n \"\"\"\n Filter - returns a list containing range made from given value\n Usage (in template):\n\n <ul>{% for i in 3|get_range %}\n <li>{{ i }}. Do something</li>\n {% endfor %}</ul>\n\n Results with the HTML:\n <ul>\n <li>0. Do something</li>\n <li>1. Do something</li>\n <li>2. Do something</li>\n </ul>\n\n Instead of 3 one may use the variable set in the views\n \"\"\"\n return range(value)\n", "id": "7891496", "language": "Python", "matching_score": 0.44377920031547546, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/templatetags/range.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext, loader, Context\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom e_cidadania import settings\n\n\n@login_required\ndef invite(request):\n\n \"\"\"\n Simple view to send invitations to friends via mail. 
Making the invitation\n system as a view, guarantees that no invitation will be monitored or saved\n to the hard disk.\n \"\"\"\n if request.method == \"POST\":\n mail_addr = request.POST['email_addr']\n raw_addr_list = mail_addr.split(',')\n addr_list = [x.strip() for x in raw_addr_list]\n usr_msg = request.POST['mail_msg']\n\n plain_template = \"invite/invite_plain.txt\"\n html_template = \"invite/invite.html\"\n\n plain_msg = loader.get_template(plain_template).render(\n RequestContext(request,\n {'msg': usr_msg}))\n html_msg = loader.get_template(html_template).render(\n RequestContext(request,\n {'msg': usr_msg}))\n\n email = EmailMultiAlternatives(_('Invitation to join e-cidadania'), plain_msg, settings.DEFAULT_FROM_EMAIL, [], addr_list)\n email.attach_alternative(html_msg, 'text/html')\n email.send(fail_silently=False)\n return render_to_response('invite_done.html',\n context_instance=RequestContext(request))\n uri = request.build_absolute_uri(\"/\")\n return render_to_response('invite.html', {\"uri\": uri}, context_instance=RequestContext(request))\n", "id": "429205", "language": "Python", "matching_score": 1.7085493803024292, "max_stars_count": 40, "path": "src/core/views/invite.py" }, { "content": "# Obtained from: http://www.djangosnippets.org/snippets/133/\n# Author: http://www.djangosnippets.org/users/SmileyChris/\n\nfrom django.template import loader, Context, RequestContext, TemplateSyntaxError\nfrom django.http import HttpResponse\n\n\ndef render_response(template_prefix=None, always_use_requestcontext=True):\n \"\"\"\n Create a decorator which can be used as a shortcut to render templates to\n an HttpResponse.\n\n The decorated function must return either:\n * an HttpResponse object,\n * a string containing the template name (if doesn't start with '/' then\n will be combined with the template_prefix) or\n * a tuple comprising of:\n * a string or tuple containing the template name(s),\n * a dictionary to add to the Context or RequestContext and\n * (optionally) a list of context processors (if given, forces use of\n RequestContext).\n\n Example usage (in a views module)::\n\n from projectname.renderer import render_response\n render_response = render_response('app_name/') # Template dir.\n\n @render_response\n app_view(request):\n ...\n return 'app_view_template.htm', dict(object=object)\n\"\"\"\n def renderer(func):\n def _dec(request, *args, **kwargs):\n response = func(request, *args, **kwargs)\n\n if isinstance(response, HttpResponse):\n return response\n elif isinstance(response, basestring):\n template_name = response\n namespace = {}\n context_processors = None\n elif isinstance(response, (tuple, list)):\n len_tuple = len(response)\n if len_tuple == 2:\n template_name, namespace = response\n context_processors = None\n elif len_tuple == 3:\n template_name, namespace, context_processors = response\n else:\n raise TemplateSyntaxError('%s.%s function did not return a parsable tuple' % (func.__module__, func.__name__))\n else:\n raise TemplateSyntaxError('%s.%s function did not provide a template name or HttpResponse object' % (func.__module__, func.__name__))\n\n if always_use_requestcontext or context_processors is not None:\n context = RequestContext(request, namespace, context_processors)\n else:\n context = Context(namespace)\n\n if template_prefix:\n if isinstance(template_name, (list, tuple)):\n template_name = map(correct_path, template_name)\n else:\n template_name = correct_path(template_name)\n\n return HttpResponse(loader.render_to_string(template_name, 
context_instance=context))\n\n return _dec\n\n def correct_path(template_name):\n if template_name.startswith('/'):\n return template_name[1:]\n return '%s%s' % (template_prefix, template_name)\n\n return renderer\n", "id": "619911", "language": "Python", "matching_score": 1.3827877044677734, "max_stars_count": 40, "path": "src/apps/thirdparty/userprofile/utils/decorators.py" }, { "content": "from django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import render_to_response\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom core.prismriver.settings import CUSTOM_MENU, DEFAULT_LABELS\nfrom core.prismriver.dashboard.models import Plugin\n\n\ndef separate_class_path(class_path):\n path = class_path.split(\".\")\n class_name = path.pop()\n class_path = \".\".join([i for i in path])\n return str(class_path), str(class_name)\n\n\n@staff_member_required\ndef dashboard(request):\n rendered_plugins = []\n plugins = Plugin.objects.filter(home_screen__user__username=request.user.username).order_by(\"position\")\n for p in plugins:\n class_path, class_name = separate_class_path(p.class_name)\n plugin = getattr(__import__(class_path, globals(),\n locals(), [class_name]), class_name)()\n rendered_plugins.append(plugin.render(request))\n context = {\n 'title': _('Site administration'),\n 'plugins': rendered_plugins,\n }\n context.update({})\n context_instance = template.RequestContext(request)\n return render_to_response('admin/dashboard.html', context,\n context_instance=context_instance)\n", "id": "8909668", "language": "Python", "matching_score": 1.7039674520492554, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/views.py" }, { "content": "class DashboardPlugin(object):\n def render(self, request):\n raise NotImplementedError\n", "id": "8040391", "language": "Python", "matching_score": 0.7872939705848694, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/plugins/pluginbase.py" }, { "content": "from django.conf.urls.defaults import *\n\nurlpatterns = patterns('', url(r'^$', 'prismriver.dashboard.views.dashboard'))\n", "id": "720472", "language": "Python", "matching_score": 0.09733492881059647, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.conf.urls import patterns, url, include\nfrom rest_framework import routers\nfrom apps.ecidadania.api.views.accounts import UserViewSet, GroupViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'groups', GroupViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browseable API.\nurlpatterns = 
patterns('',\n\n url(r'^', include(router.urls)),\n\n url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n # url(r'^debates/', include('apps.ecidadania.api.urls.debates', namespace='debate_api')),\n\n # This one should go on the spaces api\n # url(r'^news/', include('apps.ecidadania.api.urls.news', namespace='news_api')),\n\n url(r'^spaces/', include('apps.ecidadania.api.urls.spaces', namespace='spaces_api')),\n\n # url(r'^proposals/', include('apps.ecidadania.api.urls.proposals', namespace='rest_framework')),\n\n # url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n # url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n # url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n)\n", "id": "11151124", "language": "Python", "matching_score": 4.664342403411865, "max_stars_count": 40, "path": "src/apps/ecidadania/api/urls/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.conf.urls import patterns, url, include\nfrom rest_framework import routers\nfrom apps.ecidadania.api.views.accounts import UserViewSet, GroupViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'groups', GroupViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browseable API.\nurlpatterns = patterns('',\n\n url(r'^', include(router.urls)),\n\n # url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n)\n", "id": "7105923", "language": "Python", "matching_score": 0.247391939163208, "max_stars_count": 40, "path": "src/apps/ecidadania/api/urls/spaces.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport httplib\n\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\nfrom django.test import Client\nfrom django.test import TestCase\n\nfrom django.utils.encoding import smart_str\n\nfrom core.spaces.models import Space\n\nfrom apps.ecidadania.debate.models import Debate\nfrom apps.ecidadania.debate.models import Column\nfrom apps.ecidadania.debate.models import Row\nfrom apps.ecidadania.debate.models import Note\nfrom apps.ecidadania.news.models import Post\nfrom apps.ecidadania.proposals.models import ProposalSet\n\nfrom tests.data_seeder import seeder\n\n\nclass ECDTestCase(TestCase):\n \"\"\"Class which extends Django TestCase and adds methods specific to \n e-cidadania for testing.\n \"\"\"\n \n def init(self):\n \"\"\"Performs set up for the tests.\n \"\"\"\n self.client = Client(enforce_csrf_checks=False)\n self.username = 'dummy_username'\n self.user_password = '<PASSWORD>'\n self.admin_username = 'admin_username'\n self.admin_password = '<PASSWORD>'\n self.foo_user = self.create_user('foo_user', 'foo_user_password')\n self.foo_admin = self.create_user('foo_admin', 'foo_admin_password')\n self.foo_mod = self.create_user('foo_mod', 'foo_mod_password')\n \n self.bar_user = self.create_user('bar_user', 'bar_user_password')\n self.bar_admin = self.create_user('bar_admin', 'bar_admin_password')\n self.bar_mod = self.create_user('bar_mod', 'bar_mod_password')\n \n space_properties = {'name': 'foo_space', 'url': 'foo_space_url',\n 'author': self.foo_admin, 'public': False, \n 'mod_debate': True, 'mod_proposals': True,\n 'mod_news': True, 'mod_cal': True, 'mod_docs': True,\n }\n self.foo_space = Space(**space_properties)\n self.foo_space.save()\n self.foo_space.admins.add(self.foo_admin)\n self.foo_space.mods.add(self.foo_mod)\n self.foo_space.users.add(self.foo_user)\n self.foo_space.save()\n \n space_properties.update({'author': self.bar_admin, 'name': 'bar_space',\n 'url': 'bar_space_url', 'public': True,})\n self.bar_space = Space(**space_properties)\n self.bar_space.save()\n self.bar_space.admins.add(self.bar_admin)\n self.bar_space.mods.add(self.bar_mod)\n self.bar_space.users.add(self.bar_user)\n self.bar_space.save()\n \n debate_properties = {'space': self.foo_space, 'author': self.foo_admin}\n self.foo_debate = self.seed(Debate,properties=debate_properties)\n \n debate_properties.update({'space': self.bar_space, \n 'author': self.bar_admin})\n self.bar_debate = self.seed(Debate,debate_properties)\n \n column_properties = {'debate': self.foo_debate, 'criteria': 'private'}\n self.foo_column = Column(**column_properties)\n self.foo_column.save()\n \n column_properties.update({'debate': self.bar_debate,\n 'criteria': 'public'})\n self.bar_column = Column(**column_properties)\n self.bar_column.save()\n \n row_properties = column_properties.copy()\n self.bar_row = Row(**row_properties)\n self.bar_row.save()\n \n row_properties.update({'debate': self.foo_debate})\n self.foo_row = Row(**row_properties)\n self.foo_row.save()\n \n note_properties = {'column': self.foo_column, 'row': self.foo_row,\n 'debate': self.foo_debate, 'author': self.foo_admin}\n self.foo_note = Note(**note_properties)\n self.foo_note.save()\n \n note_properties.update({'column': self.bar_column, \n 'row': self.bar_row,\n 'debate': self.bar_debate,\n 'author': self.bar_admin})\n self.bar_note = Note(**note_properties)\n self.bar_note.save()\n \n post_properties = {'title': 'Foo news post', 'author': self.foo_user,\n 'pub_index': 
True, 'space': self.foo_space}\n self.foo_post = Post(**post_properties)\n self.foo_post.save()\n \n post_properties.update({'title': 'Bar news post',\n 'author': self.bar_user, 'space': self.bar_space})\n self.bar_post = Post(**post_properties)\n self.bar_post.save()\n \n proposal_set_properties = {'name': 'Foo Proposal Set', \n 'space': self.foo_space,\n 'author': self.foo_admin, \n 'debate': self.foo_debate}\n self.foo_proposalset = ProposalSet(**proposal_set_properties)\n self.foo_proposalset.save()\n \n proposal_set_properties.update({'name': 'Bar Proposal Set',\n 'space': self.bar_space,\n 'author': self.bar_admin,\n 'debate': self.bar_debate})\n self.bar_proposalset = ProposalSet(**proposal_set_properties)\n self.bar_proposalset.save()\n \n def seed(self, model, properties=None, constraints=None, follow_fk=None, \n generate_fk=None, follow_m2m=None, factory=None, commit=True):\n \"\"\"Generates and returns a new instance of the `model` with \n properties in `properties`.\n \"\"\"\n instance = seeder.seed(model=model, constraints=constraints, \n follow_fk=follow_fk, generate_fk=None, \n follow_m2m=None, factory=None,\n model_properties=properties, commit=commit)\n \n return instance\n \n def seedn(self, count, model, properties, constraints=None, follow_fk=None, \n generate_fk=None, follow_m2m=None, factory=None, commit=True):\n \"\"\"Generates and returns `count` number of instances of `model` with\n properties in `properties`.\n \"\"\"\n \n obj_list = seeder.seedn(count=count, model=model, constraints=constraints,\n follow_fk=follow_fk, generate_fk=generate_fk,\n follow_m2m=follow_m2m, factory=factory,\n model_properties=properties, commit=True)\n return obj_list\n \n def create_user(self, username, password, email=None, properties=None,\n logged_in=False):\n \"\"\"Creates, saves and returns a user with a given username, password\n and email. If `properties` is supplied, it will be applied to the \n created user.\n \"\"\"\n \n user = User.objects.create_user(username=username, password=password, \n email=email)\n if properties:\n for key in properties:\n setattr(user, key, properties[key])\n user.save()\n \n if logged_in:\n # log out the current user\n self.logout()\n # log in the new user\n user = self.login(username=username, password=password, email=email)\n return user\n \n def create_super_user(self, username='admin', password='<PASSWORD>', \n email='<EMAIL>', properties=None, \n logged_in=False):\n \"\"\"Creates, saves and returns a super user with a given username, \n password and email. If `properties` is supplied, it will be applied\n to the created user.\n \"\"\"\n \n super_user = User.objects.create_superuser(username=username,\n password=password, \n email=email)\n if properties:\n for key in properties:\n setattr(super_user, key, properties[key])\n super_user.save()\n \n if logged_in:\n self.logout()\n super_user = self.login(username=username, password=password, \n email=email)\n return super_user\n \n def login(self, username, password, email=None):\n \"\"\"Logs in a user with the given username and password. 
If the user is \n not present in the database, it will be created and logged in.\n \n We assume `username` is unique across the database.\n \"\"\"\n \n try:\n user = User.objects.get(username=username)\n except Exception:\n user = None\n if user is None:\n user = self.create_user(username=username, password=password, \n email=email)\n \n self.client.login(username=username, password=password)\n \n return user\n \n def logout(self):\n \"\"\"Logs out the currently logged in user.\n \"\"\"\n \n self.client.logout()\n \n def isLoggedIn(self, user=None):\n \"\"\"Checks and returns True if a user is logged in otherwise returns\n False.\n \"\"\"\n \n if '_auth_user_id' not in self.client.session:\n return False\n \n if (user.pk == self.client.session['_auth_user_id']):\n return True\n \n return False\n \n def getURL(self, name, args=None, kwargs=None):\n \"\"\"Returns the url for the given `name` which may be a function name or\n url name.\n \"\"\"\n return reverse(name, args=args, kwargs=kwargs)\n \n def get(self, url=None, url_name=None, data={}, follow=False, **extra):\n \"\"\"\n Performs a get to the given url and returns the response.\n \"\"\"\n if url is None and url_name is None:\n raise Exception(\"Please pass either url or url name\")\n \n if url_name:\n url = self.getURL(url_name)\n \n response = self.client.get(url, data=data, follow=follow, extra=extra)\n return response\n \n def post(self, url, data={}, follow=False, **extra):\n \"\"\"\n Performs a post to the supplied url and returns the response.\n \"\"\"\n \n response = self.client.post(path=url, data=data, follow=follow, \n extra=extra)\n return response\n \n def printResponse(self, response):\n \"\"\"Prints the response to the terminal.\n We need this method because the response is a unicode string and\n results in exception when printed directly i.e print response.\n \"\"\"\n \n print smart_str(response)\n \n \n def assertResponseCode(self, response, status_code):\n \"\"\"Asserts that the response status is status_code.\n \"\"\"\n if response.status_code != status_code:\n verbose_codes = [\n httplib.FOUND,\n ]\n message_codes = [\n httplib.FORBIDDEN, httplib.BAD_REQUEST, httplib.NOT_FOUND,\n ]\n url_codes = [httplib.NOT_FOUND]\n \n if response.status_code in verbose_codes:\n print response\n\n if response.context and response.status_code in message_codes:\n try:\n print response.context['message']\n except KeyError:\n pass\n \n if response.status_code in url_codes:\n print response.request['PATH_INFO']\n\n self.assertEqual(status_code, response.status_code)\n\n def assertResponseOK(self, response):\n \"\"\"Asserts that the response status is OK.\n \"\"\"\n self.assertResponseCode(response, httplib.OK)\n\n def assertResponseRedirect(self, response):\n \"\"\"Asserts that the response status is FOUND.\n \"\"\"\n self.assertResponseCode(response, httplib.FOUND)\n\n def assertResponseForbidden(self, response):\n \"\"\"Asserts that the response status is FORBIDDEN.\n \"\"\"\n self.assertResponseCode(response, httplib.FORBIDDEN)\n\n def assertResponseBadRequest(self, response):\n \"\"\"Asserts that the response status is BAD_REQUEST.\n \"\"\"\n self.assertResponseCode(response, httplib.BAD_REQUEST)\n\n def assertResponseNotFound(self, response):\n \"\"\"Asserts that the response status is NOT_FOUND.\n \"\"\"\n self.assertResponseCode(response, httplib.NOT_FOUND)\n", "id": "11738467", "language": "Python", "matching_score": 4.903242588043213, "max_stars_count": 40, "path": "tests/test_utils.py" }, { "content": "# -*- coding: utf-8 
-*-\n#\n# Copyright (c) 2010-2012 Cidadania S. Coop. Galega\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\nfrom src.core.spaces.models import Space\nfrom src.apps.ecidadania.debate.models import Debate\n\nfrom django.contrib.auth.models import *\nfrom tests.test_utils import ECDTestCase\nfrom django.contrib.auth.models import Group, Permission\nfrom src.apps.ecidadania.debate import url_names as urln\n\n\nclass ListDebatesViewsTest(ECDTestCase):\n \"\"\"Tests the views of debate app.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n \n def testListDebatesView(self):\n \"\"\"Tests ListDebates view.\n \"\"\"\n user = self.create_user('test_user', 'abcde')\n other_user = self.create_user('other_test_user', 'acsrsd')\n space_properties = {'name': 'test_space', 'url': 'test_space_url',\n 'author': user, 'public': True}\n space1 = self.seed(Space, properties=space_properties)\n \n space_properties.update({'name': 'other_space', 'url': 'other_test_url',\n 'author': other_user, 'public': True})\n space2 = self.seed(Space, space_properties)\n \n debate_properties = {'space': space1, 'author': user}\n debate1 = self.seed(Debate, properties=debate_properties)\n debate2 = self.seed(Debate, properties=debate_properties)\n debates_list = [debate1, debate2]\n \n debate_properties.update({'space': space2, 'author': other_user})\n debate3 = self.seed(Debate, properties=debate_properties)\n debate4 = self.seed(Debate, properties=debate_properties)\n debate5 = self.seed(Debate, properties=debate_properties)\n other_debates_list = [debate3, debate4, debate5]\n url = self.getURL(urln.DEBATE_LIST, kwargs={'space_url':space1.url})\n response = self.get(url)\n #print self.printResponse(response)\n self.assertResponseOK(response)\n self.assertEqual(len(response.context[0].dicts[0]['debate_list']), \n len(debates_list))\n \n url = self.getURL(urln.DEBATE_LIST, kwargs={'space_url': space2.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertEqual(len(response.context[0].dicts[0]['debate_list']), \n len(other_debates_list))\n \n def testViewDebate(self):\n \"\"\"Tests ViewDebate view.\n \"\"\"\n url = self.getURL(urln.DEBATE_VIEW,(), {'debate_id': self.foo_debate.id,\n 'space_url': self.foo_space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n context = response.context[0].dicts[0]\n assert 'notes' in context\n assert 'columns' in context\n assert 'rows' in context\n assert 'get_place' in context\n #print context['columns']\n self.assertEqual(len(context['notes']), 1)\n self.assertEqual(len(context['columns']), 1)\n self.assertEqual(len(context['rows']), 1)\n \n url = self.getURL(urln.DEBATE_VIEW,(), {'debate_id': 5,\n 'space_url': self.foo_space.url})\n response = self.get(url)\n self.assertResponseNotFound(response)\n\n\n def testDeleteDebate(self):\n \"\"\"\n Check if admin can delete from private space\n \"\"\"\n\n \n 
space=self.foo_space\n self.login('foo_admin', '<PASSWORD>')\n self.foo_admin.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.foo_admin.save()\n space.save()\n self.assertTrue(self.isLoggedIn(self.foo_admin))\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.foo_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n \"\"\"\n When the admin doesn't have delete_debate permissions\n \"\"\"\n self.foo_admin.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.foo_admin.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if admin can delete from public space\n \"\"\"\n\n\n space = self.bar_space\n self.login('bar_admin','bar_admin_password')\n self.bar_admin.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.bar_admin.save()\n space.save()\n self.assertTrue(self.isLoggedIn(self.bar_admin))\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.bar_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n \"\"\"\n When the admin doesn't have debate_delete permissions\n \"\"\"\n self.bar_admin.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.bar_admin.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if registered user cannot delete from private space\n \"\"\"\n\n\n space = self.foo_space\n self.login('foo_user', 'foo_user_password')\n self.assertTrue(self.isLoggedIn(self.foo_user))\n self.foo_user.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.foo_user.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.foo_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \"\"\"\n When the user doesn't have debate_delete permissions\n \"\"\"\n self.foo_user.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.foo_user.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if registered user cannot delete from public space\n \"\"\"\n space = self.bar_space\n self.login('bar_user', 'bar_user_password')\n self.assertTrue(self.isLoggedIn(self.bar_user))\n self.bar_user.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.bar_user.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.bar_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \"\"\"\n When the user doesn't have delete_debate permissions\n \"\"\"\n self.bar_user.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.bar_user.save()\n space.save()\n response=self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed('not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if mods can delete from private space\n \"\"\"\n\n\n space = self.foo_space\n 
self.login('foo_mod', '<PASSWORD>mod_password')\n self.assertTrue(self.isLoggedIn(self.foo_mod))\n self.foo_mod.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.foo_mod.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.foo_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n \"\"\"\n When the mod doesn't have delete_debate permissions\n \"\"\"\n self.foo_mod.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.foo_mod.save()\n space.save()\n response=self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed('not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if mods can delete from public space\n \"\"\"\n\n\n space = self.bar_space\n self.login('bar_mod', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.bar_mod))\n self.bar_mod.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.bar_mod.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.bar_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n \"\"\"\n When the mod doesn't have delete_debate permissions\n \"\"\"\n self.bar_mod.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.bar_mod.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if unregistered users cannot delete from public space\n \"\"\"\n\n\n space = self.bar_space\n self.unreg_user = self.create_user('unreg_user', 'unreg_user_password')\n self.login('unreg_user','unreg_user_password')\n self.assertTrue(self.isLoggedIn(self.unreg_user))\n self.unreg_user.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.unreg_user.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.bar_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \"\"\"\n When the unreg-user doesn't have delete_debate permissions\n \"\"\"\n self.unreg_user.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.unreg_user.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n\n\n \"\"\"\n Check if unregistered users cannot delete from private space\n \"\"\"\n\n\n space = self.foo_space\n self.login('unreg_user', '<PASSWORD>password')\n self.assertTrue(self.isLoggedIn(self.unreg_user))\n self.unreg_user.user_permissions.add(Permission.objects.get(codename='delete_debate'))\n self.unreg_user.save()\n space.save()\n url = self.getURL(urln.DEBATE_DELETE,(), {'debate_id': self.foo_debate.id,\n 'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \"\"\"\n When the unreg_user doesn't have delete_debate permissions \n \"\"\"\n self.unreg_user.user_permissions.remove(Permission.objects.get(codename='delete_debate'))\n self.unreg_user.save()\n space.save()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n self.logout()\n", "id": "11267004", "language": 
"Python", "matching_score": 5.604502201080322, "max_stars_count": 40, "path": "tests/unit_tests/src/apps/ecidadania/debate/test_views.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom core.spaces import url_names\nfrom core.spaces.models import Space\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass ViewSpaceIndexTest(ECDTestCase):\n\n \"\"\"\n Tests the view for the index page of a space.\n \"\"\"\n \n def setUp(self):\n super(ViewSpaceIndexTest, self).init()\n self.private_space = self.foo_space\n self.private_space_url = self.getURL(url_names.SPACE_INDEX,\n kwargs={'space_url': self.private_space.url})\n self.public_space = self.bar_space\n self.public_space_url = self.getURL(url_names.SPACE_INDEX,\n kwargs={'space_url': self.public_space.url})\n \n def testAnonymousUserCanNotAccessPrivateSpace(self):\n \"\"\"\n Tests if anonymous user can not access the space index page.\n \"\"\" \n response = self.get(self.private_space_url)\n self.assertResponseOK(response)\n self.assertContains(response, \"You're an anonymous user.\")\n \n def testUnregisteredUserCanNotAccessPrivateSpace(self):\n \"\"\"Tests if an unregistered user can not access the space index.\n \"\"\"\n #Create and login a user who is not registered to the space\n user = self.login(\"test_user\", \"<PASSWORD>\")\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n self.assertFalse(user.is_anonymous())\n self.assertFalse(user in self.private_space.users.all())\n self.assertFalse(user in self.private_space.mods.all())\n self.assertFalse(user in self.private_space.admins.all())\n response = self.get(self.private_space_url)\n self.assertResponseOK(response)\n self.assertContains(response, \"You're not registered to this space.\")\n self.logout()\n \n def testSpaceAdminCanAccessThePrivateSpace(self):\n \"\"\"Tests if the space admin can access the space index.\n \"\"\" \n space_admin = self.login('foo_admin', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(space_admin))\n \n response = self.get(self.private_space_url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n self.logout()\n \n def testSpaceModCanAccessThePrivateSpace(self):\n \"\"\"Tests if the space mod can access the space index.\n \"\"\"\n space_mod = self.login('foo_mod', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(space_mod))\n response = self.get(self.private_space_url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n self.logout()\n \n def testSpaceUserCanAccessTheSpace(self):\n \"\"\"Tests if the space user can access the space index.\n \"\"\"\n space_user = self.login('foo_user', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(space_user))\n response = self.get(self.private_space_url)\n 
self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n self.logout()\n \n def testOtherUsersCanNotAccessThePrivateSpace(self):\n \"\"\"Test if other users who are not registered to the space can not\n access the space.\n \"\"\"\n other_user = self.login('bar_admin', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(other_user))\n self.assertFalse(other_user in self.private_space.admins.all())\n response = self.get(self.private_space_url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \n \n def testAdminAccessToAPublicSpace(self):\n \"\"\"Tests if an admin for one space can access a public space.\n \"\"\"\n admin = self.login('foo_admin', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(admin))\n self.assertFalse(admin in self.public_space.admins.all())\n response = self.get(self.public_space_url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n\n def testAnonymousUserCanAcessAPublicSpace(self):\n \"\"\"Tests if an anonymous user can access a public space.\n \"\"\"\n response = self.get(self.public_space_url)\n self.assertResponseOK(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n \n\nclass DeleteSpaceTest(ECDTestCase):\n \"\"\"\n Tests the deletion of a space.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testGeneralUserAccess(self):\n \"\"\"\n Tests if a general user is prohibited from deleting the space.\n \"\"\"\n space = self.bar_space\n general_user = self.login('test_user', '<PASSWORD>')\n url = self.getURL(url_names.SPACE_DELETE, kwargs={'space_url': space.url})\n response = self.get(url)\n self.assertResponseRedirect(response)\n self.assertEqual(url, response.request['PATH_INFO'])\n \n def testAdminAccess(self):\n \"\"\"\n Tests if a correct admin can delete a space.\n \"\"\"\n space =self.bar_space\n user = self.create_super_user(\"other_admin\", \"<PASSWORD>\",\n logged_in=True)\n self.assertTrue(self.isLoggedIn(user))\n \n url = self.getURL(url_names.SPACE_DELETE, kwargs={'space_url': space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertTemplateUsed(response, 'not_allowed.html')\n \n #logout the present user because the space does not belong to it\n self.logout()\n admin = self.login('bar_admin', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(admin))\n self.assertTrue(admin in space.admins.all())\n response = self.get(url)\n self.assertResponseRedirect(response)\n self.assertTemplateNotUsed(response, 'not_allowed.html')\n\nclass ListSpacesTest(ECDTestCase):\n \"\"\"\n Tests the list spaces view.\n \"\"\"\n \n def setUp(self):\n self.init()\n #We have a public space as well as a private space.\n self.private_space = self.foo_space\n self.public_space = self.bar_space\n self.url = self.getURL(url_names.SPACE_LIST)\n \n def testOnlyPublicSpacesAreListedForAnonymousUser(self):\n \"\"\"\n Tests if only the public spaces are listed for anonymous user.\n \"\"\"\n #No user is logged in currently\n response = self.get(self.url)\n self.assertResponseOK(response)\n spaces_returned = response.context[0].dicts[0]['space_list']\n self.assertEqual(len(spaces_returned), 1)\n self.assertTrue(self.public_space in spaces_returned)\n self.assertTrue(self.private_space not in spaces_returned)\n \n def testAllSpacesAreReturnedForALoggedInUser(self):\n \"\"\"\n Tests if both the public and private spaces are returned for a logged\n in user who is registered for both the spaces.\n \n We make self.bar_admin to be a 
user for self.foo_space which is a\n private space.\n \"\"\" \n self.foo_space.users.add(self.bar_admin)\n self.login('bar_admin', '<PASSWORD>')\n response = self.get(self.url)\n spaces_returned = response.context[0].dicts[0]['space_list']\n self.assertEqual(len(spaces_returned), 2)\n self.assertTrue(self.foo_space in spaces_returned)\n self.assertTrue(self.bar_space in spaces_returned) \n\nclass EditRoleTest(ECDTestCase):\n \"\"\"\n Tests if only admin can edit roles of people\n \"\"\"\t\n\n def setUp(self):\n self.init()\n self.private_space = self.foo_space\n self.public_space = self.bar_space\n\n def testSuperuserCanAccessPrivateView(self):\n space=self.private_space\n self.root=self.create_super_user(logged_in=True)\n self.assertTrue(self.isLoggedIn(self.root))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response = self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response,\"Please select the users that will be administrators\")\n self.logout()\n\n def testSuperuserCanAccessPrivateView(self):\n space=self.public_space\n self.root=self.create_super_user(logged_in=True)\n self.assertTrue(self.isLoggedIn(self.root))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response = self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response,\"Please select the users that will be administrators\")\n self.logout()\n\n def testAdminCannotAccessPrivateView(self):\n space = self.private_space\n self.login('foo_admin', 'foo_<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.foo_admin))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n def testAdminCannotAccessPublicView(self):\n space = self.public_space\n self.login('bar_admin', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.bar_admin))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n def testModCannotAccessPrivateView(self):\n space = self.private_space\n self.login('foo_mod', 'foo_mod_password')\n self.assertTrue(self.isLoggedIn(self.foo_mod))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n def testModCannotAccessPublicView(self):\n space = self.public_space\n self.login('bar_mod', '<PASSWORD>password')\n self.assertTrue(self.isLoggedIn(self.bar_mod))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n\n def testUserCannotAccessPrivateView(self):\n space = self.private_space\n self.login('foo_user', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.foo_user))\n url=self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n def testUserCannotAccessPublicView(self):\n 
space = self.public_space\n self.login('bar_user', '<PASSWORD>user_password')\n self.assertTrue(self.isLoggedIn(self.bar_user))\n url = self.getURL('edit-roles',kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\t\n def testOtherUserCannotAccessPrivateView(self):\n space = self.private_space\n self.unreg_user = self.create_user('unreg_user', '<PASSWORD>')\n self.login('unreg_user', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.unreg_user))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n\n def testOtherUserCannotAccessPublicView(self):\n space = self.public_space\n self.unreg_user = self.create_user('unreg_user', '<PASSWORD>')\n self.login('unreg_user', '<PASSWORD>')\n self.assertTrue(self.isLoggedIn(self.unreg_user))\n url = self.getURL('edit-roles', kwargs={'space_url': space.url})\n response=self.get(url,follow=True)\n self.assertResponseOK(response)\n self.assertContains(response, \"you don't have permissions for accessing to some area.\")\n self.logout()\n", "id": "8156065", "language": "Python", "matching_score": 6.113320827484131, "max_stars_count": 40, "path": "tests/unit_tests/src/core/spaces/test_spaces.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom core.spaces import url_names\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass ViewSpaceIndexTest(ECDTestCase):\n\n \"\"\"\n Tests the view for the index page of a space.\n \"\"\"\n \n def setUp(self):\n super(ViewSpaceIndexTest, self).init()\n self.admin_space=self.foo_space\n self.user_space=self.bar_space\n\n def testUserAccess(self):\n \"\"\"\n Tests if only the allowed user can access the space index page.\n \"\"\"\n #Not a public space\n self.admin_space.public = False\n self.admin_space.save()\n \n url = self.getURL(url_names.SPACE_INDEX,\n kwargs={'space_url': self.admin_space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertContains(response, \"You're an anonymous user.\")\n #print self.printResponse(response)\n \n user = self.login(\"test_user\", \"<PASSWORD>\")\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n self.assertFalse(user.is_anonymous())\n self.assertFalse(user in self.admin_space.users.all())\n self.assertFalse(user in self.admin_space.mods.all())\n self.assertFalse(user in self.admin_space.admins.all())\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertContains(response, \"You're not registered to this space.\")\n \n self.logout()\n \n url = self.getURL(url_names.SPACE_INDEX,\n kwargs={'space_url': self.user_space.url})\n self.assertTrue(self.user_space.public)\n response = self.get(url) \n #print self.printResponse(response)\n self.assertResponseOK(response)\n self.assertContains(response, \"Hello anonymous user.\")\n \n user.public = False\n user.is_staff = True\n user.save()\n self.assertTrue(user.is_staff)\n self.assertFalse(user.public)\n url = self.getURL(url_names.SPACE_INDEX, \n kwargs={'space_url': self.user_space.url})\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertContains(response, \"Hello anonymous user.\")\n \n admin = self.login(self.admin_username, self.admin_password)\n self.assertTrue(self.isLoggedIn(admin))\n# self.assertTrue(admin.is_superuser)\n self.assertFalse(admin.is_superuser)\n response = self.get(url)\n self.assertResponseOK(response)\n\n superuser=self.create_super_user()\n self.login('admin','admin_pass')\n self.assertTrue(self.isLoggedIn(superuser))\n self.assertTrue(superuser.is_superuser)\n response = self.get(url)\n self.assertResponseOK(response)\n\n self.logout()\n response = self.get(url)\n self.assertResponseOK(response)\n self.assertContains(response, \"Hello anonymous user.\")\n", "id": "1235985", "language": "Python", "matching_score": 3.922358512878418, "max_stars_count": 40, "path": "tests/unit_tests/src/core/spaces/test_views.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom e_cidadania import url_names\n\nfrom apps.ecidadania.news import models\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass NewsViewTest(ECDTestCase):\n \"\"\"Tests the news related views in core.views.news.\"\n \"\"\"\n \n def setUp(self):\n super(NewsViewTest, self).init()\n \n def testListNews(self):\n \"\"\"Tests the view for listing news.\n \"\"\"\n \n response = self.get(url_name=url_names.LIST_SITE_NEWS)\n self.assertResponseOK(response)\n \n def testAddPost(self):\n \"\"\"Tests the view for adding news posts.\n \"\"\"\n # No user is logged in, so we will be redirected to the login page\n response = self.get(url_name=url_names.ADD_SITE_POST)\n # TODO(Praveen): Check for the redirected url\n self.assertResponseRedirect(response)\n \n # Log in a superuser with username as 'admin' and password as `password`\n self.create_super_user(logged_in=True)\n response = self.get(url_name=url_names.ADD_SITE_POST)\n self.assertResponseOK(response)\n \n def testViewPost(self):\n \"\"\"Tests the view for viewing news posts.\n \"\"\"\n \n post = self.seed(models.Post)\n url = self.getURL(url_names.VIEW_SITE_POST, kwargs={'post_id': post.id})\n response = self.get(url=url)\n self.assertResponseOK(response)\n \n def testEditPost(self):\n \"\"\"Tests the view for editing news posts.\n \"\"\"\n \n post = self.seed(models.Post)\n url = self.getURL(url_names.EDIT_SITE_POST, kwargs={'post_id': post.id})\n self.create_super_user(logged_in=True)\n response = self.get(url)\n self.assertResponseOK(response)\n \n def testDeletePost(self):\n \"\"\"Tests the view for deleting news posts.\n \"\"\"\n \n post = self.seed(models.Post)\n url = self.getURL(url_names.DELETE_SITE_POST, \n kwargs={'post_id': post.id})\n self.create_super_user(logged_in=True)\n response = self.get(url)\n self.assertResponseOK(response)", "id": "1823051", "language": "Python", "matching_score": 5.457747459411621, "max_stars_count": 40, "path": "tests/unit_tests/src/core/views/test_news.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom e_cidadania import url_names\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass IndexTestCase(ECDTestCase): \n \"\"\"Class to test index related views.\n \"\"\"\n \n def testIndexView(self):\n \"\"\"Tests the index view.\n \"\"\"\n \n response = self.get(url_name=url_names.SITE_INDEX)\n self.assertResponseOK(response)\n \n def testIndexEntriesFeed(self):\n \"\"\"Tests the index entries feed view.\n \"\"\"\n \n response = self.get(url_name=url_names.SITE_FEED)\n self.assertResponseOK(response)", "id": "4449839", "language": "Python", "matching_score": 4.325302600860596, "max_stars_count": 40, "path": "tests/unit_tests/src/core/views/test_index.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom e_cidadania import url_names\n\nfrom apps.ecidadania.news import models\n\nfrom tests.test_utils import ECDTestCase\n\n\nclass InviteViewTest(ECDTestCase):\n \"\"\"Tests the invite view in core.views.invite.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testInviteView(self):\n url = self.getURL(url_names.INVITE)\n response = self.get(url)\n self.assertResponseOK(response)\n \n self.create_user('test_user', 'user_pass', logged_in=True)\n response = self.get(url)\n #print self.printResponse(response)\n #print response.context['uri']\n self.assertResponseOK(response)\n \n post_data = {'email_addr': '<EMAIL>', 'mail_msg':'test'}\n response = self.post(url, data=post_data)\n self.assertResponseOK(response)\n", "id": "8649613", "language": "Python", "matching_score": 1.2216010093688965, "max_stars_count": 40, "path": "tests/unit_tests/src/core/views/test_invite.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport os\n\nfrom selenium.webdriver.common.keys import Keys\n\nfrom tests.functional_utils import FunctionalTestCase\n\n\nclass CreateSpaceTest(FunctionalTestCase):\n \"\"\"Tests if the page to create a space works correctly.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testCreateSpace(self):\n username = 'test_user'\n password = '<PASSWORD>'\n self.create_super_user(username, password)\n \n url = self.live_server_url + self.getURL('site-index')\n self.browser.get(url)\n self.wait(2)\n self.browser.find_element_by_link_text(\"Login\").click()\n self.wait(2)\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys(username)\n \n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys(password)\n self.wait(2)\n password_field.send_keys(Keys.RETURN)\n #self.wait(1)\n self.wait(2)\n \n url = self.live_server_url + self.getURL('create-space')\n self.browser.get(url)\n self.wait(2)\n \n #Now we fill the creat space form\n \n name_field = self.browser.find_element_by_name('name')\n name_field.send_keys('test_<PASSWORD>')\n \n url_field = self.browser.find_element_by_name('url')\n url_field.send_keys('test_url')\n \n logo_field = self.browser.find_element_by_name('logo')\n logo_field.send_keys(os.getcwd()+'/generic.jpeg')\n \n banner_field = self.browser.find_element_by_name('banner')\n banner_field.send_keys(os.getcwd()+'/generic.jpeg')\n self.wait(2)\n #url_field.send_keys(Keys.RETURN)\n banner_field.submit()\n self.wait(300)", "id": "4302953", "language": "Python", "matching_score": 6.524360656738281, "max_stars_count": 40, "path": "tests/functional/test_create_space.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom selenium.webdriver.common.keys import Keys\n\nfrom tests.functional_utils import FunctionalTestCase\n\n\n\nclass LoginPageTest(FunctionalTestCase):\n \"\"\"Tests if user can log into ecidadania.\n \"\"\"\n \n def setUp(self):\n self.init()\n \n def testIfAUserCanLogin(self):\n \n url = self.live_server_url + self.getURL('site-index')\n self.browser.get(url)\n #self.wait(2)\n \n self.browser.find_element_by_link_text(\"Login\").click()\n self.wait(2)\n \n self.create_user('praveen', 'something')\n \n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys('praveen')\n \n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('<PASSWORD>')\n self.wait(2)\n password_field.send_keys(Keys.RETURN)\n self.wait(1)\n ", "id": "10686174", "language": "Python", "matching_score": 5.296921730041504, "max_stars_count": 40, "path": "tests/functional/test_login_page.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2010-2012 <NAME>\n#\n# This file is part of e-cidadania.\n#\n# e-cidadania is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-cidadania is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.\n\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom django.test import LiveServerTestCase\n\nfrom tests.test_utils import ECDTestCase\n\n\n\nclass FunctionalTestCase(ECDTestCase, LiveServerTestCase):\n \"\"\"\n Class which provides functional testing capabilities. It subclasses both\n our custom ECDTestCase and django's LiveServerTestCase. LiveServerTestCase\n was introduced in Django 1.4 to support functional testing.\n \"\"\"\n \n def init(self):\n ECDTestCase.init(self)\n self.browser = webdriver.Firefox()\n \n def setUp(self):\n \"\"\"\n Setup done prior to a test run.\n \"\"\"\n self.init()\n \n def tearDown(self):\n \"\"\"\n Actions taken after a test run.\n \"\"\" \n self.browser.quit()\n \n def wait(self, sec):\n \"\"\"\n Halts script execution for `sec` seconds. 
\n \n This is necessary because the script executes faster than the browser.\n \"\"\"\n time.sleep(sec)\n return\n\n def login(self, browser, username='test_user', password='<PASSWORD>'):\n \"\"\"\n Logs into e-cidadania.\n \"\"\"\n username_field = browser.find_element_by_name('username')\n username_field.send_keys(username)\n \n password_field = browser.find_element_by_name('password')\n password_field.send_keys(password)\n self.wait(2)\n password_field.send_keys(Keys.RETURN)\n", "id": "3797760", "language": "Python", "matching_score": 0.7578350305557251, "max_stars_count": 40, "path": "tests/functional_utils.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport django\nfrom django.conf import settings\nfrom django.forms.widgets import Select\nfrom django.core.urlresolvers import reverse\nfrom django.utils.encoding import iri_to_uri\nfrom django.utils.safestring import mark_safe\nfrom django.db.models import get_model\nimport locale\nfrom apps.thirdparty.smart_selects.utils import unicode_sorter\n\n\nif django.VERSION >= (1, 2, 0) and getattr(settings,\n 'USE_DJANGO_JQUERY', True):\n USE_DJANGO_JQUERY = True\nelse:\n USE_DJANGO_JQUERY = False\n JQUERY_URL = getattr(settings, 'JQUERY_URL', 'http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js')\n\n\nclass ChainedSelect(Select):\n def __init__(self, app_name, model_name, chain_field, model_field, show_all, auto_choose, manager=None, *args, **kwargs):\n self.app_name = app_name\n self.model_name = model_name\n self.chain_field = chain_field\n self.model_field = model_field\n self.show_all = show_all\n self.auto_choose = auto_choose\n self.manager = manager\n super(Select, self).__init__(*args, **kwargs)\n\n class Media:\n if USE_DJANGO_JQUERY:\n js = [\"%s%s\" % (settings.ADMIN_MEDIA_PREFIX, i) for i in\n ('js/jquery.min.js', 'js/jquery.init.js')]\n elif JQUERY_URL:\n js = (\n JQUERY_URL,\n )\n\n def render(self, name, value, attrs=None, choices=()):\n if len(name.split('-')) > 1: # formset\n chain_field = '-'.join(name.split('-')[:-1] + [self.chain_field])\n else:\n chain_field = self.chain_field\n\n if self.show_all:\n view_name = \"chained_filter_all\"\n else:\n view_name = \"chained_filter\"\n kwargs = {'app': self.app_name, 'model': self.model_name, 'field': self.model_field, 'value': \"1\"}\n if self.manager is not None:\n kwargs.update({'manager': self.manager})\n url = \"/\".join(reverse(view_name, kwargs=kwargs).split(\"/\")[:-2])\n if self.auto_choose:\n auto_choose = 'true'\n else:\n auto_choose = 'false'\n empty_label = iter(self.choices).next()[1] # Hacky way to getting the correct empty_label from the field instead of a hardcoded '--------'\n js = \"\"\"\n <script type=\"text/javascript\">\n //<![CDATA[\n (function($) {\n function fireEvent(element,event){\n if (document.createEventObject){\n // dispatch for IE\n var evt = document.createEventObject();\n return 
element.fireEvent('on'+event,evt)\n }\n else{\n // dispatch for firefox + others\n var evt = document.createEvent(\"HTMLEvents\");\n evt.initEvent(event, true, true ); // event type,bubbling,cancelable\n return !element.dispatchEvent(evt);\n }\n }\n\n function dismissRelatedLookupPopup(win, chosenId) {\n var name = windowname_to_id(win.name);\n var elem = document.getElementById(name);\n if (elem.className.indexOf('vManyToManyRawIdAdminField') != -1 && elem.value) {\n elem.value += ',' + chosenId;\n } else {\n elem.value = chosenId;\n }\n fireEvent(elem, 'change');\n win.close();\n }\n\n $(document).ready(function(){\n function fill_field(val, init_value){\n if (!val || val==''){\n options = '<option value=\"\">%(empty_label)s<'+'/option>';\n $(\"#%(id)s\").html(options);\n $('#%(id)s option:first').attr('selected', 'selected');\n $(\"#%(id)s\").trigger('change');\n return;\n }\n $.getJSON(\"%(url)s/\"+val+\"/\", function(j){\n var options = '<option value=\"\">%(empty_label)s<'+'/option>';\n for (var i = 0; i < j.length; i++) {\n options += '<option value=\"' + j[i].value + '\">' + j[i].display + '<'+'/option>';\n }\n var width = $(\"#%(id)s\").outerWidth();\n $(\"#%(id)s\").html(options);\n if (navigator.appVersion.indexOf(\"MSIE\") != -1)\n $(\"#%(id)s\").width(width + 'px');\n $('#%(id)s option:first').attr('selected', 'selected');\n var auto_choose = %(auto_choose)s;\n if(init_value){\n $('#%(id)s option[value=\"'+ init_value +'\"]').attr('selected', 'selected');\n }\n if(auto_choose && j.length == 1){\n $('#%(id)s option[value=\"'+ j[0].value +'\"]').attr('selected', 'selected');\n }\n $(\"#%(id)s\").trigger('change');\n })\n }\n\n if(!$(\"#id_%(chainfield)s\").hasClass(\"chained\")){\n var val = $(\"#id_%(chainfield)s\").val();\n fill_field(val, \"%(value)s\");\n }\n\n $(\"#id_%(chainfield)s\").change(function(){\n var start_value = $(\"#%(id)s\").val();\n var val = $(this).val();\n fill_field(val, start_value);\n })\n })\n var oldDismissAddAnotherPopup = dismissAddAnotherPopup;\n dismissAddAnotherPopup = function(win, newId, newRepr) {\n oldDismissAddAnotherPopup(win, newId, newRepr);\n if (windowname_to_id(win.name) == \"id_%(chainfield)s\") {\n $(\"#id_%(chainfield)s\").change();\n }\n }\n })(jQuery || django.jQuery);\n //]]>\n </script>\n\n \"\"\" % {\"chainfield\": chain_field, \"url\": url, \"id\": attrs['id'], 'value': value, 'auto_choose': auto_choose, 'empty_label': empty_label}\n final_choices = []\n\n if value:\n item = self.queryset.filter(pk=value)[0]\n try:\n pk = getattr(item, self.model_field + \"_id\")\n filter = {self.model_field: pk}\n except AttributeError:\n try: # maybe m2m?\n pks = getattr(item, self.model_field).all().values_list('pk', flat=True)\n filter = {self.model_field + \"__in\": pks}\n except AttributeError:\n try: # maybe a set?\n pks = getattr(item, self.model_field + \"_set\").all().values_list('pk', flat=True)\n filter = {self.model_field + \"__in\": pks}\n except: # give up\n filter = {}\n filtered = list(get_model(self.app_name, self.model_name).objects.filter(**filter).distinct())\n filtered.sort(cmp=locale.strcoll, key=lambda x: unicode_sorter(unicode(x)))\n for choice in filtered:\n final_choices.append((choice.pk, unicode(choice)))\n if len(final_choices) > 1:\n final_choices = [(\"\", (empty_label))] + final_choices\n if self.show_all:\n final_choices.append((\"\", (empty_label)))\n self.choices = list(self.choices)\n self.choices.sort(cmp=locale.strcoll, key=lambda x: unicode_sorter(x[1]))\n for ch in self.choices:\n if not ch in 
final_choices:\n final_choices.append(ch)\n self.choices = ()\n final_attrs = self.build_attrs(attrs, name=name)\n if 'class' in final_attrs:\n final_attrs['class'] += ' chained'\n else:\n final_attrs['class'] = 'chained'\n output = super(ChainedSelect, self).render(name, value, final_attrs, choices=final_choices)\n output += js\n return mark_safe(output)\n", "id": "4546049", "language": "Python", "matching_score": 3.2998766899108887, "max_stars_count": 40, "path": "src/apps/thirdparty/smart_selects/widgets.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom apps.thirdparty.smart_selects.widgets import ChainedSelect\nfrom django.forms.models import ModelChoiceField\nfrom django.forms import ChoiceField\nfrom django.db.models import get_model\n\n\nclass ChainedModelChoiceField(ModelChoiceField):\n def __init__(self, app_name, model_name, chain_field, model_field, show_all, auto_choose, manager=None, initial=None, *args, **kwargs):\n defaults = {\n 'widget': ChainedSelect(app_name, model_name, chain_field, model_field, show_all, auto_choose, manager),\n }\n defaults.update(kwargs)\n if not 'queryset' in kwargs:\n queryset = get_model(app_name, model_name).objects.all()\n super(ChainedModelChoiceField, self).__init__(queryset=queryset, initial=initial, *args, **defaults)\n else:\n super(ChainedModelChoiceField, self).__init__(initial=initial, *args, **defaults)\n\n def _get_choices(self):\n self.widget.queryset = self.queryset\n choices = super(ChainedModelChoiceField, self)._get_choices()\n return choices\n choices = property(_get_choices, ChoiceField._set_choices)\n\n\nclass GroupedModelSelect(ModelChoiceField):\n def __init__(self, queryset, order_field, *args, **kwargs):\n self.order_field = order_field\n super(GroupedModelSelect, self).__init__(queryset, *args, **kwargs)\n\n def _get_choices(self):\n # If self._choices is set, then somebody must have manually set\n # the property self.choices. In this case, just return self._choices.\n if hasattr(self, '_choices'):\n return self._choices\n # Otherwise, execute the QuerySet in self.queryset to determine the\n # choices dynamically. Return a fresh QuerySetIterator that has not been\n # consumed. Note that we're instantiating a new QuerySetIterator *each*\n # time _get_choices() is called (and, thus, each time self.choices is\n # accessed) so that we can ensure the QuerySet has not been consumed. 
This\n # construct might look complicated but it allows for lazy evaluation of\n # the queryset.\n final = [(\"\", self.empty_label or \"---------\"), ]\n group = None\n for item in self.queryset:\n if not group or group[0] != unicode(getattr(item, self.order_field)):\n if group:\n final.append(group)\n group = [unicode(getattr(item, self.order_field)), []]\n group[1].append(self.make_choice(item))\n return final\n\n def make_choice(self, obj):\n return (obj.pk, \" \" + self.label_from_instance(obj))\n\n choices = property(_get_choices, ChoiceField._set_choices)\n", "id": "2298786", "language": "Python", "matching_score": 3.517221450805664, "max_stars_count": 40, "path": "src/apps/thirdparty/smart_selects/form_fields.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db.models.fields.related import ForeignKey\nimport form_fields\ntry:\n from south.modelsinspector import add_introspection_rules\n has_south = True\nexcept:\n has_south = False\n\n\nclass ChainedForeignKey(ForeignKey):\n \"\"\"\n chains the choices of a previous combo box with this one\n \"\"\"\n def __init__(self, to, chained_field=None, chained_model_field=None, show_all=False, auto_choose=False, **kwargs):\n if isinstance(to, basestring):\n self.app_name, self.model_name = to.split('.')\n else:\n self.app_name = to._meta.app_label\n self.model_name = to._meta.object_name\n self.chain_field = chained_field\n self.model_field = chained_model_field\n self.show_all = show_all\n self.auto_choose = auto_choose\n ForeignKey.__init__(self, to, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = {\n 'form_class': form_fields.ChainedModelChoiceField,\n 'queryset': self.rel.to._default_manager.complex_filter(self.rel.limit_choices_to),\n 'to_field_name': self.rel.field_name,\n 'app_name': self.app_name,\n 'model_name': self.model_name,\n 'chain_field': self.chain_field,\n 'model_field': self.model_field,\n 'show_all': self.show_all,\n 'auto_choose': self.auto_choose,\n }\n defaults.update(kwargs)\n return super(ChainedForeignKey, self).formfield(**defaults)\n\n\nclass GroupedForeignKey(ForeignKey):\n \"\"\"\n Opt Grouped Field\n \"\"\"\n def __init__(self, to, group_field, **kwargs):\n self.group_field = group_field\n self._choices = True\n ForeignKey.__init__(self, to, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = {\n 'form_class': form_fields.GroupedModelSelect,\n 'queryset': self.rel.to._default_manager.complex_filter(\n self.rel.limit_choices_to),\n 'to_field_name': self.rel.field_name,\n 'order_field': self.group_field,\n }\n defaults.update(kwargs)\n return super(ForeignKey, self).formfield(**defaults)\n\nif has_south:\n rules_grouped = [(\n (GroupedForeignKey,),\n [],\n {\n 'to': ['rel.to', {}],\n 'group_field': ['group_field', {}],\n },\n )]\n\n add_introspection_rules([], [\"^apps\\.thirdparty\\.smart_selects\\.db_fields\\.ChainedForeignKey\"])\n 
add_introspection_rules(rules_grouped, [\"^apps\\.thirdparty\\.smart_selects\\.db_fields\\.GroupedForeignKey\"])\n", "id": "7377166", "language": "Python", "matching_score": 1.5698210000991821, "max_stars_count": 40, "path": "src/apps/thirdparty/smart_selects/db_fields.py" }, { "content": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Country'\n db.create_table(u'accounts_country', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ('code', self.gf('django.db.models.fields.CharField')(max_length=5)),\n ))\n db.send_create_signal(u'accounts', ['Country'])\n\n # Adding model 'Region'\n db.create_table(u'accounts_region', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ('country', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ))\n db.send_create_signal(u'accounts', ['Region'])\n\n # Adding model 'City'\n db.create_table(u'accounts_city', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ('region', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ))\n db.send_create_signal(u'accounts', ['City'])\n\n # Adding model 'Interest'\n db.create_table(u'accounts_interest', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('item', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ))\n db.send_create_signal(u'accounts', ['Interest'])\n\n # Adding model 'UserProfile'\n db.create_table(u'accounts_userprofile', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),\n ('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),\n ('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=6, blank=True)),\n ('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=6, blank=True)),\n ('location', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),\n ('firstname', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),\n ('surname', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),\n ('gender', self.gf('django.db.models.fields.CharField')(max_length=1, blank=True)),\n ('birthdate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),\n ('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Country'], null=True)),\n ('region', self.gf('apps.thirdparty.smart_selects.db_fields.ChainedForeignKey')(to=orm['accounts.Region'], null=True)),\n ('city', self.gf('apps.thirdparty.smart_selects.db_fields.ChainedForeignKey')(to=orm['accounts.City'], null=True)),\n ('district', self.gf('django.db.models.fields.CharField')(max_length=50)),\n ('address', self.gf('django.db.models.fields.CharField')(max_length=100)),\n ('address_number', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),\n ('address_floor', self.gf('django.db.models.fields.CharField')(max_length=3)),\n ('address_letter', 
self.gf('django.db.models.fields.CharField')(max_length=2, null=True, blank=True)),\n ('phone', self.gf('django.db.models.fields.CharField')(max_length=9, null=True, blank=True)),\n ('phone_alt', self.gf('django.db.models.fields.CharField')(max_length=9, null=True, blank=True)),\n ('nid', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),\n ('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),\n ))\n db.send_create_signal(u'accounts', ['UserProfile'])\n\n # Adding M2M table for field interests on 'UserProfile'\n m2m_table_name = db.shorten_name(u'accounts_userprofile_interests')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('userprofile', models.ForeignKey(orm[u'accounts.userprofile'], null=False)),\n ('interest', models.ForeignKey(orm[u'accounts.interest'], null=False))\n ))\n db.create_unique(m2m_table_name, ['userprofile_id', 'interest_id'])\n\n\n def backwards(self, orm):\n # Deleting model 'Country'\n db.delete_table(u'accounts_country')\n\n # Deleting model 'Region'\n db.delete_table(u'accounts_region')\n\n # Deleting model 'City'\n db.delete_table(u'accounts_city')\n\n # Deleting model 'Interest'\n db.delete_table(u'accounts_interest')\n\n # Deleting model 'UserProfile'\n db.delete_table(u'accounts_userprofile')\n\n # Removing M2M table for field interests on 'UserProfile'\n db.delete_table(db.shorten_name(u'accounts_userprofile_interests'))\n\n\n models = {\n u'accounts.city': {\n 'Meta': {'object_name': 'City'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),\n 'region': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'accounts.country': {\n 'Meta': {'object_name': 'Country'},\n 'code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'accounts.interest': {\n 'Meta': {'object_name': 'Interest'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'item': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'accounts.region': {\n 'Meta': {'object_name': 'Region'},\n 'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'accounts.userprofile': {\n 'Meta': {'object_name': 'UserProfile'},\n 'address': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'address_floor': ('django.db.models.fields.CharField', [], {'max_length': '3'}),\n 'address_letter': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),\n 'address_number': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),\n 'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),\n 'city': ('apps.thirdparty.smart_selects.db_fields.ChainedForeignKey', [], {'to': u\"orm['accounts.City']\", 'null': 'True'}),\n 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['accounts.Country']\", 'null': 'True'}),\n 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 
'district': ('django.db.models.fields.CharField', [], {'max_length': '50'}),\n 'firstname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),\n 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'interests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['accounts.Interest']\", 'null': 'True', 'blank': 'True'}),\n 'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '6', 'blank': 'True'}),\n 'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),\n 'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '6', 'blank': 'True'}),\n 'nid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),\n 'phone': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),\n 'phone_alt': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),\n 'region': ('apps.thirdparty.smart_selects.db_fields.ChainedForeignKey', [], {'to': u\"orm['accounts.Region']\", 'null': 'True'}),\n 'surname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'unique': 'True'}),\n 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})\n },\n u'auth.group': {\n 'Meta': {'object_name': 'Group'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n u'auth.permission': {\n 'Meta': {'ordering': \"(u'content_type__app_label', u'content_type__model', u'codename')\", 'unique_together': \"((u'content_type', u'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': 
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('<PASSWORD>', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n }\n }\n\n complete_apps = ['accounts']", "id": "681595", "language": "Python", "matching_score": 5.193881511688232, "max_stars_count": 40, "path": "src/apps/ecidadania/accounts/migrations/0001_initial.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\n\nfrom apps.thirdparty.userprofile.models import BaseProfile\nfrom core.spaces.models import Space\nfrom apps.ecidadania.accounts.locations import Country, Region, City\nfrom apps.thirdparty.smart_selects.db_fields import ChainedForeignKey\n\nGENDER = (\n\n ('M', _('Male')),\n ('F', _('Female')),\n\n)\n\n\nclass Interest(models.Model):\n\n \"\"\"\n \"\"\"\n item = models.CharField(_('Interest'), max_length=50)\n\n\nclass UserProfile(BaseProfile):\n\n \"\"\"\n Extends the default User profiles of Django. 
The fields of this model\n can be obtained by the user.get_profile method and it's extended by the\n django-profile application.\n \"\"\"\n # user = models.ForeignKey(User, unique=True)\n\n firstname = models.CharField(_('Name'), max_length=50, blank=True)\n surname = models.CharField(_('Surname'), max_length=200, blank=True)\n gender = models.CharField(_('Gender'), max_length=1, choices=GENDER,\n blank=True)\n birthdate = models.DateField(_('Birth date'), blank=True, null=True, help_text='dd/mm/yyyy')\n country = models.ForeignKey(Country, null=True)\n region = ChainedForeignKey(\n Region,\n chained_field=\"country\",\n chained_model_field=\"country\",\n show_all=True,\n auto_choose=True,\n null=True\n )\n city = ChainedForeignKey(\n City,\n chained_field=\"region\",\n chained_model_field=\"region\",\n null=True\n )\n district = models.CharField(_('District'), max_length=50)\n\n # Detailed overview of the address\n address = models.CharField(_('Address'), max_length=100)\n address_number = models.CharField(_('Number'), max_length=3, blank=True,\n null=True, validators=[RegexValidator(regex='^[0-9]*$',\n message='Invalid characters in the building number.')])\n address_floor = models.CharField(_('Floor'), max_length=3,\n validators=[RegexValidator(regex='^[0-9]*$', message='Invalid \\\n characters in the floor number.')])\n address_letter = models.CharField(_('Letter'), max_length=2, null=True,\n blank=True, validators=[RegexValidator(regex='^[A-Za-z]*$')])\n phone = models.CharField(_('Phone 1'), max_length=9, null=True,\n validators=[RegexValidator(\n regex='^[0-9]*$',\n message='Invalid characters in the phone number.'\n )],\n blank=True, help_text=_('9 digits maximum'))\n phone_alt = models.CharField(_('Phone 2'), max_length=9, null=True,\n validators=[RegexValidator(\n regex='^[0-9]*$',\n message='Invalid characters in the phone number.'\n )],\n blank=True, help_text=_('9 digits maximum'))\n\n nid = models.CharField(_('Identification document'), max_length=200,\n null=True, blank=True)\n\n website = models.URLField(_('Website'), max_length=200,\n null=True, blank=True)\n interests = models.ManyToManyField(Interest, blank=True, null=True)\n\n def get_age(self):\n\n \"\"\"\n Get the current user age.\n \"\"\"\n\n if self.birthdate:\n diff = datetime.date.today() - self.birthdate\n years = diff.days / 365\n return years\n else:\n return '??'\n\nUser.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])\n", "id": "3064037", "language": "Python", "matching_score": 2.9441328048706055, "max_stars_count": 40, "path": "src/apps/ecidadania/accounts/models.py" }, { "content": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Avatar'\n db.create_table(u'userprofile_avatar', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),\n ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),\n ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('valid', self.gf('django.db.models.fields.BooleanField')(default=False)),\n ))\n db.send_create_signal(u'userprofile', ['Avatar'])\n\n # Adding unique constraint on 'Avatar', fields ['user', 'valid']\n db.create_unique(u'userprofile_avatar', ['user_id', 'valid'])\n\n # Adding model 'EmailValidation'\n 
db.create_table(u'userprofile_emailvalidation', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),\n ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),\n ('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=70, db_index=True)),\n ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ))\n db.send_create_signal(u'userprofile', ['EmailValidation'])\n\n\n def backwards(self, orm):\n # Removing unique constraint on 'Avatar', fields ['user', 'valid']\n db.delete_unique(u'userprofile_avatar', ['user_id', 'valid'])\n\n # Deleting model 'Avatar'\n db.delete_table(u'userprofile_avatar')\n\n # Deleting model 'EmailValidation'\n db.delete_table(u'userprofile_emailvalidation')\n\n\n models = {\n u'auth.group': {\n 'Meta': {'object_name': 'Group'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n u'auth.permission': {\n 'Meta': {'ordering': \"(u'content_type__app_label', u'content_type__model', u'codename')\", 'unique_together': \"((u'content_type', u'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('<PASSWORD>', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': 
('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n u'userprofile.avatar': {\n 'Meta': {'unique_together': \"(('user', 'valid'),)\", 'object_name': 'Avatar'},\n 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\"}),\n 'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})\n },\n u'userprofile.emailvalidation': {\n 'Meta': {'object_name': 'EmailValidation'},\n 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '70', 'db_index': 'True'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'unique': 'True'})\n }\n }\n\n complete_apps = ['userprofile']", "id": "8824449", "language": "Python", "matching_score": 5.725770473480225, "max_stars_count": 40, "path": "src/apps/thirdparty/userprofile/migrations/0001_initial.py" }, { "content": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Poll'\n db.create_table(u'voting_poll', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('question', self.gf('django.db.models.fields.CharField')(max_length=200)),\n ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('poll_lastup', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),\n ('author', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='poll-author', null=True, to=orm['auth.User'])),\n ('space', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['spaces.Space'], null=True, blank=True)),\n ('poll_tags', self.gf('apps.thirdparty.tagging.fields.TagField')(max_length=255, blank=True)),\n ('start_date', self.gf('django.db.models.fields.DateField')()),\n ('end_date', self.gf('django.db.models.fields.DateField')()),\n ))\n db.send_create_signal(u'voting', ['Poll'])\n\n # Adding M2M table for field participants on 'Poll'\n m2m_table_name = db.shorten_name(u'voting_poll_participants')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('poll', models.ForeignKey(orm[u'voting.poll'], null=False)),\n ('user', models.ForeignKey(orm[u'auth.user'], null=False))\n ))\n db.create_unique(m2m_table_name, ['poll_id', 'user_id'])\n\n # Adding model 'Choice'\n db.create_table(u'voting_choice', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['voting.Poll'])),\n ('choice_text', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),\n ))\n db.send_create_signal(u'voting', ['Choice'])\n\n # Adding M2M table for field votes on 'Choice'\n m2m_table_name = 
db.shorten_name(u'voting_choice_votes')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('choice', models.ForeignKey(orm[u'voting.choice'], null=False)),\n ('user', models.ForeignKey(orm[u'auth.user'], null=False))\n ))\n db.create_unique(m2m_table_name, ['choice_id', 'user_id'])\n\n # Adding model 'Voting'\n db.create_table(u'voting_voting', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),\n ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),\n ('space', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['spaces.Space'], null=True, blank=True)),\n ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),\n ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),\n ('start_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),\n ('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),\n ('ponderation', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),\n ('max_votes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),\n ))\n db.send_create_signal(u'voting', ['Voting'])\n\n # Adding M2M table for field proposalsets on 'Voting'\n m2m_table_name = db.shorten_name(u'voting_voting_proposalsets')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('voting', models.ForeignKey(orm[u'voting.voting'], null=False)),\n ('proposalset', models.ForeignKey(orm[u'proposals.proposalset'], null=False))\n ))\n db.create_unique(m2m_table_name, ['voting_id', 'proposalset_id'])\n\n # Adding M2M table for field proposals on 'Voting'\n m2m_table_name = db.shorten_name(u'voting_voting_proposals')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('voting', models.ForeignKey(orm[u'voting.voting'], null=False)),\n ('proposal', models.ForeignKey(orm[u'proposals.proposal'], null=False))\n ))\n db.create_unique(m2m_table_name, ['voting_id', 'proposal_id'])\n\n # Adding model 'ConfirmVote'\n db.create_table(u'voting_confirmvote', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),\n ('proposal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proposals.Proposal'], null=True, blank=True)),\n ('token', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),\n ('requested_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ))\n db.send_create_signal(u'voting', ['ConfirmVote'])\n\n\n def backwards(self, orm):\n # Deleting model 'Poll'\n db.delete_table(u'voting_poll')\n\n # Removing M2M table for field participants on 'Poll'\n db.delete_table(db.shorten_name(u'voting_poll_participants'))\n\n # Deleting model 'Choice'\n db.delete_table(u'voting_choice')\n\n # Removing M2M table for field votes on 'Choice'\n db.delete_table(db.shorten_name(u'voting_choice_votes'))\n\n # Deleting model 'Voting'\n db.delete_table(u'voting_voting')\n\n # Removing M2M table for field 
proposalsets on 'Voting'\n db.delete_table(db.shorten_name(u'voting_voting_proposalsets'))\n\n # Removing M2M table for field proposals on 'Voting'\n db.delete_table(db.shorten_name(u'voting_voting_proposals'))\n\n # Deleting model 'ConfirmVote'\n db.delete_table(u'voting_confirmvote')\n\n\n models = {\n u'auth.group': {\n 'Meta': {'object_name': 'Group'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n u'auth.permission': {\n 'Meta': {'ordering': \"(u'content_type__app_label', u'content_type__model', u'codename')\", 'unique_together': \"((u'content_type', u'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n u'debate.debate': {\n 'Meta': {'object_name': 'Debate'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], 
{'null': 'True', 'blank': 'True'}),\n 'end_date': ('django.db.models.fields.DateField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'start_date': ('django.db.models.fields.DateField', [], {}),\n 'theme': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})\n },\n u'proposals.proposal': {\n 'Meta': {'object_name': 'Proposal'},\n 'anon_allowed': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_authors'\", 'null': 'True', 'to': u\"orm['auth.User']\"}),\n 'budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'closed': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_closed_by'\", 'null': 'True', 'to': u\"orm['auth.User']\"}),\n 'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\", 'null': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {'max_length': '300'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '17', 'decimal_places': '15', 'blank': 'True'}),\n 'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '17', 'decimal_places': '15', 'blank': 'True'}),\n 'merged': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'merged_proposals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'merged_proposals_rel_+'\", 'null': 'True', 'to': u\"orm['proposals.Proposal']\"}),\n 'mod_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'object_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),\n 'proposalset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_in'\", 'null': 'True', 'to': u\"orm['proposals.ProposalSet']\"}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'refurbished': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'support_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'support_votes'\", 'null': 'True', 'symmetrical': 'False', 'to': u\"orm['auth.User']\"}),\n 'tags': ('apps.thirdparty.tagging.fields.TagField', [], {'max_length': '255', 'blank': 'True'}),\n 'title': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),\n 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'voting_votes'\", 'null': 'True', 'symmetrical': 'False', 'to': u\"orm['auth.User']\"})\n },\n u'proposals.proposalset': {\n 'Meta': {'object_name': 'ProposalSet'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'debate': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['debate.Debate']\", 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'})\n },\n u'spaces.space': {\n 'Meta': {'ordering': \"['name']\", 'object_name': 'Space'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'banner': ('core.spaces.fields.StdImageField', [], {'max_length': '100'}),\n 'description': ('django.db.models.fields.TextField', [], {'default': \"u'Write here your description.'\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'logo': ('core.spaces.fields.StdImageField', [], {'max_length': '100'}),\n 'mod_cal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_debate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_docs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_proposals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})\n },\n u'voting.choice': {\n 'Meta': {'object_name': 'Choice'},\n 'choice_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['voting.Poll']\"}),\n 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'})\n },\n u'voting.confirmvote': {\n 'Meta': {'object_name': 'ConfirmVote'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['proposals.Proposal']\", 'null': 'True', 'blank': 'True'}),\n 'requested_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 
'True', 'blank': 'True'})\n },\n u'voting.poll': {\n 'Meta': {'object_name': 'Poll'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'poll-author'\", 'null': 'True', 'to': u\"orm['auth.User']\"}),\n 'end_date': ('django.db.models.fields.DateField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'poll_lastup': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'poll_tags': ('apps.thirdparty.tagging.fields.TagField', [], {'max_length': '255', 'blank': 'True'}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'question': ('django.db.models.fields.CharField', [], {'max_length': '200'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'start_date': ('django.db.models.fields.DateField', [], {})\n },\n u'voting.voting': {\n 'Meta': {'object_name': 'Voting'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'max_votes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'ponderation': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),\n 'proposals': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['proposals.Proposal']\", 'null': 'True', 'blank': 'True'}),\n 'proposalsets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['proposals.ProposalSet']\", 'null': 'True', 'blank': 'True'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})\n }\n }\n\n complete_apps = ['voting']", "id": "1044796", "language": "Python", "matching_score": 7.622263431549072, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/migrations/0001_initial.py" }, { "content": "# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Category'\n db.create_table(u'proposals_category', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),\n ('object_pk', self.gf('django.db.models.fields.TextField')(null=True)),\n ))\n db.send_create_signal(u'proposals', ['Category'])\n\n # Adding model 'ProposalSet'\n 
db.create_table(u'proposals_proposalset', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),\n ('space', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['spaces.Space'], null=True, blank=True)),\n ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),\n ('debate', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['debate.Debate'], null=True, blank=True)),\n ))\n db.send_create_signal(u'proposals', ['ProposalSet'])\n\n # Adding model 'Proposal'\n db.create_table(u'proposals_proposal', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),\n ('object_pk', self.gf('django.db.models.fields.TextField')(null=True)),\n ('code', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),\n ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),\n ('proposalset', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='proposal_in', null=True, to=orm['proposals.ProposalSet'])),\n ('description', self.gf('django.db.models.fields.TextField')(max_length=300)),\n ('space', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['spaces.Space'], null=True, blank=True)),\n ('author', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='proposal_authors', null=True, to=orm['auth.User'])),\n ('tags', self.gf('apps.thirdparty.tagging.fields.TagField')(max_length=255, blank=True)),\n ('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=17, decimal_places=15, blank=True)),\n ('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=17, decimal_places=15, blank=True)),\n ('closed', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True)),\n ('closed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='proposal_closed_by', null=True, to=orm['auth.User'])),\n ('close_reason', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)),\n ('merged', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True)),\n ('anon_allowed', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True)),\n ('refurbished', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True)),\n ('budget', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),\n ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('mod_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ))\n db.send_create_signal(u'proposals', ['Proposal'])\n\n # Adding M2M table for field merged_proposals on 'Proposal'\n m2m_table_name = db.shorten_name(u'proposals_proposal_merged_proposals')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('from_proposal', models.ForeignKey(orm[u'proposals.proposal'], null=False)),\n ('to_proposal', models.ForeignKey(orm[u'proposals.proposal'], null=False))\n ))\n 
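        # NOTE (editorial, illustrative only — not part of the original migration):
        # the join table created above mirrors what Django would generate on its own
        # for the self-referential ManyToManyField declared on Proposal in
        # proposals/models.py, i.e. roughly:
        #
        #     merged_proposals = models.ManyToManyField('self', blank=True, null=True)
        #
        # South replays schema changes against its frozen ORM without importing the
        # live model classes, which is why the through table and the composite unique
        # constraint added just below must be spelled out explicitly here.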
db.create_unique(m2m_table_name, ['from_proposal_id', 'to_proposal_id'])\n\n # Adding M2M table for field support_votes on 'Proposal'\n m2m_table_name = db.shorten_name(u'proposals_proposal_support_votes')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('proposal', models.ForeignKey(orm[u'proposals.proposal'], null=False)),\n ('user', models.ForeignKey(orm[u'auth.user'], null=False))\n ))\n db.create_unique(m2m_table_name, ['proposal_id', 'user_id'])\n\n # Adding M2M table for field votes on 'Proposal'\n m2m_table_name = db.shorten_name(u'proposals_proposal_votes')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('proposal', models.ForeignKey(orm[u'proposals.proposal'], null=False)),\n ('user', models.ForeignKey(orm[u'auth.user'], null=False))\n ))\n db.create_unique(m2m_table_name, ['proposal_id', 'user_id'])\n\n # Adding model 'ProposalField'\n db.create_table(u'proposals_proposalfield', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('proposalset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proposals.ProposalSet'])),\n ('field_name', self.gf('django.db.models.fields.CharField')(max_length=100)),\n ))\n db.send_create_signal(u'proposals', ['ProposalField'])\n\n\n def backwards(self, orm):\n # Deleting model 'Category'\n db.delete_table(u'proposals_category')\n\n # Deleting model 'ProposalSet'\n db.delete_table(u'proposals_proposalset')\n\n # Deleting model 'Proposal'\n db.delete_table(u'proposals_proposal')\n\n # Removing M2M table for field merged_proposals on 'Proposal'\n db.delete_table(db.shorten_name(u'proposals_proposal_merged_proposals'))\n\n # Removing M2M table for field support_votes on 'Proposal'\n db.delete_table(db.shorten_name(u'proposals_proposal_support_votes'))\n\n # Removing M2M table for field votes on 'Proposal'\n db.delete_table(db.shorten_name(u'proposals_proposal_votes'))\n\n # Deleting model 'ProposalField'\n db.delete_table(u'proposals_proposalfield')\n\n\n models = {\n u'auth.group': {\n 'Meta': {'object_name': 'Group'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n u'auth.permission': {\n 'Meta': {'ordering': \"(u'content_type__app_label', u'content_type__model', u'codename')\", 'unique_together': \"((u'content_type', u'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 
'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('<PASSWORD>', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n u'debate.debate': {\n 'Meta': {'object_name': 'Debate'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'end_date': ('django.db.models.fields.DateField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'start_date': ('django.db.models.fields.DateField', [], {}),\n 'theme': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})\n },\n u'proposals.category': {\n 'Meta': {'object_name': 'Category'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\", 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_pk': ('django.db.models.fields.TextField', [], {'null': 'True'})\n },\n u'proposals.proposal': {\n 'Meta': {'object_name': 'Proposal'},\n 'anon_allowed': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_authors'\", 'null': 'True', 'to': u\"orm['auth.User']\"}),\n 'budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'closed': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'closed_by': 
('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_closed_by'\", 'null': 'True', 'to': u\"orm['auth.User']\"}),\n 'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\", 'null': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {'max_length': '300'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '17', 'decimal_places': '15', 'blank': 'True'}),\n 'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '17', 'decimal_places': '15', 'blank': 'True'}),\n 'merged': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'merged_proposals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'merged_proposals_rel_+'\", 'null': 'True', 'to': u\"orm['proposals.Proposal']\"}),\n 'mod_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'object_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),\n 'proposalset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'proposal_in'\", 'null': 'True', 'to': u\"orm['proposals.ProposalSet']\"}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'refurbished': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 'True'}),\n 'support_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'support_votes'\", 'null': 'True', 'symmetrical': 'False', 'to': u\"orm['auth.User']\"}),\n 'tags': ('apps.thirdparty.tagging.fields.TagField', [], {'max_length': '255', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),\n 'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'voting_votes'\", 'null': 'True', 'symmetrical': 'False', 'to': u\"orm['auth.User']\"})\n },\n u'proposals.proposalfield': {\n 'Meta': {'object_name': 'ProposalField'},\n 'field_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'proposalset': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['proposals.ProposalSet']\"})\n },\n u'proposals.proposalset': {\n 'Meta': {'object_name': 'ProposalSet'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'debate': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['debate.Debate']\", 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'space': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['spaces.Space']\", 'null': 'True', 'blank': 
'True'})\n },\n u'spaces.space': {\n 'Meta': {'ordering': \"['name']\", 'object_name': 'Space'},\n 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'banner': ('core.spaces.fields.StdImageField', [], {'max_length': '100'}),\n 'description': ('django.db.models.fields.TextField', [], {'default': \"u'Write here your description.'\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'logo': ('core.spaces.fields.StdImageField', [], {'max_length': '100'}),\n 'mod_cal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_debate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_docs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_proposals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'mod_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),\n 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})\n }\n }\n\n complete_apps = ['proposals']", "id": "9936601", "language": "Python", "matching_score": 7.2336297035217285, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/migrations/0001_initial.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProposal data models are the ones to store the data inside the DB.\n\"\"\"\n\nimport datetime\nfrom django.core import urlresolvers\n\nfrom django import forms\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\n\nfrom apps.thirdparty.tagging.fields import TagField\nfrom apps.thirdparty.tagging.models import Tag\nfrom core.spaces.models import Space\nfrom apps.ecidadania.debate.models import Debate\n\nCLOSE_REASONS = (\n (1, _('Economically not viable')),\n (2, _('Legally not viable')),\n (3, _('Technically not viable')),\n (4, _('Offtopic'))\n)\n\nOPTIONAL_FIELDS = (\n ('tags', _('Tags')),\n ('latitude', _('Latitude')),\n ('longitude', _('Longitude'))\n)\n\n\nclass BaseProposalAbstractModel(models.Model):\n\n \"\"\"\n Integrated generic relation into the proposal module, which will allow\n the proposal module to be related to any other module in e-cidadania.\n\n .. 
versionadded:: 0.1.5b\n\n :automatically filled fields: contype_type, object_pk\n\n \"\"\"\n\n content_type = models.ForeignKey(ContentType, null=True, blank=True)\n object_pk = models.TextField(_('object ID'), null=True)\n content_object = generic.GenericForeignKey(ct_field=\"content_type\", fk_field=\"object_pk\")\n\n class Meta:\n abstract = True\n\n\nclass Category(BaseProposalAbstractModel):\n\n \"\"\"\n Dummy class for proposal categories. Inherits directly from\n :class:`BaseClass` without adding any fields.\n \"\"\"\n pass\n\n\nclass ProposalSet(models.Model):\n\n \"\"\"\n ProposalSet date model. This will contain a group of proposal\n which will be created after the debate using the debate note after it is\n finished.\n\n .. addedversion:: 0.1.5b\n\n :automatically filled fields: space, author, pub_date, debate\n :user filled fields: Name\n\n \"\"\"\n\n name = models.CharField(_('Name'), max_length=200, unique=True,\n help_text=_('Max: 200 characters'))\n # ptype = models.CharField(_('Ponderation'), choices=PONDERATIONS,\n # max_length=20, help_text=_('Ponderation types:<br><strong>Users: \\\n # </strong>Users give support votes to the proposal, and that votes \\\n # are added to the final voting.<br><strong>Fixed:</strong>Fixed \\\n # ponderations are stablished by the process managers. It\\'s a \\\n # porcentual puntuation. That means that percetange is calculated \\\n # after the voting and added to the final voting.<br><strong>None: \\\n # </strong> No ponderation is applied to the final voting.'))\n space = models.ForeignKey(Space, blank=True, null=True)\n pub_date = models.DateTimeField(auto_now_add=True)\n author = models.ForeignKey(User, blank=True, null=True)\n debate = models.ForeignKey(Debate, blank=True, null=True,\n help_text=_('Select the debate associated with this proposal set'))\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n verbose_name = _('Proposal set')\n verbose_name_plural = _('Proposal sets')\n get_latest_by = 'pub_date'\n permissions = (\n ('view_proposalset', 'Can view the ProposalSet'),\n ('admin_proposalset', 'Can administrate the ProposalSet'),\n ('mod_proposalset', 'Can moderate the ProposalSet'),\n )\n\n @models.permalink\n def get_absolute_url(self):\n return ('view-proposalset', (), {\n 'space_url': self.space.url,\n 'set_id': self.id\n })\n\n\nclass Proposal(BaseProposalAbstractModel):\n\n \"\"\"\n Proposal data model. This will store the user proposal in a similar\n way that Stackoverflow does. 
Take in mind that this data model is very\n exhaustive because it covers the administrator and the user.\n\n :automatically filled fields: Space, Author, Pub_date, mod_date.\n :user filled fields: Title, Description, Tags, Latitude, Longitude.\n :admin fields (manual): Code, Closed, Close_reason, Anon_allowed,\n Refurbished, Budget.\n :admin fields (auto): Closed_by\n :extra permissions: proposal_view\n\n :const:`CLOSE_REASONS` for :class:Proposal data model is hardcoded with four values, which will fit most of the requirements.\n \"\"\"\n code = models.CharField(_('Code'), max_length=50, blank=True,\n null=True)\n title = models.CharField(_('Title'), max_length=100, unique=True,\n help_text=_('Max: 200 characters'))\n proposalset = models.ForeignKey(ProposalSet, related_name='proposal_in',\n blank=True, null=True, help_text=_('Proposal set in which the \\\n proposal resides'))\n description = models.TextField(_('Description'), max_length=300)\n space = models.ForeignKey(Space, blank=True, null=True)\n author = models.ForeignKey(User, related_name='proposal_authors',\n blank=True, null=True, help_text=_('Change the user that will \\\n figure as the author'))\n tags = TagField(help_text=_('Insert here relevant words related with \\\n the proposal'))\n latitude = models.DecimalField(_('Latitude'), blank=True, null=True,\n max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))\n longitude = models.DecimalField(_('Longitude'), blank=True, null=True,\n max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))\n closed = models.NullBooleanField(default=False, blank=True)\n closed_by = models.ForeignKey(User, blank=True, null=True,\n related_name='proposal_closed_by')\n close_reason = models.SmallIntegerField(choices=CLOSE_REASONS, null=True,\n blank=True)\n merged = models.NullBooleanField(default=False, blank=True, null=True)\n merged_proposals = models.ManyToManyField('self', blank=True, null=True,\n help_text=_(\"Select proposals from the list\"))\n\n anon_allowed = models.NullBooleanField(default=False, blank=True)\n support_votes = models.ManyToManyField(User, null=True, blank=True,\n verbose_name=_('Support votes from'), related_name='support_votes')\n votes = models.ManyToManyField(User, verbose_name=_('Votes from'),\n null=True, blank=True, related_name='voting_votes')\n refurbished = models.NullBooleanField(default=False, blank=True)\n budget = models.IntegerField(blank=True, null=True)\n\n pub_date = models.DateTimeField(auto_now_add=True)\n mod_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\n def set_tags(self, tags):\n Tag.objects.update_tags(self, tags)\n\n def get_tags(self, tags):\n return Tag.objects.get_for_object(self)\n\n class Meta:\n verbose_name = _('Proposal')\n verbose_name_plural = _('Proposals')\n get_latest_by = 'pub_date'\n permissions = (\n ('view_proposal', 'Can view the Proposal'),\n ('admin_proposal', 'Can administrate the Proposal'),\n ('mod_proposal', 'Can moderate the Proposal'),\n )\n\n @models.permalink\n def get_absolute_url(self):\n return ('view-proposal', (), {\n 'space_url': self.space.url,\n 'prop_id': str(self.id)})\n\n\nclass ProposalField(models.Model):\n\n \"\"\"\n Proposal Fields data model. 
This will store details of addition form\n fields which can be optionally added the proposal form which is residing\n in a particular proposal set.\n\n user filled fields: proposalset, field_name\n const:`OPTIONAL_FIELD` for class:ProposalField is hardcoded with three\n field values, more fields can be added as need.\n\n \"\"\"\n\n proposalset = models.ForeignKey(ProposalSet, help_text=_('Customizing \\\n proposal form for a proposal set'), unique=False)\n field_name = models.CharField(max_length=100, choices=OPTIONAL_FIELDS, help_text=_('Additional field that needed to added to the proposal \\\n form'))\n\n def __unicode__(self):\n return self.field_name\n\n class Meta:\n verbose_name = _('ProposalField')\n verbose_name_plural = _('ProposalFields')\n", "id": "6005930", "language": "Python", "matching_score": 6.697860240936279, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\n\nfrom core.spaces.file_validation import ContentTypeRestrictedFileField\nfrom fields import StdImageField\nfrom allowed_types import ALLOWED_CONTENT_TYPES\n\n\nclass Space(models.Model):\n\n \"\"\"\n Spaces model. This model stores a \"space\" or \"place\" also known as a\n participative process in reality. Every place has a minimum set of\n settings for customization.\n\n There are three main permission roles in every space: administrator\n (admins), moderators (mods) and regular users (users).\n \"\"\"\n name = models.CharField(_('Name'), max_length=250, unique=True,\n help_text=_('Max: 250 characters'))\n url = models.CharField(_('URL'), max_length=100, unique=True,\n validators=[RegexValidator(regex='^[a-z0-9_]+$',\n message='Invalid characters in the space URL.')],\n help_text=_('Valid characters are lowercase, digits and \\\n underscore. 
This will be the accesible URL'))\n description = models.TextField(_('Description'),\n default=_('Write here your description.'))\n pub_date = models.DateTimeField(_('Date of creation'), auto_now_add=True)\n author = models.ForeignKey(User, blank=True, null=True,\n verbose_name=_('Space creator'), help_text=_('Select a user that \\\n will be marked as creator of the space'))\n logo = StdImageField(upload_to='spaces/logos', size=(100, 75, False),\n help_text = _('Valid extensions are jpg, jpeg, png and gif'))\n banner = StdImageField(upload_to='spaces/banners', size=(500, 75, False),\n help_text = _('Valid extensions are jpg, jpeg, png and gif'))\n public = models.BooleanField(_('Public space'), help_text=_(\"This will \\\n make the space visible to everyone, but registration will be \\\n necessary to participate.\"))\n\n# Modules\n mod_debate = models.BooleanField(_('Debate'))\n mod_proposals = models.BooleanField(_('Proposals'))\n mod_news = models.BooleanField(_('News'))\n mod_cal = models.BooleanField(_('Calendar'))\n mod_docs = models.BooleanField(_('Documents'))\n mod_voting = models.BooleanField(_('Voting'))\n\n class Meta:\n ordering = ['name']\n verbose_name = _('Space')\n verbose_name_plural = _('Spaces')\n get_latest_by = 'pub_date'\n permissions = (\n ('view_space', 'Can view this space.'),\n ('admin_space', 'Can administrate this space.'),\n ('mod_space', 'Can moderate this space.')\n )\n\n def __unicode__(self):\n return self.name\n\n @models.permalink\n def get_absolute_url(self):\n return ('space-index', (), {\n 'space_url': self.url})\n\n\nclass Entity(models.Model):\n\n \"\"\"\n This model stores the name of the entities responsible for the creation\n of the space or supporting it.\n \"\"\"\n name = models.CharField(_('Name'), max_length=100, unique=True)\n website = models.CharField(_('Website'), max_length=100, null=True,\n blank=True)\n logo = models.ImageField(upload_to='spaces/logos', verbose_name=_('Logo'),\n blank=True, null=True)\n space = models.ForeignKey(Space, blank=True, null=True)\n\n class Meta:\n ordering = ['name']\n verbose_name = _('Entity')\n verbose_name_plural = _('Entities')\n\n def __unicode__(self):\n return self.name\n\n\nclass Document(models.Model):\n\n \"\"\"\n This models stores documents for the space, like a document repository,\n There is no restriction in what a user can upload to the space.\n\n :methods: get_file_ext, get_file_size\n \"\"\"\n title = models.CharField(_('Document title'), max_length=100,\n help_text=_('Max: 100 characters'))\n space = models.ForeignKey(Space, blank=True, null=True,\n help_text=_('Change the space to whom belongs this document'))\n docfile = ContentTypeRestrictedFileField(_('File'),\n upload_to='spaces/documents/%Y/%m/%d',\n content_types=ALLOWED_CONTENT_TYPES,\n max_upload_size=26214400,\n help_text=_('Permitted file types: DOC, DOCX, PPT, ODT, ODF, ODP, \\\n PDF, RST, TXT.'))\n pub_date = models.DateTimeField(auto_now_add=True)\n author = models.ForeignKey(User, verbose_name=_('Author'), blank=True,\n null=True, help_text=_('Change the user that will figure as the \\\n author'))\n\n def get_file_ext(self):\n filename = self.docfile.name\n extension = filename.split('.')\n return extension[1].upper()\n\n def get_file_size(self):\n if self.docfile.size < 1023:\n return str(self.docfile.size) + \" Bytes\"\n elif self.docfile.size >= 1024 and self.docfile.size <= 1048575:\n return str(round(self.docfile.size / 1024.0, 2)) + \" KB\"\n elif self.docfile.size >= 1048576:\n return str(round(self.docfile.size / 
1024000.0, 2)) + \" MB\"\n\n class Meta:\n ordering = ['pub_date']\n verbose_name = _('Document')\n verbose_name_plural = _('Documents')\n get_latest_by = 'pub_date'\n\n # There is no 'view-document' view, so I'll leave the get_absolute_url\n # method without permalink. Remember that the document files are accesed\n # through the url() method in templates.\n def get_absolute_url(self):\n return '/spaces/%s/docs/%s' % (self.space.url, self.id)\n\n\nclass Event(models.Model):\n\n \"\"\"\n Meeting data model. Every space (process) has N meetings. This will\n keep record of the assistants, meeting name, etc.\n \"\"\"\n title = models.CharField(_('Event name'), max_length=250,\n help_text=\"Max: 250 characters\")\n space = models.ForeignKey(Space, blank=True, null=True)\n user = models.ManyToManyField(User, verbose_name=_('Users'),\n help_text=_('List of the users that will assist or assisted to the \\\n event.'))\n pub_date = models.DateTimeField(auto_now_add=True)\n event_author = models.ForeignKey(User, verbose_name=_('Created by'),\n blank=True, null=True, related_name='meeting_author',\n help_text=_('Select the user that will be designated as author.'))\n event_date = models.DateTimeField(verbose_name=_('Event date'),\n help_text=_('Select the date where the event is celebrated.'))\n description = models.TextField(_('Description'), blank=True, null=True)\n location = models.TextField(_('Location'), blank=True, null=True)\n latitude = models.DecimalField(_('Latitude'), blank=True, null=True,\n max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))\n longitude = models.DecimalField(_('Longitude'), blank=True, null=True,\n max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))\n\n def is_due(self):\n if self.event_date < datetime.now():\n return True\n else:\n return False\n\n class Meta:\n ordering = ['event_date']\n verbose_name = _('Event')\n verbose_name_plural = _('Events')\n get_latest_by = 'event_date'\n permissions = (\n ('view_event', 'Can view this event'),\n ('admin_event', 'Can administrate this event'),\n ('mod_event', 'Can moderate this event'),\n )\n\n def __unicode__(self):\n return self.title\n\n @models.permalink\n def get_absolute_url(self):\n return ('view-event', (), {\n 'space_url': self.space.url,\n 'event_id': str(self.id)})\n\n\nclass Intent(models.Model):\n\n \"\"\"\n Intent data model. Intent stores the reference of a user-token when a user\n asks entering in a restricted space.\n\n .. 
versionadded: 0.1.5\n \"\"\"\n user = models.ForeignKey(User)\n space = models.ForeignKey(Space)\n token = models.CharField(max_length=32)\n requested_on = models.DateTimeField(auto_now_add=True)\n\n def get_approve_url(self):\n site = Site.objects.all()[0]\n return \"http://%s%sintent/approve/%s\" % (site.domain, self.space.get_absolute_url(), self.token)\n", "id": "8915868", "language": "Python", "matching_score": 5.04597282409668, "max_stars_count": 40, "path": "src/core/spaces/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains all the data models for the debate module.\n\"\"\"\nimport datetime\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.exceptions import ValidationError\n\nfrom apps.thirdparty.tagging.fields import TagField\nfrom apps.thirdparty.tagging.models import Tag\nfrom core.spaces.models import Space\n\n\nclass Debate(models.Model):\n\n \"\"\"\n Debate object. In every space there can be unlimited debates, each one of\n them holds all the related notes. Debates are filtered by space. Start/End\n dates are for letting users use the debate or not.\n\n .. versionadded:: 0.1b\n \"\"\"\n title = models.CharField(_('Title'), max_length=200, unique=True)\n description = models.TextField(_('Description'), blank=True, null=True)\n theme = models.CharField(_('Theme'), blank=True, null=True, max_length=100)\n\n space = models.ForeignKey(Space, blank=True, null=True)\n date = models.DateTimeField(_('Date created'), auto_now_add=True)\n date_mod = models.DateTimeField(_('Last update'), auto_now=True)\n author = models.ForeignKey(User, blank=True, null=True)\n start_date = models.DateField(_('Start date'))\n end_date = models.DateField(_('End date'))\n private = models.BooleanField(_('Private'), help_text=_('Set the debate as private so only the accepted users can participate in it.'))\n\n class Meta:\n permissions = (\n ('view_debate', 'Can view the debate'),\n ('admin_debate', 'Can administrate the debate'),\n ('mod_debate', 'Can moderate the debate'),\n )\n\n def __unicode__(self):\n return self.title\n\n def is_active(self):\n if datetime.date.today() >= self.end_date or datetime.date.today() <= self.start_date:\n return False\n else:\n return True\n\n @models.permalink\n def get_absolute_url(self):\n return ('view-debate', (), {\n 'space_url': self.space.url,\n 'debate_id': str(self.id)})\n\n def clean(self):\n if self.start_date > self.end_date:\n raise ValidationError('The start date can not be after the end date.')\n\n\nclass Column(models.Model):\n \"\"\"\n Debate column object. The debate table is done mixing columns and rows. The column\n object is linked to the debate, but with no preferable order.\n\n .. 
versionadded:: 0.1b\n \"\"\"\n criteria = models.CharField(_('Criteria'), max_length=100, blank=True, null=True)\n debate = models.ForeignKey(Debate, blank=True, null=True)\n\n def __unicode__(self):\n return self.criteria\n\n\nclass Row(models.Model):\n \"\"\"\n Row object for the debate system. The row object works exactly like the\n column. It's associated to the debate in no preferred order.\n\n .. versionadded:: 0.1b\n \"\"\"\n criteria = models.CharField(_('Criteria'), max_length=100, blank=True, null=True)\n debate = models.ForeignKey(Debate, blank=True, null=True)\n\n def __unicode__(self):\n return self.criteria\n\n\nclass Note(models.Model):\n\n \"\"\"\n The most important object in every debate, the message. It has a coordinates\n value to determine the position of the note in its debate.\n\n .. versionadded:: 0.1b\n \"\"\"\n column = models.ForeignKey(Column, null=True, blank=True)\n row = models.ForeignKey(Row, null=True, blank=True)\n debate = models.ForeignKey(Debate, null=True, blank=True)\n title = models.CharField(_('Title'), max_length=60, blank=True, null=True)\n message = models.TextField(_('Message'), max_length=100, null=True, blank=True)\n\n date = models.DateTimeField(_('Date created'), auto_now_add=True)\n author = models.ForeignKey(User, null=True, blank=True, related_name=\"note_author\")\n last_mod_author = models.ForeignKey(User, null=True, blank=True, related_name=\"update_author\")\n last_mod = models.DateTimeField(_('Last modification time'), auto_now=True)\n\n def __unicode__(self):\n return self.message\n\n class Meta:\n permissions = (\n ('move', 'Can move note'),\n )\n", "id": "10624316", "language": "Python", "matching_score": 5.512244701385498, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\n\nfrom apps.thirdparty.tagging.fields import TagField\nfrom apps.thirdparty.tagging.models import Tag\nfrom core.spaces.models import Space\nfrom apps.ecidadania.proposals.models import *\n\n\nPONDERATIONS = (\n ('users', _('Users')),\n ('fixed', _('Fixed')),\n ('none', _('No ponderation'))\n)\n\n\nclass Poll(models.Model):\n\n \"\"\"\n Data model for Polls. It stores the question and some data like the space\n and dates. The most important field is \"participants\". It allows us to\n limit the times a user can vote in a Poll, using it with the vote field in\n Choices model.\n\n .. 
versionadded:: 0.1.5\n \"\"\"\n\n question = models.CharField(_('Question'), max_length=200,\n help_text=_('Max: 200 characters'))\n pub_date = models.DateTimeField(_('Date'), auto_now_add=True)\n poll_lastup = models.DateTimeField(_('Last update'), auto_now=True)\n author = models.ForeignKey(User, verbose_name=_('Author'), blank=True,\n null=True, help_text=_('Change the user that will figure as the \\\n author'), related_name='poll-author')\n participants = models.ManyToManyField(User, blank=True, null=True)\n space = models.ForeignKey(Space, verbose_name=_('Publish in'), blank=True,\n null=True, help_text=_('If you want to post to the index leave this \\\n blank'))\n poll_tags = TagField(help_text=_('Insert here relevant words related with \\\n the poll'))\n start_date = models.DateField(_('Start date'))\n end_date = models.DateField(_('End date'))\n\n class Meta:\n permissions = (\n ('view', 'Can view the poll'),\n )\n\n def __unicode__(self):\n return self.question\n\n def set_tags(self, tags):\n Tag.objects.update_tags(self, tags)\n\n def get_tags(self, tags):\n return Tag.objects.get_for_object(self)\n\n @models.permalink\n def get_absolute_url(self):\n if self.space is not None:\n return ('view-polls', (), {\n 'space_url': self.space.url,\n 'poll_id': str(self.id)})\n else:\n return ('view-polls', (), {\n 'poll_id': str(self.id)})\n\n def clean(self):\n if self.start_date > self.end_date:\n raise ValidationError('The start date can not be after the end date.')\n\n\nclass Choice(models.Model):\n poll = models.ForeignKey(Poll)\n choice_text = models.CharField(_('Choice'), max_length=200, blank=True, null=True, help_text=_('Enter choice to be voted upon'))\n # votes = models.IntegerField(blank=True, null=True, default='0')\n votes = models.ManyToManyField(User, blank=True, null=True)\n\n @models.permalink\n def get_absolute_url(self):\n if self.space is not None:\n return ('view-polls', (), {\n 'space_url': self.space.url,\n 'poll_id': str(self.id)})\n else:\n return ('view-polls', (), {\n 'poll_id': str(self.id)})\n\n\nclass Voting(models.Model):\n title = models.CharField(_('Title'), max_length=200, unique=True)\n description = models.TextField(_('Description'), blank=True, null=True)\n\n space = models.ForeignKey(Space, blank=True, null=True)\n date = models.DateTimeField(_('Date created'), auto_now_add=True)\n date_mod = models.DateTimeField(_('Last update'), auto_now=True)\n author = models.ForeignKey(User, blank=True, null=True)\n start_date = models.DateField(_('Start date'), blank=True, null=True)\n end_date = models.DateField(_('End date'), blank=True, null=True)\n ponderation = models.CharField(_('Ponderation'), max_length=3, null=True,\n blank=True, choices=PONDERATIONS)\n proposalsets = models.ManyToManyField(ProposalSet, blank=True, null=True)\n\n proposals = models.ManyToManyField(Proposal, blank=True, null=True, limit_choices_to={'proposalset__isnull': True})\n max_votes = models.IntegerField(_('Maximum votes per person'), blank=True,\n null=True)\n\n class Meta:\n permissions = (\n ('view', 'Can view the voting'),\n )\n\n @models.permalink\n def get_absolute_url(self):\n if self.space is not None:\n return ('view-votings', (), {\n 'space_url': self.space.url,\n 'voting_id': str(self.id)})\n else:\n return ('view-votings', (), {\n 'voting_id': str(self.id)})\n\n def clean(self):\n if self.start_date > self.end_date:\n raise ValidationError('The start date can not be after the end date.')\n\n\nclass ConfirmVote(models.Model):\n\n \"\"\"\n Intent data model. 
Intent stores the reference of a user-token when a user\n asks entering in a restricted space.\n\n .. versionadded: 0.1.5\n \"\"\"\n user = models.ForeignKey(User, blank=True, null=True)\n proposal = models.ForeignKey(Proposal, blank=True, null=True)\n token = models.CharField(max_length=32, blank=True, null=True)\n requested_on = models.DateTimeField(auto_now_add=True)\n\n def get_approve_url(self):\n site = Site.objects.all()[0]\n return \"http://%s%svote/approve/%s\" % (site.domain, self.proposal.get_absolute_url(), self.token)\n", "id": "6089083", "language": "Python", "matching_score": 6.523519515991211, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\n\nfrom apps.thirdparty.tagging.fields import TagField\nfrom apps.thirdparty.tagging.models import Tag\nfrom core.spaces.models import Space\n\n\nclass Post(models.Model):\n\n \"\"\"\n Model of a news post.\n \"\"\"\n title = models.CharField(_('Title'), max_length=200,\n help_text=_('Max: 200 characters'))\n description = models.TextField(_('Description'))\n pub_date = models.DateTimeField(_('Date'), auto_now_add=True)\n post_lastup = models.DateTimeField(_('Last update'), auto_now=True)\n author = models.ForeignKey(User, verbose_name=_('Author'), blank=True,\n null=True, help_text=_('Change the user that will figure as the \\\n author'))\n pub_index = models.BooleanField(_('Publish in index'),\n help_text=_('This will publish the post in the main site page'))\n space = models.ForeignKey(Space, verbose_name=_('Publish in'),\n blank=True, null=True,\n help_text=_('If you want to post to the index leave this blank'))\n post_tags = TagField(help_text=_('Insert here relevant words related with the post'))\n views = models.IntegerField(_('Views'), blank=True, null=True)\n\n class Meta:\n ordering = ['title']\n verbose_name = _('Post')\n verbose_name_plural = _('Posts')\n get_latest_by = 'pub_date'\n\n def __unicode__(self):\n return self.title\n\n def comment_count(self):\n ct = ContentType.objects.get_for_model(Post)\n obj_pk = self.id\n return Comment.objects.filter(content_type=ct, object_pk=obj_pk).count()\n\n def set_tags(self, tags):\n Tag.objects.update_tags(self, tags)\n\n def get_tags(self, tags):\n return Tag.objects.get_for_object(self)\n\n @models.permalink\n def get_absolute_url(self):\n if self.space is not None:\n return ('view-post', (), {\n 'space_url': self.space.url,\n 'post_id': str(self.id)})\n else:\n return ('view-site-post', (), {\n 'post_id': str(self.id)})\n", "id": "7562266", "language": "Python", "matching_score": 6.564101696014404, "max_stars_count": 40, "path": "src/apps/ecidadania/news/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 
2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, Group\n\n\nclass StaticPage(models.Model):\n\n \"\"\"\n Create basic static pages.\n \"\"\"\n name = models.CharField(_('Page Title'), max_length=100)\n uri = models.CharField(_('URL'), max_length=50)\n content = models.TextField(_('Content'))\n show_footer = models.BooleanField(_('Show in footer'))\n author = models.ForeignKey(User, blank=True, null=True, verbose_name=_('Author'))\n pub_date = models.DateTimeField(auto_now_add=True)\n last_update = models.DateTimeField(_('Last update'), auto_now=True)\n order = models.IntegerField(_('Order'))\n\n class Meta:\n ordering = ['name']\n verbose_name_plural = _('Static Pages')\n permissions = (\n ('view', 'Can view the page'),\n )\n\n def __unicode__(self):\n return self.name\n\n @models.permalink\n def get_absolute_url(self):\n return ('view-page', (), {\n 'slug': self.uri})\n", "id": "7129077", "language": "Python", "matching_score": 3.033956289291382, "max_stars_count": 40, "path": "src/apps/ecidadania/staticpages/models.py" }, { "content": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom prismriver.dashboard.settings import HOMESCREEN_PLUGIN_CHOICES\nfrom django.contrib.auth.models import User\n\n\nclass HomeScreen(models.Model):\n user = models.ForeignKey(User, unique=True)\n\n\nclass Plugin(models.Model):\n home_screen = models.ForeignKey(HomeScreen)\n position = models.IntegerField(verbose_name=_(\"Plugin position\"))\n class_name = models.CharField(max_length=128,\n choices=HOMESCREEN_PLUGIN_CHOICES,\n verbose_name=_(\"Plugin Classname\"))\n\n class Meta:\n verbose_name = _(\"Plugin\")\n verbose_name_plural = _(\"Plugins\")\n", "id": "10062903", "language": "Python", "matching_score": 3.019768714904785, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/models.py" }, { "content": "from django.conf import settings\n\n# The app menu (this is used by the App menu site plugin:\n# Default: Joins Prismriver auth and sites in a menu named Users and settings\nif hasattr(settings, 'APP_MENU'):\n APP_MENU = settings.APP_MENU\nelse:\n APP_MENU = [\n {\"name\": \"Users and Settings\",\n \"items\": [\"auth\", \"prismriver\", \"sites\"],\n \"icon\": \"users.png\",\n \"big_icon\": \"users_big.png\",\n \"description\": \"Manage everything about the users here\"},\n ]\n\n# Plugin choices\nif hasattr(settings, 'HOMESCREEN_PLUGIN_CHOICES'):\n HOMESCREEN_PLUGIN_CHOICES = settings.HOMESCREEN_PLUGIN_CHOICES\nelse:\n HOMESCREEN_PLUGIN_CHOICES = (('prismriver.dashboard.plugins.dashplugins.AppList', 'App list'),\n )\n", "id": "9573897", "language": "Python", "matching_score": 0.9132272601127625, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/settings.py" }, { "content": "from django.contrib import admin\nfrom prismriver.dashboard.models import HomeScreen, 
Plugin\n\n\nclass PluginInline(admin.TabularInline):\n extra = 0\n model = Plugin\n\n\nclass HomeScreenAdmin(admin.ModelAdmin):\n list_display = ('user',)\n inlines = (PluginInline,)\n\n\nadmin.site.register(HomeScreen, HomeScreenAdmin)\n", "id": "4301927", "language": "Python", "matching_score": 2.392831325531006, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.contrib import admin\n\nfrom guardian.admin import GuardedModelAdmin\n\nfrom apps.ecidadania.debate.models import Debate, Note, Column, Row\n\n\nclass ColumnInline(admin.TabularInline):\n\n \"\"\"\n This TabularInline form allows the user to add the debate Columns in the same\n form as the debate.\n \"\"\"\n model = Column\n extra = 2\n\n\nclass RowInline(admin.TabularInline):\n\n \"\"\"\n This TabularInline form allows the user to add the debate Rows in the same\n form as the debate.\n \"\"\"\n model = Row\n extra = 2\n\n\nclass DebateAdmin(GuardedModelAdmin):\n\n \"\"\"\n Administration for all the debates.\n \"\"\"\n list_display = ('title', 'date')\n inlines = [ColumnInline, RowInline]\n\n\nclass NoteAdmin(GuardedModelAdmin):\n\n \"\"\"\n Administration for all the notes in every debate.\n \"\"\"\n list_display = ('message', 'date', 'author')\n\nadmin.site.register(Debate, DebateAdmin)\nadmin.site.register(Note, NoteAdmin)\n", "id": "9736816", "language": "Python", "matching_score": 1.3398252725601196, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains all the URLs that e_cidadania will inherit when the user\naccess to '/spaces/'.\n\"\"\"\nfrom django.conf.urls import *\nfrom django.contrib.auth.decorators import permission_required\nfrom apps.ecidadania.debate.views import ListDebates, ViewDebate, DeleteDebate, edit_debate\nfrom apps.ecidadania.debate.url_names import *\n\nurlpatterns = patterns('apps.ecidadania.debate.views',\n\n url(r'^$', ListDebates.as_view(), name=DEBATE_LIST),\n\n url(r'^(?P<debate_id>\\d+)/', ViewDebate.as_view(), name=DEBATE_VIEW),\n\n url(r'^add/', 'add_new_debate', name=DEBATE_ADD),\n\n url(r'^update_position/', 'update_position', name=NOTE_UPDATE_POSITION),\n\n url(r'^update_note/', 
'update_note', name=NOTE_UPDATE),\n\n url(r'^create_note/', 'create_note', name=NOTE_ADD),\n\n url(r'^delete_note/', 'delete_note', name=NOTE_DELETE),\n\n # Editing debates is not allowed at this time\n url(r'^edit/(?P<debate_id>\\d+)/', 'edit_debate', name=DEBATE_EDIT),\n\n url(r'^delete/(?P<debate_id>\\d+)', DeleteDebate.as_view(), name=DEBATE_DELETE),\n\n)\n", "id": "1457199", "language": "Python", "matching_score": 1.8702033758163452, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.conf.urls import *\nfrom django.conf import settings\n\nfrom apps.ecidadania.news.views import DeletePost, ViewPost, AddPost, EditPost\nfrom apps.ecidadania.news.url_names import *\n\n\nurlpatterns = patterns('apps.ecidadania.news.views',\n\n url(r'^add/$', AddPost.as_view(), name=POST_ADD),\n\n url(r'^(?P<post_id>\\d+)/delete/$', DeletePost.as_view(),\n name=POST_DELETE),\n\n url(r'^(?P<post_id>\\d+)/edit/$', EditPost.as_view(), name=POST_EDIT),\n\n url(r'^(?P<post_id>\\d+)', ViewPost.as_view(), name=POST_VIEW),\n\n)\n", "id": "11437672", "language": "Python", "matching_score": 1.9637219905853271, "max_stars_count": 40, "path": "src/apps/ecidadania/news/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains all the URLs that e_cidadania will inherit when the user\naccess to '/spaces/'.\n\"\"\"\nfrom django.conf.urls import *\n\nfrom core.spaces.views.spaces import ViewSpaceIndex, ListSpaces, \\\n DeleteSpace\nfrom core.spaces.views.documents import ListDocs, DeleteDocument, \\\n AddDocument, EditDocument\nfrom core.spaces.views.events import ListEvents, DeleteEvent, ViewEvent, \\\n AddEvent, EditEvent\nfrom core.spaces.views.rss import SpaceFeed\nfrom core.spaces.views.intent import ValidateIntent\nfrom core.spaces.views.news import ListPosts, YearlyPosts, MonthlyPosts, \\\n RedirectArchive\nfrom core.spaces.url_names import *\n\n# NOTICE: Don't change the order of urlpatterns or it will probably break.\n\nurlpatterns = patterns('',\n\n # RSS Feed\n url(r'^(?P<space_url>\\w+)/rss/$', SpaceFeed(), name=SPACE_FEED),\n\n # News\n url(r'^(?P<space_url>\\w+)/news/',\n include('apps.ecidadania.news.urls')),\n\n # Proposals\n url(r'^(?P<space_url>\\w+)/proposal/',\n 
include('apps.ecidadania.proposals.urls')),\n\n # Calendar\n url(r'^(?P<space_url>\\w+)/calendar/',\n include('apps.ecidadania.cal.urls')),\n\n # Debates\n url(r'^(?P<space_url>\\w+)/debate/',\n include('apps.ecidadania.debate.urls')),\n\n # Votes\n url(r'^(?P<space_url>\\w+)/voting/',\n include('apps.ecidadania.voting.urls')),\n\n)\n\n# Document URLs\nurlpatterns += patterns('',\n\n url(r'^(?P<space_url>\\w+)/docs/add/$', AddDocument.as_view(),\n name=DOCUMENT_ADD),\n\n url(r'^(?P<space_url>\\w+)/docs/(?P<doc_id>\\d+)/edit/$',\n EditDocument.as_view(), name=DOCUMENT_EDIT),\n\n url(r'^(?P<space_url>\\w+)/docs/(?P<doc_id>\\d+)/delete/$',\n DeleteDocument.as_view(), name=DOCUMENT_DELETE),\n\n url(r'^(?P<space_url>\\w+)/docs/$', ListDocs.as_view(),\n name=DOCUMENT_LIST),\n\n)\n\n# Event URLs\nurlpatterns += patterns('',\n\n url(r'^(?P<space_url>\\w+)/event/add/$', AddEvent.as_view(),\n name=EVENT_ADD),\n\n url(r'^(?P<space_url>\\w+)/event/(?P<event_id>\\d+)/edit/$',\n EditEvent.as_view(), name=EVENT_EDIT),\n\n url(r'^(?P<space_url>\\w+)/event/(?P<event_id>\\d+)/delete/$',\n DeleteEvent.as_view(), name=EVENT_DELETE),\n\n url(r'^(?P<space_url>\\w+)/event/(?P<event_id>\\d+)/$',\n ViewEvent.as_view(), name=EVENT_VIEW),\n\n url(r'^(?P<space_url>\\w+)/event/$', ListEvents.as_view(),\n name=EVENT_LIST),\n\n)\n\n# Intent URLs\nurlpatterns += patterns('',\n\n url(r'^(?P<space_url>\\w+)/intent/$',\n 'core.spaces.views.intent.add_intent', name=INTENT_ADD),\n\n url(r'^(?P<space_url>\\w+)/intent/approve/(?P<token>\\w+)/$',\n ValidateIntent.as_view(), name=INTENT_VALIDATE),\n\n)\n\n# Spaces URLs\nurlpatterns += patterns('',\n\n url(r'^(?P<space_url>\\w+)/edit/',\n 'core.spaces.views.spaces.edit_space', name=SPACE_EDIT),\n\n url(r'^(?P<space_url>\\w+)/delete/', DeleteSpace.as_view(),\n name=SPACE_DELETE),\n\n url(r'^(?P<space_url>\\w+)/news/$', RedirectArchive.as_view(),\n name=SPACE_NEWS),\n\n url(r'^(?P<space_url>\\w+)/news/archive/$', ListPosts.as_view(),\n name=NEWS_ARCHIVE),\n\n url(r'^(?P<space_url>\\w+)/news/archive/(?P<year>\\d{4})/$',\n YearlyPosts.as_view(), name=NEWS_YEAR),\n\n url(r'^(?P<space_url>\\w+)/news/archive/(?P<year>\\d{4})/(?P<month>\\w+)/$',\n MonthlyPosts.as_view(), name=NEWS_MONTH),\n\n url(r'^add/$', 'core.spaces.views.spaces.create_space',\n name=SPACE_ADD),\n\n url(r'^$', ListSpaces.as_view(), name=SPACE_LIST),\n\n # url(_(r'^go/'), GoToSpace.as_view(), name=GOTO_SPACE),\n\n url(r'^(?P<space_url>\\w+)/roles/', 'core.spaces.views.spaces.edit_roles',\n name=EDIT_ROLES),\n\n url(r'^(?P<space_url>\\w+)/search_user/',\n 'core.spaces.views.spaces.search_user', name=SEARCH_USER),\n\n url(r'^(?P<space_url>\\w+)/$', ViewSpaceIndex.as_view(),\n name=SPACE_INDEX),\n\n)\n", "id": "7002282", "language": "Python", "matching_score": 6.795462131500244, "max_stars_count": 40, "path": "src/core/spaces/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule to store 
space related url names.\n\"\"\"\n\n# Spaces\n\nSPACE_ADD = 'create-space'\n\nSPACE_EDIT = 'edit-space'\n\nSPACE_DELETE = 'delete-space'\n\nSPACE_INDEX = 'space-index'\n\nSPACE_FEED = 'space-feed'\n\nSPACE_LIST = 'list-spaces'\n\nGOTO_SPACE = 'goto-space'\n\nEDIT_ROLES = 'edit-roles'\n\nSEARCH_USER = 'search-user'\n\n# News\n# Notes: SPACE_NEWS is held only for backwards compatibility, it should be\n# removed when every reverse is cleaned\n\nSPACE_NEWS = 'list-space-news'\n\nNEWS_ARCHIVE = 'post-archive'\n\nNEWS_MONTH = 'post-archive-month'\n\nNEWS_YEAR = 'post-archive-year'\n\n# Documents\n\nDOCUMENT_ADD = 'add-document'\n\nDOCUMENT_EDIT = 'edit-document'\n\nDOCUMENT_DELETE = 'delete-document'\n\nDOCUMENT_LIST = 'list-documents'\n\n# Events\n\nEVENT_ADD = 'add-event'\n\nEVENT_EDIT = 'edit-event'\n\nEVENT_DELETE = 'delete-event'\n\nEVENT_LIST = 'list-events'\n\nEVENT_VIEW = 'view-event'\n\n# Intents\n\nINTENT_ADD = 'add-intent'\n\nINTENT_VALIDATE = 'validate-intent'\n", "id": "334900", "language": "Python", "matching_score": 1.6144089698791504, "max_stars_count": 40, "path": "src/core/spaces/url_names.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import FormView\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\nfrom guardian.shortcuts import assign_perm\n\nfrom core.spaces import url_names as urln\nfrom core.spaces.models import Space, Event\nfrom core.spaces.forms import SpaceForm, EventForm\nfrom helpers.cache import get_or_insert_object_in_cache\n\n\nclass AddEvent(FormView):\n\n \"\"\"\n Returns an empty MeetingForm to create a new Meeting. 
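    On success the user is redirected back to the index page of the space,
    as built in get_success_url() below.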
Space and author\n fields are automatically filled with the request data.\n\n :permissions required: admin_space, mod_space\n :rtype: HTML Form\n :context: form, get_place\n \"\"\"\n form_class = EventForm\n template_name = 'spaces/event_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(AddEvent, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.event_author = self.request.user\n form_uncommited.space = self.space\n form_uncommited.save()\n form.save_m2m()\n\n return super(AddEvent, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddEvent, self).get_context_data(**kwargs)\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = place\n return context\n\n\nclass ViewEvent(DetailView):\n\n \"\"\"\n View the content of a event.\n\n :permissions required: view_space\n :rtype: Object\n :context: event, get_place\n \"\"\"\n context_object_name = 'event'\n template_name = 'spaces/event_detail.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewEvent, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n return get_object_or_404(Event, pk=self.kwargs['event_id'])\n\n def get_context_data(self, **kwargs):\n context = super(ViewEvent, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass EditEvent(UpdateView):\n\n \"\"\"\n Returns a MeetingForm filled with the current Meeting data to be edited.\n\n :permissions required: admin_space, admin_event, mod_space, change_event\n :rtype: HTML Form\n :context: event, get_place\n \"\"\"\n model = Event\n template_name = 'spaces/event_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n event = get_object_or_404(Event, pk=kwargs['event_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(EditEvent, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n cur_event = get_object_or_404(Event, pk=self.kwargs['event_id'])\n return cur_event\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n form_uncommited = form.save(commit=False)\n form_uncommited.save()\n form.save_m2m()\n\n return super(EditEvent, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(EditEvent, self).get_context_data(**kwargs)\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = space\n return context\n\n\nclass DeleteEvent(DeleteView):\n\n \"\"\"\n Returns a confirmation page before deleting the Meeting object.\n\n :permissions required: admin_space, mod_space, admin_event, delete_event\n :rtype: 
Confirmation\n :context: get_place\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n event = get_object_or_404(Event, url=kwargs['event_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(DeleteEvent, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n return get_object_or_404(Event, pk=self.kwargs['event_id'])\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_context_data(self, **kwargs):\n context = super(DeleteEvent, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass ListEvents(ListView):\n\n \"\"\"\n List all the events attached to a space.\n\n :permissions required: view_space\n :rtype: Object list\n :context: event_list, get_place\n \"\"\"\n paginate_by = 25\n context_object_name = 'event_list'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n if request.user.has_perm('view_space', space):\n return super(ListEvents, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n objects = Event.objects.all().filter(space=place.id).order_by('event_date')\n return objects\n\n def get_context_data(self, **kwargs):\n context = super(ListEvents, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n", "id": "10489429", "language": "Python", "matching_score": 5.02701473236084, "max_stars_count": 40, "path": "src/core/spaces/views/events.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.views.generic import FormView\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\n\nfrom core.spaces import url_names as urln\nfrom core.spaces.models import Space, Document\nfrom core.spaces.forms import SpaceForm, DocForm\n\n\nclass AddDocument(FormView):\n\n \"\"\"\n Upload a new document and attach it to the current space.\n\n :permissions required: admin_space, mod_space\n :rtype: Object\n :context: form, get_place\n \"\"\"\n form_class = DocForm\n template_name = 'spaces/document_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n 
return super(AddDocument, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.space = self.space\n form_uncommited.author = self.request.user\n form_uncommited.save()\n\n return super(AddDocument, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddDocument, self).get_context_data(**kwargs)\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = space\n return context\n\n\nclass EditDocument(UpdateView):\n\n \"\"\"\n Returns a DocForm filled with the current document data.\n\n :permissions required: admin_space, mod_space\n :rtype: HTML Form\n :context: doc, get_place\n \"\"\"\n model = Document\n template_name = 'spaces/document_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(EditDocument, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_object(self):\n cur_doc = get_object_or_404(Document, pk=self.kwargs['doc_id'])\n return cur_doc\n\n def get_context_data(self, **kwargs):\n context = super(EditDocument, self).get_context_data(**kwargs)\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = space\n context['user_is_admin'] = (has_space_permission(self.request.user,\n space, allow=['admins', 'mods']) or has_all_permissions(\n self.request.user))\n return context\n\n\nclass DeleteDocument(DeleteView):\n\n \"\"\"\n Returns a confirmation page before deleting the current document.\n\n :permissions required: admin_space, mod_space\n :rtype: Confirmation\n :context: get_place\n \"\"\"\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n doc = get_object_or_404(Document, pk=kwargs['doc_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space')):\n return super(DeleteDocument, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n return get_object_or_404(Document, pk=self.kwargs['doc_id'])\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n # Now we delete the file for real. 
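        # Strictly speaking this removes the Document row; with Django's
        # default FileField behaviour the uploaded file itself stays in
        # storage unless something else cleans it up.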
It's not the best place, but here\n # we know that our user gave confirmation.\n f = get_object_or_404(Document, pk=self.kwargs['doc_id'])\n f.delete()\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_context_data(self, **kwargs):\n context = super(DeleteDocument, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass ListDocs(ListView):\n\n \"\"\"\n Returns a list of documents attached to the current space.\n\n :rtype: Object list\n :context: object_list, get_place\n \"\"\"\n paginate_by = 25\n context_object_name = 'document_list'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListDocs, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n objects = Document.objects.all().filter(space=place.id) \\\n .order_by('pub_date')\n return objects\n\n def get_context_data(self, **kwargs):\n context = super(ListDocs, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n", "id": "1066698", "language": "Python", "matching_score": 3.178311824798584, "max_stars_count": 40, "path": "src/core/spaces/views/documents.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThese are the views that control the debates.\n\"\"\"\n\nimport json\nimport datetime\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib import messages\nfrom django.contrib.comments import *\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.comments.forms import CommentForm\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, Http404\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.template import RequestContext\nfrom django.forms.formsets import formset_factory, BaseFormSet\nfrom django.db import connection\nfrom django.forms.models import modelformset_factory, inlineformset_factory\n\nfrom guardian.shortcuts import assign_perm\n\nfrom apps.ecidadania.debate import url_names as urln\nfrom apps.ecidadania.debate.models import Debate, Note, Row, Column\nfrom apps.ecidadania.debate.forms import DebateForm, UpdateNoteForm, \\\n NoteForm, RowForm, 
ColumnForm, UpdateNotePosition\nfrom core.spaces.models import Space\nfrom helpers.cache import get_or_insert_object_in_cache\n\n\ndef add_new_debate(request, space_url):\n\n \"\"\"\n Create a new debate. This function returns two forms to create\n a complete debate, debate form and phases formset.\n\n .. versionadded:: 0.1.5\n\n :attributes: debate_form, row_formset, column_formset\n :context: form, rowform, colform, get_place, debateid\n \"\"\"\n place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place)):\n\n RowFormSet = inlineformset_factory(Debate, Row, extra=1)\n ColumnFormSet = inlineformset_factory(Debate, Column, extra=1)\n\n debate_form = DebateForm(request.POST or None)\n row_formset = RowFormSet(request.POST or None, prefix=\"rowform\")\n column_formset = ColumnFormSet(request.POST or None, prefix=\"colform\")\n\n # Get the last PK and add 1 to get the current PK\n try:\n last_debate_id = Debate.objects.latest('id')\n current_debate_id = last_debate_id.pk + 1\n except ObjectDoesNotExist:\n current_debate_id = 1\n\n if request.method == 'POST':\n if (debate_form.is_valid() and row_formset.is_valid() and\n column_formset.is_valid()):\n\n debate_form_uncommited = debate_form.save(commit=False)\n debate_form_uncommited.space = place\n debate_form_uncommited.author = request.user\n\n saved_debate = debate_form_uncommited.save()\n debate_instance = get_object_or_404(Debate, pk=current_debate_id)\n\n row = row_formset.save(commit=False)\n for form in row:\n form.debate = debate_instance\n form.save()\n\n column = column_formset.save(commit=False)\n for form in column:\n form.debate = debate_instance\n form.save()\n\n # Assign the permissions to the creator of the debate and the\n # space administrators\n assign_perm('view_debate', request.user, debate_instance)\n assign_perm('admin_debate', request.user, debate_instance)\n assign_perm('change_debate', request.user, debate_instance)\n assign_perm('delete_debate', request.user, debate_instance)\n\n return HttpResponseRedirect(reverse(urln.DEBATE_VIEW,\n kwargs={'space_url': space_url,\n 'debate_id': str(debate_form_uncommited.id)}))\n\n return render_to_response('debate/debate_add.html',\n {'form': debate_form,\n 'rowform': row_formset,\n 'colform': column_formset,\n 'get_place': place,\n 'debateid': current_debate_id},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\ndef edit_debate(request, space_url, debate_id):\n\n \"\"\"\n \"\"\"\n pk = debate_id\n place = get_object_or_404(Space, url=space_url)\n instance = Debate.objects.get(pk=debate_id)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('admin_debate', instance) or\n request.user == instance.author):\n\n RowFormSet = inlineformset_factory(Debate, Row, extra=1)\n ColumnFormSet = inlineformset_factory(Debate, Column, extra=1)\n\n debate_form = DebateForm(request.POST or None, instance=instance)\n row_formset = RowFormSet(request.POST or None, instance=instance,\n prefix=\"rowform\")\n column_formset = ColumnFormSet(request.POST or None, instance=instance,\n prefix=\"colform\")\n\n if request.method == 'POST':\n if debate_form.is_valid() and row_formset.is_valid() \\\n and column_formset.is_valid():\n debate_form_uncommited = debate_form.save(commit=False)\n debate_form_uncommited.space = place\n debate_form_uncommited.author = request.user\n\n saved_debate = debate_form_uncommited.save()\n debate_instance = 
get_object_or_404(Debate,\n pk=debate_id)\n\n row = row_formset.save(commit=False)\n\n for form in row:\n form.debate = instance\n form.save()\n\n column = column_formset.save(commit=False)\n for form in column:\n form.debate = instance\n form.save()\n\n return HttpResponseRedirect(reverse(urln.DEBATE_VIEW,\n kwargs={'space_url': space_url,\n 'debate_id': str(debate_form_uncommited.id)}))\n\n return render_to_response('debate/debate_add.html',\n {'form': debate_form,\n 'rowform': row_formset,\n 'colform': column_formset,\n 'get_place': place,\n 'debateid': debate_id},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\n# def get_debates(request):\n\n# \"\"\"\n# Get all debates and serve them through JSON.\n# \"\"\"\n# data = [debate.title for debate in Debate.objects.order_by('title')]\n# return render_to_response(json.dumps(data), content_type='application/json')\n\n\ndef create_note(request, space_url):\n\n \"\"\"\n This function creates a new note inside the debate board. It receives the\n order from the createNote() AJAX function. To create the note first we\n create the note in the DB, and if successful we return some of its\n parameters to the debate board for the user. In case the petition had\n errors, we return the error message that will be shown by jsnotify.\n\n .. versionadded:: 0.1.5\n \"\"\"\n note_form = NoteForm(request.POST or None)\n place = get_object_or_404(Space, url=space_url)\n\n if request.method == \"POST\" and request.is_ajax():\n debate = get_object_or_404(Debate, pk=request.POST['debateid'])\n\n # This is not the best approach, but I don't want to think in\n # another solution right now, we need this and we need it now\n if ((debate.private and request.user.has_perm('view_debate', debate)) or\n (not debate.private and request.user.has_perm('view_space', place))):\n\n if note_form.is_valid():\n note_form_uncommited = note_form.save(commit=False)\n note_form_uncommited.author = request.user\n note_form_uncommited.debate = get_object_or_404(Debate,\n pk=request.POST['debateid'])\n note_form_uncommited.title = request.POST['title']\n note_form_uncommited.message = request.POST['message']\n note_form_uncommited.column = get_object_or_404(Column,\n pk=request.POST['column'])\n note_form_uncommited.row = get_object_or_404(Row,\n pk=request.POST['row'])\n note_form_uncommited.save()\n\n response_data = {}\n response_data['id'] = note_form_uncommited.id\n response_data['message'] = note_form_uncommited.message\n response_data['title'] = note_form_uncommited.title\n msg = \"The note has been created.\"\n return HttpResponse(json.dumps(response_data),\n mimetype=\"application/json\")\n else:\n return HttpResponseBadRequest(_(\"The note form didn't validate. This fields gave errors: \") + str(note_form.errors))\n else:\n raise PermissionDenied\n else:\n return HttpResponseBadRequest(_(\"The petition was not POST.\"))\n\n\ndef update_note(request, space_url):\n\n \"\"\"\n Updated the current note with the POST data. 
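    The view is AJAX-only: a GET carrying a noteid key returns the note
    title, message, author and latest comments as JSON, while a POST with
    noteid, title and message saves the changes.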
UpdateNoteForm is an incomplete\n form that doesn't handle some properties, only the important for the note\n editing.\n \"\"\"\n\n # Shit double validation here due to the fact that we can't get the note ID\n # until the JS code sends us the GET or POST signals\n place = get_object_or_404(Space, url=space_url)\n\n if request.method == \"GET\" and request.is_ajax():\n note = get_object_or_404(Note, pk=request.GET['noteid'])\n debate = get_object_or_404(Debate, pk=note.debate.id)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place) or\n request.user.has_perm('admin_debate', debate) or\n request.user.has_perm('mod_debate', debate) or\n request.user == note.author):\n\n ctype = ContentType.objects.get_for_model(Note)\n latest_comments = Comment.objects.filter(is_public=True,\n is_removed=False, content_type=ctype, object_pk=note.id) \\\n .order_by('-submit_date')[:5]\n form = CommentForm(target_object=note)\n\n response_data = {}\n response_data['title'] = note.title\n response_data['message'] = note.message\n response_data['author'] = {'name': note.author.username}\n response_data['comments'] = [{'username': c.user.username,\n 'comment': c.comment,\n 'submit_date': c.submit_date} for c in latest_comments]\n response_data[\"form_html\"] = form.as_p()\n\n return HttpResponse(json.dumps(response_data, cls=DjangoJSONEncoder),\n mimetype=\"application/json\")\n else:\n raise PermissionDenied\n\n elif request.method == \"POST\" and request.is_ajax():\n note = get_object_or_404(Note, pk=request.POST['noteid'])\n debate = get_object_or_404(Debate, pk=note.debate.id)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place) or\n request.user.has_perm('admin_debate', debate) or\n request.user.has_perm('mod_debate', debate) or\n request.user == note.author):\n\n note_form = UpdateNoteForm(request.POST or None, instance=note)\n if note_form.is_valid():\n note_form_uncommited = note_form.save(commit=False)\n note_form_uncommited.title = request.POST['title']\n note_form_uncommited.message = request.POST['message']\n note_form_uncommited.last_mod_author = request.user\n\n note_form_uncommited.save()\n\n return HttpResponse(_(\"Note saved\"))\n else:\n return HttpResponseBadRequest(_(\"The form is not valid, check field(s): \") + note_form.errors)\n else:\n raise PermissionDenied\n else:\n return HttpResponseBadRequest(_(\"Bad request\"))\n\n\ndef update_position(request, space_url):\n\n \"\"\"\n This view saves the new note position in the debate board. 
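    A minimal sketch of the expected request, assuming a space published at
    /spaces/myspace/ and illustrative noteid/column/row values, issued with
    Django's test Client by a user who may edit the note::

        client.post('/spaces/myspace/debate/update_position/',
                    {'noteid': 4, 'column': 2, 'row': 1},
                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    The view answers with a plain "Note updated" response on success.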
Instead of\n reloading all the note form with all the data, we use the partial form\n \"UpdateNotePosition\" which only handles the column and row of the note.\n \"\"\"\n place = get_object_or_404(Space, url=space_url)\n\n if request.method == \"POST\" and request.is_ajax():\n note = get_object_or_404(Note, pk=request.POST['noteid'])\n debate = get_object_or_404(Debate, pk=note.debate.id)\n position_form = UpdateNotePosition(request.POST or None, instance=note)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place) or\n request.user.has_perm('admin_debate', debate) or\n request.user.has_perm('mod_debate', debate) or\n request.user == note.author):\n\n if position_form.is_valid():\n position_form_uncommited = position_form.save(commit=False)\n position_form_uncommited.column = get_object_or_404(Column,\n pk=request.POST['column'])\n position_form_uncommited.row = get_object_or_404(Row,\n pk=request.POST['row'])\n position_form_uncommited.save()\n\n return HttpResponse(_(\"Note updated\"))\n else:\n return HttpResponseBadRequest(_(\"There has been an error validating the form.\"))\n else:\n raise PermissionDenied\n else:\n return HttpResponseBadRequest(_(\"The petition was not POST.\"))\n\n\ndef delete_note(request, space_url):\n\n \"\"\"\n Deletes a note object.\n \"\"\"\n note = get_object_or_404(Note, pk=request.POST['noteid'])\n place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place) or\n request.user.has_perm('admin_debate', debate) or\n request.user.has_perm('mod_debate', debate) or\n request.user == note.author):\n\n ctype = ContentType.objects.get_for_model(Note)\n all_comments = Comment.objects.filter(is_public=True,\n is_removed=False, content_type=ctype,\n object_pk=note.id).all()\n for i in range(len(all_comments)):\n all_comments[i].delete()\n note.delete()\n return HttpResponse(\"The note has been deleted.\")\n\n else:\n raise PermissionDenied\n\n\nclass ViewDebate(DetailView):\n \"\"\"\n View a debate.\n\n :context: get_place, notes, columns, rows\n \"\"\"\n context_object_name = 'debate'\n template_name = 'debate/debate_view.html'\n\n def dispatch(self, request, *args, **kwargs):\n debate = get_object_or_404(Debate, pk=kwargs['debate_id'])\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if debate.private:\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space) or\n request.user.has_perm('view_debate', debate)):\n return super(ViewDebate, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n else:\n if request.user.has_perm('view_space', space):\n return super(ViewDebate, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n key = self.kwargs['debate_id']\n debate = get_or_insert_object_in_cache(Debate, key, pk=key)\n\n # Check debate dates\n if datetime.date.today() >= debate.end_date:\n self.template_name = 'debate/debate_expired_view.html'\n return debate\n elif datetime.date.today() < debate.start_date:\n self.template_name = 'debate/debate_outdated.html'\n return debate\n # We can't return none, if we do, the platform cannot show\n # the start and end dates and the title\n # return Debate.objects.none()\n\n return debate\n\n def get_context_data(self, **kwargs):\n context = super(ViewDebate, self).get_context_data(**kwargs)\n columns = Column.objects.filter(debate=self.kwargs['debate_id'])\n rows = 
Row.objects.filter(debate=self.kwargs['debate_id'])\n space_key = self.kwargs['space_url']\n current_space = get_or_insert_object_in_cache(Space, space_key,\n url=space_key)\n debate_key = self.kwargs['debate_id']\n current_debate = get_or_insert_object_in_cache(Debate, debate_key,\n pk=debate_key)\n notes = Note.objects.filter(debate=current_debate.pk)\n try:\n last_note = Note.objects.latest('id')\n except:\n last_note = 0\n\n context['get_place'] = current_space\n context['notes'] = notes\n context['columns'] = columns\n context['rows'] = rows\n if last_note == 0:\n context['lastnote'] = 0\n else:\n context['lastnote'] = last_note.pk\n\n return context\n\n\nclass ListDebates(ListView):\n \"\"\"\n Return a list of debates for the current space.\n\n :context: get_place\n \"\"\"\n paginate_by = 10\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListDebates, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n key = self.kwargs['space_url']\n current_space = get_or_insert_object_in_cache(Space, key, url=key)\n debates = Debate.objects.filter(space=current_space)\n return debates\n\n def get_context_data(self, **kwargs):\n context = super(ListDebates, self).get_context_data(**kwargs)\n key = self.kwargs['space_url']\n space = get_or_insert_object_in_cache(Space, key, url=key)\n context['get_place'] = space\n return context\n\n\nclass DeleteDebate(DeleteView):\n\n \"\"\"\n Delete an existent debate. Debate deletion is only reserved to spaces\n administrators or site admins.\n \"\"\"\n context_object_name = \"get_place\"\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n debate = get_object_or_404(Debate, pk=kwargs['debate_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space) or\n request.user.has_perm('admin_debate', debate) or\n request.user == debate.author):\n return super(DeleteDebate, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return '/spaces/%s' % (space)\n\n def get_object(self):\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n return get_object_or_404(Debate, pk=self.kwargs['debate_id'])\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for ViewDebate view.\n \"\"\"\n context = super(DeleteDebate, self).get_context_data(**kwargs)\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = space\n return context\n", "id": "2947588", "language": "Python", "matching_score": 6.766282558441162, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom 
django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.comments.models import Comment\nfrom django.db.models import Count\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseRedirect, HttpResponse, \\\n HttpResponseNotFound, HttpResponseBadRequest, HttpResponseServerError\nfrom django.contrib.auth.models import User\n\nfrom helpers.cache import get_or_insert_object_in_cache\nfrom operator import itemgetter\nfrom guardian.shortcuts import assign_perm, get_users_with_perms, remove_perm, get_perms\nfrom guardian.core import ObjectPermissionChecker\n\nfrom core.spaces import url_names as urln\nfrom core.spaces.models import Space, Entity, Document, Event\nfrom core.spaces.forms import SpaceForm, EntityFormSet, RoleForm\nfrom apps.ecidadania.news.models import Post\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet\nfrom apps.ecidadania.staticpages.models import StaticPage\nfrom apps.ecidadania.debate.models import Debate\nfrom apps.ecidadania.voting.models import Poll, Voting\nfrom e_cidadania.settings import DEBUG\n\n\n# Please take in mind that the create_space view can't be replaced by a CBV\n# (class-based view) since it manipulates two forms at the same time. Apparently\n# that creates some trouble in the django API. See this ticket:\n# https://code.djangoproject.com/ticket/16256\n@login_required\ndef create_space(request):\n\n \"\"\"\n Returns a SpaceForm form to fill with data to create a new space. There\n is an attached EntityFormset to save the entities related to the space.\n Every user in the platform is allowed to create spaces. Once it's created\n we assign the administration permissions to the user, along with some\n others for the sake of functionality.\n\n .. 
note:: Since everyone can have the ability to create spaces, instead\n of checking for the add_space permission we just ask for login.\n\n :attributes: - space_form: empty SpaceForm instance\n - entity_forms: empty EntityFormSet\n :permissions required: login_required\n :rtype: Space object, multiple entity objects.\n :context: form, entityformset\n \"\"\"\n space_form = SpaceForm(request.POST or None, request.FILES or None)\n entity_forms = EntityFormSet(request.POST or None, request.FILES or None,\n queryset=Entity.objects.none())\n\n if request.method == 'POST':\n if space_form.is_valid() and entity_forms.is_valid():\n space_form_uncommited = space_form.save(commit=False)\n space_form_uncommited.author = request.user\n\n new_space = space_form_uncommited.save()\n space = get_object_or_404(Space, name=space_form_uncommited.name)\n\n ef_uncommited = entity_forms.save(commit=False)\n for ef in ef_uncommited:\n ef.space = space\n ef.save()\n\n # We add the created spaces to the user allowed spaces\n # space.admins.add(request.user)\n space_form.save_m2m()\n\n # Assign permissions to the user so he can chenge everything in the\n # space\n assign_perm('view_space', request.user, space)\n assign_perm('change_space', request.user, space)\n assign_perm('delete_space', request.user, space)\n assign_perm('admin_space', request.user, space)\n\n if DEBUG:\n # This will tell us if the user got the right permissions for\n # the object\n un = request.user.username\n u = ObjectPermissionChecker(request.user) # Avoid unnecesary queries for the permission checks\n print \"\"\"Space permissions for user '%s':\n View: %s\n Change: %s\n Delete: %s\n Admin: %s\n Mod: %s\n \"\"\" % (un, u.has_perm('view_space', space),\n u.has_perm('change_space', space),\n u.has_perm('delete_space', space),\n u.has_perm('admin_space', space),\n u.has_perm('mod_space', space))\n\n return HttpResponseRedirect(reverse(urln.SPACE_INDEX,\n kwargs={'space_url': space.url}))\n\n return render_to_response('spaces/space_form.html',\n {'form': space_form,\n 'entityformset': entity_forms},\n context_instance=RequestContext(request))\n\n\nclass ViewSpaceIndex(DetailView):\n\n \"\"\"\n Returns the index page for a space. The access to spaces is restricted and\n filtered in the dispatch method. This view gathers information from all\n the configured modules in the space and also makes some calculations to\n gather most commented posts, most interesting content, etc.\n\n\n :attributes: - space_object/space/place: current space instance\n :permissions required: space.view_space\n :rtype: Object\n :context: get_place, entities, documents, proposals, proposalsets,\n publication, mostviewed, mostcommented, mostcommentedproposal,\n page, messages, debates, events, votings, polls, participants.\n \"\"\"\n context_object_name = 'get_place'\n template_name = 'spaces/space_index.html'\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n We get the current space and user, first we check if the space is\n public, if so, we check if the user is anonymous and leave a message,\n after that we return the view. If the space is not public we check\n for the view permission of the object, if the user doesn't have it we\n return a 403. Since dispatch is run before anything, this checks are\n made before obtaining the object. If the user doesn't have the\n permission we return a 403 code, which is handled by\n django-guardian and returns a template.\n\n .. 
note:: Take in mind that the dispatch method takes **request** as a\n parameter.\n \"\"\"\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if space.public:\n if request.user.is_anonymous():\n messages.info(self.request, _(\"Hello anonymous user. Remember \\\n that this space is public to view, but you must \\\n <a href=\\\"/accounts/register\\\">register</a> or \\\n <a href=\\\"/accounts/login\\\">login</a> to participate.\"))\n\n return super(ViewSpaceIndex, self).dispatch(request, *args, **kwargs)\n\n if request.user.has_perm('view_space', space):\n return super(ViewSpaceIndex, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n # Makes sure the space ins't already in the cache before hitting\n # the database\n space_url = self.kwargs['space_url']\n space_object = get_or_insert_object_in_cache(Space, space_url, url=space_url)\n return space_object\n\n # Get extra context data\n def get_context_data(self, **kwargs):\n context = super(ViewSpaceIndex, self).get_context_data(**kwargs)\n # Makes sure the space ins't already in the cache before hitting the\n # databass\n place_url = self.kwargs['space_url']\n place = get_or_insert_object_in_cache(Space, place_url, url=place_url)\n '''posts_by_score = Comment.objects.filter(is_public=True) \\\n .values('object_pk').annotate(score=Count('id')).order_by('-score')'''\n posts_by_score = Comment.objects.filter(is_public=True) \\\n .values('object_pk').annotate(score=Count('id')).order_by('-score')\n post_ids = [int(obj['object_pk']) for obj in posts_by_score]\n top_posts = Post.objects.filter(space=place.id).in_bulk(post_ids)\n # print top_posts.values()[0].title\n # o_list = Comment.objects.annotate(ocount=Count('object_pk'))\n comment_list = {}\n most_commented = []\n for proposal in Proposal.objects.filter(space=place.id):\n comment_list[proposal.pk] = Comment.objects.filter(object_pk=proposal.pk).count()\n for p in dict(sorted(comment_list.items(), key=itemgetter(1))):\n most_commented.append(Proposal.objects.filter(pk=p))\n\n highlighted = {}\n highlight = []\n for i in Proposal.objects.filter(space=place.id):\n highlighted[i.pk] = i.support_votes.count\n for p in dict(sorted(highlighted.items(), key=itemgetter(1))):\n highlight.append(Proposal.objects.filter(pk=p))\n\n context['entities'] = Entity.objects.filter(space=place.id)\n context['documents'] = Document.objects.filter(space=place.id)\n context['proposalsets'] = ProposalSet.objects.filter(space=place.id)\n context['proposals'] = Proposal.objects.filter(space=place.id) \\\n .order_by('-pub_date')\n context['publication'] = Post.objects.filter(space=place.id) \\\n .order_by('-pub_date')[:5]\n context['mostviewed'] = Post.objects.filter(space=place.id) \\\n .order_by('-views')[:5]\n # context['mostcommented'] = [top_posts.get(id,None) for id in post_ids]\n context['mostcommented'] = filter(None, map(lambda x: top_posts.get(x, None), post_ids))\n context['mostcommentedproposal'] = most_commented\n context['highlightedproposal'] = highlight\n\n # context['mostcommented'] = sorted(o_list,\n # key=lambda k: k['ocount'])[:10]\n # print sorted(o_list, key=lambda k: k['ocount'])[:10]\n context['page'] = StaticPage.objects.filter(show_footer=True) \\\n .order_by('-order')\n context['messages'] = messages.get_messages(self.request)\n context['debates'] = Debate.objects.filter(space=place.id) \\\n .order_by('-date')\n context['event'] = Event.objects.filter(space=place.id) \\\n .order_by('-event_date')\n context['votings'] = 
Voting.objects.filter(space=place.id)\n context['polls'] = Poll.objects.filter(space=place.id)\n context['participants'] = get_users_with_perms(place)\n return context\n\n\n# Please take in mind that the change_space view can't be replaced by a CBV\n# (class-based view) since it manipulates two forms at the same time. Apparently\n# that creates some trouble in the django API. See this ticket:\n# https://code.djangoproject.com/ticket/16256\ndef edit_space(request, space_url):\n\n \"\"\"\n Returns a form filled with the current space data to edit. Access to\n this view is restricted only to site and space administrators. The filter\n for space administrators is given by the change_space and admin_space\n permission and their belonging to that space.\n\n :attributes: - place: current space intance.\n - form: SpaceForm instance.\n - form_uncommited: form instance before commiting to\n the DB, so we can modify the data.\n :permissions required: spaces.change_space, spaces.admin_space\n :param space_url: Space URL\n :rtype: HTML Form\n :context: form, get_place, entityformset\n \"\"\"\n place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('change_space', place) and\n request.user.has_perm('admin_space', place)):\n form = SpaceForm(request.POST or None, request.FILES or None,\n instance=place)\n entity_forms = EntityFormSet(request.POST or None, request.FILES\n or None, queryset=Entity.objects.all().filter(space=place))\n\n if request.method == 'POST':\n if form.is_valid() and entity_forms.is_valid():\n form_uncommited = form.save(commit=False)\n form_uncommited.author = request.user\n\n new_space = form_uncommited.save()\n space = get_object_or_404(Space, name=form_uncommited.name)\n\n ef_uncommited = entity_forms.save(commit=False)\n for ef in ef_uncommited:\n ef.space = space\n ef.save()\n\n form.save_m2m()\n return HttpResponseRedirect(reverse(urln.SPACE_INDEX,\n kwargs={'space_url': space.url}))\n\n return render_to_response('spaces/space_form.html', {'form': form,\n 'get_place': place, 'entityformset': entity_forms},\n context_instance=RequestContext(request))\n\n else:\n raise PermissionDenied\n\n\nclass DeleteSpace(DeleteView):\n\n \"\"\"\n Returns a confirmation page before deleting the space object completely.\n This does not delete the space related content. Only the site\n administrators or the space administrators can delete a space.\n\n :attributes: space_url\n :permissions required: spaces.delete_space, spaces.admin_space\n :rtype: Confirmation\n \"\"\"\n context_object_name = 'get_place'\n success_url = '/'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('delete_space', space) and\n request.user.has_perm('admin_space', space)):\n return super(DeleteSpace, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n space_url = self.kwargs['space_url']\n space = get_object_or_404(Space, url=space_url)\n return space\n\n\nclass ListSpaces(ListView):\n\n \"\"\"\n Return a list of spaces in the system (except private ones) using a generic\n view. The users associated to a private spaces will see it, but not the\n other private spaces. ListSpaces is a django generic :class:`ListView`.\n\n .. 
note:: Permissions on this view are used only to filter the spaces list\n but the view itself is public.\n\n :attributes: space_url\n :permissions required: spaces.view_space\n :rtype: Object list\n :contexts: object_list\n \"\"\"\n paginate_by = 10\n\n public_spaces = Space.objects.filter(public=True)\n all_spaces = Space.objects.all()\n\n def get_queryset(self):\n\n # I think I should explain this mess. What we want to obtain here is:\n # a list of public spaces in case the user is anonymous, or a list of\n # the public spaces plus the spaces the user is registered to if the\n # user is logged in.\n # To do the second, we create a set of PK objects, and outside of the\n # 'for' loop we make a queryset for those PK objects, after that we\n # combine the data of the user spaces and public ones with the '|'\n # operand.\n current_user = self.request.user\n user_spaces = set()\n all_spaces = Space.objects.all()\n public_spaces = Space.objects.filter(public=True)\n\n if not current_user.is_anonymous():\n for space in self.all_spaces:\n if current_user.has_perm('view_space', space):\n user_spaces.add(space.pk)\n\n user_spaces = Space.objects.filter(pk__in=user_spaces)\n return self.public_spaces | user_spaces\n\n return self.public_spaces\n\n def get_context_data(self, **kwargs):\n context = super(ListSpaces, self).get_context_data(**kwargs)\n context['public_spaces'] = self.public_spaces\n return context\n\n\ndef edit_roles(request, space_url):\n\n \"\"\"\n The edit_roles function provides a way for space administrators to\n modify the users' roles inside a space, at the space level.\n\n It basically works as an AJAX communication where the frontend sends two key\n values: userid and perm, containing the user ID and the permission code,\n which later we compare with the permissions dictionary. If the user has the\n permission we go to the next one and so on.\n\n There is a special perm code called \"delete\" that triggers the deletion of\n all the permissions for the current user on the current space.\n\n :ajax keys: userid, perm\n :returns: responses\n :versionadded: 0.1.9\n \"\"\"\n\n space = get_object_or_404(Space, url=space_url)\n perm_dict = {\n 'admins': ['admin_space', 'view_space'],\n 'mods': ['mod_space', 'view_space'],\n 'users': ['view_space', ]\n }\n\n if request.user.has_perm('admin_space', space):\n if request.method == 'POST' and request.is_ajax():\n user = get_object_or_404(User, pk=request.POST['userid'])\n cur_user_perms = get_perms(user, space)\n\n if request.POST['perm'] == \"delete\":\n for p in cur_user_perms:\n try:\n remove_perm(p, user, space)\n except:\n return HttpResponseServerError(_(\"Couldn't delete user permissions.\"))\n return HttpResponse(_('Permissions deleted. 
User removed from space.'))\n\n else:\n try:\n perm = perm_dict[request.POST['perm']]\n for p in perm:\n if p in cur_user_perms:\n pass\n else:\n try:\n assign_perm(p, user, space)\n except:\n return HttpResponseServerError(_(\"The permissions couldn't be assigned.\"))\n return HttpResponse(_('Permissions assigned.'))\n except:\n return HttpResponseBadRequest(_('Permission code not valid.'))\n else:\n space_users = get_users_with_perms(space, with_superusers=False)\n admins = set()\n mods = set()\n users = set()\n for user in space_users:\n if user.has_perm('admin_space', space):\n admins.add(user)\n elif user.has_perm('mod_space', space):\n mods.add(user)\n else:\n # We omit the check for \"view_space\" because the space_users\n # variable is already filtered to show only the users with permissions\n # on that object and users shows all the users in the space.\n users.add(user)\n\n return render_to_response('spaces/user_groups.html',\n {'get_place': space, 'user_admins': admins, 'user_mods': mods,\n 'user_users': users}, context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\ndef search_user(request, space_url):\n\n \"\"\"\n Simple user search mechanism. It makes a query to django with the strict\n user name; if it doesn't match, it returns an error.\n\n :ajax keys: uname\n :returns: user ID\n .. versionadded:: 0.1.9\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n\n if request.user.has_perm('admin_space', space):\n if request.method == 'POST' and request.is_ajax():\n try:\n user = User.objects.get(username=request.POST['uname'])\n assign_perm('view_space', user, space)\n return HttpResponse(user.id)\n except:\n return HttpResponseNotFound(_('The user does not exist.'))\n else:\n return HttpResponseBadRequest(_(\"Wrong petition.\"))\n else:\n raise PermissionDenied\n", "id": "10964273", "language": "Python", "matching_score": 6.1569085121154785, "max_stars_count": 40, "path": "src/core/spaces/views/spaces.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nProposal module views.\n\"\"\"\nimport hashlib\nimport datetime\n\nfrom django.core.mail import send_mail\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import FormView\nfrom django.views.decorators.http import require_POST\nfrom django.db.models import Count\nfrom django.db.models import F\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, 
get_object_or_404, redirect\nfrom django.core.exceptions import PermissionDenied\n\nfrom apps.ecidadania.proposals import url_names as urln_prop\nfrom core.spaces import url_names as urln_space\nfrom core.spaces.models import Space\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet, \\\n ProposalField\nfrom apps.ecidadania.proposals.forms import ProposalForm, VoteProposal, \\\n ProposalSetForm, ProposalFieldForm, ProposalSetSelectForm, \\\n ProposalMergeForm, ProposalFieldDeleteForm, ProposalFormInSet\nfrom apps.ecidadania.debate.models import Debate\n\n\nclass AddProposalInSet(FormView):\n\n \"\"\"\n Create a new single (not tied to a set) proposal.\n\n :parameters: space_url\n :rtype: HTML Form\n :context: form, get_place\n \"\"\"\n form_class = ProposalFormInSet\n template_name = 'proposals/proposal_form_in_set.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(AddProposalInSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln_space.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n pset = get_object_or_404(ProposalSet, pk=self.kwargs['set_id'])\n form_uncommited = form.save(commit=False)\n form_uncommited.space = self.space\n form_uncommited.author = self.request.user\n form_uncommited.proposalset = pset\n form_uncommited.save()\n return super(AddProposalInSet, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddProposalInSet, self).get_context_data(**kwargs)\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n self.field = ProposalField.objects.filter(proposalset=self.kwargs['set_id'])\n context['get_place'] = self.space\n context['form_field'] = [f_name.field_name for f_name in self.field]\n return context\n\n\ndef add_proposal_field(request, space_url):\n\n \"\"\"\n Adds a new form field to the proposal form. The admin can customize the proposal form for a\n particular proposal set. The optional fields will be already defined, this function will allow\n the admin to add those field to the proposal form.\n\n .. versionadded:: 0.1.5\n\n :arguments: space_url\n :context:form, get_place, prop_fields, form_data, prop_fields\n\n \"\"\"\n get_place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', get_place) or\n request.user.has_perm('mod_space', get_place)):\n form = ProposalFieldForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form_data = form.save()\n proposal_fields = ProposalField.objects.filter(\n proposalset=form_data.proposalset)\n return render_to_response(\"proposals/proposal_add_fields.html\",\n {'form_data': form_data,\n 'get_place': get_place,\n 'prop_fields': proposal_fields,\n 'form': form},\n context_instance=RequestContext(request))\n\n return render_to_response(\"proposals/proposal_add_fields.html\",\n {'form': form, 'get_place': get_place},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\ndef delete_proposal_field(request, space_url):\n\n \"\"\"\n Removes a form field from proposal form. 
Only for proposals which are in proposal set.\n\n ..versionadded:: 0.1.5\n\n :arguments: space_url\n :context: d_form, get_place, delete_field\n\n \"\"\"\n get_place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', get_place) or\n request.user.has_perm('mod_space', get_place)):\n d_form = ProposalFieldDeleteForm(request.POST or None)\n if request.method == 'POST':\n if d_form.is_valid():\n form_data = d_form.save(commit=False)\n delete_field = ProposalField.objects.filter(\n proposalset=form_data.proposalset,\n field_name=form_data.field_name)\n delete_field.delete()\n return render_to_response(\n \"proposals/proposalform_remove_field.html\",\n {'form': d_form, 'get_place': get_place,\n 'deleted_field': form_data},\n context_instance=RequestContext(request))\n\n return render_to_response(\"proposals/proposalform_remove_field.html\",\n {'form': d_form, 'get_place': get_place},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\ndef proposal_to_set(request, space_url):\n\n \"\"\"\n Allows to select a proposal set to which a proposal need to be added.\n\n .. versionadded:: 0.1.5\n\n :arguments: space_url\n :context: form, get_place\n\n \"\"\"\n get_place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', get_place) or\n request.user.has_perm('mod_space', get_place)):\n sel_form = ProposalSetSelectForm(request.POST or None)\n # We change here the queryset, so only the proposalsets of this space\n # appear on the list.\n sel_form.fields['proposalset'].queryset = ProposalSet.objects.filter(\n space=get_place)\n\n if request.method == 'POST':\n if sel_form.is_valid():\n pset = request.POST['proposalset']\n return HttpResponseRedirect(reverse(urln_prop.PROPOSAL_ADD_INSET,\n kwargs={'space_url': space_url, 'set_id': pset}))\n\n return render_to_response(\"proposals/proposalset_select_form.html\",\n {'form': sel_form, 'get_place': get_place},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\ndef mergedproposal_to_set(request, space_url):\n\n \"\"\"\n Allows to select a proposal set to which a merged proposal need to be added\n\n :arguments: space_url\n :context:form, get_place\n\n \"\"\"\n get_place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', get_place) or\n request.user.has_perm('mod_space', get_place)):\n sel_form = ProposalSetSelectForm(request.POST or None)\n\n if request.method == 'POST':\n if sel_form.is_valid():\n pset = request.POST['proposalset']\n return HttpResponseRedirect(reverse(urln_prop.PROPOSAL_MERGED, kwargs={'space_url': space_url, 'set_id': pset}))\n\n return render_to_response(\"proposals/mergedproposal_in_set.html\",\n {'form': sel_form, 'get_place': get_place},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\n\n#\n# Proposal Sets\n#\n\nclass ListProposalSet(ListView):\n\n \"\"\"\n List all the proposal set in a space.\n\n .. 
versionadded: 0.1.5\n\n :rtype: Object list\n :context: setlist\n \"\"\"\n paginate_by = 20\n context_object_name = 'setlist'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListProposalSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n cur_space = self.kwargs['space_url']\n place = get_object_or_404(Space, url=cur_space)\n objects = ProposalSet.objects.filter(space=place)\n return objects\n\n def get_context_data(self, **kwargs):\n context = super(ListProposalSet, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass ViewProposalSet(ListView):\n\n \"\"\"\n List all the proposals inside a proposals set.\n\n .. versionadded 0.1.5\n\n :rtype: Object list\n :context: proposalset\n \"\"\"\n paginate_by = 50\n context_object_name = 'proposalset'\n template_name = 'proposals/proposalset_detail.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewProposalSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n objects = Proposal.objects.all().filter(\n proposalset=self.kwargs['set_id']).order_by('pub_date')\n return objects\n\n def get_context_data(self, **kwargs):\n context = super(ViewProposalSet, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass AddProposalSet(FormView):\n\n \"\"\"\n Create a new prpoposal set, it can be related to a debate or be in free mode,\n which is not linked to anything. If it's linked to a debate, people can\n make their proposals related to the debate notes.\n\n .. versionadded: 0.1.5\n\n :rtype: Form object\n :context: form, get_place\n \"\"\"\n form_class = ProposalSetForm\n template_name = 'proposals/proposalset_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(AddProposalSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln_space.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_form_kwargs(self, **kwargs):\n kwargs = super(AddProposalSet, self).get_form_kwargs(**kwargs)\n kwargs['initial']['space'] = self.kwargs['space_url']\n return kwargs\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.space = self.space\n form_uncommited.author = self.request.user\n form_uncommited.save()\n return super(AddProposalSet, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddProposalSet, self).get_context_data(**kwargs)\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = self.space\n return context\n\n\nclass EditProposalSet(UpdateView):\n\n \"\"\"\n Modify an already created proposal set.\n\n .. 
versionadded: 0.1.5\n\n :rtype: Form object\n :context: form, get_place\n \"\"\"\n model = ProposalSet\n template_name = 'proposals/proposalset_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(EditProposalSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n pset = self.kwargs['set_id']\n return reverse(urln_prop.PROPOSALSET_VIEW, kwargs={'space_url': space,\n 'set_id': pset})\n\n def get_object(self):\n propset_id = self.kwargs['set_id']\n return get_object_or_404(ProposalSet, pk=propset_id)\n\n def get_context_data(self, **kwargs):\n context = super(EditProposalSet, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass DeleteProposalSet(DeleteView):\n\n \"\"\"\n Delete a proposal set.\n\n .. versionadded: 0.1.5\n\n :rtype: Confirmation\n :context: get_place\n \"\"\"\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(DeleteProposalSet, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n return get_object_or_404(ProposalSet, pk=self.kwargs['set_id'])\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln_space.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_context_data(self, **kwargs):\n context = super(DeleteProposalSet, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n", "id": "4302105", "language": "Python", "matching_score": 7.74226713180542, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/views/proposalsets.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProposal module views.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.views.generic import FormView\nfrom django.views.decorators.http import require_POST\nfrom django.db.models import Count\nfrom django.db.models import F\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.core.exceptions import PermissionDenied\n\nfrom 
apps.ecidadania.proposals import url_names as urln_prop\nfrom core.spaces import url_names as urln_space\nfrom core.spaces.models import Space\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet, \\\n ProposalField\nfrom apps.ecidadania.proposals.forms import ProposalForm, VoteProposal, \\\n ProposalSetForm, ProposalFieldForm, ProposalSetSelectForm, \\\n ProposalMergeForm, ProposalFieldDeleteForm\n\n\nclass AddProposal(FormView):\n\n \"\"\"\n Create a new single (not tied to a set) proposal. The permission checks are\n done in the form_valid method.\n\n :parameters: space_url\n :rtype: HTML Form\n :context: form, get_place\n \"\"\"\n form_class = ProposalForm\n template_name = 'proposals/proposal_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(AddProposal, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln_space.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.space = space\n form_uncommited.author = self.request.user\n form_uncommited.save()\n return super(AddProposal, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddProposal, self).get_context_data(**kwargs)\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n # self.field = ProposalField.objects.filter(proposalset=self.kwargs['p_set'])\n context['get_place'] = self.space\n # context['form_field'] = [f_name.field_name for f_name in self.field]\n return context\n\n\nclass EditProposal(UpdateView):\n\n \"\"\"\n The proposal can be edited not only by the space and global admins, but also by its\n creator.\n\n :permissions required: admin_spacea, mod_space, author\n :rtype: HTML Form\n :context: get_place\n :parameters: space_url, prop_id\n \"\"\"\n model = Proposal\n template_name = 'proposals/proposal_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n proposal = get_object_or_404(Proposal, pk=kwargs['prop_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space) or\n proposal.author == request.user):\n return super(EditProposal, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def form_valid(self, form):\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.space = space\n form_uncommited.author = self.request.user\n form_uncommited.save()\n return super(EditProposal, self).form_valid(form)\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n proposal = self.kwargs['prop_id']\n return reverse(urln_prop.PROPOSAL_VIEW, kwargs={'space_url': space,\n 'prop_id': proposal})\n\n def get_object(self):\n prop_id = self.kwargs['prop_id']\n proposal = get_object_or_404(Proposal, pk=prop_id)\n return proposal\n\n def get_context_data(self, **kwargs):\n context = super(EditProposal, self).get_context_data(**kwargs)\n self.p_set = Proposal.objects.get(pk=self.kwargs['prop_id'])\n self.field = ProposalField.objects.filter(proposalset=self.p_set.proposalset)\n context['form_field'] = [f_name.field_name for f_name in self.field]\n context['get_place'] = 
get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass DeleteProposal(DeleteView):\n\n \"\"\"\n Delete a proposal.\n\n :rtype: Confirmation\n :context: get_place\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n proposal = get_object_or_404(Proposal, pk=kwargs['prop_id'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space) or\n request.user == proposal.author):\n return super(DeleteProposal, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n prop_id = self.kwargs['prop_id']\n proposal = get_object_or_404(Proposal, pk=prop_id)\n return proposal\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln_space.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_context_data(self, **kwargs):\n context = super(DeleteProposal, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass ListProposals(ListView):\n\n \"\"\"\n List all proposals stored whithin a space. Inherits from django :class:`ListView`\n generic view.\n\n :rtype: Object list\n :context: proposal\n \"\"\"\n paginate_by = 50\n context_object_name = 'proposal'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListProposals, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n objects = Proposal.objects.annotate(Count('support_votes')).filter(space=place.id).order_by('pub_date')\n return objects\n\n def get_context_data(self, **kwargs):\n context = super(ListProposals, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\ndef merge_proposal(request, space_url, set_id):\n\n \"\"\"\n Create a new merged proposal. This proposal can be linked to many other proposals which are in the\n same proposal set. Only admin and moderator can create merged proposals.\n\n .. 
versionadded:: 0.1.5\n\n :arguments: space_url, p_set\n :context:form, get_place, form_field\n\n \"\"\"\n get_place = get_object_or_404(Space, url=space_url)\n field = ProposalField.objects.filter(proposalset=set_id)\n form_field = [f_name.field_name for f_name in field]\n\n if (request.user.has_perm('admin_space', get_place) or\n request.user.has_perm('mod_space', get_place)):\n if request.method == 'POST':\n merged_form = ProposalForm(request.POST)\n if merged_form.is_valid():\n form_data = merged_form.save(commit=False)\n form_data.proposalset = get_object_or_404(ProposalSet, pk=set_id)\n form_data.space = get_object_or_404(Space, url=space_url)\n form_data.author = request.user\n form_data.merged = True\n field = ProposalField.objects.filter(proposalset=set_id)\n form_field = [f_name.field_name for f_name in field]\n form_data.save()\n merged_form.save_m2m()\n\n return reverse(urln_space.SPACE_INDEX,\n kwargs={'space_url': space_url})\n else:\n print \"id: \" + set_id\n merged_form = ProposalMergeForm(initial={'set_id': set_id})\n\n return render_to_response(\"proposals/proposal_merged.html\",\n {'form': merged_form, 'get_place': get_place, 'form_field': form_field}, context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n", "id": "5806548", "language": "Python", "matching_score": 5.485515594482422, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/views/proposals.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.views.generic.base import TemplateView, RedirectView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import FormView\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\nfrom guardian.shortcuts import assign_perm\n\nfrom core.spaces import url_names as urln\nfrom core.spaces.models import Space\nfrom apps.ecidadania.news.models import Post\nfrom apps.ecidadania.news.forms import NewsForm\n\n\nclass AddPost(FormView):\n\n \"\"\"\n Create a new post. Only registered users belonging to a concrete group\n are allowed to create news. only site administrators will be able to\n post news in the index page.\n\n .. 
versionadded: 0.1\n\n :permissions required: admin_space, mod_space\n :parameters: space_url\n :context: get_place\n \"\"\"\n form_class = NewsForm\n template_name = 'news/post_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(AddPost, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.author = self.request.user\n form_uncommited.space = self.space\n form_uncommited.save()\n return super(AddPost, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddPost, self).get_context_data(**kwargs)\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = self.space\n return context\n\n\nclass ViewPost(DetailView):\n\n \"\"\"\n View a specific post.\n\n :permissions required: view_space\n \"\"\"\n context_object_name = 'news'\n template_name = 'news/post_detail.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewPost, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n post = Post.objects.get(pk=self.kwargs['post_id'])\n try:\n post.views = post.views + 1\n except:\n post.views = 1\n post.save()\n return post\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewPost view.\n \"\"\"\n context = super(ViewPost, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass EditPost(UpdateView):\n\n \"\"\"\n Edit an existent post.\n\n :permissions required: admin_space, mod_space\n :parameters: space_url, post_id\n :context: get_place\n \"\"\"\n model = Post\n template_name = 'news/post_form.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(EditPost, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.author = self.request.user\n form_uncommited.space = self.space\n form_uncommited.save()\n return super(EditPost, self).form_valid(form)\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_object(self):\n cur_post = get_object_or_404(Post, pk=self.kwargs['post_id'])\n return cur_post\n\n def get_context_data(self, **kwargs):\n context = super(EditPost, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n\n\nclass DeletePost(DeleteView):\n\n \"\"\"\n Delete an existent post. 
Post deletion is only reserved to spaces\n administrators or site admins.\n \"\"\"\n context_object_name = \"get_place\"\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(DeletePost, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return reverse(urln.SPACE_INDEX, kwargs={'space_url': space})\n\n def get_object(self):\n return get_object_or_404(Post, pk=self.kwargs['post_id'])\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewPost view.\n \"\"\"\n context = super(DeletePost, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n return context\n", "id": "6405512", "language": "Python", "matching_score": 4.712334632873535, "max_stars_count": 40, "path": "src/apps/ecidadania/news/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.dates import ArchiveIndexView, MonthArchiveView, \\\n YearArchiveView\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\n\nfrom core.spaces import url_names as urln\nfrom core.spaces.models import Space\nfrom apps.ecidadania.news.models import Post\n\n\nclass RedirectArchive(RedirectView):\n\n \"\"\"\n This class redirect any page to the news archive page (ListPosts)\n\n :rtype: Redirect (permanent)\n\n .. versionadded:: 0.1.6\n \"\"\"\n permanent = True\n\n def get_redirect_url(self, **kwargs):\n space = self.kwargs['space_url']\n return reverse(urln.NEWS_ARCHIVE, kwargs={'space_url': space})\n\n\nclass YearlyPosts(YearArchiveView):\n\n \"\"\"\n List all the news posts of the selected year. Uses default template naming.\n\n :rtype: Object list by date\n\n .. 
versionadded:: 0.1.6\n \"\"\"\n make_object_list = True\n paginate_by = 12\n date_field = 'pub_date'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(YearlyPosts, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n \"\"\"\n We use the get queryset function to get only the posts relevant to\n a space, instead of all of them.\n \"\"\"\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n return Post.objects.filter(space=place.id)\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewPost view.\n \"\"\"\n context = super(YearlyPosts, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass MonthlyPosts(MonthArchiveView):\n\n \"\"\"\n List all the news posts for the selected month. This view uses default\n template naming.\n\n :rtype: Object list by date\n\n .. versionadded:: 0.1.6\n \"\"\"\n paginate_by = 12\n date_field = 'pub_date'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(MonthlyPosts, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n \"\"\"\n We use the get queryset function to get only the posts relevant to\n a space, instead of all of them.\n \"\"\"\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n return Post.objects.filter(space=place.id)\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewPost view.\n \"\"\"\n context = super(MonthlyPosts, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass ListPosts(ArchiveIndexView):\n\n \"\"\"\n List all post ordered by date\n \"\"\"\n date_field = 'pub_date'\n paginate_by = 12\n allow_empty = True\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListPosts, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n \"\"\"\n We use the get queryset function to get only the posts relevant to\n a space, instead of all of them.\n \"\"\"\n place = get_object_or_404(Space, url=self.kwargs['space_url'])\n return Post.objects.filter(space=place.id)\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewPost view.\n \"\"\"\n context = super(ListPosts, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n", "id": "5525990", "language": "Python", "matching_score": 2.2109363079071045, "max_stars_count": 40, "path": "src/core/spaces/views/news.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom guardian.admin import GuardedModelAdmin\n\nfrom apps.ecidadania.news.models import Post\n\n\nclass PostAdmin(GuardedModelAdmin):\n\n \"\"\"\n Administration view for news.\n \"\"\"\n list_display = ('title', 'pub_date', 'post_lastup', 'author',\n 'space', 'pub_index')\n search_fields = ('title', 'author', 'space', 'pub_index')\n\n fieldsets = [\n (None, {'fields':\n ['title', 'description']}),\n (_('Other data'), {'fields':\n ['space', 'pub_index']})\n ]\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n obj.save()\n\nadmin.site.register(Post, PostAdmin)\n", "id": "12055337", "language": "Python", "matching_score": 4.3259358406066895, "max_stars_count": 40, "path": "src/apps/ecidadania/news/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\ne-cidadania administration models for django-admin. This administration models\nwill make their respective data models available for management.\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom guardian.admin import GuardedModelAdmin\n\nfrom core.spaces.models import Space, Entity, Document, Event, Intent\n\n\nclass EntityAdmin(GuardedModelAdmin):\n\n \"\"\"\n Entities administration model.\n\n :list fields: name, website, space\n :search fields: name\n \"\"\"\n list_display = ('name', 'website', 'space')\n search_fields = ('name',)\n\n\nclass EntityInline(admin.TabularInline):\n\n \"\"\"\n TabularInline view for entities.\n \"\"\"\n model = Entity\n\n\nclass SpaceAdmin(GuardedModelAdmin):\n\n \"\"\"\n Administration view for django admin to create spaces. 
The save() method\n is overriden to store automatically the author of the space.\n\n :list fields: name, description, date\n :search fields: name\n \"\"\"\n list_display = ('name', 'description', 'pub_date')\n search_fields = ('name',)\n\n fieldsets = [\n (None, {'fields':\n [('name', 'url'), 'description']}),\n\n (_('Appearance'), {'fields':\n [('logo', 'banner')]}),\n\n (_('Modules'), {'fields':\n ('mod_cal', 'mod_docs', 'mod_news', 'mod_proposals',\n 'mod_debate')}),\n ]\n\n inlines = [\n EntityInline,\n ]\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n obj.save()\n\n def send_email(self, request, queryset):\n user_emails = queryset.objects.values('email')\n\n\nclass IntentAdmin(GuardedModelAdmin):\n\n \"\"\"\n This is the administrative view to manage the request from users to\n participate on the spaces.\n \"\"\"\n list_display = ('space', 'user', 'token', 'requested_on')\n search_fields = ('space', 'user')\n\n fieldsets = [\n (None, {'fields':\n ['user', 'space', 'token']})\n ]\n\n\nclass DocumentAdmin(GuardedModelAdmin):\n\n \"\"\"\n Administration view to upload/modify documents. The save() method is\n overriden to store the author automatically.\n\n :list fields: title, space, docfile, author, pub_date\n :search fields: title, space, author, pub_date\n \"\"\"\n list_display = ('title', 'space', 'docfile', 'author', 'pub_date')\n search_fields = ('title', 'space', 'author', 'pub_date')\n\n fieldsets = [\n (None, {'fields':\n ['title', 'docfile', 'space']}),\n ]\n\n def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n obj.save()\n\n\nclass EventAdmin(GuardedModelAdmin):\n\n \"\"\"\n Meetings administration model.\n\n :list fields: title, space, meeting_date\n :search fields: title\n \"\"\"\n list_display = ('title', 'space', 'event_date')\n search_fields = ('title',)\n\n# This register line is commented because it collides with\n# admin.autoregister() in the main urls.py file.\n\nadmin.site.register(Space, SpaceAdmin)\nadmin.site.register(Document, DocumentAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Entity, EntityAdmin)\nadmin.site.register(Intent, IntentAdmin)\n", "id": "8898176", "language": "Python", "matching_score": 3.7051124572753906, "max_stars_count": 40, "path": "src/core/spaces/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom guardian.admin import GuardedModelAdmin\n\nfrom apps.ecidadania.voting.models import Poll, Choice, Voting, ConfirmVote\n\n\nclass ChoiceInline(admin.TabularInline):\n model = Choice\n\n\nclass PollAdmin(GuardedModelAdmin):\n list_display = ('question', 'pub_date', 'poll_lastup', 'author',\n 'space')\n search_fields = ('question', 'author', 'space')\n\n inlines = [ChoiceInline]\n\n\nclass 
VotingAdmin(GuardedModelAdmin):\n\n list_display = ('title', 'start_date', 'end_date', 'author', 'space')\n search_fields = ('title', 'author', 'space')\n\n\nclass VoteTokenAdmin(admin.ModelAdmin):\n \"\"\"\n This admin class lists all the tokens and emails and their status.\n\n .. versionadded:: 0.1.8\n \"\"\"\n list_display = ('user', 'proposal', 'token')\n search_fields = ('user', 'proposal', 'token')\n\n\nadmin.site.register(Poll, PollAdmin)\nadmin.site.register(Voting, VotingAdmin)\nadmin.site.register(ConfirmVote, VoteTokenAdmin)\n", "id": "7954972", "language": "Python", "matching_score": 2.7427515983581543, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe proposal administration allows editing every proposal made in the system.\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom guardian.admin import GuardedModelAdmin\n\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet, ProposalField\n\n\nclass ProposalSetAdmin(GuardedModelAdmin):\n\n \"\"\"\n Basic ProposalSet administration interface.\n\n :list_display: name, author\n :search: none\n \"\"\"\n\n list_display = ('name', 'author')\n\n fieldsets = [\n (None, {'fields': ['name']})\n ]\n\n\nclass ProposalAdmin(GuardedModelAdmin):\n\n \"\"\"\n Basic proposal administration interface since most of the work is done\n in the website.\n\n :list display: title, author, tags\n :search: none\n \"\"\"\n list_display = ('title', 'author', 'tags')\n\n fieldsets = [\n (None, {'fields':\n ['code', 'title', 'proposalset', 'description', 'tags',\n 'support_votes']}),\n\n (_('Location'), {'fields':\n ['latitude', 'longitude']}),\n\n (_('Relations'), {'fields':\n [('space', 'author')]}),\n\n (_('Other'), {'fields':\n ['budget', 'closed', 'close_reason', 'closed_by']}),\n\n (_('Options'), {'fields':\n ['anon_allowed', 'refurbished']}),\n\n ]\n\n\nclass ProposalFieldAdmin(GuardedModelAdmin):\n\n \"\"\"\n Basic proposal field administration interface since most of the work is done in\n the website.\n\n :list display: proposalset, field_name\n :search: none\n\n .. 
versionadded:: 0.1.5b\n \"\"\"\n list_display = ('proposalset', 'field_name')\n\n fieldsets = [\n (None, {'fields':\n ['proposalset', 'field_name']}),\n ]\n\n\nadmin.site.register(ProposalSet, ProposalSetAdmin)\nadmin.site.register(Proposal, ProposalAdmin)\nadmin.site.register(ProposalField, ProposalFieldAdmin)\n", "id": "7309539", "language": "Python", "matching_score": 2.0472278594970703, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProposal module URLs.\n\"\"\"\n\nfrom django.conf.urls import *\n\nfrom apps.ecidadania.proposals.views.common import ViewProposal, \\\n support_proposal\nfrom apps.ecidadania.proposals.views.proposals import AddProposal, \\\n EditProposal, DeleteProposal, ListProposals\nfrom apps.ecidadania.proposals.views.proposalsets import AddProposalSet, \\\n EditProposalSet, DeleteProposalSet, add_proposal_field, \\\n delete_proposal_field, proposal_to_set, mergedproposal_to_set, \\\n ListProposalSet, ViewProposalSet, AddProposalInSet\nfrom apps.ecidadania.proposals.url_names import *\n\n\nurlpatterns = patterns('apps.ecidadania.proposals.views',\n\n url(r'^set/$', ListProposalSet.as_view(), name=PROPOSALSET_LIST),\n\n url(r'^set/(?P<set_id>\\w+)/$', ViewProposalSet.as_view(),\n name=PROPOSALSET_VIEW),\n\n url(r'^set/(?P<set_id>\\w+)/add/$', AddProposalInSet.as_view(),\n name=PROPOSAL_ADD_INSET),\n\n url(r'^add/$', AddProposal.as_view(), name=PROPOSAL_ADD),\n\n url(r'^add/set/$', AddProposalSet.as_view(), name=PROPOSALSET_ADD),\n\n url(r'^add/field/', 'proposalsets.add_proposal_field',\n name=PROPOSALFIELD_ADD),\n\n url(r'^edit/(?P<prop_id>\\w+)/', EditProposal.as_view(),\n name=PROPOSAL_EDIT),\n\n url(r'^edit/set/(?P<p_set>\\w+)/', EditProposalSet.as_view(),\n name=PROPOSALSET_EDIT),\n\n url(r'^delete/field/$', 'proposalsets.delete_proposal_field',\n name=PROPOSALFIELD_DELETE),\n\n url(r'^delete/(?P<prop_id>\\w+)/$', DeleteProposal.as_view(),\n name=PROPOSAL_DELETE),\n\n url(r'^delete/set/(?P<p_set>\\w+)/$', DeleteProposalSet.as_view(),\n name=PROPOSALSET_DELETE),\n\n url(r'^support/', 'common.support_proposal', name=PROPOSAL_VOTE),\n\n url(r'^merge/(?P<set_id>\\w+)/', 'proposals.merge_proposal',\n name=PROPOSAL_MERGED),\n\n url(r'^merge_proposals/', 'proposalsets.mergedproposal_to_set',\n name=PROPOSAL_MERGEDTOSET),\n\n url(r'^select_set/', 'proposalsets.proposal_to_set', name=SELECT_SET),\n\n url(r'^(?P<prop_id>\\w+)/$', ViewProposal.as_view(), name=PROPOSAL_VIEW),\n\n url(r'^$', ListProposals.as_view(), name=PROPOSAL_LIST),\n\n # url(_(r'^(?P<space_url>\\w+)/vote/approve/(?P<token>\\w+)/$'),\n # ValidateVote.as_view(), name=VALIDATE_VOTE),\n)\n", "id": "12384482", "language": "Python", "matching_score": 5.342057228088379, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione 
Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule to store proposal related url names.\n\"\"\"\n\n# Note: the order of the url names is the same as in the urls.py file. Or at\n# least it shoudl be.\n\nPROPOSAL_LIST = 'list-proposals'\n\nPROPOSAL_VIEW = 'view-proposal'\n\nPROPOSALSET_VIEW = 'view-proposalset'\n\nPROPOSAL_ADD = 'add-proposal'\n\nPROPOSAL_ADD_INSET = 'add-proposal-inset'\n\nPROPOSALSET_ADD = 'add-proposalset'\n\nPROPOSALSET_LIST = 'list-proposalset'\n\nPROPOSALFIELD_ADD = 'add_fields'\n\nPROPOSAL_EDIT = 'edit-proposal'\n\nPROPOSALSET_EDIT = 'edit-proposalset'\n\nPROPOSAL_DELETE = 'delete-proposal'\n\nPROPOSALSET_DELETE = 'delete-proposalset'\n\nPROPOSALFIELD_DELETE = 'delete_fields'\n\nPROPOSAL_VOTE = 'vote-proposal'\n\nPROPOSAL_MERGED = 'merged_proposal'\n\nPROPOSAL_MERGEDTOSET = 'merge_proposal'\n\nSELECT_SET = 'select_set'\n\nVALIDATE_VOTE = 'validate-vote'\n", "id": "9100963", "language": "Python", "matching_score": 0.4320387840270996, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/url_names.py" }, { "content": "\"\"\"\nTagging components for Django's form library.\n\"\"\"\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.thirdparty.tagging import settings\nfrom apps.thirdparty.tagging.models import Tag\nfrom apps.thirdparty.tagging.utils import parse_tag_input\n\n\nclass TagAdminForm(forms.ModelForm):\n class Meta:\n model = Tag\n\n def clean_name(self):\n value = self.cleaned_data['name']\n tag_names = parse_tag_input(value)\n if len(tag_names) > 1:\n raise forms.ValidationError(_('Multiple tags were given.'))\n elif len(tag_names[0]) > settings.MAX_TAG_LENGTH:\n raise forms.ValidationError(\n _('A tag may be no more than %s characters long.') %\n settings.MAX_TAG_LENGTH)\n return value\n\n\nclass TagField(forms.CharField):\n \"\"\"\n A ``CharField`` which validates that its input is a valid list of\n tag names.\n \"\"\"\n def clean(self, value):\n value = super(TagField, self).clean(value)\n if value == u'':\n return value\n for tag_name in parse_tag_input(value):\n if len(tag_name) > settings.MAX_TAG_LENGTH:\n raise forms.ValidationError(\n _('Each tag may be no more than %s characters long.') %\n settings.MAX_TAG_LENGTH)\n return value\n", "id": "8115516", "language": "Python", "matching_score": 2.736222505569458, "max_stars_count": 40, "path": "src/apps/thirdparty/tagging/forms.py" }, { "content": "from django.contrib import admin\nfrom apps.thirdparty.tagging.models import Tag, TaggedItem\nfrom apps.thirdparty.tagging.forms import TagAdminForm\n\n\nclass TagAdmin(admin.ModelAdmin):\n form = TagAdminForm\n\nadmin.site.register(TaggedItem)\nadmin.site.register(Tag, TagAdmin)\n", "id": "9616376", "language": "Python", "matching_score": 0.5877152681350708, "max_stars_count": 40, "path": "src/apps/thirdparty/tagging/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport cPickle as pickle\nimport Image\nimport os\nimport random\nimport urllib\nimport urllib2\nfrom xml.dom import minidom\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.http import Http404\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse\nfrom django.utils import simplejson\nfrom django.db import models\nfrom django.contrib.auth.models import User, SiteProfileNotAvailable, Group\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom guardian.shortcuts import get_objects_for_user\n\nfrom apps.ecidadania.proposals.models import Proposal\nfrom apps.thirdparty.userprofile.forms import AvatarForm, AvatarCropForm, \\\n EmailValidationForm, ProfileForm, RegistrationForm, \\\n LocationForm, PublicFieldsForm, ChangeEmail\nfrom apps.thirdparty.userprofile.models import EmailValidation, Avatar\nfrom core.spaces.models import Space\n\nif not settings.AUTH_PROFILE_MODULE:\n raise SiteProfileNotAvailable\ntry:\n app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')\n # app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')\n Profile = models.get_model(app_label, model_name)\nexcept (ImportError, ImproperlyConfigured):\n raise SiteProfileNotAvailable\n\nif not Profile:\n raise SiteProfileNotAvailable\n\nif hasattr(settings, \"DEFAULT_AVATAR\") and settings.DEFAULT_AVATAR:\n DEFAULT_AVATAR = settings.DEFAULT_AVATAR\nelse:\n DEFAULT_AVATAR = os.path.join(settings.MEDIA_ROOT, \"generic.jpg\")\n\nif not os.path.isfile(DEFAULT_AVATAR):\n import shutil\n image = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n \"generic.jpg\")\n print image\n print DEFAULT_AVATAR\n shutil.copy(image, DEFAULT_AVATAR)\n\nGOOGLE_MAPS_API_KEY = hasattr(settings, \"GOOGLE_MAPS_API_KEY\") and \\\n settings.GOOGLE_MAPS_API_KEY or None\nAVATAR_WEBSEARCH = hasattr(settings, \"AVATAR_WEBSEARCH\") and \\\n settings.AVATAR_WEBSEARCH or None\n\nif AVATAR_WEBSEARCH:\n import gdata.service\n import gdata.photos.service\n\n\ndef get_profiles():\n return Profile.objects.order_by(\"-date\")\n\n\ndef fetch_geodata(request, lat, lng):\n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n url = \"http://ws.geonames.org/countrySubdivision?lat=%s&lng=%s\" % (lat, lng)\n dom = minidom.parse(urllib.urlopen(url))\n country = dom.getElementsByTagName('countryCode')\n if len(country) >=1:\n country = country[0].childNodes[0].data\n region = dom.getElementsByTagName('adminName1')\n if len(region) >=1:\n region = region[0].childNodes[0].data\n\n return HttpResponse(simplejson.dumps({'success': True, 'country': country, 'region': region}))\n else:\n raise 
Http404()\n\n\ndef public(request, username):\n try:\n profile = User.objects.get(username=username).get_profile()\n except:\n raise Http404\n\n template = \"userprofile/profile/public.html\"\n data = {'profile': profile, 'GOOGLE_MAPS_API_KEY': GOOGLE_MAPS_API_KEY, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef searchimages(request):\n \"\"\"\n Web search for images Form\n \"\"\"\n\n images = dict()\n if request.method==\"POST\" and request.POST.get('keyword'):\n keyword = request.POST.get('keyword')\n gd_client = gdata.photos.service.PhotosService()\n feed = gd_client.SearchCommunityPhotos(\"%s&thumbsize=72c\" % keyword.split(\" \")[0], limit='48')\n for entry in feed.entry:\n images[entry.media.thumbnail[0].url] = entry.content.src\n\n template = \"userprofile/avatar/search.html\"\n data = {'section': 'avatar', 'images': images, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef overview(request):\n \"\"\"\n Main profile page\n \"\"\"\n # Get the data of what the user did\n # WARNING, THIS IS HARDCODED, MUST BE IMPLEMENTED WELL\n # AFTER TESTING\n proposals = Proposal.objects.annotate(models.Count('support_votes')).filter(author=request.user.id).order_by('pub_date')\n profile, created = Profile.objects.get_or_create(user=request.user)\n spaces = get_objects_for_user(request.user, 'view_space', klass=Space)\n validated = False\n try:\n email = EmailValidation.objects.get(user=request.user).email\n except EmailValidation.DoesNotExist:\n email = request.user.email\n if email:\n validated = True\n\n template = \"userprofile/profile/overview.html\"\n data = {'section': 'overview', 'GOOGLE_MAPS_API_KEY': GOOGLE_MAPS_API_KEY,\n 'email': email, 'validated': validated, 'proposals': proposals,\n 'spaces': spaces}\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef personal(request):\n \"\"\"\n Personal data of the user profile\n \"\"\"\n profile, created = Profile.objects.get_or_create(user=request.user)\n\n if request.method == \"POST\":\n form = ProfileForm(request.POST, instance=profile)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"profile_edit_personal_done\"))\n else:\n form = ProfileForm(instance=profile)\n\n template = \"userprofile/profile/personal.html\"\n data = {'section': 'personal', 'GOOGLE_MAPS_API_KEY': GOOGLE_MAPS_API_KEY,\n 'form': form, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef location(request):\n \"\"\"\n Location selection of the user profile\n \"\"\"\n profile, created = Profile.objects.get_or_create(user=request.user)\n\n if request.method == \"POST\":\n form = LocationForm(request.POST, instance=profile)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"profile_edit_location_done\"))\n else:\n form = LocationForm(instance=profile)\n\n template = \"userprofile/profile/location.html\"\n data = {'section': 'location', 'GOOGLE_MAPS_API_KEY': GOOGLE_MAPS_API_KEY,\n 'form': form, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef delete(request):\n if request.method == \"POST\":\n # Remove the profile and all the information\n Profile.objects.filter(user=request.user).delete()\n EmailValidation.objects.filter(user=request.user).delete()\n Avatar.objects.filter(user=request.user).delete()\n\n # Remove the 
e-mail of the account too\n request.user.email = ''\n request.user.first_name = ''\n request.user.last_name = ''\n request.user.save()\n\n return HttpResponseRedirect(reverse(\"profile_delete_done\"))\n\n template = \"userprofile/profile/delete.html\"\n data = {'section': 'delete', }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef avatarchoose(request):\n \"\"\"\n Avatar choose\n \"\"\"\n profile, created = Profile.objects.get_or_create(user=request.user)\n if not request.method == \"POST\":\n form = AvatarForm()\n else:\n form = AvatarForm(request.POST, request.FILES)\n if form.is_valid():\n image = form.cleaned_data.get('url') or form.cleaned_data.get('photo')\n avatar = Avatar(user=request.user, image=image, valid=False)\n avatar.image.save(\"%s.jpg\" % request.user.username, image)\n image = Image.open(avatar.image.path)\n image.thumbnail((480, 480), Image.ANTIALIAS)\n image.convert(\"RGB\").save(avatar.image.path, \"JPEG\")\n avatar.save()\n return HttpResponseRedirect('%scrop/' % request.path_info)\n\n base, filename = os.path.split(avatar_path)\n generic, extension = os.path.splitext(filename)\n\n if DEFAULT_AVATAR:\n base, filename = os.path.split(DEFAULT_AVATAR)\n filename, extension = os.path.splitext(filename)\n generic96 = \"%s/%s.96%s\" % (base, filename, extension)\n generic96 = generic96.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)\n else:\n generic96 = \"\"\n\n template = \"userprofile/avatar/choose.html\"\n data = {'generic96': generic96, 'form': form,\n 'AVATAR_WEBSEARCH': AVATAR_WEBSEARCH, 'section': 'avatar', }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef avatarcrop(request):\n \"\"\"\n Avatar management\n \"\"\"\n avatar = get_object_or_404(Avatar, user=request.user, valid=False)\n if not request.method == \"POST\":\n form = AvatarCropForm()\n else:\n form = AvatarCropForm(request.POST)\n if form.is_valid():\n top = int(form.cleaned_data.get('top'))\n left = int(form.cleaned_data.get('left'))\n right = int(form.cleaned_data.get('right'))\n bottom = int(form.cleaned_data.get('bottom'))\n\n image = Image.open(avatar.image.path)\n box = [left, top, right, bottom]\n image = image.crop(box)\n if image.mode not in ('L', 'RGB'):\n image = image.convert('RGB')\n\n image.save(avatar.image.path)\n avatar.valid = True\n avatar.save()\n return HttpResponseRedirect(reverse(\"profile_avatar_crop_done\"))\n\n template = \"userprofile/avatar/crop.html\"\n data = {'section': 'avatar', 'avatar': avatar, 'form': form, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef avatardelete(request, avatar_id=False):\n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n try:\n Avatar.objects.get(user=request.user, valid=True).delete()\n return HttpResponse(simplejson.dumps({'success': True}))\n except:\n return HttpResponse(simplejson.dumps({'success': False}))\n else:\n raise Http404()\n\n\ndef email_validation_process(request, key):\n \"\"\"\n Verify key and change email\n \"\"\"\n if EmailValidation.objects.verify(key=key):\n successful = True\n else:\n successful = False\n\n template = \"userprofile/account/email_validation_done.html\"\n data = {'successful': successful, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\ndef email_validation(request, space_url):\n \"\"\"\n E-mail Change form\n \"\"\"\n if request.method == 'POST':\n form = 
EmailValidationForm(request.POST)\n if form.is_valid():\n EmailValidation.objects.add(user=request.user, email=form.cleaned_data.get('email'))\n return HttpResponseRedirect('%sprocessed/' % request.path_info)\n else:\n form = EmailValidationForm()\n\n template = \"userprofile/account/email_validation.html\"\n data = {'form': form, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\ndef register(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('<PASSWORD>')\n newuser = User.objects.create_user(username=username, email='', password=password)\n newuser.email = form.cleaned_data.get('email')\n EmailValidation.objects.add(user=newuser, email=newuser.email)\n newuser.save()\n\n return HttpResponseRedirect('%scomplete/' % request.path_info)\n else:\n form = RegistrationForm()\n\n template = \"userprofile/account/registration.html\"\n data = {'form': form, }\n return render_to_response(template, data, context_instance=RequestContext(request))\n\n\n@login_required\ndef email_validation_reset(request):\n \"\"\"\n Resend the validation email for the authenticated user.\n \"\"\"\n try:\n resend = EmailValidation.objects.get(user=request.user).resend()\n response = \"done\"\n except EmailValidation.DoesNotExist:\n response = \"failed\"\n\n return HttpResponseRedirect(reverse(\"email_validation_reset_response\",\n args=[response]))\n\n\n@login_required\ndef email_change(request):\n form1 = ChangeEmail(request.POST or None)\n user1 = request.user\n if form1.is_valid():\n if form1.cleaned_data['email'] == form1.cleaned_data['email2']:\n user1.email = form1.cleaned_data['email']\n user1.save()\n email = request.user.email\n variables = RequestContext(request, {\n 'email': email\n })\n return render_to_response('userprofile/email/email_change_done.html', variables)\n else:\n user = request.user\n email = user.email\n variables = RequestContext(request, {\n 'form': form1,\n 'email': email\n })\n return render_to_response('userprofile/email/email_change.html', variables)\n", "id": "7885534", "language": "Python", "matching_score": 3.3961822986602783, "max_stars_count": 40, "path": "src/apps/thirdparty/userprofile/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport base64\n\nfrom django.contrib.syndication.views import Feed\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponse\n\nfrom apps.ecidadania.proposals.models import Proposal\nfrom apps.ecidadania.debate.models import Debate\nfrom apps.ecidadania.news.models import Post\nfrom core.spaces.models import Space, Event\n\n\nclass HTTPAuthFeed(Feed):\n basic_auth_realm = 'e-cidadania'\n\n def 
__call__(self, request, *args, **kwargs):\n # HTTP auth check inspired by http://djangosnippets.org/snippets/243/\n if request.user.is_authenticated():\n # already logged in\n return super(HTTPAuthFeed, self).__call__(request, *args, **kwargs)\n\n # check HTTP auth credentials\n if 'HTTP_AUTHORIZATION' in request.META:\n auth = request.META['HTTP_AUTHORIZATION'].split()\n if len(auth) == 2:\n # only basic auth is supported\n if auth[0].lower() == \"basic\":\n uname, passwd = base64.b64decode(auth[1]).split(':')\n user = authenticate(username=uname, password=<PASSWORD>)\n if user is not None:\n if user.is_active:\n login(request, user)\n request.user = user\n return super(HTTPAuthFeed, self).__call__(request,\n *args, **kwargs)\n\n # missing auth header or failed authentication results in 401\n response = HttpResponse()\n response.status_code = 401\n response['WWW-Authenticate'] = 'Basic realm=\"%s\"' % self.basic_auth_realm\n return response\n\n\nclass SpaceFeed(HTTPAuthFeed):\n\n \"\"\"\n Returns a space feed with the content of various applications. In the\n future this function must detect applications and returns their own feeds.\n \"\"\"\n\n def get_object(self, request, space_url):\n current_space = get_object_or_404(Space, url=space_url)\n return current_space\n\n def title(self, obj):\n return _(\"%s\") % obj.name\n\n def link(self, obj):\n return obj.get_absolute_url()\n\n def description(self, obj):\n return _(\"All the recent activity in %s \") % obj.name\n\n def items(self, obj):\n results = itertools.chain(\n Post.objects.filter(space=obj).order_by('-pub_date')[:10],\n Proposal.objects.filter(space=obj).order_by('-pub_date')[:10],\n Event.objects.filter(space=obj).order_by('-pub_date')[:10],\n Debate.objects.filter(space=obj).order_by('-date')[:10]\n )\n return results\n\n def item_title(self, item):\n return type(item).__name__ + \": \" + item.title\n\n def item_description(self, item):\n return item.description\n\n return sorted(results, key=lambda x: x.pub_date, reverse=True)\n", "id": "10218861", "language": "Python", "matching_score": 2.960202932357788, "max_stars_count": 40, "path": "src/core/spaces/views/rss.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCommon functions and classes for proposals and proposal sets.\n\"\"\"\n\nfrom django.views.generic.detail import DetailView\nfrom django.views.decorators.http import require_POST\nfrom django.db.models import Count\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponse, HttpResponseServerError\nfrom django.shortcuts import get_object_or_404\nfrom guardian.shortcuts import assign_perm\nfrom guardian.decorators import permission_required_or_403\nfrom django.core.exceptions import PermissionDenied\n\nfrom apps.ecidadania.proposals import url_names as 
urln_prop\nfrom core.spaces import url_names as urln_space\nfrom core.spaces.models import Space\nfrom apps.ecidadania.proposals.models import Proposal\n\n\nclass ViewProposal(DetailView):\n\n \"\"\"\n Detail view of a proposal. Inherits from django :class:`DetailView` generic\n view.\n\n **Permissions:** Everyone can read if the space is public. If it is private\n only logged in users that belong to any of the space groups can read. In\n other case just return an empty object and a not_allowed template.\n\n :rtype: object\n :context: proposal\n \"\"\"\n context_object_name = 'proposal'\n template_name = 'proposals/proposal_detail.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewProposal, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n prop_id = self.kwargs['prop_id']\n proposal = get_object_or_404(Proposal, pk=prop_id)\n return proposal\n\n def get_context_data(self, **kwargs):\n context = super(ViewProposal, self).get_context_data(**kwargs)\n current_space = get_object_or_404(Space, url=self.kwargs['space_url'])\n # We are going to get the proposal position in the list\n self.get_position = 0\n proposal = get_object_or_404(Proposal, pk=self.kwargs['prop_id'])\n if proposal.merged:\n context['merged_proposal'] = proposal.merged_proposals.all()\n\n support_votes_count = Proposal.objects.filter(space=current_space)\\\n .annotate(Count('support_votes'))\n for i, x in enumerate(support_votes_count):\n if x.id == int(self.kwargs['prop_id']):\n self.get_position = i\n context['support_votes_count'] = support_votes_count[int(self.get_position)].support_votes__count\n context['get_place'] = current_space\n return context\n\n\n@require_POST\ndef support_proposal(request, space_url):\n\n \"\"\"\n Increment support votes for the proposal in 1. 
We porform some permission\n checks, for example, the user has to be inside any of the user groups of\n the space.\n\n :permissions required: view_space\n \"\"\"\n prop = get_object_or_404(Proposal, pk=request.POST['propid'])\n space = get_object_or_404(Space, url=space_url)\n\n if request.user.has_perm('view_space', space):\n try:\n prop.support_votes.add(request.user)\n return HttpResponse(_('Vote added'))\n except:\n return HttpResponseServerError(_(\"Couldn't emit the vote.\"))\n else:\n raise PermissionDenied\n", "id": "4565395", "language": "Python", "matching_score": 2.7764999866485596, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/views/common.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nfrom django.forms.formsets import formset_factory, BaseFormSet\nfrom django.forms.models import modelformset_factory, inlineformset_factory\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom helpers.cache import get_or_insert_object_in_cache\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Count, Sum\nfrom django.core.exceptions import PermissionDenied\n\nfrom core.spaces.models import Space\nfrom core.spaces import url_names as urln\nfrom apps.ecidadania.voting import url_names as urln_voting\nfrom apps.ecidadania.voting.models import Choice, Poll\nfrom apps.ecidadania.voting.forms import PollForm, ChoiceFormSet\nfrom apps.ecidadania.proposals.models import Proposal\n\n\ndef add_poll(request, space_url):\n\n \"\"\"\n Create a new poll. Only registered users belonging to a concrete group\n are allowed to create polls. 
The polls are composed by a form and a choice\n formset.\n\n :parameters: space_url\n :context: get_place\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n poll_form = PollForm(request.POST or None)\n choice_form = ChoiceFormSet(request.POST or None, prefix=\"choiceform\",\n queryset=Choice.objects.none())\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space')):\n if request.method == 'POST':\n if poll_form.is_valid() and choice_form.is_valid():\n poll_form_uncommited = poll_form.save(commit=False)\n poll_form_uncommited.space = space\n poll_form_uncommited.author = request.user\n\n saved_poll = poll_form_uncommited.save()\n poll_instance = get_object_or_404(Poll,\n pk=poll_form_uncommited.pk)\n\n cform_uncommited = choice_form.save(commit=False)\n for cf in cform_uncommited:\n cf.poll = poll_instance\n cf.save()\n\n return HttpResponseRedirect(reverse(urln.SPACE_INDEX,\n kwargs={'space_url': space.url}))\n\n return render_to_response('voting/poll_form.html', {'form': poll_form,\n 'choiceform': choice_form, 'get_place': space},\n context_instance=RequestContext(request))\n\n raise PermissionDenied\n\n\nclass ViewPoll(DetailView):\n\n \"\"\"\n Display a poll. If the poll didn't start, ended, or the user already voted\n the user will be redirected to the VotePollResults view.\n\n ..versionadded:: 0.1.7\n \"\"\"\n context_object_name = 'poll'\n template_name = 'voting/poll_detail.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewPoll, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n poll = get_object_or_404(Poll, pk=self.kwargs['pk'])\n return poll\n\n def get_context_data(self, **kwargs):\n context = super(ViewPoll, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n def get(self, request, **kwargs):\n self.object = self.get_object()\n if self.request.user in self.object.participants.all() \\\n or datetime.date.today() >= self.object.end_date \\\n or datetime.date.today() < self.object.start_date:\n return HttpResponseRedirect(reverse(urln_voting.VIEW_RESULT,\n kwargs={'space_url': self.kwargs['space_url'],\n 'pk': self.kwargs['pk']}))\n else:\n context = self.get_context_data(object=self.object)\n return self.render_to_response(context)\n\n\nclass ViewPollResults(DetailView):\n\n \"\"\"\n Displays an specific poll results. The results are always available even\n after the end_date.\n\n .. versionadded:: 0.1.7 beta\n\n :context: get_place\n \"\"\"\n context_object_name = 'poll'\n template_name = 'voting/poll_results.html'\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewPollResults, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n self.poll = get_object_or_404(Poll, pk=self.kwargs['pk'])\n return self.poll\n\n def get_context_data(self, **kwargs):\n context = super(ViewPollResults, self).get_context_data(**kwargs)\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n total_votes = Choice.objects.filter(poll=self.poll)\n\n # This fuckin' shitty logic should be removed from here, maybe there's\n # a way to do this with django. 
The thing is to obtain all the votes\n # from each choice and 'sum them all!'\n v = 0\n for vote in total_votes:\n v += vote.votes.count()\n\n context['get_place'] = space\n context['votes_total'] = v\n return context\n\n\ndef edit_poll(request, space_url, poll_id):\n\n \"\"\"\n Edit a specific poll.\n\n :parameters: space_url, poll_id\n :context: form, get_place, choiceform, pollid\n \"\"\"\n place = get_object_or_404(Space, url=space_url)\n\n if (request.user.has_perm('admin_space', place) or\n request.user.has_perm('mod_space', place)):\n\n ChoiceFormSet = inlineformset_factory(Poll, Choice, extra=1)\n instance = Poll.objects.get(pk=poll_id)\n poll_form = PollForm(request.POST or None, instance=instance)\n choice_form = ChoiceFormSet(request.POST or None, instance=instance,\n prefix=\"choiceform\")\n\n if request.method == 'POST':\n if poll_form.is_valid() and choice_form.is_valid():\n poll_form_uncommited = poll_form.save(commit=False)\n poll_form_uncommited.space = place\n poll_form_uncommited.author = request.user\n\n saved_poll = poll_form_uncommited.save()\n\n choices = choice_form.save(commit=False)\n\n for form in choices:\n form.poll = instance\n form.save()\n\n return HttpResponseRedirect(reverse(urln.SPACE_INDEX,\n kwargs={'space_url': place.url}))\n\n return render_to_response('voting/poll_form.html',\n {'form': poll_form,\n 'choiceform': choice_form,\n 'get_place': place,\n 'pollid': poll_id, },\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n\nclass DeletePoll(DeleteView):\n\n \"\"\"\n Delete an existent poll. Poll deletion is only reserved to spaces\n administrators or site admins.\n \"\"\"\n context_object_name = \"get_place\"\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(DeletePoll, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return '/spaces/%s' % (space)\n\n def get_object(self):\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n return get_object_or_404(Poll, pk=self.kwargs['poll_id'])\n\n def get_context_data(self, **kwargs):\n context = super(DeletePoll, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space,\n url=self.kwargs['space_url'])\n return context\n\n\nclass ListPolls(ListView):\n \"\"\"\n Return a list of polls for the current space.\n\n :context: get_place\n \"\"\"\n paginate_by = 10\n\n def dispatch(self, request, *args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListPolls, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n key = self.kwargs['space_url']\n current_space = get_or_insert_object_in_cache(Space, key, url=key)\n polls = Poll.objects.filter(space=current_space)\n return polls\n\n def get_context_data(self, **kwargs):\n context = super(ListPolls, self).get_context_data(**kwargs)\n key = self.kwargs['space_url']\n space = get_or_insert_object_in_cache(Space, key, url=key)\n context['get_place'] = space\n return context\n\n\ndef vote_poll(request, poll_id, space_url):\n\n \"\"\"\n Vote on a choice inside the polls.\n\n .. 
versionadded:: 0.1.5\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n poll = get_object_or_404(Poll, pk=poll_id)\n try:\n choice = get_object_or_404(Choice, pk=request.POST['choice'])\n except KeyError:\n return render_to_response('voting/poll_detail.html', {\n 'poll': poll,\n 'get_place': space,\n 'error_message': \"You didn't select a choice.\",\n }, context_instance=RequestContext(request))\n\n if request.user.has_perm('view_space', space) and request.method == 'POST':\n poll.participants.add(request.user)\n choice.votes.add(request.user)\n return render_to_response('voting/poll_results.html',\n {'poll': poll, 'get_place': space, 'error_message': \"You didn't \\\n select a choice.\"}, context_instance=RequestContext(request))\n\n else:\n raise PermissionDenied\n", "id": "10518232", "language": "Python", "matching_score": 6.1765875816345215, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/views/polls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\n\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseServerError\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.views.generic.base import TemplateView, RedirectView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import FormView\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nfrom django.forms.formsets import formset_factory, BaseFormSet\nfrom django.forms.models import modelformset_factory, inlineformset_factory\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom helpers.cache import get_or_insert_object_in_cache\nfrom django.core.urlresolvers import NoReverseMatch, reverse\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.sites.models import get_current_site\nfrom django.core.exceptions import PermissionDenied\n\nfrom e_cidadania import settings\nfrom core.spaces.models import Space\nfrom apps.ecidadania.voting.models import *\nfrom apps.ecidadania.voting.forms import *\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet\n\n\nclass AddVoting(FormView):\n\n \"\"\"\n Create a new voting process. 
Only registered users belonging to a concrete\n group are allowed to create voting processes.\n\n versionadded: 0.1\n\n :parameters: space_url\n :context: get_place\n \"\"\"\n form_class = VotingForm\n template_name = 'voting/voting_form.html'\n\n def dispatch(self, request, *Args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(AddVoting, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_form_kwargs(self):\n \"\"\"\n This send the current space to the form so we can change the\n foreignkeys querysets there.\n \"\"\"\n space = get_object_or_404(Space, url=self.kwargs['space_url'])\n kwargs = super(AddVoting, self).get_form_kwargs()\n kwargs['current_space'] = space\n return kwargs\n\n def get_success_url(self):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n return '/spaces/' + self.space.url + '/'\n\n def form_valid(self, form):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n form_uncommited = form.save(commit=False)\n form_uncommited.author = self.request.user\n form_uncommited.space = self.space\n form_uncommited.save()\n form.save_m2m()\n return super(AddVoting, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(AddVoting, self).get_context_data(**kwargs)\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n context['get_place'] = self.space\n return context\n\n\nclass ViewVoting(DetailView):\n\n \"\"\"\n View a specific voting process.\n\n Proposals: Return unlinked proposals (not linked to sets)\n All_proposals\n \"\"\"\n context_object_name = 'voting'\n template_name = 'voting/voting_detail.html'\n\n def dispatch(self, request, *Args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ViewVoting, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n return Voting.objects.get(pk=self.kwargs['voting_id'])\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewVoting view.\n \"\"\"\n context = super(ViewVoting, self).get_context_data(**kwargs)\n context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_url'])\n voting = Voting.objects.get(pk=self.kwargs['voting_id'])\n all_proposals = Proposal.objects.all()\n proposalsets = voting.proposalsets.all()\n proposals = voting.proposals.all()\n context['proposalsets'] = proposalsets\n context['proposals'] = proposals\n context['all_proposals'] = all_proposals\n\n return context\n\n\nclass EditVoting(UpdateView):\n\n \"\"\"\n Edit an existent voting process.\n\n :parameters: space_url, voting_id\n :context: get_place\n \"\"\"\n model = Voting\n template_name = 'voting/voting_form.html'\n\n def dispatch(self, request, *Args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(EditVoting, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n return '/spaces/' + self.space.url\n\n def get_object(self):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n return get_object_or_404(Voting, pk=self.kwargs['voting_id'])\n\n def get_context_data(self, **kwargs):\n context = super(EditVoting, 
self).get_context_data(**kwargs)\n context['get_place'] = self.space\n return context\n\n\nclass DeleteVoting(DeleteView):\n\n \"\"\"\n Delete an existent voting process. Voting process deletion is only reserved to spaces\n administrators or site admins.\n \"\"\"\n context_object_name = \"get_place\"\n\n def dispatch(self, request, *Args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space)):\n return super(DeleteVoting, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_success_url(self):\n space = self.kwargs['space_url']\n return '/spaces/%s' % (space)\n\n def get_object(self):\n self.space = get_object_or_404(Space, url=self.kwargs['space_url'])\n return get_object_or_404(Voting, pk=self.kwargs['voting_id'])\n\n def get_context_data(self, **kwargs):\n\n \"\"\"\n Get extra context data for the ViewVoting view.\n \"\"\"\n context = super(DeleteVoting, self).get_context_data(**kwargs)\n context['get_place'] = self.space\n return context\n\n\nclass ListVotings(ListView):\n\n \"\"\"\n List all the existing votings inside the space. This is meant to be a\n tabbed view, just like the spaces list. The user can see the open and\n closed votings.\n\n .. versionadded:: 0.1.7 beta\n \"\"\"\n paginate_by = 10\n\n def dispatch(self, request, *Args, **kwargs):\n space = get_object_or_404(Space, url=kwargs['space_url'])\n\n if request.user.has_perm('view_space', space):\n return super(ListVotings, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_queryset(self):\n key = self.kwargs['space_url']\n current_space = get_or_insert_object_in_cache(Space, key, url=key)\n votings = Voting.objects.filter(space=current_space)\n return votings\n\n def get_context_data(self, **kwargs):\n context = super(ListVotings, self).get_context_data(**kwargs)\n key = self.kwargs['space_url']\n space = get_or_insert_object_in_cache(Space, key, url=key)\n context['get_place'] = space\n return context\n\n\ndef vote_voting(request, space_url):\n\n \"\"\"\n View to control the votes during a votation process. Do not confuse with\n proposals support_votes. This function creates a new ConfirmVote object\n trough VoteForm with the user and a token. After that an email is sent\n to the user with the token for validation. This function does not add the\n votes.\n\n .. versionadded:: 0.1.7\n \"\"\"\n proposal = get_object_or_404(Proposal, pk=request.POST['propid'])\n space = get_object_or_404(Space, url=space_url)\n voteform = VoteForm(request.POST)\n\n if request.user_has_perm('view_space', space):\n if request.method == 'POST' and voteform.is_valid():\n # Generate the objetct\n token = hashlib.md5(\"%s%s%s\" % (request.user, space,\n datetime.datetime.now())).hexdigest()\n voteform_uncommitted = voteform.save(commit=False)\n voteform_uncommitted.user = request.user\n voteform_uncommitted.token = token\n voteform_uncommitted.proposal = proposal\n voteform_uncommitted.save()\n\n # Send the email to the user. 
Get URL, get user mail, send mail.\n space_absolute_url = space.get_absolute_url()\n full_url = ''.join(['http://', get_current_site(request).domain,\n space_absolute_url, 'voting/vote/validate/', token])\n user_email = request.user.email\n subject = _(\"Validate your vote\")\n body = _(\"You voted recently on a process in our platform, please validate your vote following this link: %s\") % full_url\n try:\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user_email])\n except:\n return HttpResponseServerError(_(\"Couldn't send the email.\"))\n else:\n return HttpResponseBadRequest(_(\"Request is not POST.\"))\n else:\n raise PermissionDenied\n\n\ndef validate_voting(request, space_url, token):\n\n \"\"\"\n Validate the votes done in a votation process. This function checks if the\n token provided by the user is the same located in the database. If the\n token is the same, a vote is added, if not, we redirect the user to an\n error page.\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n tk = get_object_or_404(ConfirmVote, token=token)\n\n if (request.user.has_perm('admin_space', space) or\n request.user.has_perm('mod_space', space) or\n request.user == tk.user):\n try:\n prop = get_object_or_404(Proposal, pk=tk.proposal.id)\n prop.votes.add(request.user)\n return HttpResponse(\"Your vote has been validated.\")\n except:\n return HttpResponse(\"Error V01: Couldn't find the token for validation or the token has already been used.\")\n tk.delete()\n\n else:\n raise PermissionDenied\n", "id": "8903729", "language": "Python", "matching_score": 5.464940547943115, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/views/voting.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport hashlib\n\nfrom django.views.generic.detail import DetailView\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\n\nfrom e_cidadania import settings\nfrom core.spaces.models import Space, Intent\nfrom helpers.cache import get_or_insert_object_in_cache\n\n\n@login_required\ndef add_intent(request, space_url):\n\n \"\"\"\n Returns a page where the logged in user can click on a \"I want to\n participate\" button, which after sends an email to the administrator of\n the space with a link to approve the user to use the space.\n\n :attributes: space, intent, token\n :rtype: Multiple entity objects.\n :context: space_url, heading\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n admins = space.admins.all()\n mails = []\n\n for m in space.admins.all():\n mails.append(m.email)\n\n try:\n intent = Intent.objects.get(user=request.user, space=space)\n 
heading = _(\"Access has been already authorized\")\n\n except Intent.DoesNotExist:\n token = hashlib.md5(\"%s%s%s\" % (request.user, space,\n datetime.datetime.now())).hexdigest()\n intent = Intent(user=request.user, space=space, token=token)\n intent.save()\n subject = _(\"New participation request\")\n body = _(\"User {0} wants to participate in space {1}.\\n \\\n Please click on the link below to approve.\\n {2}\").format(\n request.user.username, space.name, intent.get_approve_url())\n heading = _(\"Your request is being processed.\")\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, mails)\n\n return render_to_response('space_intent.html', {'space_name': space.name,\n 'heading': heading}, context_instance=RequestContext(request))\n\n\nclass ValidateIntent(DetailView):\n\n \"\"\"\n Validate the user petition to join a space. This will add the user to the\n users list in the space, allowing him participation. This function checks\n if the user visiting the token url is admin of the space. If he or she is\n an admin proceeds with the validation.\n\n .. versionadded: 0.1.5\n \"\"\"\n context_object_name = 'get_place'\n template_name = 'spaces/validate_intent.html'\n status = _(\"The requested intent does not exist!\")\n\n def dispatch(self, reques, *args, **kwargs):\n if request.user.is_authenticated:\n return super(ValidateIntent, self).dispatch(request, *args, **kwargs)\n else:\n raise PermissionDenied\n\n def get_object(self):\n # Makes sure the space ins't already in the cache before hitting the\n # databass\n space_url = self.kwargs['space_url']\n space_object = get_or_insert_object_in_cache(Space, space_url,\n url=space_url)\n\n if has_space_permission(self.request.user, space_object,\n allow=['admins', 'mods']) \\\n or has_all_permissions(self.request.user):\n try:\n intent = Intent.objects.get(token=self.kwargs['token'])\n intent.space.users.add(intent.user)\n self.status = _(\"The user has been authorized to participate \\\n in space \\\"%s\\\".\" % space_object.name)\n messages.success(self.request, _(\"Authorization successful\"))\n\n except Intent.DoesNotExist:\n self.status = _(\"The requested intent does not exist!\")\n\n return space_object\n\n def get_context_data(self, **kwargs):\n context = super(ValidateIntent, self).get_context_data(**kwargs)\n context['status'] = self.status\n context['request_user'] = Intent.objects.get(\n token=self.kwargs['token']).user\n return context\n", "id": "1663579", "language": "Python", "matching_score": 3.3111660480499268, "max_stars_count": 40, "path": "src/core/spaces/views/intent.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.contrib import admin\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.contrib.auth.models import User\nfrom django.core.mail 
import send_mail, send_mass_mail\nfrom django.template import RequestContext\n\nfrom e_cidadania import settings\nfrom apps.ecidadania.accounts.models import UserProfile\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n\n \"\"\"\n This is a minimal view for Django administration interface. It shows the\n user and the website.\n \"\"\"\n list_display = ('user', 'firstname', 'surname', 'country', 'website')\n actions = ['mass_mail']\n\n def mass_mail(self, request, queryset):\n \"\"\"\n This function exports the selected ovjects to a new view to manipulate\n them properly.\n \"\"\"\n # selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)\n # ct = ContentType.objects.get_for_model(queryset.model)\n if 'sendmail' in request.POST:\n for obj in queryset:\n get_user = get_object_or_404(User, id=obj.id)\n send_mail(request.POST['massmail_subject'], request.POST['message'], settings.DEFAULT_FROM_EMAIL, [get_user.email])\n return HttpResponseRedirect(request.get_full_path())\n\n selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)\n ct = ContentType.objects.get_for_model(queryset.model)\n return render_to_response('mail/massmail.html', {'people': selected},\n context_instance=RequestContext(request))\n mass_mail.short_description = 'Send a global mail to the selected users'\n\nadmin.site.register(UserProfile, ProfileAdmin)\n", "id": "7649556", "language": "Python", "matching_score": 2.9861910343170166, "max_stars_count": 40, "path": "src/apps/ecidadania/accounts/admin.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib.auth.models import User\n\nfrom apps.ecidadania.accounts.models import UserProfile, Phone\n\n# This views are no longer required since they were replaced by userprofile\n\n\n@login_required\ndef view_profile(request):\n\n \"\"\"\n Return the profile of the current logged user.\n\n userdata: This variable gets the django basic user profile from\n the current logged in user.\n userprofile: Gets all the variables stored by the model UserProfile\n using the method get_profile() since it's bound to the\n user profile in the settings file.\n\n Template tags\n -------------\n user: returns any of the data stored by the django user profile.\n profile: returns any of the data stored by the UserProfile model.\n \"\"\"\n userdata = get_object_or_404(User, pk=request.user.id)\n userprofile = User.get_profile(userdata)\n\n return render_to_response('accounts/profile.html',\n {'user': userdata, 'profile': userprofile})\n", "id": "5841503", "language": "Python", "matching_score": 2.1269922256469727, "max_stars_count": 40, "path": "src/apps/ecidadania/accounts/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 
2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom django.contrib import messages\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\nfrom core.spaces.models import Space\nfrom e_cidadania import settings\n\n\ndef index_view(request):\n\n \"\"\"\n Main view for the index page. It's separated from the urls.py file\n because using direct_to_template in urls.py doesn't refresh the content\n (it's loaded only once).\n \"\"\"\n extra_context = {\n 'version': settings.__version__,\n 'status': settings.__status__,\n 'debug_mode': settings.DEBUG,\n #'cache_timeout': 500,\n }\n\n if request.user.is_anonymous():\n messages.warning(request, _(\"Hi! It seems that it's your first time \\\n here. Maybe you want to <a href=\\\"/accounts/register\\\">register</a> \\\n or <a href=\\\"/accounts/login/\\\">login</a> if you have an account.\"))\n\n return render_to_response('site_index.html', extra_context,\n context_instance=RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('profile_overview'))\n", "id": "11556792", "language": "Python", "matching_score": 2.4632303714752197, "max_stars_count": 40, "path": "src/core/views/index.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom core.spaces.models import Space\nfrom apps.ecidadania.news.models import Post\n\n\ndef explore(request):\n\n \"\"\"\n This view provides a list of all the recent activity happening in the\n platform like new spaces, latest news on public spaces, etc.\n\n .. 
versionadded:: 0.1.8\n \"\"\"\n spaces = Space.objects.all().filter(public=True)\n recent_spaces = Space.objects.all().order_by('-date')[:5]\n news = Post.objects.filter(space__public=True).order_by('-pub_date')\n\n extra_context = {\n 'recent_spaces': recent_spaces,\n 'spaces': spaces,\n 'news': news,\n }\n\n return render_to_response('explore.html', extra_context,\n context_instance=RequestContext(request))\n", "id": "7453205", "language": "Python", "matching_score": 0.6388655304908752, "max_stars_count": 40, "path": "src/core/views/explore.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.utils.safestring import mark_safe\nfrom django.template import RequestContext\nfrom django.utils import translation\nfrom django.core.exceptions import PermissionDenied\n\nfrom core.spaces.models import Event, Space\nfrom apps.ecidadania.cal.models import EventCalendar\nfrom e_cidadania import settings\n\n\ndef calendar(request, space_url, year, month):\n\n \"\"\"\n Returns an localized event calendar with all the Meeting objects.\n\n :Context: calendar, nextmonth, prevmonth, get_place\n :Returns: Localized HTML Calendar\n \"\"\"\n space = get_object_or_404(Space, url=space_url)\n\n if request.user.has_perm('view_space', space):\n # Avoid people writing wrong numbers or any program errors.\n if int(month) not in range(1, 13):\n return render_to_response('cal/error.html',\n context_instance=RequestContext(request))\n\n place = get_object_or_404(Space, url=space_url)\n events = Event.objects.order_by('event_date').filter(space=place,\n event_date__year=year, event_date__month=month)\n\n cur_year, cur_month = int(year), int(month)\n next_month = cur_month + 1\n prev_month = cur_month - 1\n\n cur_lang = translation.get_language()\n cur_locale = translation.to_locale(cur_lang) + '.UTF-8' # default encoding with django\n cal = EventCalendar(events, settings.FIRST_WEEK_DAY).formatmonth(cur_year, cur_month)\n\n # This code is quite strange, it worked like a charm, but one day it returned\n # a \"too many values to unpack\" error, and then just by removing the locale\n # declaration it worked, but the best thing is... 
it still translates the calendar!\n # For gods sake someone explain me this black magic.\n\n # cal = EventCalendar(meetings, settings.FIRST_WEEK_DAY, cur_locale).formatmonth(cur_year, cur_month)\n\n return render_to_response('cal/calendar.html',\n {'calendar': mark_safe(cal),\n 'nextmonth': next_month,\n 'prevmonth': prev_month,\n 'get_place': place},\n context_instance=RequestContext(request))\n else:\n raise PermissionDenied\n", "id": "1673223", "language": "Python", "matching_score": 2.5358188152313232, "max_stars_count": 40, "path": "src/apps/ecidadania/cal/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe calendar module calls a version of Python HTML Calendar and adds some\nfunctions to use django objects with it.\n\nThe source code is based on the work of <NAME> <<EMAIL>>\n\"\"\"\n\nfrom calendar import LocaleHTMLCalendar\nfrom datetime import date\nfrom itertools import groupby\n\nfrom django.utils.html import conditional_escape as esc\n\n\nclass EventCalendar(LocaleHTMLCalendar):\n\n \"\"\"\n Event calendar is a basic calendar made with HTMLCalendar module and\n its instance LocaleHTMLCalendar for translation.\n\n :Attributes: LocaleHTMLCalendar\n :Methods: formatday, formatmonth, group_by_day, day_cell\n \"\"\"\n # This init is needed for multilanguage, see ticket #86\n\n def __init__(self, events, *args, **kwargs):\n self.events = self.group_by_day(events)\n super(EventCalendar, self).__init__(*args, **kwargs)\n\n# def __init__(self, events):\n# super(EventCalendar, self).__init__()\n# self.events = self.group_by_day(events)\n\n def formatday(self, day, weekday):\n\n \"\"\"\n Format the day cell with the current events for the day.\n \"\"\"\n if day != 0:\n cssclass = self.cssclasses[weekday]\n if date.today() == date(self.year, self.month, day):\n cssclass += ' today'\n if day in self.events:\n cssclass += ' filled'\n body = ['<ul>']\n for event in self.events[day]:\n body.append('<li>')\n body.append('<a href=\"%s\">' % event.get_absolute_url())\n body.append(esc(event.title))\n body.append('</a></li>')\n body.append('<ul>')\n return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))\n return self.day_cell(cssclass, day)\n return self.day_cell('noday', '&nbsp;')\n\n def formatmonth(self, year, month):\n\n \"\"\"\n Format the current month wuth the events.\n \"\"\"\n # WTF is this!?\n self.year, self.month = year, month\n return super(EventCalendar, self).formatmonth(self.year, self.month)\n\n def group_by_day(self, events):\n\n \"\"\"\n Group the returned events into their respective dates.\n \"\"\"\n field = lambda event: event.event_date.day\n return dict(\n [(day, list(items)) for day, items in groupby(events, field)]\n )\n\n def day_cell(self, cssclass, body):\n\n \"\"\"\n Create the day cell.\n \"\"\"\n return '<td class=\"%s\">%s</td>' % (cssclass, body)\n", "id": "114844", "language": "Python", "matching_score": 
0.6401717662811279, "max_stars_count": 40, "path": "src/apps/ecidadania/cal/models.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render_to_response, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.models import User, Group\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom apps.ecidadania.staticpages.models import StaticPage\n\n\n@permission_required('staticpages.add_staticpage')\ndef add_page(request, slug):\n\n \"\"\"\n This function goes into the administration\n \"\"\"\n pass\n\n\nclass ViewPage(DetailView):\n\n \"\"\"\n Get the request page and view it. There are no view restrictions on views.\n \"\"\"\n context_object_name = 'staticpage'\n template_name = 'staticpages/staticpages_index.html'\n\n def get_object(self):\n self.page = get_object_or_404(StaticPage, uri=self.kwargs['slug'])\n return self.page\n\n\nclass EditPage(UpdateView):\n\n \"\"\"\n \"\"\"\n model = StaticPage\n template_name = 'staticpages/staticpages_edit.html'\n success_url = '/'\n\n def get_object(self):\n self.page = get_object_or_404(StaticPage, uri=self.kwargs['slug'])\n return self.page\n\n# def get_context_data(self, **kwargs):\n# context = super(EditPage, self).get_context_data(**kwargs)\n# context['get_place'] = get_object_or_404(Space, url=self.kwargs['space_name'])\n# return context\n\n @method_decorator(permission_required('staticpages.change_staticpage'))\n def dispatch(self, *args, **kwargs):\n return super(EditPage, self).dispatch(*args, **kwargs)\n\n\nclass DeletePage(DeleteView):\n\n \"\"\"\n \"\"\"\n sucess_url = '/'\n\n def get_object(self):\n return get_object_or_404(StaticPage, uri=self.kwargs['slug'])\n\n @method_decorator(permission_required('staticpages.delete_staticpage'))\n def dispatch(self, *args, **kwargs):\n return super(DeletePage, self).dispatch(*args, **kwargs)\n", "id": "1710503", "language": "Python", "matching_score": 3.075716018676758, "max_stars_count": 40, "path": "src/apps/ecidadania/staticpages/views.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.conf.urls import *\n\nfrom apps.ecidadania.staticpages.views import ViewPage, EditPage, DeletePage\n\nurlpatterns = patterns('apps.ecidadania.staticpages.views',\n\n url(r'^$', ViewPage.as_view(), name='view-page'),\n\n url(r'^edit/', EditPage.as_view(), name='edit-page'),\n\n url(r'^delete/', DeletePage.as_view(), name='delete-page'),\n\n url(r'^add/', 'add_page', name='add-page')\n)\n", "id": "9834300", "language": "Python", "matching_score": 0.19724661111831665, "max_stars_count": 40, "path": "src/apps/ecidadania/staticpages/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nfrom django.db.models.fields.files import ImageField\nfrom django.db.models import signals\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\ntry:\n from south.modelsinspector import add_introspection_rules\n # Add some simple introspection for the StdImageField\n add_introspection_rules([], [\"^core\\.spaces\\.fields\\.StdImageField\"])\nexcept ImportError:\n sys.exit(\"South is not available! Please install south to make this work.\")\n\n\nclass ThumbnailField:\n \"\"\"Instances of this class will be used to access data of the\n generated thumbnails\"\"\"\n def __init__(self, name):\n self.name = name\n self.storage = FileSystemStorage()\n\n def path(self):\n return self.storage.path(self.name)\n\n def url(self):\n return self.storage.url(self.name)\n\n def size(self):\n return self.storage.size(self.name)\n\n\nclass StdImageField(ImageField):\n \"\"\"Django field that behaves as ImageField, with some extra features like:\n - Auto resizing\n - Automatically generate thumbnails\n \"\"\"\n def __init__(self, verbose_name=None, name=None, width_field=None,\n height_field=None, size=None, thumbnail_size=None, **kwargs):\n \"\"\"Added fields:\n - size: a tuple containing width and height to resize image, and\n an optional boolean setting if is wanted forcing that size\n (None for not resizing).\n - thumbnail_size: a tuple with same values than `size' (None for\n not creating a thumbnail\n Example: (640, 480, True) -> Will resize image to a width of 640px and\n a height of 480px. 
File will be cutted if necessary for forcing\n the image to have the desired size\n \"\"\"\n params_size = ('width', 'height', 'force')\n extra_args = dict(size=size, thumbnail_size=thumbnail_size)\n for att_name, att in extra_args.items():\n if att and (isinstance(att, tuple) or isinstance(att, list)):\n setattr(self, att_name, dict(map(None, params_size, att)))\n else:\n setattr(self, att_name, None)\n super(StdImageField, self).__init__(verbose_name, name, width_field,\n height_field, **kwargs)\n\n def contribute_to_class(self, cls, name):\n \"\"\"Call methods for generating all operations on specified signals\n \"\"\"\n super(StdImageField, self).contribute_to_class(cls, name)\n signals.post_save.connect(self._rename_resize_image, sender=cls)\n signals.post_init.connect(self._set_thumbnail, sender=cls)\n\n def _get_thumbnail_filename(self, filename):\n \"\"\"Returns the thumbnail name associated to the standard image filename\n\n Example: /var/www/myproject/media/img/picture_1.jpeg will return\n /var/www/myproject/media/img/picture_1.thumbnail.jpeg\n \"\"\"\n splitted_filename = list(os.path.splitext(filename))\n splitted_filename.insert(1, '.thumbnail')\n return ''.join(splitted_filename)\n\n def _resize_image(self, filename, size):\n \"\"\"Resizes the image to specified width, height and force option\n - filename: full path of image to resize\n - size: dictionary containing:\n - width: new width\n - height: new height\n - force: if True, image will be cropped to fit the exact size,\n if False, it will have the bigger size that fits the\n specified size, but without cropping, so it could be\n smaller on width or height\n \"\"\"\n WIDTH, HEIGHT = 0, 1\n from PIL import Image, ImageOps\n img = Image.open(filename)\n if img.size[WIDTH] > size['width'] or img.size[HEIGHT] > size['height']:\n if size['force']:\n img = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)\n else:\n img.thumbnail((size['width'], size['height']), Image.ANTIALIAS)\n try:\n img.save(filename, optimize=1)\n except IOError:\n img.save(filename)\n\n def _rename_resize_image(self, instance=None, **kwargs):\n \"\"\"Renames the image, and calls methods to resize and create the\n thumbnail\n \"\"\"\n if getattr(instance, self.name):\n filename = getattr(instance, self.name).path\n ext = os.path.splitext(filename)[1].lower().replace('jpg', 'jpeg')\n dst = self.generate_filename(instance, '%s_%s%s' % (\n self.name,\n instance._get_pk_val(),\n ext))\n dst_fullpath = os.path.join(settings.MEDIA_ROOT, dst)\n normpath = lambda x: os.path.normpath(os.path.abspath(x))\n if normpath(filename) != normpath(dst_fullpath):\n os.rename(filename, dst_fullpath)\n if self.size:\n self._resize_image(dst_fullpath, self.size)\n if self.thumbnail_size:\n thumbnail_filename = self._get_thumbnail_filename(dst_fullpath)\n shutil.copyfile(dst_fullpath, thumbnail_filename)\n self._resize_image(thumbnail_filename, self.thumbnail_size)\n setattr(instance, self.attname, dst)\n instance.save()\n\n def _set_thumbnail(self, instance=None, **kwargs):\n \"\"\"Creates a \"thumbnail\" object as attribute of the ImageField instance\n Thumbnail attribute will be of the same class of original image, so\n \"path\", \"url\"... 
properties can be used\n \"\"\"\n if getattr(instance, self.name):\n filename = self.generate_filename(\n instance,\n os.path.basename(\n getattr(instance, self.name).path))\n thumbnail_filename = self._get_thumbnail_filename(filename)\n thumbnail_field = ThumbnailField(thumbnail_filename)\n setattr(getattr(instance, self.name), 'thumbnail', thumbnail_field)\n", "id": "6448659", "language": "Python", "matching_score": 1.9196234941482544, "max_stars_count": 40, "path": "src/core/spaces/fields.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.db.models import FileField\nfrom django.forms import forms\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ContentTypeRestrictedFileField(FileField):\n \"\"\"\n Same as FileField, but you can specify:\n * content_types - list containing allowed content_types. Example: ['application/pdf', 'image/jpeg']\n * max_upload_size - a number indicating the maximum file size allowed for upload.\n 2.5MB - 2621440\n 5MB - 5242880\n 10MB - 10485760\n 20MB - 20971520\n 50MB - 5242880\n 100MB 104857600\n 250MB - 214958080\n 500MB - 429916160\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.content_types = kwargs.pop(\"content_types\")\n self.max_upload_size = kwargs.pop(\"max_upload_size\")\n\n super(ContentTypeRestrictedFileField, self).__init__(*args, **kwargs)\n\n def clean(self, *args, **kwargs):\n data = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)\n\n file = data.file\n try:\n content_type = file.content_type\n if content_type in self.content_types:\n if file._size > self.max_upload_size:\n raise forms.ValidationError(_('Please keep filesize under %s.') % (filesizeformat(self.max_upload_size), filesizeformat(file._size)))\n else:\n raise forms.ValidationError(_('Filetype not supported.'))\n except AttributeError:\n pass\n\n return data\n\ntry:\n from south.modelsinspector import add_introspection_rules\nexcept:\n pass\nelse:\n rules = [\n (\n (ContentTypeRestrictedFileField,), [],\n {\n \"content_types\": [\"content_types\", {\"default\": None}],\n \"max_upload_size\": [\"max_upload_size\", {\"default\": None}],\n }\n ),\n ]\n # Modify this string, there should be the path to this class\n add_introspection_rules(rules, [\"^core\\.spaces\\.file_validation\\.ContentTypeRestrictedFileField\"])\n", "id": "5563501", "language": "Python", "matching_score": 1.4109997749328613, "max_stars_count": 40, "path": "src/core/spaces/file_validation.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed 
to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains all the space related forms, including the forms for\ndocuments, meetings and entities. Most of the forms are directly generated\nfrom the data models.\n\"\"\"\n\nfrom django.forms import ModelForm, ValidationError, Select\nfrom django.forms.models import modelformset_factory\n\nfrom core.spaces.models import Space, Document, Event, Entity\n\n\nclass SpaceForm(ModelForm):\n\n \"\"\"\n Returns a form to create or edit a space. SpaceForm inherits all the fields\n from the :class:`Space` data model.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1\n \"\"\"\n class Meta:\n model = Space\n\n def clean_logo(self):\n valid_image_extensions = ['jpg', 'jpeg', 'png', 'gif']\n logo_file = self.cleaned_data['logo']\n for extension in valid_image_extensions:\n if logo_file.name.endswith(''.join(['.', extension])):\n return logo_file\n\n raise ValidationError(\"Invalid file extension\")\n\n def clean_banner(self):\n valid_image_extensions = ['jpg', 'jpeg', 'png', 'gif']\n banner_file = self.cleaned_data['banner']\n for extension in valid_image_extensions:\n if banner_file.name.endswith(''.join(['.', extension])):\n return banner_file\n\n raise ValidationError(\"Invalid file extension\")\n\n# Create a formset for entities. This formset can be attached to any other form\n# but will be usually attached to SpaceForm\nEntityFormSet = modelformset_factory(Entity, extra=3)\n\n\nclass DocForm(ModelForm):\n\n \"\"\"\n Returns a form to create or edit a space related document, based on the\n spaces.Document data model.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1\n \"\"\"\n class Meta:\n model = Document\n\n\nclass RoleForm(ModelForm):\n\n \"\"\"\n Returns a form to edit the administrators, moderators and users of the space.\n This is the way that e-cidadania uses to filter content and access.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1.5\n \"\"\"\n class Meta:\n model = Space\n exclude = ('name', 'url', 'date', 'description', 'date', 'logo', 'banner',\n 'author', 'mod_debate', 'mod_proposals', 'mod_news', 'mod_cal',\n 'mod_docs', 'mod_voting', 'public')\n\n\nclass EventForm(ModelForm):\n\n \"\"\"\n Returns a form to create or edit a space related meeting, based on the\n spaces.Meeting data model.\n\n :rtype: HTML Form\n\n .. 
versionadded:: 0.1\n \"\"\"\n class Meta:\n model = Event\n", "id": "2070324", "language": "Python", "matching_score": 3.0893313884735107, "max_stars_count": 40, "path": "src/core/spaces/forms.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file contains all the forms for the debate modules.\n\"\"\"\n\nfrom django.forms import ModelForm, Textarea, TextInput\nfrom django.forms.models import modelformset_factory\n\nfrom apps.ecidadania.debate.models import Debate, Note, Row, Column\n\n\nclass DebateForm(ModelForm):\n\n \"\"\"\n Returns an empty form for creating a new Debate.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1b\n \"\"\"\n class Meta:\n model = Debate\n widgets = {\n 'title': TextInput(attrs={'class': 'medium'}),\n }\n\nRowForm = modelformset_factory(Row, exclude=('debate'))\nColumnForm = modelformset_factory(Column, exclude=('debate'))\n\n\nclass NoteForm(ModelForm):\n\n \"\"\"\n Returns an HTML Form to create or edit a new 'note' or 'proposal' like it's\n called on the sociologists argot.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1b\n \"\"\"\n class Meta:\n model = Note\n\n\nclass UpdateNoteForm(ModelForm):\n\n \"\"\"\n Returns a more simple version of the NoteForm for the AJAX interaction,\n preventing modification of significative fields non relevant to AJAX.\n\n :rtype: HTML Form\n .. versionadded:: 0.1b\n \"\"\"\n class Meta:\n model = Note\n exclude = ('debate', 'author', 'row', 'column', 'date')\n\n\nclass UpdateNotePosition(ModelForm):\n\n \"\"\"\n This is a partial form to save only the position updates of the notes in the\n debates. This form excludes all the fields except Column and Row just for\n security, this wau the original data of the note cannot be modified. Moving\n notes does not count as modification, so we also exclude last modification data.\n\n :rtype: HTML Form\n .. 
versionadded:: 0.1.5\n \"\"\"\n class Meta:\n model = Note\n exclude = ('author', 'debate', 'last_mod', 'last_mod_author', 'date',\n 'message', 'title')\n", "id": "7933659", "language": "Python", "matching_score": 2.6171610355377197, "max_stars_count": 40, "path": "src/apps/ecidadania/debate/forms.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProposal forms.\n\"\"\"\n\n\nfrom django.forms import ModelForm\nfrom django import forms\nfrom django.core import validators\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\n\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet, ProposalField\nfrom apps.ecidadania.debate.models import Debate\nfrom core.spaces.models import Space\n\nimport datetime\n\n\nclass ProposalSetForm(ModelForm):\n\n \"\"\"\n ProposalSetForm is a basic form autogenerated form from ProposalSet model.\n Returns an empty form to create a new proposal set. The debate field is\n filtered based on the closed debates and the current space.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1.5b\n \"\"\"\n class Meta:\n model = ProposalSet\n\n def __init__(self, *args, **kwargs):\n super(ProposalSetForm, self).__init__(*args, **kwargs)\n get_place = get_object_or_404(Space, url=kwargs['initial']['space'])\n if self.instance:\n self.fields['debate'].queryset = Debate.objects.filter(end_date__lte=datetime.date.today(), space=get_place)\n\n\nclass ProposalForm(ModelForm):\n\n \"\"\"\n ProposalForm is a basic form autogenerated form for Proposal model.\n Returns an empty form for creating a new proposal.\n\n :rtype: HTML Form\n\n .. versionadded:: 0.1.5b\n \"\"\"\n class Meta:\n model = Proposal\n exclude = ('contenttype', 'object_pk', 'content_object')\n\n\nclass ProposalFormInSet(ModelForm):\n\n \"\"\"\n \"\"\"\n class Meta:\n model = Proposal\n exclude = ('contenttype', 'object_pk', 'content_object')\n\n\nclass ProposalMergeForm(ModelForm):\n\n \"\"\"\n Returns a proposal form to create merged proposal. This form has a merged proposals \\\n field which is initialized with list the proposals of a particular proposal set.\n\n :rtype: HTML Form\n\n .. 
versionadded:: 0.1.5b\n \"\"\"\n\n class Meta:\n model = Proposal\n exclude = ('contenttype', 'object_pk', 'content_object')\n\n def __init__(self, *args, **kwargs):\n if len(kwargs) > 0:\n p_set = kwargs['initial']['set_id']\n print \"form - set id: \" + p_set\n super(ProposalMergeForm, self).__init__(*args, **kwargs)\n if self.instance:\n # self.fields['merged_proposals'].widget = forms.SelectMultiple()\n print Proposal.objects.filter(proposalset=p_set)\n self.fields['merged_proposals'].queryset = Proposal.objects.filter(proposalset=p_set)\n\n\nclass VoteProposal(ModelForm):\n\n \"\"\"\n aswd\n \"\"\"\n class Meta:\n model = Proposal\n exclude = ('code', 'title', 'description', 'space', 'author', 'tags',\n 'latitude', 'longitude', 'closed', 'closed_by',\n 'close_reason', 'anon_allowed', 'refurbished', 'budget',\n 'pub_date', 'mod_date', 'content_type', 'object_pk',\n 'content_object')\n\n\nclass ProposalFieldForm(ModelForm):\n\n \"\"\"\n ProposalFieldForm is a basic form autogenerated from ProposalField model. Returns a form \\\n with two dropdown box which list the proposal set and optional fields.\n\n rtype: HTML Form\n\n .. versionadded:: 0.1.5b\n \"\"\"\n\n class Meta:\n model = ProposalField\n\n def clean(self):\n cleaned_data = super(ProposalFieldForm, self).clean()\n field_set = []\n p_set = cleaned_data.get(\"proposalset\")\n f_name = cleaned_data.get(\"field_name\")\n proposal_sets = ProposalField.objects.filter(proposalset=p_set)\n for p in proposal_sets:\n field_set.append(p.field_name)\n if f_name in field_set:\n raise forms.ValidationError(\"This Field has be already added\")\n else:\n return cleaned_data\n\n\nclass ProposalFieldDeleteForm(ModelForm):\n\n \"\"\"\n ProposalFieldDeleteForm is a basic form autogenerated from ProposalField model. Returns a form \\\n with two dropdown boxs which list the proposal set and optional fields.\n\n rtype: HTML Form\n\n .. versionadded:: 0.1.5b\n \"\"\"\n\n class Meta:\n model = ProposalField\n\n\nclass ProposalSetSelectForm(ModelForm):\n\n \"\"\"\n ProposalSetSelectForm is a basic form autogenerated from ProposalField model. Return a form \\\n which list the proposal sets.\n\n rtype: HTML Form\n\n .. versionadded:: 0.1.5b\n \"\"\"\n\n class Meta:\n model = ProposalField\n exclude = ('field_name')\n", "id": "12348769", "language": "Python", "matching_score": 3.097792387008667, "max_stars_count": 40, "path": "src/apps/ecidadania/proposals/forms.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.forms import ModelForm\nfrom django.forms.models import modelformset_factory\n\nfrom apps.ecidadania.voting.models import *\nfrom apps.ecidadania.proposals.models import Proposal, ProposalSet\n\n\nclass PollForm(ModelForm):\n \"\"\"\n \"\"\"\n class Meta:\n model = Poll\n\n# Create a formset for choices. 
This formset can be attached to any other form\n# but will be usually attached to PollForm\n\nChoiceFormSet = modelformset_factory(Choice, exclude=('poll'), extra=5)\n\n\nclass VotingForm(ModelForm):\n\n \"\"\"\n \"\"\"\n class Meta:\n model = Voting\n\n # This override of the init method allows us to filter the list of\n # elements in proposalsets and proposals\n def __init__(self, current_space, **kwargs):\n super(VotingForm, self).__init__(**kwargs)\n self.fields['proposalsets'].queryset = ProposalSet.objects.filter(\n space=current_space)\n self.fields['proposals'].queryset = Proposal.objects.filter(\n space=current_space)\n\n\nclass VoteForm(ModelForm):\n\n \"\"\"\n \"\"\"\n class Meta:\n model = ConfirmVote\n", "id": "12826081", "language": "Python", "matching_score": 1.679051160812378, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/forms.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.conf.urls import *\nfrom django.conf import settings\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom apps.ecidadania.voting.views.polls import ViewPoll, DeletePoll, \\\n ListPolls, ViewPollResults\nfrom apps.ecidadania.voting.views.voting import ViewVoting, ListVotings, \\\n AddVoting, EditVoting, DeleteVoting\nfrom apps.ecidadania.voting.url_names import *\n\n\nurlpatterns = patterns('apps.ecidadania.voting.views',\n\n url(r'^$', ListVotings.as_view(), name=LIST_VOTING),\n\n url(r'^poll/$', ListPolls.as_view(), name=LIST_POLL),\n\n url(r'^add/$', AddVoting.as_view(), name=ADD_VOTING),\n\n url(r'^add/poll/$', 'polls.add_poll', name=ADD_POLL),\n\n url(r'^poll/(?P<poll_id>\\d+)/edit/$', 'polls.edit_poll', name=EDIT_POLL),\n\n url(r'^(?P<voting_id>\\d+)/edit/$', EditVoting.as_view(),\n name=EDIT_VOTING),\n\n url(r'^poll/(?P<poll_id>\\d+)/delete/$', DeletePoll.as_view(),\n name=DELETE_POLL),\n\n url(r'^(?P<voting_id>\\d+)/delete/$', DeleteVoting.as_view(),\n name=DELETE_VOTING),\n\n url(r'^poll/(?P<pk>\\d+)/$', ViewPoll.as_view(), name=VIEW_POLL),\n\n url(r'^poll/(?P<pk>\\d+)/results/$', ViewPollResults.as_view(),\n name=VIEW_RESULT),\n\n url(r'^(?P<voting_id>\\d+)/$', ViewVoting.as_view(), name=VIEW_VOTING),\n\n url(r'^vote/poll/(?P<poll_id>\\d+)/$', 'polls.vote_poll', name=VOTE_POLL),\n\n url(r'^vote/voting/$', 'voting.vote_voting', name=VOTE_VOTING),\n\n url(r'^vote/validate/(?P<token>\\w+)/$', 'voting.validate_voting',\n name=VALIDATE_VOTE),\n)\n", "id": "7961627", "language": "Python", "matching_score": 1.5694345235824585, "max_stars_count": 40, "path": "src/apps/ecidadania/voting/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You 
may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMain URLs for the e-cidadania platform.\n\"\"\"\n\nfrom django.conf.urls import *\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nadmin.autodiscover()\n\n# We put here the dictionary with all the packages for translatin JavaScript code\n# Please refer to https://docs.djangoproject.com/en/dev/topics/i18n/internationalization/#specifying-translation-strings-in-javascript-code\njs_info_dict = {\n 'packages': ('apps.ecidadania.debate',),\n}\n\nurlpatterns = patterns('',\n # i18n switcher\n (r'^i18n/', include('django.conf.urls.i18n')),\n)\n\nurlpatterns += patterns('',\n\n # Django administration\n (r'^admin/', include(admin.site.urls)),\n\n # Index\n url(r'^$', 'core.views.index.index_view', name='site-index'),\n\n # User accounts\n url(r'^accounts/', include('apps.thirdparty.userprofile.urls')),\n\n # REST API\n url(r'^api/', include('apps.ecidadania.api.urls')),\n\n # Spaces\n url(r'^spaces/', include('core.spaces.urls')),\n\n # Invitations\n url(r'^invite/', 'core.views.invite.invite', name='invite'),\n\n # Explore\n url(r'^explore/$', 'core.views.explore.explore', name='explore'),\n\n # This urls is for the django comments system\n url(r'^comments/', include('django.contrib.comments.urls')),\n\n (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),\n\n # For smart_selects app\n url(r'^chaining/', include('apps.thirdparty.smart_selects.urls')),\n\n # This url is for the access to static pages. I hope this doesn't collide\n # with the index view\n url(r'^(?P<slug>[\\w\\-]+)/', include('apps.ecidadania.staticpages.urls')),\n\n)\n\nif settings.DEBUG:\n # Serve static files\n urlpatterns += staticfiles_urlpatterns()\n # Serve uploaded files\n urlpatterns += patterns('',\n url(r'^uploads/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n )\n", "id": "335037", "language": "Python", "matching_score": 3.379225969314575, "max_stars_count": 40, "path": "src/e_cidadania/urls.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDefault settings for the e-cidadania project. This settings can be overriden by\nthe development and production files. 
They also can add new settings to this file.\n\nPlase refer to the 'configuration' section of the documentation for guidance.\n\"\"\"\n\nimport os\n\n# e-cidadania version and current status\n__version__ = \"0.1.9\"\n__status__ = \"beta2\"\n\n# Get the current working directory so we can fill automatically other variables.\ncwd = os.path.dirname(os.path.realpath(__file__)).strip('settings')\n#print \"Current working dir: %s\" % cwd\n\n# Extending the user profile a bit more\nAUTH_PROFILE_MODULE = \"accounts.UserProfile\"\nACCOUNT_ACTIVATION_DAYS = 2\nLOGIN_REDIRECT_URL = '/accounts/'\nLOGIN_URL = '/accounts/'\nANONYMOUS_USER_ID = -1\nGUARDIAN_RENDER_403 = True\n\n# Languages for the platform.\nLANGUAGES = (\n ('es_ES', 'Español'),\n ('en_GB', 'English'),\n ('gl_ES', 'Galego'),\n ('fr_FR', 'Français'),\n ('mk_MK', 'Makedonski'),\n ('pt_BR', 'Português'),\n ('hi_IN', 'Hindi'),\n)\n\nLOCALE_PATHS = (\n cwd + '/templates/locale',\n)\n\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\n\n# Calendar\nFIRST_WEEK_DAY = 0 # '0' for Monday, '6' for Sunday\n\n# Configuration related to media and static content directories\nMEDIA_ROOT = cwd + '/uploads/'\n# print \"Media root: %s\" % MEDIA_ROOT\nMEDIA_URL = '/uploads/'\nSTATIC_ROOT = cwd + '/static/'\n# print \"Static root: %s\" % STATIC_ROOT\nSTATIC_URL = '/static/'\nADMIN_MEDIA_PREFIX = STATIC_URL\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n #'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSTATICFILES_DIRS = (\n (cwd + '/static_files/'),\n)\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '<KEY>'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n #'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n)\n\nMIDDLEWARE_CLASSES = (\n # GZipMiddleware compresses content for modern browsers\n 'django.middleware.gzip.GZipMiddleware',\n # ConditionalGetMiddleware adds support for modern browsers to conditionaly\n # GET responses\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n #'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend', # this is default\n 'guardian.backends.ObjectPermissionBackend',\n)\n\nROOT_URLCONF = 'e_cidadania.urls'\nAPPEND_SLASH = True\n\nTEMPLATE_DIRS = (\n (cwd + '/templates'),\n)\n\n# We separate the applications so we can manage them through scripts\n# Please do not touch this unless you know very well what you're doing\n\nDJANGO_APPS = (\n # This list 
is from the builtin applications in django that are used in\n # e-cidadania\n 'core.prismriver',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.comments',\n 'django.contrib.admin',\n 'django.contrib.comments',\n)\n\n# Stablish message storage\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\n\nTHIRDPARTY_APPS = (\n # This list is from the third party software included in e-cidadania or\n # system-wide dependencies.\n 'apps.thirdparty.smart_selects',\n 'apps.thirdparty.userprofile',\n 'apps.thirdparty.tagging',\n 'guardian',\n 'south',\n 'rest_framework',\n)\n\nECIDADANIA_MODULES = (\n # Modules created for e-cidadania and installed by default. You can add\n # here your own modules\n 'core.spaces',\n 'apps.ecidadania.accounts',\n 'apps.ecidadania.proposals',\n 'apps.ecidadania.news',\n 'apps.ecidadania.debate',\n 'apps.ecidadania.staticpages',\n 'apps.ecidadania.cal',\n 'extras.custom_stuff',\n 'apps.ecidadania.voting',\n 'apps.ecidadania.api',\n 'apps.ecidadania.reports',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Combine all the apps in the django variable INSTALLED_APPS\nINSTALLED_APPS = DJANGO_APPS + THIRDPARTY_APPS + ECIDADANIA_MODULES\n\n# Activate the new url syntax in django 1.3 which will be\n# compatible till 1.5\n# import django.template\n# django.template.add_to_builtins('django.templatetags.future')\n\nREST_FRAMEWORK = {\n # Use hyperlinked styles by default.\n # Only used if the `serializer_class` attribute is not set on a view.\n 'DEFAULT_MODEL_SERIALIZER_CLASS':\n 'rest_framework.serializers.HyperlinkedModelSerializer',\n\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ],\n\n 'PAGINATE_BY': 10,\n}\n", "id": "7293047", "language": "Python", "matching_score": 3.509251832962036, "max_stars_count": 40, "path": "src/e_cidadania/settings/defaults.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom e_cidadania.settings.defaults import *\n\n# 
Registration mail settings\n# EMAIL_HOST = \"\"\n# EMAIL_PORT=\n# EMAIL_HOST_USER=\"\"\n# EMAIL_HOST_PASSWORD=\"\"\nDEFAULT_FROM_EMAIL = \"\"\n# EMAIL_USE_TLS = True\n\n# Time and zone configuration\nTIME_ZONE = 'Europe/Madrid'\nLANGUAGE_CODE = 'es-es'\n\n# Cache backend.\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake'\n }\n}\n\n# Who will we alert?\nADMINS = (\n ('YourAdmin', '<EMAIL>'),\n)\nMANAGERS = ADMINS\n\n# Change this to your working domain! If this variable is empty, django\n# will return an error 500\n#ALLOWED_HOSTS = ['*'] # This allows any host. INSECURE!\nALLOWED_HOSTS = []\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '<KEY>'\n\n# Database configuration. Default: sqlite3\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'e_cidadania/db/development.db',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n", "id": "2299454", "language": "Python", "matching_score": 0.6337204575538635, "max_stars_count": 40, "path": "src/e_cidadania/settings/production.py" }, { "content": "from django.contrib import admin\nfrom apps.thirdparty.userprofile.models import EmailValidation, Avatar\n\n\nclass EmailValidationAdmin(admin.ModelAdmin):\n list_display = ('__unicode__',)\n search_fields = ('user__username', 'user__first_name')\n\nadmin.site.register(Avatar)\nadmin.site.register(EmailValidation, EmailValidationAdmin)\n", "id": "6195112", "language": "Python", "matching_score": 0.861335277557373, "max_stars_count": 40, "path": "src/apps/thirdparty/userprofile/admin.py" }, { "content": "from django.contrib.sites.models import Site\n\n\ndef site(request):\n \"\"\"\n Adds site-related context variables to the context.\n \"\"\"\n current_site = Site.objects.get_current()\n\n return {\n 'SITE_NAME': current_site.name,\n 'SITE_DOMAIN': current_site.domain,\n 'SITE_URL': \"http://www.%s\" % (current_site.domain),\n }\n", "id": "11150516", "language": "Python", "matching_score": 0.24657891690731049, "max_stars_count": 40, "path": "src/apps/thirdparty/userprofile/context_processors.py" }, { "content": "from django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import render_to_response\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom core.prismriver.settings import CUSTOM_MENU, DEFAULT_LABELS\n\n\ndef load_apps(request):\n current_url = request.path.replace(reverse('admin:index'), \"\")\n app_dict = {}\n enabled = False\n for model, model_admin in admin.site._registry.items():\n app_label = model._meta.app_label\n has_module_perms = request.user.has_module_perms(app_label)\n if has_module_perms:\n perms = model_admin.get_model_perms(request)\n if True in perms.values():\n if '%s/%s/' % (app_label, model.__name__.lower()) in current_url.lower():\n enabled = True\n else:\n enabled = False\n model_dict = {\n 'name': capfirst(model._meta.verbose_name_plural),\n 'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),\n 'perms': perms,\n }\n if app_label in app_dict:\n app_dict[app_label]['models'].append(model_dict)\n if enabled:\n app_dict[app_label][\"enabled\"] = enabled\n else:\n app_dict[app_label] = {\n 'name': app_label.title(),\n 'app_url': app_label + '/',\n 'has_module_perms': 
has_module_perms,\n 'models': [model_dict],\n 'icon': 'default.png',\n 'big_icon': \"default_big.png\",\n 'description': _(\"Default application description\"),\n 'enabled': enabled,\n }\n if app_dict[app_label][\"app_url\"] in DEFAULT_LABELS.keys():\n current_app = DEFAULT_LABELS[app_dict[app_label][\"app_url\"]]\n app_dict[app_label][\"name\"] = current_app[0]\n app_dict[app_label][\"icon\"] = current_app[1]\n app_dict[app_label][\"big_icon\"] = current_app[2]\n app_dict[app_label][\"description\"] = current_app[3]\n app_list = app_dict.values()\n app_list.sort(key=lambda x: x['name'])\n return app_list\n\n\ndef load_custom_models(request, model_paths):\n current_url = request.path.replace(reverse('admin:index'), \"\")\n enabled = False\n model_list = []\n for model, model_admin in admin.site._registry.items():\n app_label = model._meta.app_label\n has_module_perms = request.user.has_module_perms(app_label)\n if has_module_perms:\n perms = model_admin.get_model_perms(request)\n if True in perms.values():\n current_path = \"\"\n for model_path in model_paths:\n current_path = '%s/%s/' % (app_label, model.__name__.lower())\n if current_url in current_path:\n enabled = True\n if model_path in current_path:\n model_list.append({\n 'name': capfirst(model._meta.verbose_name_plural),\n 'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),\n 'perms': perms,\n })\n return model_list, enabled\n", "id": "8757762", "language": "Python", "matching_score": 2.924685001373291, "max_stars_count": 40, "path": "src/core/prismriver/views.py" }, { "content": "from django.conf import settings\n\n# Enable the sidebar custom menu or just render the regular tree? Default: False\nif hasattr(settings, 'CUSTOM_MENU'):\n CUSTOM_MENU = settings.CUSTOM_MENU\nelse:\n CUSTOM_MENU = False\n\n# If you use a custom menu what apps does it display. Default: Same as APP_MENU\nif hasattr(settings, 'SIDEBAR_APP_MENU'):\n SIDEBAR_APP_MENU = settings.SIDEBAR_APP_MENU\nelse:\n SIDEBAR_APP_MENU = [\n {\"name\": \"Users and Settings\",\n \"items\": [\"auth\", \"prismriver\", \"sites\"],\n \"icon\": \"users.png\"},\n ]\n\n# Display Last actions on the sidebar? 
Default:True\nif hasattr(settings, 'SIDEBAR_LAST_ACTIONS'):\n SIDEBAR_LAST_ACTIONS = settings.SIDEBAR_LAST_ACTIONS\nelse:\n SIDEBAR_LAST_ACTIONS = True\n\n# If you use the default menu which labels and pictures you want for the sidebar\nif hasattr(settings, 'DEFAULT_LABELS'):\n DEFAULT_LABELS = settings.DEFAULT_LABELS\nelse:\n DEFAULT_LABELS = {\"auth/\": [\"Users and Groups\", \"users.png\", \"users_big.png\",\n \"Manage the application users or groups permissions\"],\n \"sites/\": [\"Site management\", \"web.png\", \"web_big.png\", \"Manages the sites application\"]}\n", "id": "5742954", "language": "Python", "matching_score": 1.6264636516571045, "max_stars_count": 40, "path": "src/core/prismriver/settings.py" }, { "content": "#!/usr/bin/python\n\n#import coverage\nfrom django.conf import settings\nfrom django.core import management\n\ndef main():\n print \"Hi\"\n", "id": "11494822", "language": "Python", "matching_score": 0, "max_stars_count": 40, "path": "tests/run.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django import template\nfrom django.template import Library\n\nfrom e_cidadania.apps.debate.models import Note, Debate\nfrom django.shortcuts import get_object_or_404\n\nregister = Library()\n\n\nclass NotesNode(template.Node):\n \"\"\"\n \"\"\"\n def __init__(self, format_string):\n self.format_string = format_string\n self.debate = get_object_or_404(Debate, pk=format_string)\n self.debate_matrix = len(self.debate.xvalues.split(',')) * \\\n len(self.debate.yvalues.split(','))\n\n def render(self, context):\n i = 1\n while i < self.debate_matrix:\n get_sortable = \"sortable-debate%s\" % i\n try:\n note = Note.objects.all().filter(parent=get_sortable, debate=self.format_string)\n return \"<td id='%s' class='connectedSortable'>\\\n <div id='%s' class='note'>\\\n <a href='javascript:getClickedNote()' id='deletenote' class='hidden'></a>\\\n <textarea>%s</textarea>\\\n </div>\\\n </td>\" % (get_sortable, note.noteid, note.message)\n i += 1\n except:\n return \"<td id='%s' class='connectedSortable'></td>\" % (get_sortable)\n i += 1\n\n\n@register.tag\ndef get_debate_notes(parser, token):\n \"\"\"\n Generate the notes for the debate.\n \"\"\"\n try:\n tag_name, format_string = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"%r requires a single argument.\" % token.contents.split()[0])\n# The current style of template tags does not consider the quotes an obligation.\n# if not (format_string[0] == format_string[-1] and format_string[0] in ('\"', \"'\")):\n# raise template.TemplateSyntaxError(\"%r tag's argument should be in quotes\" % tag_name)\n if not format_string.isdigit():\n raise template.TemplateSyntaxError(\"%r is not a valid debate id.\" % format_string)\n return NotesNode(format_string)\n", "id": "1226161", "language": "Python", "matching_score": 1.6105314493179321, "max_stars_count": 40, "path": 
"src/apps/ecidadania/debate/templatetags/get_notes.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2013 Clione Software\n# Copyright (c) 2010-2013 <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom urlparse import urljoin\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template import Context, Template\nfrom django.template.defaultfilters import stringfilter\nfrom django.template.loader import get_template, render_to_string\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef wysiwyg_editor(field_id, editor_name=None, config=None):\n if not editor_name:\n editor_name = \"%s_editor\" % field_id\n\n ctx = {\n 'field_id': field_id,\n 'editor_name': editor_name,\n 'config': config\n }\n\n return render_to_string(\n \"../templates/wysihtml5_instance.html\",\n ctx\n )\n", "id": "9247478", "language": "Python", "matching_score": 1.2723644971847534, "max_stars_count": 40, "path": "src/extras/custom_stuff/templatetags/wysiwyg.py" }, { "content": "from django import template\nfrom django.core.urlresolvers import reverse\nfrom django.template.context import Context\nfrom core.prismriver.settings import CUSTOM_MENU\nfrom core.prismriver.views import load_apps, load_custom_models\nfrom core.prismriver.settings import SIDEBAR_APP_MENU, SIDEBAR_LAST_ACTIONS\nfrom django.template.loader import get_template\nfrom copy import deepcopy\n\nregister = template.Library()\n\n\ndef get_custom_menu(request):\n apps = deepcopy(SIDEBAR_APP_MENU)\n for app in apps:\n app[\"models\"], app[\"enabled\"] = load_custom_models(request, app[\"items\"])\n c = Context({\"apps\": apps, \"custom\": True})\n t = get_template('admin/side_menu.html')\n return t.render(c)\n\n\ndef get_menu(request):\n current_url = request.path.replace(reverse('admin:index'), \"\").lower()\n c = Context({\"apps\": load_apps(request), \"custom\": False, })\n t = get_template('admin/side_menu.html')\n return t.render(c)\n\n\n@register.filter(name='get_apps')\ndef get_apps(request):\n if CUSTOM_MENU:\n return get_custom_menu(request)\n else:\n return get_menu(request)\n\n\n@register.tag(name='side_last_actions')\ndef side_last_actions(request):\n return SIDEBAR_LAST_ACTIONS\n", "id": "5701766", "language": "Python", "matching_score": 3.994994878768921, "max_stars_count": 40, "path": "src/core/prismriver/templatetags/prismriver_tags.py" }, { "content": "from django.core.urlresolvers import reverse\nfrom django.template.context import Context\nfrom django.template.loader import get_template\nfrom core.prismriver.dashboard.plugins import pluginbase\nfrom core.prismriver.settings import CUSTOM_MENU\nfrom core.prismriver.dashboard.settings import APP_MENU\nfrom core.prismriver.views import load_apps, load_custom_models\nfrom copy import deepcopy\n\n\nclass AppList(pluginbase.DashboardPlugin):\n def get_custom_menu(self, request):\n apps = deepcopy(APP_MENU)\n for app in apps:\n app[\"models\"], app[\"enabled\"] = load_custom_models(request, app[\"items\"])\n c = Context({\"apps\": 
apps})\n t = get_template('plugins/app_menu.html')\n return t.render(c)\n\n def get_menu(self, request):\n current_url = request.path.replace(reverse('admin:index'), \"\").lower()\n c = Context({\"apps\": load_apps(request)})\n t = get_template('plugins/app_menu.html')\n return t.render(c)\n\n def render(self, request):\n if CUSTOM_MENU:\n return self.get_custom_menu(request)\n else:\n return self.get_menu(request)\n", "id": "11545945", "language": "Python", "matching_score": 3.9135677814483643, "max_stars_count": 40, "path": "src/core/prismriver/dashboard/plugins/dashplugins.py" } ]
2.7765
TheAlchemistOak
[ { "content": "#!./env/bin/python3\nfrom entropy import get_best_attribute, normalize_data\nimport pandas as pd\nimport sys\nimport os\n\nclass Node:\n def __init__(self, name, val_set):\n self.name = name\n self.val_set = val_set\n \ndef decision_tree(filename):\n global df\n df = pd.read_csv(filename, index_col=0)\n normalize_data(df)\n attrib_best = get_best_attribute(df)[0]\n #get_common()\n ID3(df, list(df)[-1], attrib_best, 0)\n\ndef get_common(val, attribs):\n global common\n global count\n\n class_name = list(df)[-1]\n df_aux = df[df[attribs] == val]\n common = set(df_aux[class_name]).pop()\n count = 0\n for val in set(df_aux[class_name]):\n if count < df_aux[df_aux[class_name] == val][class_name].count():\n count = df_aux[df_aux[class_name] == val][class_name].count()\n common = val\n\ndef ID3(dataframe, target_attrib, attribs, cnt):\n root = Node(attribs, set(df[attribs]))\n print(\" \"*cnt + \"<{}>\".format(attribs))\n file_.write(\" \"*cnt + \"<{}>\\n\".format(attribs))\n for val in root.val_set:\n df_aux = dataframe[dataframe[attribs] == val]\n a = set(df_aux[target_attrib])\n if len(a) == 1:\n aux_1 = a.pop()\n print(\" \"*(cnt + 1) + \"{}: {} ({})\".format(val, aux_1, df_aux[attribs].count()))\n file_.write(\" \"*(cnt + 1) + \"{}: {} ({})\\n\".format(val, aux_1, df_aux[attribs].count()))\n elif len(a) == 0:\n get_common(val, attribs)\n print(\" \"*(cnt + 1) + \"{}: {} ({})\".format(val, common, count))\n file_.write(\" \"*(cnt + 1) + \"{}: {} ({})\\n\".format(val, common, count))\n elif len(dataframe) > 1:\n print(\" \"*(cnt + 1) + \"{}: \".format(val)) \n file_.write(\" \"*(cnt + 1) + \"{}: \\n\".format(val))\n if len(list(df_aux.drop(attribs, axis=1))) == 1:\n return\n ID3(df_aux.drop(attribs, axis=1), target_attrib, get_best_attribute(df_aux.drop(attribs, axis=1))[0], cnt + 2)\n\nfile_ = None\n\nif __name__ == '__main__':\n if not os.path.exists('output/'):\n os.mkdir('output')\n name = sys.argv[1]\n aux = name.split('/')[-1]\n file_name = aux.split('.')[0]\n file_ = open('output/tree_{}.txt'.format(file_name), 'w')\n decision_tree(name)\n file_.close()", "id": "7374688", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "decision_tree.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport sys\n\ndef entropy(attribute, df, class_name):\n entropia_list = []\n aux = len(df[attribute])\n attrib = list(set(df[attribute]))\n clazz = list(set(df[class_name]))\n\n count_matrix = [np.zeros(len(clazz)) for i in range(len(attrib))]\n\n attrib = dict(zip(attrib, (range(len(attrib)))))\n clazz = dict(zip(clazz, (range(len(clazz)))))\n\n for i in range(aux):\n count_matrix[attrib[df[attribute].iloc[i]]][clazz[df[class_name].iloc[i]]]+=1\n\n total_list = [0]*len(attrib)\n\n for i in range(len(attrib)):\n total = sum(count_matrix[i])\n count_matrix[i] = count_matrix[i]/sum(count_matrix[i])\n for j in range(len(count_matrix[i])):\n if count_matrix[i][j] != 0:\n count_matrix[i][j] = -1*count_matrix[i][j]*np.log2(count_matrix[i][j])\n count_matrix[i] = sum(count_matrix[i])*(total/aux)\n\n return sum(count_matrix)\n\ndef get_best_attribute(df):\n columns_name = list(df)\n best = (None ,sys.maxsize)\n for title in df:\n if title!=columns_name[-1]:\n aux = (title, entropy(title, df, columns_name[-1]))\n if best[1] > aux[1]:\n best = aux\n if best[0] is None:\n print(\"Error\")\n return\n return best\n\n## The code below is for categorization of raw number data\n\ndef num_int(dataframe, column):\n min_ = dataframe[column].min()\n max_ = 
dataframe[column].max()\n R = max_ - min_\n k = 1 + 3.222*np.log10(dataframe[column].count())\n w = round(R/int(round(k)), 2)\n for i in range(0, int(round(k))):\n dataframe[column] = dataframe[column].apply(lambda x: fun(x, min_, w, i))\n \ndef fun(x, min_, w, i):\n if type(x) is str:\n return x\n if min_ + w*i <= x <= min_ + w*(i + 1):\n return '{} - {}'.format(round(min_ + w*i, 2), round(min_ + w*(i + 1), 2))\n else:\n return x\n\ndef normalize_data(dataframe):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n for col in dataframe.select_dtypes(include=numerics):\n num_int(dataframe, col)", "id": "9281484", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "entropy.py" } ]
0
KennyLabSinai
[ { "content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 7 10:36:50 2020\n\n@author: <NAME>, PhD, for lab of <NAME> @ Mount Sinai\n\nThis creates an array of cell center coordinates transformed to the atlas, and then\nisolates a region, and splits into two hemispheres. The result can be used as input for \nthe clearMapSubregionParser script.\"\n\"\"\"\n\nimport ClearMap.IO.IO as io\nimport os\nimport ClearMap.Visualization.Plot as plt\nimport ClearMap.Analysis.Label as lbl\nimport numpy as np\n\nsampleName = 'IA1_LB'\nparentDirectory = '/d2/studies/ClearMap/IA_iDISCO/'\nexecfile(os.path.join(parentDirectory, sampleName, 'parameter_file_'+sampleName+'.py'))\n\nregion = 'Caudoputamen'\n\npoints = io.readPoints(TransformedCellsFile)\ndata = plt.overlayPoints(AnnotationFile, points.astype(int), pointColor = None)\ndata = data[:,:,:,1:]\nio.writeData(os.path.join(BaseDirectory, sampleName + '_Points_Transformed.tif'), data)\n\nlabel = io.readData(AnnotationFile)\nlabel = label.astype('int32')\nlabelids = np.unique(label)\n\noutside = np.zeros(label.shape, dtype = bool);\n\n\"\"\"\nAutomated isolation of points in caudoputamen (ABA region ID 672).\nIn order to find out the level to use, in console input:\n>>> lbl.labelAtLevel(r, n)\nwhere r is region ID, and n is level (I usually start at 5), if the output is not the\nregion ID, increase n.\n\"\"\"\nfor l in labelids:\n if not (lbl.labelAtLevel(l, 6) == 672):\n outside = np.logical_or(outside, label == l);\n#Load the transformed points, and set everything outside of the desired ROI to 0\nheatmap = io.readData(os.path.join(BaseDirectory, sampleName, sampleName + '_Points_Transformed.tif'))\nheatmap[outside] = 0;\n#Split into right and let hemispheres:\nXmin = np.amin(np.nonzero(heatmap)[1])\nXmax = np.amax(np.nonzero(heatmap)[1])\nYmin = np.amin(np.nonzero(heatmap)[0])\nYmax = np.amax(np.nonzero(heatmap)[0])\n\nheatmap_left = heatmap[Xmin-10:heatmap.shape[0]/2,Ymin-10:Ymax+10,:]\nheatmap_right = heatmap[heatmap.shape[0]/2:Xmax+10,Ymin-10:Ymax+10:,:]\n#Right result TIF files (which can be loaded into the clearMapSubregionParser.py script)\nio.writeData(os.path.join(BaseDirectory, sampleName, sampleName + '_' + region + '_isolated_points_left.tif'), heatmap_left)\nio.writeData(os.path.join(BaseDirectory, sampleName, sampleName + '_' + region + '_isolated_points_right.tif'), heatmap_right)\n\n\n", "id": "3704953", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ClearMapSubregionParser/plotTransformedCellCenters.py" } ]
0
bsericks
[ { "content": "from OpenGL.GL import *\nfrom OpenGL.GLU import *\n\n\nclass Shader():\n\n\n program_id = 0;\n # 1. retrieve the vertex/fragment source code from filePath\n \n\n def __init__(self, vertexPath='data/vertexShader.glsl', fragmentPath='data/fragmentShader.glsl'):\n\n vertexCode = '';\n fragmentCode = '';\n\n try:\n with open(vertexPath,'r',newline='') as rf:\n vertexCode = rf.read()\n\n with open(fragmentPath,'r',newline='') as rf:\n fragmentCode = rf.read()\n\n except:\n print(\"could not open files\")\n\n \n vs_id = self.add_shader(vertexCode, GL_VERTEX_SHADER)\n frag_id = self.add_shader(fragmentCode, GL_FRAGMENT_SHADER)\n\n self.program_id = glCreateProgram()\n glAttachShader(self.program_id, vs_id)\n glAttachShader(self.program_id, frag_id)\n glLinkProgram(self.program_id)\n\n if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE:\n info = glGetProgramInfoLog(self.program_id)\n glDeleteProgram(self.program_id)\n glDeleteShader(vs_id)\n glDeleteShader(frag_id)\n raise RuntimeError('Error linking program: %s' % (info))\n glDeleteShader(vs_id)\n glDeleteShader(frag_id)\n\n def add_shader(self, source, shader_type):\n \"\"\" Helper function for compiling a GLSL shader\n Parameters\n ----------\n source : str\n String containing shader source code\n shader_type : valid OpenGL shader type\n Type of shader to compile\n Returns\n -------\n value : int\n Identifier for shader if compilation is successful\n \"\"\"\n try:\n shader_id = glCreateShader(shader_type)\n glShaderSource(shader_id, source)\n glCompileShader(shader_id)\n if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE:\n info = glGetShaderInfoLog(shader_id)\n raise RuntimeError('Shader compilation failed: %s' % (info))\n return shader_id\n except:\n glDeleteShader(shader_id)\n raise\n\n def use(self):\n glUseProgram(self.program_id); \n\n def setBool(self, name, value):\n glUniform1i(glGetUniformLocation(self.program_id, name), value); \n\n def setInt(self, name, value):\n glUniform1i(glGetUniformLocation(self.program_id, name), value); \n \n def setFloat(self, name, value): \n glUniform1f(glGetUniformLocation(self.program_id, name), value); \n\n #def setVec2(self, name, value):\n # glUniform2fv(glGetUniformLocation(self.program_id, name), 1, value[0]); \n \n def setVec2(self, name, x, y):\n glUniform2f(glGetUniformLocation(self.program_id, name), x, y); \n \n #def setVec3(self, name, value):\n # glUniform3fv(glGetUniformLocation(self.program_id, name), 1, value[0]); \n \n def setVec3(self, name, x, y, z):\n glUniform3f(glGetUniformLocation(self.program_id, name), x, y, z); \n\n #def setVec4(self, name, value):\n # glUniform4fv(glGetUniformLocation(self.program_id, name), 1, value[0]); \n \n def setVec4(self, name, x, y, z, w):\n glUniform4f(glGetUniformLocation(self.program_id, name), x, y, z, w); \n \n def setMat2(self, name, mat): \n glUniformMatrix2fv(glGetUniformLocation(self.program_id, name), 1, GL_FALSE, mat);\n \n def setMat3(self, name, mat):\n glUniformMatrix3fv(glGetUniformLocation(self.program_id, name), 1, GL_FALSE, mat);\n \n def setMat4(self, name, mat):\n glUniformMatrix4fv(glGetUniformLocation(self.program_id, name), 1, GL_FALSE, mat);\n \n\n def __del__(self):\n glDeleteProgram(self.program_id);\n ", "id": "8276569", "language": "Python", "matching_score": 5.882410526275635, "max_stars_count": 0, "path": "pygl/shader.py" }, { "content": "import pygame\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom ctypes import *\nimport numpy as np\n\nimport pywavefront\n\n\n\nvertex = 
\"\"\"\n#version 330 core\nlayout (location = 0) in vec3 aPos;\n\nvoid main()\n{\n gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n}\n\"\"\"\n\n\nfragment = \"\"\"\n#version 330 core\nout vec4 FragColor;\n\nvoid main()\n{\n FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n}\n\"\"\"\n\nvertex_data = np.array([0.75, 0.75, 0.0,\n 0.75, -0.75, 0.0,\n -0.75, -0.75, 0.0], dtype=np.float32)\n\ncolor_data = np.array([1, 0, 0,\n 0, 1, 0,\n 0, 0, 1], dtype=np.float32)\n\n\nclass ShaderProgram(object):\n \"\"\" Helper class for using GLSL shader programs\n \"\"\"\n def __init__(self, vertex, fragment):\n \"\"\"\n Parameters\n ----------\n vertex : str\n String containing shader source code for the vertex\n shader\n fragment : str\n String containing shader source code for the fragment\n shader\n \"\"\"\n self.program_id = glCreateProgram()\n vs_id = self.add_shader(vertex, GL_VERTEX_SHADER)\n frag_id = self.add_shader(fragment, GL_FRAGMENT_SHADER)\n\n glAttachShader(self.program_id, vs_id)\n glAttachShader(self.program_id, frag_id)\n glLinkProgram(self.program_id)\n\n if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE:\n info = glGetProgramInfoLog(self.program_id)\n glDeleteProgram(self.program_id)\n glDeleteShader(vs_id)\n glDeleteShader(frag_id)\n raise RuntimeError('Error linking program: %s' % (info))\n glDeleteShader(vs_id)\n glDeleteShader(frag_id)\n\n def add_shader(self, source, shader_type):\n \"\"\" Helper function for compiling a GLSL shader\n Parameters\n ----------\n source : str\n String containing shader source code\n shader_type : valid OpenGL shader type\n Type of shader to compile\n Returns\n -------\n value : int\n Identifier for shader if compilation is successful\n \"\"\"\n try:\n shader_id = glCreateShader(shader_type)\n glShaderSource(shader_id, source)\n glCompileShader(shader_id)\n if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE:\n info = glGetShaderInfoLog(shader_id)\n raise RuntimeError('Shader compilation failed: %s' % (info))\n return shader_id\n except:\n glDeleteShader(shader_id)\n raise\n\n def uniform_location(self, name):\n \"\"\" Helper function to get location of an OpenGL uniform variable\n Parameters\n ----------\n name : str\n Name of the variable for which location is to be returned\n Returns\n -------\n value : int\n Integer describing location\n \"\"\"\n return glGetUniformLocation(self.program_id, name)\n\n def attribute_location(self, name):\n \"\"\" Helper function to get location of an OpenGL attribute variable\n Parameters\n ----------\n name : str\n Name of the variable for which location is to be returned\n Returns\n -------\n value : int\n Integer describing location\n \"\"\"\n return glGetAttribLocation(self.program_id, name)\n\n\nscene = pywavefront.Wavefront('bunny.obj', collect_faces=True)\n\nverts = np.array(scene.vertices, dtype=\"float32\")\nflatverts = (verts.flatten())\nprint(len(scene.vertices))\n\ndef main():\n\n pygame.init ()\n display = (800, 600)\n pygame.display.set_mode(display, pygame.DOUBLEBUF | pygame.OPENGL)\n glClearColor (0.0, 0.5, 0.5, 1.0)\n glEnableClientState (GL_VERTEX_ARRAY)\n\n gluPerspective(45, (display[0] / display[1]), 1, 500.0)\n glTranslatef(0.0, -1.0, -5)\n \n # Lets compile our shaders since the use of shaders is now\n # mandatory. 
We need at least a vertex and fragment shader\n # begore we can draw anything\n program = ShaderProgram(fragment=fragment, vertex=vertex)\n \n \n\n #vertices = [ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0 ]\n vbo = glGenBuffers (1)\n glBindBuffer (GL_ARRAY_BUFFER, vbo)\n glBufferData (GL_ARRAY_BUFFER, len(flatverts)*4, flatverts, GL_STATIC_DRAW)\n\n button_down = False\n running = True\n while running:\n \n \n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEMOTION:\n #print(event.rel)\n if button_down == True:\n #pitch = pitch + event.rel[1]*0.1;\n #yaw = yaw + event.rel[0]*0.1;\n glRotatef(event.rel[1]*0.1, 1, 0, 0)\n glRotatef(event.rel[0]*0.1, 0, 1, 0)\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 4: # wheel rolled up\n glScaled(1.05, 1.05, 1.05);\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 5: # wheel rolled down\n glScaled(0.95, 0.95, 0.95);\n \n for event in pygame.mouse.get_pressed():\n #print(pygame.mouse.get_pressed())\n if pygame.mouse.get_pressed()[0] == 1:\n button_down = True\n elif pygame.mouse.get_pressed()[0] == 0:\n button_down = False\n \n \n \n glClear (GL_COLOR_BUFFER_BIT)\n \n #glUseProgram(program.program_id)\n \n glPushMatrix()\n #view = gluLookAt(0,0,6,0,0,0,0,1,0)\n \n \n glBindBuffer (GL_ARRAY_BUFFER, vbo)\n glVertexPointer (3, GL_FLOAT, 0, None)\n\n glDrawArrays (GL_POINTS, 0, len(scene.vertices))\n glPopMatrix()\n \n pygame.display.flip ()\n pygame.time.wait(10)\n \nmain()", "id": "3071947", "language": "Python", "matching_score": 0.7879467606544495, "max_stars_count": 0, "path": "examples/bunny2.py" }, { "content": "import glfw\nfrom OpenGL.GL import *\nfrom OpenGL.GL.shaders import compileProgram, compileShader\nimport numpy as np\nimport glm\nfrom numpy.core.numeric import identity\nfrom pygl.camera import *\nfrom pygl.shader import *\nfrom pygl.skybox import *\nfrom OpenGL.GLUT import *\n\nimport tkinter as tk\nimport threading\n\n\n\nclass App(threading.Thread):\n\n yawmetric = 0.0\n pitchmetric = 0.0\n positionmetric = glm.vec3(0,0,0);\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.start()\n\n def update_metrics(self, position, yaw, pitch):\n self.positionmetric = position\n self.yawmetric = yaw\n self.pitchmetric = pitch\n\n def callback(self):\n self.root.quit()\n\n def update_text(self, text_box):\n text_box.delete(0.0, tk.END)\n text_box.insert(tk.END, 'X: {}\\n'.format(self.positionmetric[0]))\n text_box.insert(tk.END, 'Y: {}\\n'.format(self.positionmetric[1]))\n text_box.insert(tk.END, 'Z: {}\\n'.format(self.positionmetric[2]))\n text_box.insert(tk.END, 'Yaw: {}\\n'.format(self.yawmetric))\n text_box.insert(tk.END, 'Pitch: {}\\n'.format(self.pitchmetric))\n\n self.text_box.tag_add(\"here\", \"1.0\", tk.END)\n self.text_box.tag_config(\"here\", background=\"black\", foreground=\"green\")\n \n self.root.after(500, self.update_text, text_box);\n \n\n def run(self):\n self.root = tk.Tk()\n \n self.root.protocol(\"WM_DELETE_WINDOW\", self.callback)\n self.root.title('PythonGuides')\n self.root.geometry('400x150')\n self.root.config(bg='#FFFFFF')\n\n self.text_box = tk.Text(\n self.root,\n height=12,\n width=40\n )\n \n self.text_box.pack(expand=True)\n \n\n self.root.after(0, self.update_text, self.text_box);\n \n\n self.root.mainloop()\n\nmetric_app = App();\n\n\n# screen settings\nSCR_WIDTH = 800;\nSCR_HEIGHT = 600;\n\nmove_list = [False, False, False, False, False, False, False, False]\n\n# mouse settings\nfirst_mouse = True;\nlastX 
= SCR_WIDTH / 2\nlastY = SCR_HEIGHT / 2\n\n# timing\ndeltaTime = 0.0; # time between current frame and last frame\nlastFrame = 0.0;\n\nmouse_left_down = False;\n\ncamera = Camera(position=glm.vec3(1.0, 1.0, 10.0));\n\n\nprojection = glm.perspective(45, SCR_WIDTH / SCR_HEIGHT, 0.1, 100)\n\n\ndef scroll_callback(window, xoffset, yoffset):\n camera.ProcessMouseScroll(yoffset);\n\n\n# the mouse position callback function\ndef mouse_callback(window, xpos, ypos):\n global first_mouse, lastX, lastY, camera, mouse_left_down\n \n if mouse_left_down == False:\n return;\n\n if first_mouse:\n lastX = xpos\n lastY = ypos\n first_mouse = False\n\n xoffset = xpos - lastX\n yoffset = lastY - ypos\n \n lastX = xpos\n lastY = ypos\n \n #constrain jumpyness when clicking\n if abs(xoffset) < 100 and abs(yoffset) < 100:\n camera.ProcessMouseMovement(xoffset, yoffset)\n\n# the window resize callback function\ndef framebuffer_size_callback(window, width, height):\n global projection, SCR_WIDTH, SCR_HEIGHT\n\n glViewport(0, 0, width, height)\n SCR_WIDTH = width;\n SCR_HEIGHT = height;\n\n# the keyboard input callback\ndef key_input_clb(window, key, scancode, action, mode):\n global deltaTime, camera, move_list\n\n if key == glfw.KEY_ESCAPE and action == glfw.PRESS:\n glfw.set_window_should_close(window, True)\n\n if key == glfw.KEY_W and action == glfw.PRESS:\n move_list[int(Camera_Movement.FORWARD)] = True;\n if key == glfw.KEY_W and action == glfw.RELEASE:\n move_list[int(Camera_Movement.FORWARD)] = False;\n\n\n if key == glfw.KEY_S and action == glfw.PRESS:\n move_list[Camera_Movement.BACKWARD] = True;\n if key == glfw.KEY_S and action == glfw.RELEASE:\n move_list[Camera_Movement.BACKWARD] = False;\n\n\n if key == glfw.KEY_A and action == glfw.PRESS:\n move_list[Camera_Movement.LEFT] = True;\n if key == glfw.KEY_A and action == glfw.RELEASE:\n move_list[Camera_Movement.LEFT] = False;\n\n\n if key == glfw.KEY_D and action == glfw.PRESS:\n move_list[Camera_Movement.RIGHT] = True;\n if key == glfw.KEY_D and action == glfw.RELEASE:\n move_list[Camera_Movement.RIGHT] = False;\n\n if key == glfw.KEY_Q and action == glfw.PRESS:\n move_list[Camera_Movement.ROLL_LEFT] = True;\n if key == glfw.KEY_Q and action == glfw.RELEASE:\n move_list[Camera_Movement.ROLL_LEFT] = False;\n\n if key == glfw.KEY_E and action == glfw.PRESS:\n move_list[Camera_Movement.ROLL_RIGHT] = True;\n if key == glfw.KEY_E and action == glfw.RELEASE:\n move_list[Camera_Movement.ROLL_RIGHT] = False;\n\n\n if key == glfw.KEY_LEFT_SHIFT and action == glfw.PRESS:\n move_list[Camera_Movement.UP] = True;\n if key == glfw.KEY_LEFT_SHIFT and action == glfw.RELEASE:\n move_list[Camera_Movement.UP] = False;\n\n\n if key == glfw.KEY_SPACE and action == glfw.PRESS:\n move_list[Camera_Movement.DOWN] = True;\n if key == glfw.KEY_SPACE and action == glfw.RELEASE:\n move_list[Camera_Movement.DOWN] = False;\n\n\ndef process_movement():\n #call processkeyboard from here\n global delta_time, camera, move_list\n\n for idx, move in enumerate(move_list):\n if move == True:\n camera.ProcessKeyboard(Camera_Movement(idx), deltaTime)\n\ndef mouse_button_callback(window, button, action, mods):\n global mouse_left_down, lastX, lastY\n \n glfw.set_cursor_pos(window, SCR_WIDTH/2, SCR_HEIGHT/2)\n\n if button == glfw.MOUSE_BUTTON_LEFT and action == glfw.PRESS: \n mouse_left_down = True;\n glfw.set_input_mode(window, glfw.CURSOR, glfw.CURSOR_DISABLED)\n \n elif button == glfw.MOUSE_BUTTON_LEFT and action == glfw.RELEASE: \n mouse_left_down = False;\n glfw.set_input_mode(window, 
glfw.CURSOR, glfw.CURSOR_NORMAL)\n \n \n lastX, lastY = glfw.get_cursor_pos(window);\n\n\ndef window_resize(window, width, height):\n glViewport(0, 0, width, height)\n\n# initializing glfw library\nif not glfw.init():\n raise Exception(\"glfw can not be initialized!\")\n\n# creating the window\nwindow = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, \"My OpenGL window\", None, None)\n\n# check if window was created\nif not window:\n glfw.terminate()\n raise Exception(\"glfw window can not be created!\")\n\n# set window's position\nglfw.set_window_pos(window, 600, 600)\n\n# set the callback function for window resize\nglfw.set_window_size_callback(window, window_resize)\n\n# make the context current\nglfw.make_context_current(window)\n\nglfw.make_context_current(window);\nglfw.set_framebuffer_size_callback(window, framebuffer_size_callback);\nglfw.set_cursor_pos_callback(window, mouse_callback);\nglfw.set_scroll_callback(window, scroll_callback);\nglfw.set_mouse_button_callback(window, mouse_button_callback);\n \nglfw.set_key_callback(window, key_input_clb)\n \n\nglEnable(GL_DEPTH_TEST);\n\nvertices = [-0.5, -0.5, 0.5, 1.0, 0.0, 0.0,\n 0.5, -0.5, 0.5, 0.0, 1.0, 0.0,\n 0.5, 0.5, 0.5, 0.0, 0.0, 1.0,\n -0.5, 0.5, 0.5, 1.0, 1.0, 1.0,\n\n -0.5, -0.5, -0.5, 1.0, 0.0, 0.0,\n 0.5, -0.5, -0.5, 0.0, 1.0, 0.0,\n 0.5, 0.5, -0.5, 0.0, 0.0, 1.0,\n -0.5, 0.5, -0.5, 1.0, 1.0, 1.0]\n\nindices = [0, 1, 2, 2, 3, 0,\n 4, 5, 6, 6, 7, 4,\n 4, 5, 1, 1, 0, 4,\n 6, 7, 3, 3, 2, 6,\n 5, 6, 2, 2, 1, 5,\n 7, 4, 0, 0, 3, 7]\n\n#vertices = [-0.5, -0.5, 0.5, 1.0, 0.0, 0.0,\n# 0.5, -0.5, 0.5, 0.0, 1.0, 0.0]\n\n#indices = [0, 1]\n\nvertices = np.array(vertices, dtype=np.float32)\nindices = np.array(indices, dtype=np.uint32)\n\nshader = Shader() \n\n\n\nvao = glGenVertexArrays( 1 );\nglBindVertexArray( vao );\n\n# Vertex Buffer Object\nVBO = glGenBuffers(1)\nglBindBuffer(GL_ARRAY_BUFFER, VBO)\nglBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)\n\n# Element Buffer Object\nEBO = glGenBuffers(1)\nglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)\nglBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)\n\nglEnableVertexAttribArray(0)\nglVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n\nglEnableVertexAttribArray(1)\nglVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n\nglBindBuffer(GL_ARRAY_BUFFER, 0); \nglBindVertexArray(0); \n\nshader.use()\nglClearColor(0, 0.1, 0.1, 1)\n\nfrom pygl.line import *\n\nline_shader = Shader(vertexPath='data/lineShader.vs', fragmentPath='data/lineShader.fs')\n\nx_axis = Line(glm.vec3(50, 0, 0), glm.vec3(49,0,0), line_shader)\nx_axis.setColor(glm.vec3(1.0,1.0,0))\ny_axis = Line(glm.vec3(0, 50, 0), glm.vec3(0,0,0), line_shader)\ny_axis.setColor(glm.vec3(0.0,1.0,1.0))\nz_axis = Line(glm.vec3(0, 0, 50), glm.vec3(0,0,0), line_shader)\nz_axis.setColor(glm.vec3(1.0,0.0,1.0))\n\nx_grid_lines = []\nz_grid_lines = []\n\nfor a in range(11):\n x_grid_lines.append(Line(glm.vec3(100, 0, 0), glm.vec3(-100,0,0), line_shader))\n z_grid_lines.append(Line(glm.vec3(0, 0, 100), glm.vec3(0,0,-100), line_shader))\n\nglLineWidth(2.0);\n\nfrom pygl.skybox import *\n\nskybox = Skybox();\n\n# the main application loop\nwhile not glfw.window_should_close(window):\n glfw.poll_events()\n\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n \n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); \n\n\n currentFrame = glfw.get_time();\n deltaTime = currentFrame - lastFrame;\n lastFrame = currentFrame;\n\n projection = 
glm.perspective(glm.radians(camera.Zoom), SCR_WIDTH / SCR_HEIGHT, 0.1, 200)\n\n glDepthMask(GL_FALSE);\n view = glm.mat4(glm.mat3(camera.GetViewMatrix())); # remove translation from the view matrix\n skybox.setMVP(glm.mat4(1), view, projection);\n skybox.draw()\n glDepthMask(GL_TRUE);\n\n\n transform = glm.mat4(1)\n transform = glm.translate(transform, glm.vec3(0, 0, 0))\n transform = glm.rotate(transform, glfw.get_time(),glm.vec3(0.5,0.8,0))\n \n process_movement();\n view = camera.GetViewMatrix();\n \n metric_app.update_metrics(camera.Position, camera.Yaw, camera.Pitch);\n \n identitymat = glm.mat4(1)\n shader.use()\n shader.setMat4('model', glm.value_ptr(transform))\n shader.setMat4('view', glm.value_ptr(view))\n shader.setMat4('projection', glm.value_ptr(projection))\n\n \n glBindVertexArray( vao );\n glDrawElements(GL_TRIANGLES, len(indices), GL_UNSIGNED_INT, None)\n #glDrawElements(GL_LINES, len(indices), GL_UNSIGNED_INT, None)\n\n #transform = glm.mat4(1)\n #transform = glm.translate(transform, glm.vec3(0, 0, 0))\n x_axis.setMVP(glm.mat4(1), view, projection);\n x_axis.draw();\n\n #transform = glm.mat4(1)\n #transform = glm.translate(transform, glm.vec3(0, 0, 2))\n y_axis.setMVP(glm.mat4(1), view, projection);\n y_axis.draw();\n\n z_axis.setMVP(glm.mat4(1), view, projection);\n z_axis.draw();\n\n for a in range(11):\n transform = glm.mat4(1)\n transform = glm.translate(transform, glm.vec3(-50, 0, -50+(a*10)))\n x_grid_lines[a].setMVP(transform, view, projection)\n x_grid_lines[a].draw();\n\n transform = glm.mat4(1)\n transform = glm.translate(transform, glm.vec3(-50+(a*10), 0, -50))\n z_grid_lines[a].setMVP(transform, view, projection)\n z_grid_lines[a].draw();\n \n \n\n\n glfw.swap_buffers(window)\n\n# terminate glfw, free up allocated resources\nglfw.terminate()", "id": "509441", "language": "Python", "matching_score": 9.68222713470459, "max_stars_count": 0, "path": "demo.py" }, { "content": "import glm\nimport glfw\nfrom pygl.camera import *\nfrom pygl.shader import *\nimport sys\nimport numpy as np\n\n#If we're running from the test directory, this finds the modules in pygl\nsys.path.append('./../')\n\n\n# screen settings\nSCR_WIDTH = 800;\nSCR_HEIGHT = 600;\n\n\n# mouse settings\nfirst_mouse = True;\nlastX = SCR_WIDTH / 2\nlastY = SCR_HEIGHT / 2\n\n# timing\ndeltaTime = 0.0; # time between current frame and last frame\nlastFrame = 0.0;\n\n\ncamera = Camera(position=glm.vec3(0.0, 0.0, 3.0));\n\n\nprojection = glm.perspective(45, SCR_WIDTH / SCR_HEIGHT, 0.1, 100)\n\n\ndef scroll_callback(window, xoffset, yoffset):\n camera.ProcessMouseScroll(yoffset);\n\n\n# the mouse position callback function\ndef mouse_callback(window, xpos, ypos):\n global first_mouse, lastX, lastY, camera\n\n if first_mouse:\n lastX = xpos\n lastY = ypos\n first_mouse = False\n\n xoffset = xpos - lastX\n yoffset = lastY - ypos\n\n lastX = xpos\n lastY = ypos\n\n camera.ProcessMouseMovement(xoffset, yoffset)\n\n# the window resize callback function\n\n\ndef framebuffer_size_callback(window, width, height):\n global projection\n\n glViewport(0, 0, width, height)\n projection = glm.mat4.create_perspective_projection_matrix(\n 45, width / height, 0.1, 100)\n glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection)\n\n# the keyboard input callback\n\n\ndef key_input_clb(window, key, scancode, action, mode):\n global deltaTime, camera\n\n if key == glfw.KEY_ESCAPE and action == glfw.PRESS:\n glfw.set_window_should_close(window, True)\n\n if key == glfw.KEY_W and action == glfw.PRESS:\n 
camera.ProcessKeyboard(Camera_Movement.FORWARD, deltaTime)\n if key == glfw.KEY_S and action == glfw.PRESS:\n camera.ProcessKeyboard(Camera_Movement.FORWARD, deltaTime)\n if key == glfw.KEY_A and action == glfw.PRESS:\n camera.ProcessKeyboard(Camera_Movement.FORWARD, deltaTime)\n if key == glfw.KEY_D and action == glfw.PRESS:\n camera.ProcessKeyboard(Camera_Movement.FORWARD, deltaTime)\n # if key in [glfw.KEY_W, glfw.KEY_S, glfw.KEY_D, glfw.KEY_A] and action == glfw.RELEASE:\n # left, right, forward, backward = False, False, False, False\n\n\n\n############################################ object data #######################################\n\n\nslices = 10\n\nvertices = np.empty(shape=((slices+1) * (slices+1) * 3), dtype=int)\nindices = np.empty(shape=((slices) * (slices) * 2 * 4), dtype=int)\n\nvertex_index=0\nfor j in range (slices+1):\n for i in range (slices+1):\n x = i/slices;\n y = 0;\n z = j/slices;\n vertices[vertex_index] = x;\n vertex_index = vertex_index+1\n vertices[vertex_index] = y;\n vertex_index = vertex_index+1\n vertices[vertex_index] = z;\n vertex_index = vertex_index+1\n \n\nindices_index=0\nfor j in range (slices):\n for i in range (slices):\n row1 = j * (slices+1);\n row2 = (j+1) * (slices+1);\n\n #indices.append(glm.uvec4(row1+i, row1+i+1, row1+i+1, row2+i+1));\n indices[indices_index] = row1+i; indices_index = indices_index+1;\n indices[indices_index] = row1+i+1; indices_index = indices_index+1\n indices[indices_index] = row1+i+1; indices_index = indices_index+1\n indices[indices_index] = row2+i+1; indices_index = indices_index+1\n \n #indices.append(glm.uvec4(row2+i+1, row2+i, row2+i, row1+i));\n indices[indices_index] = row2+i+1; indices_index = indices_index+1;\n indices[indices_index] = row2+i; indices_index = indices_index+1\n indices[indices_index] = row2+i; indices_index = indices_index+1\n indices[indices_index] = row1+i; indices_index = indices_index+1\n \n\n\n\n\ndef main():\n\n # glfw: initialize and configure\n # ------------------------------\n glfw.init();\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3);\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3);\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE);\n\n \n\n # glfw window creation\n # --------------------\n window = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, \"LearnOpenGL\", None, None);\n\n if (window == None):\n print(\"Failed to create GLFW window\");\n glfw.terminate();\n return -1;\n \n glfw.make_context_current(window);\n glfw.set_framebuffer_size_callback(window, framebuffer_size_callback);\n glfw.set_cursor_pos_callback(window, mouse_callback);\n glfw.set_scroll_callback(window, scroll_callback);\n \n glfw.set_key_callback(window, key_input_clb)\n # capture the mouse cursor\n glfw.set_input_mode(window, glfw.CURSOR, glfw.CURSOR_DISABLED)\n\n shader = Shader()\n shader.use()\n \n vertices = [-0.5, -0.5, 0.5, 1.0, 0.0, 0.0,\n 0.5, -0.5, 0.5, 0.0, 1.0, 0.0,\n 0.5, 0.5, 0.5, 0.0, 0.0, 1.0,\n -0.5, 0.5, 0.5, 1.0, 1.0, 1.0,\n\n -0.5, -0.5, -0.5, 1.0, 0.0, 0.0,\n 0.5, -0.5, -0.5, 0.0, 1.0, 0.0,\n 0.5, 0.5, -0.5, 0.0, 0.0, 1.0,\n -0.5, 0.5, -0.5, 1.0, 1.0, 1.0]\n\n indices = [0, 1, 2, 2, 3, 0,\n 4, 5, 6, 6, 7, 4,\n 4, 5, 1, 1, 0, 4,\n 6, 7, 3, 3, 2, 6,\n 5, 6, 2, 2, 1, 5,\n 7, 4, 0, 0, 3, 7]\n\n vertices = np.array(vertices, dtype=np.float32)\n indices = np.array(indices, dtype=np.uint32)\n\n vao = glGenVertexArrays( 1 );\n glBindVertexArray( vao );\n \n # Vertex Buffer Object\n VBO = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, VBO)\n glBufferData(GL_ARRAY_BUFFER, 
vertices.nbytes, vertices, GL_STATIC_DRAW)\n\n # Element Buffer Object\n EBO = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)\n\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n\n #vao = glGenVertexArrays( 1 );\n #glBindVertexArray( vao );\n#\n #vbo = glGenBuffers( 1 );\n #glBindBuffer( GL_ARRAY_BUFFER, vbo );\n #glBufferData( GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW );\n#\n #glEnableVertexAttribArray( 0 );\n #glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_void_p(0) );\n\n length = len(indices);\n\n \n glEnable(GL_DEPTH_TEST);\n\n lastFrame = 0;\n\n running = True\n while not glfw.window_should_close(window):\n\n # render\n # ------\n glClearColor(0.2, 0.3, 0.3, 1.0);\n glClear(GL_COLOR_BUFFER_BIT);\n\n currentFrame = glfw.get_time();\n deltaTime = currentFrame - lastFrame;\n lastFrame = currentFrame;\n \n projection = glm.perspective(45, SCR_WIDTH / SCR_HEIGHT, 0.1, 100)\n\n shader.setMat4(\"projection\", projection)\n\n view = camera.GetViewMatrix();\n shader.setMat4(\"view\", view);\n\n \n\n model = glm.mat4(1.0)\n model = glm.translate(model, glm.vec3(0.0, 0.0, 0.0));\n shader.setMat4(\"model\", model)\n\n\n #glBindVertexArray(vao);\n\n #glDrawElements(GL_LINES, length, GL_UNSIGNED_INT, ctypes.c_void_p(0));\n glDrawArrays(GL_POINTS, 0, 8)\n\n #glBindVertexArray(0);\n\n \n glfw.poll_events()\n glfw.swap_buffers(window)\n\n\n\n\n\nmain();\n", "id": "7025118", "language": "Python", "matching_score": 2.399322986602783, "max_stars_count": 0, "path": "test/cameraTest.py" }, { "content": "from OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport glm\nfrom pygl.shader import *\nimport numpy as np\nfrom PIL import Image\n\nclass Skybox ():\n \n VBO = 0\n VAO = 0\n \n model = glm.mat4(1.0);\n view = glm.mat4(1.0);\n projection = glm.mat4(1.0);\n\n\n def __init__(self):\n\n self.shader = Shader(vertexPath='data/skyboxShader.vs', fragmentPath='data/skyboxShader.fs')\n\n \n self.vertices = np.array([\n # positions \n -1.0, 1.0, -1.0,\n -1.0, -1.0, -1.0,\n 1.0, -1.0, -1.0,\n 1.0, -1.0, -1.0,\n 1.0, 1.0, -1.0,\n -1.0, 1.0, -1.0,\n#\n -1.0, -1.0, 1.0,\n -1.0, -1.0, -1.0,\n -1.0, 1.0, -1.0,\n -1.0, 1.0, -1.0,\n -1.0, 1.0, 1.0,\n -1.0, -1.0, 1.0,\n#\n 1.0, -1.0, -1.0,\n 1.0, -1.0, 1.0,\n 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0,\n 1.0, 1.0, -1.0,\n 1.0, -1.0, -1.0,\n#\n -1.0, -1.0, 1.0,\n -1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0,\n 1.0, -1.0, 1.0,\n -1.0, -1.0, 1.0,\n#\n -1.0, 1.0, -1.0,\n 1.0, 1.0, -1.0,\n 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0,\n -1.0, 1.0, 1.0,\n -1.0, 1.0, -1.0,\n#\n -1.0, -1.0, -1.0,\n -1.0, -1.0, 1.0,\n 1.0, -1.0, -1.0,\n 1.0, -1.0, -1.0,\n -1.0, -1.0, 1.0,\n 1.0, -1.0, 1.0],\n dtype=np.float32);\n\n #self.vertices = np.array([\n ## -1.0, 1.0, 1.0,\n ## 1.0, 1.0, 1.0,\n ## -1.0, -1.0, 1.0,\n ## 1.0, 1.0, 1.0,\n ## -1.0, -1.0, 1.0,\n ## 1.0, -1.0, 1.0\n #1.0, -1.0, -1.0,\n # 1.0, 1.0, -1.0,\n #-1.0, 1.0, -1.0,\n #-1.0, 1.0, -1.0,\n #-1.0, -1.0, -1.0,\n # 1.0, -1.0, -1.0\n #], dtype=np.float32);\n\n self.VAO = glGenVertexArrays(1);\n self.VBO = glGenBuffers(1);\n\n glBindVertexArray(self.VAO);\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO);\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW);\n \n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3*self.vertices.dtype.itemsize, 
ctypes.c_void_p(0));\n glEnableVertexAttribArray(0);\n\n faces = [\n \n \"./data/skybox/right.png\",\n \"./data/skybox/left.png\",\n \"./data/skybox/top.png\",\n \"./data/skybox/bottom.png\",\n \"./data/skybox/front.png\",\n \"./data/skybox/back.png\"\n ]\n self.cubemapTexture = self.loadCubemap(faces);\n\n self.shader.use()\n self.shader.setInt(\"skybox\", 0)\n \n\n def setMVP(self, model, view, projection):\n self.model = model;\n self.view = view;\n self.projection = projection;\n return 1; \n \n\n def draw(self):\n glDepthFunc(GL_LEQUAL); # change depth function so depth test passes when values are equal to depth buffer's content\n self.shader.use();\n #\n #self.shader.setMat4(\"model\", glm.value_ptr(self.model));\n self.shader.setMat4(\"view\", glm.value_ptr(self.view));\n self.shader.setMat4(\"projection\", glm.value_ptr(self.projection));\n # skybox cube\n glBindVertexArray(self.VAO);\n glActiveTexture(GL_TEXTURE0);\n glBindTexture(GL_TEXTURE_CUBE_MAP, self.cubemapTexture);\n #glDrawArrays(GL_POINTS, 0, 36);\n glDrawArrays(GL_TRIANGLES, 0, 36);\n glBindVertexArray(0);\n glDepthFunc(GL_LESS); # set depth function back to default\n return 1;\n \n\n def __del__(self):\n\n glDeleteVertexArrays(1, self.VAO);\n glDeleteBuffers(1, self.VBO);\n \n def loadCubemap(self, faces):\n\n textureID = 0;\n textureID = glGenTextures(1);\n glBindTexture(GL_TEXTURE_CUBE_MAP, textureID);\n\n \n for idx, name in enumerate(faces):\n img = Image.open(name) # .png, .bmp, etc. also work\n img_data = np.array(list(img.getdata()), np.int8)\n\n glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + idx, 0, GL_RGBA, img.size[0], img.size[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, img_data);\n print ('loading {}'.format(name))\n \n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);\n glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);\n\n return textureID;\n", "id": "7751928", "language": "Python", "matching_score": 4.538468837738037, "max_stars_count": 0, "path": "pygl/skybox.py" }, { "content": "from OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport glm\nfrom pygl.shader import *\nimport numpy as np\n\nclass Line ():\n \n VBO = 0\n VAO = 0\n vertices = np.array([0.0,0.0,0.0,0.0,0.0,0.0], dtype=np.float32);\n startPoint = glm.vec3(0,0,0);\n endPoint = glm.vec3(0,0,0);\n \n model = glm.mat4(1.0);\n view = glm.mat4(1.0);\n projection = glm.mat4(1.0);\n\n lineColor = glm.vec3(0,0,0);\n\n def __init__(self, start, end, shader):\n\n self.line_shader = shader\n self.startPoint = start;\n self.endPoint = end;\n self.lineColor = glm.vec3(1,1,1);\n self.model = glm.mat4(1.0);\n\n #self.vertices = [\n # start.x, start.y, start.z,\n # end.x, end.y, end.z,\n #]\n np.put(self.vertices, [0,1,2,3,4,5], [start.x, start.y, start.z, end.x, end.y, end.z])\n \n self.vertices\n\n self.VAO = glGenVertexArrays(1);\n self.VBO = glGenBuffers(1);\n glBindVertexArray(self.VAO);\n\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO);\n\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW);\n\n # 8 = sizeof float\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 8, ctypes.c_void_p(0));\n glEnableVertexAttribArray(0);\n\n glBindBuffer(GL_ARRAY_BUFFER, 0); \n glBindVertexArray(0); \n\n \n\n def setMVP(self, model, view, projection):\n self.model = model;\n self.view 
= view;\n self.projection = projection;\n return 1;\n \n\n def setColor(self, color):\n self.lineColor = color;\n return 1;\n \n\n def draw(self):\n self.line_shader.use()\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); \n self.line_shader.setMat4(\"model\", glm.value_ptr(self.model))\n self.line_shader.setMat4(\"view\", glm.value_ptr(self.view))\n self.line_shader.setMat4(\"projection\", glm.value_ptr(self.projection))\n\n self.line_shader.setVec3(\"color\", self.lineColor[0], self.lineColor[1], self.lineColor[2])\n\n glBindVertexArray(self.VAO);\n glDrawArrays(GL_LINES, 0, 2);\n return 1;\n \n\n def __del__(self):\n\n glDeleteVertexArrays(1, self.VAO);\n glDeleteBuffers(1, self.VBO);\n \n", "id": "11394305", "language": "Python", "matching_score": 0.579235315322876, "max_stars_count": 0, "path": "pygl/line.py" }, { "content": "import glm\nfrom math import sin, cos\n\n# Defines several possible options for camera movement. Used as abstraction to stay away from window-system specific input methods\nfrom enum import IntEnum\n\nfrom numpy.lib.histograms import _ravel_and_check_weights\nclass Camera_Movement(IntEnum):\n FORWARD = 0\n BACKWARD = 1\n LEFT = 2 \n RIGHT = 3\n UP = 4\n DOWN = 5\n ROLL_LEFT = 6\n ROLL_RIGHT = 7\n\n\n# Default camera values\nYAW = -90.0\nPITCH = 0.0\nSPEED = 5\nSENSITIVITY = 0.005\nZOOM = 45.0\n\nclass Camera:\n\n # camera Attributes\n Position = glm.vec3\n Front = glm.vec3 \n Up = glm.vec3 \n Right = glm.vec3 \n WorldUp = glm.vec3 \n # euler Angle\n Yaw = -90.0\n Pitch = 0.0\n Roll = 0.0\n # camera option\n MovementSpeed = SPEED\n MouseSensitivity = 0.05\n Zoom = 45.0\n\n # constructor with vectors\n def __init__(self, position = glm.vec3(0.0, 0.0, 0.0), up = glm.vec3(0.0, 1.0, 0.0), yaw = YAW, pitch = PITCH):\n \n Front = glm.vec3(0.0, 0.0, -1.0)\n Right = glm.vec3(0.0, -1.0, 0.0)\n MovementSpeed = SPEED\n MouseSensitivity = SENSITIVITY\n Zoom = ZOOM\n\n self.Position = position;\n self.WorldUp = up;\n self.Up = up;\n self.Yaw = yaw;\n self.Pitch = pitch;\n self.updateCameraVectors();\n \n\n # returns the view matrix calculated using Euler Angles and the LookAt Matrix\n def GetViewMatrix(self):\n return glm.lookAt(self.Position, self.Position + self.Front, self.Up);\n\n # processes input received from any keyboard-like input system. Accepts input parameter in the form of camera defined ENUM (to abstract it from windowing systems)\n def ProcessKeyboard(self, direction, deltaTime):\n \n velocity = self.MovementSpeed * deltaTime;\n if (direction == Camera_Movement.FORWARD):\n self.Position += self.Front * velocity;\n if (direction == Camera_Movement.BACKWARD):\n self.Position -= self.Front * velocity;\n if (direction == Camera_Movement.LEFT):\n self.Position -= self.Right * velocity;\n if (direction == Camera_Movement.RIGHT):\n self.Position += self.Right * velocity;\n if (direction == Camera_Movement.ROLL_LEFT):\n self.Roll -= 1\n if (direction == Camera_Movement.ROLL_RIGHT):\n self.Roll += 1\n if (direction == Camera_Movement.UP):\n self.Position += self.Up * velocity;\n if (direction == Camera_Movement.DOWN):\n self.Position -= self.Up * velocity;\n \n self.updateCameraVectors();\n \n\n # processes input received from a mouse input system. 
Expects the offset value in both the x and y direction.\n def ProcessMouseMovement(self, xoffset, yoffset, constrainPitch = True):\n \n xoffset *= self.MouseSensitivity;\n yoffset *= self.MouseSensitivity;\n\n self.Yaw += xoffset*cos(glm.radians(self.Roll)) + yoffset*sin(glm.radians(self.Roll));\n self.Pitch += yoffset*cos(glm.radians(self.Roll)) - xoffset*sin(glm.radians(self.Roll))\n\n # make sure that when pitch is out of bounds, screen doesn't get flipped\n if (constrainPitch):\n\n if (self.Pitch > 89.0):\n self.Pitch = 89.0\n\n if (self.Pitch < -89.0):\n self.Pitch = -89.0\n \n\n # update Front, Right and Up Vectors using the updated Euler angles\n self.updateCameraVectors();\n \n\n # processes input received from a mouse scroll-wheel event. Only requires input on the vertical wheel-axis\n def ProcessMouseScroll(self, yoffset):\n \n self.Zoom -= yoffset;\n if (self.Zoom < 1.0):\n self.Zoom = 1.0\n if (self.Zoom > 45.0):\n self.Zoom = 45.0 \n \n\n # calculates the front vector from the Camera's (updated) Euler Angles\n def updateCameraVectors(self):\n \n roll = 45\n # calculate the new Front vector\n front = glm.vec3();\n front.x = cos(glm.radians(self.Yaw)) * cos(glm.radians(self.Pitch))\n front.y = sin(glm.radians(self.Pitch))\n front.z = sin(glm.radians(self.Yaw)) * cos(glm.radians(self.Pitch))\n\n self.Front = glm.normalize(front);\n # also re-calculate the Right and Up vector\n \n\n if True:\n self.Right = glm.normalize(glm.cross(self.Front, self.WorldUp)); # normalize the vectors, because their length gets closer to 0 the more you look up or down which results in slower movement.\n \n self.Up = glm.normalize(glm.cross(self.Right, self.Front));\n\n if False:\n #roll_mat = glm.rotate(glm.mat4(1.0), glm.radians(roll), self.Front);\n #self.Up = glm.mat3(roll_mat) * self.Up;\n\n right = glm.vec3();\n right.x = cos(glm.radians(self.Yaw)) * sin(glm.radians(self.Pitch))*sin(glm.radians(roll)) - sin(glm.radians(self.Yaw))*cos(glm.radians(roll))\n right.y = sin(glm.radians(self.Yaw)) * sin(glm.radians(self.Pitch))*sin(glm.radians(roll)) + cos(glm.radians(self.Yaw))*cos(glm.radians(roll))\n right.z = cos(glm.radians(self.Pitch)) * cos(glm.radians(roll))\n \n self.Right = glm.normalize(right)\n \n up = glm.vec3();\n up.x = cos(glm.radians(self.Yaw)) * sin(glm.radians(self.Pitch))*cos(glm.radians(roll)) + sin(glm.radians(self.Yaw))*sin(glm.radians(roll))\n up.y = sin(glm.radians(self.Yaw)) * sin(glm.radians(self.Pitch))*cos(glm.radians(roll)) - cos(glm.radians(self.Yaw))*sin(glm.radians(roll))\n up.z = cos(glm.radians(self.Yaw)) * cos(glm.radians(roll))\n \n self.Up = glm.normalize(up)\n\n if True:\n roll_mat = glm.rotate(glm.mat4(1.0), glm.radians(self.Roll), self.Front);\n self.Right = glm.mat3(roll_mat) * self.Right;\n self.Up = glm.mat3(roll_mat) * self.Up;", "id": "10542423", "language": "Python", "matching_score": 1.6842191219329834, "max_stars_count": 0, "path": "pygl/camera.py" }, { "content": "import pygame\nimport OpenGL\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport pywavefront\nfrom PIL import Image\nimport numpy\nimport math\n\nscene = pywavefront.Wavefront('f16.obj', collect_faces=True)\n\nwidth = 8*50;\nheight = 6*50;\ncamX = 0;\ncamZ = 0;\nyaw = 0;\npitch = 0;\n\nclass Motion(object):\n __slots__ = ['forward', 'backward', 'left', 'right']\n \ndef passive_motion(x, y):\n # two variables to store X and Y coordinates, as observed from the center\n # of the window\n \n dev_x = (width/2)-x;\n dev_y = 
(height/2)-y;\n\n #/* apply the changes to pitch and yaw*/\n yaw+=dev_x/10.0;\n pitch+=dev_y/10.0;\n\n\ndef read_texture(filename):\n img = Image.open(filename)\n img_data = numpy.array(list(img.getdata()), numpy.int8)\n texture_id = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D,texture_id)\n glPixelStorei(GL_UNPACK_ALIGNMENT, 1)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.size[0], img.size[1], 0, GL_RGB, GL_UNSIGNED_BYTE, img_data)\n return texture_id\n \n\n\ncubeVertices = ((20,20,20),(20,20,-20),(20,-20,-20),(20,-20,20),(-20,20,20),(-20,-20,-20),(-20,-20,20),(-20, 20,-20))\ncubeEdges = ((0,1),(0,3),(0,4),(1,2),(1,7),(2,5),(2,3),(3,6),(4,6),(4,7),(5,6),(5,7))\ncubeQuads = ((0,3,6,4),(2,5,6,3),(1,2,5,7),(1,0,4,7),(7,4,6,5),(2,3,0,1))\n\nscene_box = (scene.vertices[0], scene.vertices[0])\nfor vertex in scene.vertices:\n min_v = [min(scene_box[0][i], vertex[i]) for i in range(3)]\n max_v = [max(scene_box[1][i], vertex[i]) for i in range(3)]\n scene_box = (min_v, max_v)\n\nscene_size = [scene_box[1][i]-scene_box[0][i] for i in range(3)]\nmax_scene_size = max(scene_size)\nscaled_size = 5\nscene_scale = [scaled_size/max_scene_size for i in range(3)]\nscene_trans = [-(scene_box[1][i]+scene_box[0][i])/2 for i in range(3)]\n\n\ndef wireCube():\n glBegin(GL_LINES)\n for cubeEdge in cubeEdges:\n for cubeVertex in cubeEdge:\n glVertex3fv(cubeVertices[cubeVertex])\n glEnd()\n \n \ndef solidCube():\n glBegin(GL_QUADS)\n for cubeQuad in cubeQuads:\n for cubeVertex in cubeQuad:\n glVertex3fv(cubeVertices[cubeVertex])\n glEnd()\n \n \ndef Model():\n glPushMatrix()\n glScalef(*scene_scale)\n glTranslatef(*scene_trans)\n\n for mesh in scene.mesh_list:\n glBegin(GL_TRIANGLES)\n for face in mesh.faces:\n for vertex_i in face:\n glVertex3f(*scene.vertices[vertex_i])\n glEnd()\n\n glPopMatrix()\n \ndef camera(d_yaw, d_pitch, mot):\n \n global camX, camZ, yaw, pitch\n if (mot.left == True):\n camX += math.cos(math.radians((yaw+90+90)))/5.0;\n camZ -= math.sin(math.radians((yaw+90+90)))/5.0;\n if (mot.right == True):\n #glTranslatef(1, 0, 0)\n camX += math.cos(math.radians((yaw+90-90)))/5.0;\n camZ -= math.sin(math.radians((yaw+90-90)))/5.0;\n if (mot.forward == True):\n #glTranslatef(0, 1, 0)\n camX += math.cos(math.radians((yaw+90)))/5.0;\n camZ -= math.sin(math.radians((yaw+90)))/5.0;\n if (mot.backward == True):\n #glTranslatef(0, -1, 0)\n camX += math.cos(math.radians((yaw+90+180)))/5.0;\n camZ -= math.sin(math.radians((yaw+90+180)))/5.0;\n \n glRotatef(-d_pitch, 1, 0, 0);\n glRotatef(-d_yaw, 0, 1, 0);\n glTranslatef(-camX, 0, -camZ);\n\n\ndef main():\n pygame.init()\n \n \n display = (800, 600)\n pygame.display.set_mode(display, DOUBLEBUF | OPENGL)\n gluPerspective(45, (display[0] / display[1]), 1, 500.0)\n glTranslatef(0.0, 0.0, -10)\n \n print ('Vendor: %s' % (glGetString(GL_VENDOR)))\n print ('Opengl version: %s' % (glGetString(GL_VERSION)))\n print ('GLSL Version: %s' % (glGetString(GL_SHADING_LANGUAGE_VERSION)))\n print ('Renderer: %s' % (glGetString(GL_RENDERER)))\n \n \n button_down = False\n button2_down = False\n \n camX = 0;\n camZ = -10\n \n yaw = 0;\n 
pitch = 0;\n \n motion = Motion()\n\n while True:\n d_yaw = 0\n d_pitch = 0\n \n motion.left = False;\n motion.right = False;\n motion.forward = False;\n motion.backward = False;\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n #glTranslatef(-1, 0, 0)\n motion.left = True;\n if event.key == pygame.K_RIGHT:\n #glTranslatef(1, 0, 0)\n motion.right = True;\n if event.key == pygame.K_UP:\n #glTranslatef(0, 1, 0)\n motion.forward = True;\n if event.key == pygame.K_DOWN:\n #glTranslatef(0, -1, 0)\n motion.backward = True;\n \n \n if event.type == pygame.MOUSEMOTION:\n \n if button_down == True:\n #print(event.rel)\n d_pitch = event.rel[1]/10;\n d_yaw = event.rel[0]/10;\n \n yaw = yaw + d_yaw\n pitch = pitch + d_pitch\n \n #glRotatef(event.rel[1]*0.1, 1, 0, 0)\n #glRotatef(event.rel[0]*0.1, 0, 1, 0)\n if button2_down == True:\n #glTranslatef(event.rel[0]*0.01, -event.rel[1]*0.01, 0)\n x = x + event.rel[0]*0.01\n y = y + event.rel[1]*0.01\n \n \n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 4: # wheel rolled up\n glScaled(1.05, 1.05, 1.05);\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 5: # wheel rolled down\n glScaled(0.95, 0.95, 0.95);\n \n for event in pygame.mouse.get_pressed():\n #print(pygame.mouse.get_pressed())\n if pygame.mouse.get_pressed()[0] == 1:\n button_down = True\n elif pygame.mouse.get_pressed()[0] == 0:\n button_down = False\n \n if pygame.mouse.get_pressed()[2] == 1:\n button2_down = True\n elif pygame.mouse.get_pressed()[2] == 0:\n button2_down = False\n \n camera(d_yaw, d_pitch, motion)\n \n print(\"yaw:{} pitch:{} motion:{}\".format(yaw, pitch, motion))\n\n \n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n Model()\n solidCube()\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n\n pygame.display.flip()\n pygame.time.wait(10)\n\nmain()", "id": "2146682", "language": "Python", "matching_score": 5.5392279624938965, "max_stars_count": 0, "path": "examples/bunny.py" }, { "content": "import pygame\nfrom pygame.locals import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nimport math\n\npygame.init()\ndisplay = (400, 300)\nscree = pygame.display.set_mode(display, DOUBLEBUF | OPENGL)\n\nglEnable(GL_DEPTH_TEST)\nglEnable(GL_LIGHTING)\nglShadeModel(GL_SMOOTH)\nglEnable(GL_COLOR_MATERIAL)\nglColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)\n\nglEnable(GL_LIGHT0)\nglLightfv(GL_LIGHT0, GL_AMBIENT, [0.5, 0.5, 0.5, 1])\nglLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1])\n\nsphere = gluNewQuadric() \n\nglMatrixMode(GL_PROJECTION)\ngluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n\nglMatrixMode(GL_MODELVIEW)\ngluLookAt(0, -8, 0, 0, 0, 0, 0, 0, 1)\nviewMatrix = glGetFloatv(GL_MODELVIEW_MATRIX)\nglLoadIdentity()\n\n# init mouse movement and center mouse on screen\ndisplayCenter = [scree.get_size()[i] // 2 for i in range(2)]\nmouseMove = [0, 0]\npygame.mouse.set_pos(displayCenter)\n\nup_down_angle = 0.0\npaused = False\nrun = True\nwhile run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE or event.key == pygame.K_RETURN:\n run = False\n if event.key == pygame.K_PAUSE or event.key == pygame.K_p:\n paused = not paused\n pygame.mouse.set_pos(displayCenter) \n if not paused: \n if event.type == pygame.MOUSEMOTION:\n mouseMove = [event.pos[i] - displayCenter[i] for i in range(2)]\n 
pygame.mouse.set_pos(displayCenter) \n\n if not paused:\n # get keys\n keypress = pygame.key.get_pressed()\n #mouseMove = pygame.mouse.get_rel()\n \n # init model view matrix\n glLoadIdentity()\n\n # apply the look up and down\n up_down_angle += mouseMove[1]*0.1\n glRotatef(up_down_angle, 1.0, 0.0, 0.0)\n\n \n \n # init the view matrix\n glPushMatrix()\n glLoadIdentity()\n\n # apply the movment \n if keypress[pygame.K_w]:\n glTranslatef(0,0,0.1)\n if keypress[pygame.K_s]:\n glTranslatef(0,0,-0.1)\n if keypress[pygame.K_d]:\n glTranslatef(-0.1,0,0)\n if keypress[pygame.K_a]:\n glTranslatef(0.1,0,0)\n if keypress[pygame.K_SPACE]:\n glTranslatef(0,0.1,0)\n if keypress[pygame.K_LSHIFT]:\n glTranslatef(0,-0.1,0)\n\n # apply the left and right rotation\n glRotatef(mouseMove[0]*0.1, 0.0, 1.0, 0.0)\n\n # multiply the current matrix by the get the new view matrix and store the final vie matrix \n glMultMatrixf(viewMatrix)\n viewMatrix = glGetFloatv(GL_MODELVIEW_MATRIX)\n\n # apply view matrix \n glPopMatrix()\n glMultMatrixf(viewMatrix)\n\n glLightfv(GL_LIGHT0, GL_POSITION, [1, -1, 1, 0])\n\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n glPushMatrix()\n\n glColor4f(0.5, 0.5, 0.5, 1)\n glBegin(GL_QUADS)\n glVertex3f(-10, -10, -2)\n glVertex3f(10, -10, -2)\n glVertex3f(10, 10, -2)\n glVertex3f(-10, 10, -2)\n glEnd()\n\n glTranslatef(-1.5, 0, 0)\n glColor4f(0.5, 0.2, 0.2, 1)\n gluSphere(sphere, 1.0, 32, 16) \n\n glTranslatef(3, 0, 0)\n glColor4f(0.2, 0.2, 0.5, 1)\n gluSphere(sphere, 1.0, 32, 16) \n\n glPopMatrix()\n\n pygame.display.flip()\n pygame.time.wait(10)\n\npygame.quit()", "id": "3769558", "language": "Python", "matching_score": 2.0372021198272705, "max_stars_count": 0, "path": "examples/game.py" }, { "content": "import sys\n\n#If we're running from the test directory, this finds the modules in pygl\nsys.path.append('./../')\n\nfrom pygl.shader import *\nimport pygame\n\npygame.init()\ndisplay = (800, 600)\npygame.display.set_mode(display, pygame.DOUBLEBUF | pygame.OPENGL)\n\nglClearColor(0.0, 0.5, 0.5, 1.0)\nglEnableClientState(GL_VERTEX_ARRAY)\n\nsh = Shader()\n\nrunning = True\nwhile running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n pygame.display.flip()\n pygame.time.wait(10)\n", "id": "4757253", "language": "Python", "matching_score": 0.8890373110771179, "max_stars_count": 0, "path": "test/testShader.py" } ]
2.218263
Lyah2000
[ { "content": "#\n# Copyright (c) 2021, <NAME>\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nfrom binary_tree_generator import levelorder, preorder\n\nclass Node:\n def __init__(self, data, left=None, right=None):\n self.left = left\n self.right = right\n self.data = data\n\n\nif __name__ == '__main__':\n print('Hello world')\n tree = Node(1, \n Node(2, \n Node(4), \n Node(5)), \n Node(3, \n Node(6), \n Node(7)))\n g = preorder(tree)\n \n for X in map(lambda node: node.data, preorder(tree)):\n print(X) \n\n for i in levelorder(tree):\n print(i.data)\n \n", "id": "4609702", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "binary_tree.py" }, { "content": "#\n# Copyright (c) 2021, <NAME>\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\ndef preorder(node):\n if node == None:\n return\n yield node\n yield from preorder(node.left)\n yield from preorder(node.right)\n\n\ndef inorder(node):\n if node == None:\n return\n yield from inorder(node.left)\n yield node\n yield from inorder(node.right)\n\n\ndef postorder(node):\n if node == None:\n return\n yield from postorder(node.left)\n yield from postorder(node.right)\n yield node\n\n\ndef levelorder(node):\n\n def _levelorder(queue):\n\n if len(queue) == 0:\n return\n current = queue.pop()\n yield current\n if current.left:\n queue.insert(0, current.left)\n if current.right:\n queue.insert(0, current.right)\n yield from _levelorder(queue)\n\n return _levelorder([node])\n", "id": "4906470", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "binary_tree_generator.py" } ]
0
almiuxcs
[ { "content": "from matplotlib import pyplot as plt \nimport numpy as np\n\ndef plot(a, x0=0.1, iterations=50):\n\t\"\"\" Plot a cobweb graph using the equation:\n\t\ta * x * (1 - x)\n\t\n\t\tIt does 50 iterations to plot the graph. \n\t\t\n\t\tThe default value of x0 is 0.1 and it must \n\t\tbe in the interval ]0, 1[\n\t\"\"\"\n\n\t# If x0 isn't valid \n\tif x0 <= 0 or x0 >= 1: return\n\n\tplt.title(\"Cobweb Plot\")\n\n\t# Plot f(x) = ax(1 - x)\n\tx = np.linspace(0, 1, 1000)\n\tfx = a * x * (1 - x)\t\n\tplt.plot(x, fx, color=\"black\", label=\"f(x) = ax(1-x)\")\n\t\n\t# Plot y = x\n\tplt.plot([0, 1.2], [0, 1.2], color=\"red\")\n\n\t# Plot f^50(x0)\n\tlast_x, last_y = x0, 0\n\tfor _ in range(iterations):\n\t\tnext_x = a * last_x * (1 - last_x)\n\t\t# Plot vertical line \n\t\tplt.plot([last_x, last_x], [last_y, next_x], color=\"black\")\n\t\t# Plot horizontal line\n\t\tplt.plot([last_x, next_x], [next_x, next_x], color=\"red\")\n\n\t\tlast_x, last_y = next_x, next_x\n\n\tplt.show()\n\n\ndef main():\n\tplot(4, 0.2)\n\t\n\nif __name__ == \"__main__\":\n\tmain()", "id": "1320317", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "cobwebplot.py" } ]
0
MeNavel
[ { "content": "import pickle,sys\nfrom keras.applications.mobilenet import MobileNet\nimport keras\nfrom keras.models import Model\nimport cv2\nimport time\nimport operator\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nDIM=224\nIMG_DIM = (DIM, DIM)\ninput_shape = (DIM, DIM, 3)\n\ndef loadModel(nmModel):\n f = open(nmModel, 'rb')\n model = pickle.load(f)\n return model\n\ndef createMobileNet():\n mobileNet = MobileNet(include_top=False, weights='imagenet', \n input_shape=input_shape)\n\n output = mobileNet.layers[-1].output\n output = keras.layers.Flatten()(output)\n ModelmobileNet = Model(inputs=mobileNet.input, outputs=output)# base_model.get_layer('custom').output)\n\n ModelmobileNet.trainable = False\n for layer in ModelmobileNet.layers:\n layer.trainable = False\n return ModelmobileNet\n\ndef prediksiImg(nmFile,model):\n t = time.time()\n img = cv2.imread(nmFile)\n if img is None:\n return t,\"REJECTED, not valid file , cant be predict\"\n \n img = cv2.resize(img, IMG_DIM)\n img=img/255\n img=img.reshape(1,img.shape[0],img.shape[1],img.shape[2])\n ModelmobileNet = createMobileNet()\n\n ftr_np = ModelmobileNet(img)\n \n predicted_proba = model.predict_proba(ftr_np)\n res = {}\n prob = -1\n for i in range(len(model.classes_)):\n res[model.classes_[i]] = predicted_proba[0][i]\n res = sorted(res.items(), key=operator.itemgetter(1))\n res.reverse()\n \n rank = 0\n prev_val = 0\n huruf = ''\n for key, val in res:\n if val >= prev_val:\n rank += 1\n prob = val\n huruf = key\n prev_val = val\n # rank += 1\n # if key == huruf:\n # prob = val\n # break\n score = round(prob*100,2)\n\n if rank <= 5 and score > 60:\n result = \"ACCEPTED,\"\n\n else:\n result = \"REJECTED,\"\n\n # return t,\"%s %s mobileNet score %g rank %g\" %(result,huruf,score,rank)\n return result\n\ndef cobak(nmFile, model):\n t = time.time()\n img = cv2.imread(nmFile)\n if img is None:\n return t,\"REJECTED, not valid file , cant be predict\"\n\n img = cv2.resize(img, IMG_DIM)\n img=img/255\n img=img.reshape(1,img.shape[0],img.shape[1],img.shape[2])\n ModelmobileNet = createMobileNet()\n\n ftr_np = ModelmobileNet(img)\n\n predicted_proba = model.predict_proba(ftr_np)\n\n rank = 1\n score = 99\n if rank <= 5 and score > 60:\n result = \"ACCEPTED,\"\n\n else:\n result = \"REJECTED,\"\n\n return predicted_proba\n\nif __name__ == '__main__':\n filemodel = sys.argv[2]\n nmFile = sys.argv[1]\n\n # t,r=prediksiImg(nmFile,model)\n # elapsed = time.time() - t\n # r=prediksiImg(nmFile, filemodel)\n # print(\"%s\"%(r))\n\n \n r=cobak(nmFile, filemodel)\n t = time.ctime()\n print(\"Success %s %s\"%(t, r))\n", "id": "10323855", "language": "Python", "matching_score": 4.230312824249268, "max_stars_count": 0, "path": "storage/app/python/hello_world.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 31 19:22:42 2021\n\n@author: drajad\n\"\"\"\n\nimport pickle,sys\nimport cv2\nimport time\nimport operator\nimport warnings\nimport face_recognition\nwarnings.filterwarnings(\"ignore\")\n\nDIM=224\nIMG_DIM = (DIM, DIM)\ninput_shape = (DIM, DIM, 3)\n\ndef loadModel(nmModel):\n f = open(nmModel, 'rb')\n model = pickle.load(f)\n return model\n\ndef createMobileNet():\n import keras\n from keras.applications.mobilenet import MobileNet\n from keras.models import Model\n from tensorflow.keras.backend import clear_session\n clear_session()\n mobileNet = MobileNet(include_top=False, weights='imagenet', \n input_shape=input_shape)\n\n output = mobileNet.layers[-1].output\n output = 
keras.layers.Flatten()(output)\n ModelmobileNet = Model(inputs=mobileNet.input, outputs=output)# base_model.get_layer('custom').output)\n\n ModelmobileNet.trainable = False\n for layer in ModelmobileNet.layers:\n layer.trainable = False\n return ModelmobileNet\n\n\ndef prediksiImg(nmFile,model):\n t = time.time()\n img = cv2.imread(nmFile)\n if img is None:\n return \"REJECTED, not valid file , cant be predict\"\n face_bounding_boxes = face_recognition.face_locations(img)\n if len(face_bounding_boxes) != 1:\n return \"Wajah Tidak Terdeteksi\"\n for face_location in face_bounding_boxes:\n top, right, bottom, left = face_location\n face_image = img[top:bottom, left:right]\n img = cv2.resize(face_image, IMG_DIM)\n img=img/255\n img=img.reshape(1,img.shape[0],img.shape[1],img.shape[2])\n ModelmobileNet = createMobileNet()\n\n ftr_np = ModelmobileNet(img)\n \n predicted_proba = model.predict_proba(ftr_np)\n res = {}\n prob = -1\n for i in range(len(model.classes_)):\n res[model.classes_[i]] = predicted_proba[0][i]\n res = sorted(res.items(), key=operator.itemgetter(1))\n res.reverse()\n \n rank = 0\n prev_val = 0\n huruf = ''\n for key, val in res:\n if val >= prev_val:\n rank += 1\n prob = val\n huruf = key\n prev_val = val\n # rank += 1\n # if key == huruf:\n # prob = val\n # break\n score = round(prob*100,2)\n nmFile = nmFile.replace('/','\\\\')\n\n if rank <= 5 and score > 60:\n # result = \"ACCEPTED\"\n return \"%s\" %(huruf)\n\n else:\n result = \"Tidak Terdeteksi\"\n return \"%s\" %(result)\n \n # return t,\"%s %s mobileNet score %g rank %g\" %(result,huruf,score,rank)\n # return \"halo\"\n\nif __name__ == '__main__':\n filemodel = sys.argv[2]\n nmFile = sys.argv[1]\n model=loadModel(filemodel)\n r=prediksiImg(nmFile,model)\n print(\"%s\" %(r))\n \n # model=loadModel('/Applications/XAMPP/xamppfiles/htdocs/website/storage/app/python/model/face/mobileNet_Face.pkl')\n # nmFile = '/Users/drajad/Desktop/Test/fariq_no_mask.JPG'\n # nmFile = '/Users/drajad/Desktop/Test/no_mask_firsa.png'\n # r=prediksiImg(nmFile,model)\n # print(\"%s\" %(r))\n \n", "id": "3283471", "language": "Python", "matching_score": 4.915989398956299, "max_stars_count": 0, "path": "storage/app/python/predict_face.py" }, { "content": "import pickle,sys\nimport cv2\nimport time\nimport operator\nimport warnings\nimport face_recognition\nwarnings.filterwarnings(\"ignore\")\n\nfrom keras.preprocessing import image\nfrom keras.layers import Dense,GlobalAveragePooling2D,Dropout\nfrom keras.applications.mobilenet import MobileNet\nfrom keras.models import Model\nimport numpy as np\n\nfrom flask import Flask, request\nfrom threading import Thread\nfrom tensorflow.keras.models import load_model\nfrom PIL import Image\n\n\napp = Flask(__name__)\n\n@app.route('/predict_mask')\ndef index_mask():\n model=loadModel('/Users/drajad/Mac/Website/website-masker/storage/app/python/model/mobileNet_Mask.pkl')\n nmFile = request.args['file']\n r=prediksiImg(nmFile,model)\n return r\ndef loadModel(nmModel):\n f = open(nmModel, 'rb')\n model = pickle.load(f)\n return model\ndef createMobileNet():\n DIM=224\n IMG_DIM = (DIM, DIM)\n input_shape = (DIM, DIM, 3)\n import keras\n from keras.applications.mobilenet import MobileNet\n from keras.models import Model\n from tensorflow.keras.backend import clear_session\n clear_session()\n mobileNet = MobileNet(include_top=False, weights='imagenet', \n input_shape=input_shape)\n\n output = mobileNet.layers[-1].output\n output = keras.layers.Flatten()(output)\n ModelmobileNet = Model(inputs=mobileNet.input, 
outputs=output)# base_model.get_layer('custom').output)\n\n ModelmobileNet.trainable = False\n for layer in ModelmobileNet.layers:\n layer.trainable = False\n return ModelmobileNet\ndef prediksiImg(nmFile,model):\n DIM=224\n IMG_DIM = (DIM, DIM)\n input_shape = (DIM, DIM, 3)\n img = cv2.imread(nmFile)\n if img is None:\n return t,\"REJECTED, not valid file , cant be predict\"\n \n img = cv2.resize(img, IMG_DIM)\n img=img/255\n img=img.reshape(1,img.shape[0],img.shape[1],img.shape[2])\n ModelmobileNet = createMobileNet()\n\n ftr_np = ModelmobileNet(img)\n \n predicted_proba = model.predict_proba(ftr_np)\n res = {}\n prob = -1\n for i in range(len(model.classes_)):\n res[model.classes_[i]] = predicted_proba[0][i]\n res = sorted(res.items(), key=operator.itemgetter(1))\n res.reverse()\n \n rank = 0\n prev_val = 0\n huruf = ''\n for key, val in res:\n if val >= prev_val:\n rank += 1\n prob = val\n huruf = key\n prev_val = val\n score = round(prob*100,2)\n nmFile = nmFile.replace('/','\\\\')\n\n if rank <= 5 and score > 70:\n return \"%s\" %(huruf)\n\n# @app.route('/predict_face')\n# def index_face():\n# # model=loadModel('/Users/drajad/Mac/Website/website-masker/storage/app/python/model/mobileNet_Face.pkl')\n# model=loadModel('/Users/drajad/Mac/Website/website-masker/storage/app/python/model/mobileNet_Face_Baru_2.pkl')\n\n# nmFile = request.args['file']\n# r=prediksiImg(nmFile,model)\n# return r\n\n# def loadModel(nmModel):\n# f = open(nmModel, 'rb')\n# model = pickle.load(f)\n# return model\n\n# def createMobileNet():\n# DIM=224\n# IMG_DIM = (DIM, DIM)\n# input_shape = (DIM, DIM, 3)\n# import keras\n# from keras.applications.mobilenet import MobileNet\n# from keras.models import Model\n# from tensorflow.keras.backend import clear_session\n# clear_session()\n# mobileNet = MobileNet(include_top=False, weights='imagenet', \n# input_shape=input_shape)\n\n# output = mobileNet.layers[-1].output\n# output = keras.layers.Flatten()(output)\n# ModelmobileNet = Model(inputs=mobileNet.input, outputs=output)# base_model.get_layer('custom').output)\n\n# ModelmobileNet.trainable = False\n# for layer in ModelmobileNet.layers:\n# layer.trainable = False\n# return ModelmobileNet\n\n# def prediksiImg(nmFile,model):\n# DIM=224\n# IMG_DIM = (DIM, DIM)\n# input_shape = (DIM, DIM, 3)\n# img = cv2.imread(nmFile)\n# if img is None:\n# return \"REJECTED, not valid file , cant be predict\"\n# face_bounding_boxes = face_recognition.face_locations(img)\n# if len(face_bounding_boxes) != 1:\n# return \"Wajah Tidak Terdeteksi\"\n# for face_location in face_bounding_boxes:\n# top, right, bottom, left = face_location\n# face_image = img[top:bottom, left:right]\n# img = cv2.resize(face_image, IMG_DIM)\n# img=img/255\n# img=img.reshape(1,img.shape[0],img.shape[1],img.shape[2])\n# ModelmobileNet = createMobileNet()\n\n# ftr_np = ModelmobileNet(img)\n \n# predicted_proba = model.predict_proba(ftr_np)\n# res = {}\n# prob = -1\n# for i in range(len(model.classes_)):\n# res[model.classes_[i]] = predicted_proba[0][i]\n# res = sorted(res.items(), key=operator.itemgetter(1))\n# res.reverse()\n \n# rank = 0\n# prev_val = 0\n# huruf = ''\n# for key, val in res:\n# if val >= prev_val:\n# rank += 1\n# prob = val\n# huruf = key\n# prev_val = val\n# score \n\n@app.route('/predict_face')\ndef index_face():\n # nmFile = ('/Users/drajad/Mac/Website/website-masker/storage/app/python/IMG_6712.JPG')\n nmFile = request.args['file']\n mapClass = ('/Users/drajad/Mac/Website/website-masker/storage/app/python/map.npz')\n loaded = np.load(mapClass)\n 
mapAngka = loaded['label']\n img_size = 224\n bntk_input = (img_size, img_size, 3)\n kelas=len(mapAngka)\n fold_no=2\n nmModel = ('/Users/drajad/Mac/Website/website-masker/storage/app/python/model/modelCNN.h5')\n model = mobileNetCNN(bntk_input,kelas)\n model.load_weights(nmModel) \n img = load_image(nmFile, img_size)\n img = img.reshape(1,224,224,3)\n pred = model(img)\n id_person = np.argmax(pred)\n return(mapAngka[id_person])\n \n # akurasi = np.array(pred)\n # akurasi = akurasi.ravel()\n # return(\"%s %s\"%(mapAngka[id_person], akurasi[id_person]))\n\n\ndef mobileNetCNN(bntk_input,kelas):\n base_model=MobileNet(weights='imagenet',include_top=False, input_shape=bntk_input)\n\n x=base_model.output\n x=GlobalAveragePooling2D()(x)\n x=Dense(1024,activation='relu')(x) #dense layer 2\n x=Dropout(0.1)(x) \n x=Dense(512,activation='relu')(x) #dense layer 3 \n x=Dropout(0.1)(x) \n \n preds=Dense(kelas,activation='softmax')(x) #final layer with softmax activation\n model=Model(inputs=base_model.input,outputs=preds)\n model.compile(optimizer='Adam',#Adam()#0.0001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\ndef load_image(img_path, img_size) :\n \n img = image.load_img(img_path, target_size=(img_size, img_size))\n img_tensor = image.img_to_array(img) # (height, width, channels)\n img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)\n img_tensor /= 255. # imshow expects values in the range [0, 1]\n return img_tensor\n\n@app.route('/predict_shield')\ndef index_shield():\n file = request.args['file']\n\n # Parameters\n input_size = (150,150)\n\n #define input shape\n channel = (3,)\n input_shape = input_size + channel\n\n #define labels\n labels = ['noshield', 'shield']\n\n # ada 2 cara load model, jika cara pertama berhasil maka bisa lasngusng di lanjutkan ke fungsi prediksi\n\n MODEL_PATH = '/Users/drajad/Mac/Website/website-masker/storage/app/python/model/model.h5'\n model = load_model(MODEL_PATH,compile=False)\n\n # read image\n im = Image.open(file)\n X = preprocess(im,input_size)\n X = reshape([X])\n y = model.predict(X)\n\n # print( labels[np.argmax(y)], np.max(y) )\n return labels[np.argmax(y)]\n\ndef preprocess(img,input_size):\n nimg = img.convert('RGB').resize(input_size, resample= 0)\n img_arr = (np.array(nimg))/255\n return img_arr\n\ndef reshape(imgs_arr):\n return np.stack(imgs_arr, axis=0)\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True)", "id": "5973143", "language": "Python", "matching_score": 3.6792891025543213, "max_stars_count": 0, "path": "storage/app/python/predict.py" }, { "content": "x=GlobalAveragePooling2D()(x)\nx=Dense(1024,activation='relu')(x)\nx=Dropout(0.1)(x) \nx=Dense(512,activation='relu')(x)\nx=Dropout(0.1)(x)\npreds=Dense(kelas,activation='softmax')(x)", "id": "3407019", "language": "Python", "matching_score": 0.38276180624961853, "max_stars_count": 0, "path": "buku.py" } ]
3.954801
zdforient
[ { "content": "import os\nimport os.path as op\nimport json\nimport cv2\nimport base64\nimport numpy as np\nimport code\nimport torch\nfrom tqdm import tqdm\nfrom metro.utils.tsv_file_ops import tsv_reader, tsv_writer\nfrom metro.utils.tsv_file_ops import generate_linelist_file\nfrom metro.utils.tsv_file_ops import generate_hw_file\nfrom metro.utils.tsv_file import TSVFile\nfrom metro.utils.image_ops import img_from_base64\nfrom metro.modeling._smpl import SMPL\nsmpl = SMPL().cuda()\n\nfrom collections import defaultdict\nfrom pycocotools.coco import COCO \n\ntsv_file = \"{}/{}.img.tsv\"\nhw_file = \"{}/{}.hw.tsv\"\nlabel_file = \"{}/{}.label.tsv\"\nlinelist_file = \"{}/{}.linelist.tsv\"\n\ndef world2cam(world_coord, R, t):\n cam_coord = np.dot(R, world_coord.transpose(1,0)).transpose(1,0) + t.reshape(1,3)\n return cam_coord\n\ndef cam2pixel(cam_coord, f, c):\n x = cam_coord[:, 0] / (cam_coord[:, 2]) * f[0] + c[0]\n y = cam_coord[:, 1] / (cam_coord[:, 2]) * f[1] + c[1]\n z = cam_coord[:, 2]\n img_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)\n return img_coord\n\ndef preproc(dataset_folder, dataset_tsv_folder, split):\n # init SMPL\n smpl_mesh_model = SMPL()\n\n # bbox expansion factor\n scaleFactor = 1.2\n\n imgfiles_folder = dataset_folder+'/imageFiles'\n\n # annotation loading\n rows, rows_label, rows_hw = [], [], []\n db = COCO(op.join(dataset_folder, '3DPW_'+split+'.json'))\n\n for aid in tqdm(db.anns.keys()):\n ann = db.anns[aid]\n img = db.loadImgs(ann['image_id'])[0]\n imgname = op.join(img['sequence'], img['file_name'])\n img_path = op.join(imgfiles_folder, imgname)\n img_data = cv2.imread(img_path)\n img_encoded_str = base64.b64encode(cv2.imencode('.jpg', img_data)[1])\n width, height = img['width'], img['height']\n bbox = ann['bbox']\n cam_f = img['cam_param']['focal']\n cam_p = img['cam_param']['princpt']\n\n center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]\n scale = scaleFactor*max(bbox[2], bbox[3])/200\n\n smpl_shape = ann['smpl_param']['shape']\n smpl_pose = ann['smpl_param']['pose']\n gender = ann['smpl_param']['gender']\n\n smpl_pose_tensor = torch.FloatTensor(smpl_pose).view(1,-1)\n smpl_shape_tensor = torch.FloatTensor(smpl_shape).view(1,-1)\n gt_vertices = smpl_mesh_model(smpl_pose_tensor, smpl_shape_tensor)\n gt_keypoints_3d = smpl_mesh_model.get_joints(gt_vertices) \n gt_3d_joints = np.asarray(gt_keypoints_3d.cpu())\n gt_3d_joints_tag = np.ones((1,24,4))\n gt_3d_joints_tag[0,:,0:3] = gt_3d_joints\n gt_3d_joints[0] = gt_3d_joints[0] + ann['smpl_param']['trans']\n gt_2d_joints = cam2pixel(gt_3d_joints[0], cam_f, cam_p)\n keypoint_num = gt_2d_joints.shape[0]\n\n gt_2d_joints_tag = np.ones([24,3])\n gt_2d_joints_tag[:,:2] = gt_2d_joints[:,:2]\n\n smpl_pose_camera_corrd = np.asarray(smpl_pose_tensor).tolist()[0]\n smpl_shape_camera_corrd = np.asarray(smpl_shape_tensor).tolist()[0]\n \n labels = []\n labels.append({\"center\": center, \"scale\": scale,\n \"2d_joints\": gt_2d_joints_tag.tolist(), \"has_2d_joints\": 1,\n \"3d_joints\": gt_3d_joints_tag.tolist(), \"has_3d_joints\": 1,\n \"gender\": gender, \"pose\": smpl_pose_camera_corrd, \"betas\": smpl_shape_camera_corrd, \"has_smpl\": 1 })\n\n row_label = [imgname, json.dumps(labels)]\n rows_label.append(row_label)\n row = [imgname, img_encoded_str]\n rows.append(row)\n height = img_data.shape[0]\n width = img_data.shape[1]\n row_hw = [imgname, json.dumps([{\"height\":height, \"width\":width}])]\n rows_hw.append(row_hw)\n\n\n resolved_label_file = label_file.format(dataset_tsv_folder, split)\n print('save 
to',resolved_label_file)\n tsv_writer(rows_label, resolved_label_file)\n\n resolved_linelist_file = linelist_file.format(dataset_tsv_folder, split)\n print('save to',resolved_linelist_file)\n generate_linelist_file(resolved_label_file, save_file=resolved_linelist_file)\n\n resolved_tsv_file = tsv_file.format(dataset_tsv_folder, split)\n print('save to',resolved_tsv_file)\n tsv_writer(rows, resolved_tsv_file)\n\n resolved_hw_file = hw_file.format(dataset_tsv_folder, split)\n print('save to',resolved_hw_file)\n tsv_writer(rows_hw, resolved_hw_file)\n\n\n\ndef main():\n # *****Instruction to reproduce 3DPW tsv files*****\n #\n # (1) Download 3DPW image files \"imageFiles.zip\" from the 3DPW websit: https://virtualhumans.mpi-inf.mpg.de/3DPW/evaluation.html\n # (2) Unzip \"imageFiles.zip\" to get folder \"imageFiles\"\n # (3) Download pre-parsed 3DPW annotations from https://github.com/hongsukchoi/Pose2Mesh_RELEASE \n # (4) Clone https://github.com/cocodataset/cocoapi and install cocoapi\n #\n # The final data structure should look like this:\n # ${ROOT} \n # |-- datasets \n # |-- 3dpw\n # |-- 3DPW_train.json \n # |-- 3DPW_test.json\n # |-- imageFiles\n # |-- courtyard_arguing_00\n # |-- courtyard_backpack_00\n # |-- ....\n # |-- ....\n #\n # *****Important Note*****\n #\n # We use annotations from https://github.com/hongsukchoi/Pose2Mesh_RELEASE \n # If you use the annotations, please consider citing the following paper:\n #\n # @InProceedings{Choi_2020_ECCV_Pose2Mesh, \n # author = {<NAME> Moon, <NAME> <NAME>}, \n # title = {Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from a 2D Human Pose}, \n # booktitle = {European Conference on Computer Vision (ECCV)}, \n # year = {2020} \n # } \n\n datasets = ['train','test']\n dataset_img_folder = \"./datasets/3dpw\"\n dataset_tsv_folder = \"./datasets/3dpw_tsv_reproduce\"\n for split in datasets:\n preproc(dataset_img_folder, dataset_tsv_folder, split)\n\nif __name__ == '__main__':\n main()\n", "id": "5233641", "language": "Python", "matching_score": 6.64553165435791, "max_stars_count": 1, "path": "metro/tools/tsv_demo_3dpw.py" }, { "content": "import os\nimport os.path as op\nimport json\nimport cv2\nimport base64\nimport sys\nimport argparse\nimport numpy as np\nimport pickle\nimport code\nimport imageio\nimport torch\nfrom tqdm import tqdm\nfrom metro.utils.tsv_file_ops import tsv_reader, tsv_writer\nfrom metro.utils.tsv_file_ops import generate_linelist_file\nfrom metro.utils.tsv_file_ops import generate_hw_file\nfrom metro.utils.tsv_file import TSVFile\nfrom metro.utils.image_ops import img_from_base64\nimport scipy.misc\n\nfrom metro.modeling._smpl import SMPL\nsmpl = SMPL().cuda()\n\ndef preproc(dataset_img_folder, dataset_tsv_folder, split):\n # get image list based on split definition\n txt_file = os.path.join(dataset_img_folder, 'trainval.txt')\n file = open(txt_file, 'r')\n txt_content = file.read()\n imgs = txt_content.split('\\n')\n\n # structs we will output\n rows, rows_label, rows_hw = [], [], []\n tsv_img_file = dataset_tsv_folder + \"/{}.img.tsv\"\n tsv_hw_file = dataset_tsv_folder + \"/{}.hw.tsv\"\n tsv_label_file = dataset_tsv_folder + \"/{}.label.tsv\"\n tsv_linelist_file = dataset_tsv_folder + \"/{}.linelist.tsv\"\n\n # iterate all images\n for img_i in tqdm(imgs):\n # skip empty row in txt\n if len(img_i) == 0:\n continue\n \n # =======================================================\n # preprocess tsv_img_file\n # encode image to bytestring, and save it in tsv_img_file\n img_base = img_i[1:-10]\n 
img_name = '%s_image.png'%img_base\n img_path = op.join(dataset_img_folder, img_name)\n img = cv2.imread(img_path)\n img_encoded_str = base64.b64encode(cv2.imencode('.jpg', img)[1])\n row = [img_name, img_encoded_str]\n rows.append(row)\n\n # =======================================================\n # preprocess tsv_hw_file\n # save image height & width in tsv_hw_file\n height = img.shape[0]\n width = img.shape[1]\n row_hw = [img_name, json.dumps([{\"height\":height, \"width\":width}])]\n rows_hw.append(row_hw)\n\n # =======================================================\n # preprocess tsv_label_file\n\n # step 1. keypoints processing\n keypoints_file = os.path.join(dataset_img_folder, '%s_joints.npy'%img_base)\n keypoints = np.load(keypoints_file)\n vis = keypoints[2]\n keypoints = keypoints[:2].T\n gt_2d_joints = np.zeros([1,24,3])\n gt_2d_joints[0,:14,:] = np.hstack([keypoints, np.vstack(vis)])\n\n # step 2. scale and center\n render_name = os.path.join(dataset_img_folder, '%s_render_light.png' % img_base)\n I = imageio.imread(render_name) # I = scipy.misc.imread(render_name)\n ys, xs = np.where(np.min(I,axis=2)<255)\n bbox = np.array([np.min(xs), np.min(ys), np.max(xs)+1, np.max(ys)+1])\n center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n\n # step 3. bbox expansion factor\n scaleFactor = 1.2\n scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200.\n\n # step 4. pose and shape\n pkl_file = os.path.join(dataset_img_folder, '%s_body.pkl' % img_base)\n pkl = pickle.load(open(pkl_file, 'rb'), encoding='latin1') \n pose = pkl['pose']\n betas = pkl['betas']\n pose_tensor = torch.from_numpy(pose).unsqueeze(0).cuda().float()\n betas_tensor = torch.from_numpy(betas).unsqueeze(0).cuda().float()\n\n # step 5. 3d pose\n gt_vertices = smpl(pose_tensor, betas_tensor) # output shape: torch.Size([1, 6890, 3]) \n gt_keypoints_3d = smpl.get_joints(gt_vertices) # output shape: torch.Size([1, 24, 3]) \n gt_3d_joints = np.asarray(gt_keypoints_3d.cpu())\n gt_3d_joints_tag = np.ones((1,24,4))\n gt_3d_joints_tag[0,:,0:3] = gt_3d_joints\n\n # step 6. 
save them in tsv_label_file\n labels = []\n labels.append({\"center\": center, \"scale\": scale, \n \"2d_joints\": gt_2d_joints.tolist(), \"has_2d_joints\": 1,\n \"3d_joints\": gt_3d_joints_tag.tolist(), \"has_3d_joints\": 1,\n \"pose\": pose.tolist(), \"betas\": betas.tolist(), \"has_smpl\": 1 })\n row_label = [img_name, json.dumps(labels)]\n rows_label.append(row_label)\n\n resolved_tsv_file = tsv_img_file.format(split)\n tsv_writer(rows, resolved_tsv_file)\n resolved_label_file = tsv_label_file.format(split)\n tsv_writer(rows_label, resolved_label_file)\n resolved_tsv_file = tsv_hw_file.format(split)\n tsv_writer(rows_hw, resolved_tsv_file)\n # generate linelist file\n resolved_linelist_file = tsv_linelist_file.format(split)\n generate_linelist_file(resolved_label_file, save_file=resolved_linelist_file)\n\ndef main():\n datasets = ['trainval']\n # download https://files.is.tuebingen.mpg.de/classner/up/datasets/up-3d.zip\n # unzip it and put all files in \"./datasets/up-3d\"\n dataset_img_folder = \"./datasets/up-3d\"\n dataset_tsv_folder = \"./datasets/up-3d-tsv\"\n for split in datasets:\n preproc(dataset_img_folder, dataset_tsv_folder, split)\n\nif __name__ == '__main__':\n main()\n", "id": "9782418", "language": "Python", "matching_score": 3.1504647731781006, "max_stars_count": 1, "path": "metro/tools/tsv_demo_up3d.py" }, { "content": "\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\n\"\"\"\n\nimport cv2\nimport math\nimport json\nfrom PIL import Image\nimport os.path as op\nimport numpy as np\nimport code\n\nfrom metro.utils.tsv_file import TSVFile, CompositeTSVFile\nfrom metro.utils.tsv_file_ops import load_linelist_file, load_from_yaml_file, find_file_path_in_yaml\nfrom metro.utils.image_ops import img_from_base64, crop, flip_img, flip_pose, flip_kp, transform, rot_aa\nimport torch\nimport torchvision.transforms as transforms\n\n\nclass MeshTSVDataset(object):\n def __init__(self, img_file, label_file=None, hw_file=None,\n linelist_file=None, is_train=True, cv2_output=False, scale_factor=1):\n\n self.img_file = img_file\n self.label_file = label_file\n self.hw_file = hw_file\n self.linelist_file = linelist_file\n self.img_tsv = self.get_tsv_file(img_file)\n self.label_tsv = None if label_file is None else self.get_tsv_file(label_file)\n self.hw_tsv = None if hw_file is None else self.get_tsv_file(hw_file)\n\n if self.is_composite:\n assert op.isfile(self.linelist_file)\n self.line_list = [i for i in range(self.hw_tsv.num_rows())]\n else:\n self.line_list = load_linelist_file(linelist_file)\n\n self.cv2_output = cv2_output\n self.normalize_img = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n self.is_train = is_train\n self.scale_factor = 0.25 # rescale bounding boxes by a factor of [1-options.scale_factor,1+options.scale_factor]\n self.noise_factor = 0.4\n self.rot_factor = 30 # Random rotation in the range [-rot_factor, rot_factor]\n self.img_res = 224\n\n self.image_keys = self.prepare_image_keys()\n\n self.joints_definition = ('R_Ankle', 'R_Knee', 'R_Hip', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Wrist', 'R_Elbow', 'R_Shoulder', 'L_Shoulder',\n 'L_Elbow','L_Wrist','Neck','Top_of_Head','Pelvis','Thorax','Spine','Jaw','Head','Nose','L_Eye','R_Eye','L_Ear','R_Ear')\n self.pelvis_index = self.joints_definition.index('Pelvis')\n\n def get_tsv_file(self, tsv_file):\n if tsv_file:\n if self.is_composite:\n return CompositeTSVFile(tsv_file, self.linelist_file,\n root=self.root)\n tsv_path = find_file_path_in_yaml(tsv_file, 
self.root)\n return TSVFile(tsv_path)\n\n def get_valid_tsv(self):\n # sorted by file size\n if self.hw_tsv:\n return self.hw_tsv\n if self.label_tsv:\n return self.label_tsv\n\n def prepare_image_keys(self):\n tsv = self.get_valid_tsv()\n return [tsv.get_key(i) for i in range(tsv.num_rows())]\n\n def prepare_image_key_to_index(self):\n tsv = self.get_valid_tsv()\n return {tsv.get_key(i) : i for i in range(tsv.num_rows())}\n\n\n def augm_params(self):\n \"\"\"Get augmentation parameters.\"\"\"\n flip = 0 # flipping\n pn = np.ones(3) # per channel pixel-noise\n rot = 0 # rotation\n sc = 1 # scaling\n if self.is_train:\n # We flip with probability 1/2\n if np.random.uniform() <= 0.5:\n flip = 1\n\t \n # Each channel is multiplied with a number \n # in the area [1-opt.noiseFactor,1+opt.noiseFactor]\n pn = np.random.uniform(1-self.noise_factor, 1+self.noise_factor, 3)\n\t \n # The rotation is a number in the area [-2*rotFactor, 2*rotFactor]\n rot = min(2*self.rot_factor,\n max(-2*self.rot_factor, np.random.randn()*self.rot_factor))\n\t \n # The scale is multiplied with a number\n # in the area [1-scaleFactor,1+scaleFactor]\n sc = min(1+self.scale_factor,\n max(1-self.scale_factor, np.random.randn()*self.scale_factor+1))\n # but it is zero with probability 3/5\n if np.random.uniform() <= 0.6:\n rot = 0\n\t\n return flip, pn, rot, sc\n\n def rgb_processing(self, rgb_img, center, scale, rot, flip, pn):\n \"\"\"Process rgb image and do augmentation.\"\"\"\n rgb_img = crop(rgb_img, center, scale, \n [self.img_res, self.img_res], rot=rot)\n # flip the image \n if flip:\n rgb_img = flip_img(rgb_img)\n # in the rgb image we add pixel noise in a channel-wise manner\n rgb_img[:,:,0] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,0]*pn[0]))\n rgb_img[:,:,1] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,1]*pn[1]))\n rgb_img[:,:,2] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,2]*pn[2]))\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img\n\n def j2d_processing(self, kp, center, scale, r, f):\n \"\"\"Process gt 2D keypoints and apply all augmentation transforms.\"\"\"\n nparts = kp.shape[0]\n for i in range(nparts):\n kp[i,0:2] = transform(kp[i,0:2]+1, center, scale, \n [self.img_res, self.img_res], rot=r)\n # convert to normalized coordinates\n kp[:,:-1] = 2.*kp[:,:-1]/self.img_res - 1.\n # flip the x coordinates\n if f:\n kp = flip_kp(kp)\n kp = kp.astype('float32')\n return kp\n\n def j3d_processing(self, S, r, f):\n \"\"\"Process gt 3D keypoints and apply all augmentation transforms.\"\"\"\n # in-plane rotation\n rot_mat = np.eye(3)\n if not r == 0:\n rot_rad = -r * np.pi / 180\n sn,cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0,:2] = [cs, -sn]\n rot_mat[1,:2] = [sn, cs]\n S[:, :-1] = np.einsum('ij,kj->ki', rot_mat, S[:, :-1]) \n # flip the x coordinates\n if f:\n S = flip_kp(S)\n S = S.astype('float32')\n return S\n\n def pose_processing(self, pose, r, f):\n \"\"\"Process SMPL theta parameters and apply all augmentation transforms.\"\"\"\n # rotation or the pose parameters\n pose = pose.astype('float32')\n pose[:3] = rot_aa(pose[:3], r)\n # flip the pose parameters\n if f:\n pose = flip_pose(pose)\n # (72),float\n pose = pose.astype('float32')\n return pose\n\n def get_line_no(self, idx):\n return idx if self.line_list is None else self.line_list[idx]\n\n def get_image(self, idx): \n line_no = self.get_line_no(idx)\n row = self.img_tsv[line_no]\n # use -1 to support old format with multiple columns.\n cv2_im = 
img_from_base64(row[-1])\n if self.cv2_output:\n return cv2_im.astype(np.float32, copy=True)\n cv2_im = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)\n\n return cv2_im\n\n def get_annotations(self, idx):\n line_no = self.get_line_no(idx)\n if self.label_tsv is not None:\n row = self.label_tsv[line_no]\n annotations = json.loads(row[1])\n return annotations\n else:\n return []\n\n def get_target_from_annotations(self, annotations, img_size, idx):\n # This function will be overwritten by each dataset to \n # decode the labels to specific formats for each task. \n return annotations\n\n\n def get_img_info(self, idx):\n if self.hw_tsv is not None:\n line_no = self.get_line_no(idx)\n row = self.hw_tsv[line_no]\n try:\n # json string format with \"height\" and \"width\" being the keys\n return json.loads(row[1])[0]\n except ValueError:\n # list of strings representing height and width in order\n hw_str = row[1].split(' ')\n hw_dict = {\"height\": int(hw_str[0]), \"width\": int(hw_str[1])}\n return hw_dict\n\n def get_img_key(self, idx):\n line_no = self.get_line_no(idx)\n # based on the overhead of reading each row.\n if self.hw_tsv:\n return self.hw_tsv[line_no][0]\n elif self.label_tsv:\n return self.label_tsv[line_no][0]\n else:\n return self.img_tsv[line_no][0]\n\n def __len__(self):\n if self.line_list is None:\n return self.img_tsv.num_rows() \n else:\n return len(self.line_list)\n\n def __getitem__(self, idx):\n\n img = self.get_image(idx)\n img_key = self.get_img_key(idx)\n annotations = self.get_annotations(idx)\n\n annotations = annotations[0]\n center = annotations['center']\n scale = annotations['scale']\n has_2d_joints = annotations['has_2d_joints']\n has_3d_joints = annotations['has_3d_joints']\n joints_2d = np.asarray(annotations['2d_joints'])\n joints_3d = np.asarray(annotations['3d_joints'])\n\n if joints_2d.ndim==3:\n joints_2d = joints_2d[0]\n if joints_3d.ndim==3:\n joints_3d = joints_3d[0]\n\n # Get SMPL parameters, if available\n has_smpl = np.asarray(annotations['has_smpl'])\n pose = np.asarray(annotations['pose'])\n betas = np.asarray(annotations['betas'])\n\n try:\n gender = annotations['gender']\n except KeyError:\n gender = 'none'\n\n # Get augmentation parameters\n flip,pn,rot,sc = self.augm_params()\n\n # Process image\n img = self.rgb_processing(img, center, sc*scale, rot, flip, pn)\n img = torch.from_numpy(img).float()\n # Store image before normalization to use it in visualization\n transfromed_img = self.normalize_img(img)\n\n # normalize 3d pose by aligning the pelvis as the root (at origin)\n root_pelvis = joints_3d[self.pelvis_index,:-1]\n joints_3d[:,:-1] = joints_3d[:,:-1] - root_pelvis[None,:]\n # 3d pose augmentation (random flip + rotation, consistent to image and SMPL)\n joints_3d_transformed = self.j3d_processing(joints_3d.copy(), rot, flip)\n # 2d pose augmentation\n joints_2d_transformed = self.j2d_processing(joints_2d.copy(), center, sc*scale, rot, flip)\n\n ###################################\n # Masking percantage\n # We observe that 30% works better for human body mesh. 
Further details are reported in the paper.\n mvm_percent = 0.3\n ###################################\n \n mjm_mask = np.ones((14,1))\n if self.is_train:\n num_joints = 14\n pb = np.random.random_sample()\n masked_num = int(pb * mvm_percent * num_joints) # at most x% of the joints could be masked\n indices = np.random.choice(np.arange(num_joints),replace=False,size=masked_num)\n mjm_mask[indices,:] = 0.0\n mjm_mask = torch.from_numpy(mjm_mask).float()\n\n mvm_mask = np.ones((431,1))\n if self.is_train:\n num_vertices = 431\n pb = np.random.random_sample()\n masked_num = int(pb * mvm_percent * num_vertices) # at most x% of the vertices could be masked\n indices = np.random.choice(np.arange(num_vertices),replace=False,size=masked_num)\n mvm_mask[indices,:] = 0.0\n mvm_mask = torch.from_numpy(mvm_mask).float()\n\n meta_data = {}\n meta_data['ori_img'] = img\n meta_data['pose'] = torch.from_numpy(self.pose_processing(pose, rot, flip)).float()\n meta_data['betas'] = torch.from_numpy(betas).float()\n meta_data['joints_3d'] = torch.from_numpy(joints_3d_transformed).float()\n meta_data['has_3d_joints'] = has_3d_joints\n meta_data['has_smpl'] = has_smpl\n\n meta_data['mjm_mask'] = mjm_mask\n meta_data['mvm_mask'] = mvm_mask\n\n # Get 2D keypoints and apply augmentation transforms\n meta_data['has_2d_joints'] = has_2d_joints\n meta_data['joints_2d'] = torch.from_numpy(joints_2d_transformed).float()\n meta_data['scale'] = float(sc * scale)\n meta_data['center'] = np.asarray(center).astype(np.float32)\n meta_data['gender'] = gender\n return img_key, transfromed_img, meta_data\n\n\n\nclass MeshTSVYamlDataset(MeshTSVDataset):\n \"\"\" TSVDataset taking a Yaml file for easy function call\n \"\"\"\n def __init__(self, yaml_file, is_train=True, cv2_output=False, scale_factor=1):\n self.cfg = load_from_yaml_file(yaml_file)\n self.is_composite = self.cfg.get('composite', False)\n self.root = op.dirname(yaml_file)\n \n if self.is_composite==False:\n img_file = find_file_path_in_yaml(self.cfg['img'], self.root)\n label_file = find_file_path_in_yaml(self.cfg.get('label', None),\n self.root)\n hw_file = find_file_path_in_yaml(self.cfg.get('hw', None), self.root)\n linelist_file = find_file_path_in_yaml(self.cfg.get('linelist', None),\n self.root)\n else:\n img_file = self.cfg['img']\n hw_file = self.cfg['hw']\n label_file = self.cfg.get('label', None)\n linelist_file = find_file_path_in_yaml(self.cfg.get('linelist', None),\n self.root)\n\n super(MeshTSVYamlDataset, self).__init__(\n img_file, label_file, hw_file, linelist_file, is_train, cv2_output=cv2_output, scale_factor=scale_factor)\n", "id": "4778838", "language": "Python", "matching_score": 4.809175491333008, "max_stars_count": 1, "path": "metro/datasets/human_mesh_tsv.py" }, { "content": "\"\"\"\nThis file contains definitions of useful data stuctures and the paths\nfor the datasets and data files necessary to run the code.\n\nAdapted from opensource project GraphCMR (https://github.com/nkolot/GraphCMR/) and Pose2Mesh (https://github.com/hongsukchoi/Pose2Mesh_RELEASE)\n\n\"\"\"\n\nfrom os.path import join\nfolder_path = 'metro/modeling/'\nJOINT_REGRESSOR_TRAIN_EXTRA = folder_path + 'data/J_regressor_extra.npy'\nJOINT_REGRESSOR_H36M_correct = folder_path + 'data/J_regressor_h36m_correct.npy'\nSMPL_FILE = folder_path + 'data/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'\nSMPL_Male = folder_path + 'data/basicModel_m_lbs_10_207_0_v1.0.0.pkl'\nSMPL_Female = folder_path + 'data/basicModel_f_lbs_10_207_0_v1.0.0.pkl'\nSMPL_sampling_matrix = folder_path + 
'data/mesh_downsampling.npz'\nMANO_FILE = folder_path + 'data/MANO_RIGHT.pkl'\nMANO_sampling_matrix = folder_path + 'data/mano_downsampling.npz'\n\nJOINTS_IDX = [8, 5, 29, 30, 4, 7, 21, 19, 17, 16, 18, 20, 31, 32, 33, 34, 35, 36, 37, 24, 26, 25, 28, 27]\n\n\n\"\"\"\nWe follow the body joint definition, loss functions, and evaluation metrics from \nopen source project GraphCMR (https://github.com/nkolot/GraphCMR/)\n\nEach dataset uses different sets of joints.\nWe use a superset of 24 joints such that we include all joints from every dataset.\nIf a dataset doesn't provide annotations for a specific joint, we simply ignore it.\nThe joints used here are:\n\"\"\"\nJ24_NAME = ('R_Ankle', 'R_Knee', 'R_Hip', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Wrist', 'R_Elbow', 'R_Shoulder', 'L_Shoulder',\n'L_Elbow','L_Wrist','Neck','Top_of_Head','Pelvis','Thorax','Spine','Jaw','Head','Nose','L_Eye','R_Eye','L_Ear','R_Ear')\nH36M_J17_NAME = ( 'Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head',\n 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist')\nJ24_TO_J14 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]\nH36M_J17_TO_J14 = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 10]\n\n\"\"\"\nWe follow the hand joint definition and mesh topology from \nopen source project Manopth (https://github.com/hassony2/manopth)\n\nThe hand joints used here are:\n\"\"\"\nJ_NAME = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1',\n'Middle_2', 'Middle_3', 'Middle_4', 'Ring_1', 'Ring_2', 'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinky_4')\nROOT_INDEX = 0", "id": "1550791", "language": "Python", "matching_score": 0.8813223838806152, "max_stars_count": 1, "path": "metro/modeling/data/config.py" }, { "content": "\"\"\"\r\nCopyright (c) Microsoft Corporation.\r\nLicensed under the MIT license.\r\n\r\n\"\"\"\r\n\r\n\r\nimport os\r\nimport os.path as op\r\nimport numpy as np\r\nimport base64\r\nimport cv2\r\nimport yaml\r\nfrom collections import OrderedDict\r\n\r\n\r\ndef img_from_base64(imagestring):\r\n try:\r\n jpgbytestring = base64.b64decode(imagestring)\r\n nparr = np.frombuffer(jpgbytestring, np.uint8)\r\n r = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\r\n return r\r\n except:\r\n return None\r\n\r\n\r\ndef load_labelmap(labelmap_file):\r\n label_dict = None\r\n if labelmap_file is not None and op.isfile(labelmap_file):\r\n label_dict = OrderedDict()\r\n with open(labelmap_file, 'r') as fp:\r\n for line in fp:\r\n label = line.strip().split('\\t')[0]\r\n if label in label_dict:\r\n raise ValueError(\"Duplicate label \" + label + \" in labelmap.\")\r\n else:\r\n label_dict[label] = len(label_dict)\r\n return label_dict\r\n\r\n\r\ndef load_shuffle_file(shuf_file):\r\n shuf_list = None\r\n if shuf_file is not None:\r\n with open(shuf_file, 'r') as fp:\r\n shuf_list = []\r\n for i in fp:\r\n shuf_list.append(int(i.strip()))\r\n return shuf_list\r\n\r\n\r\ndef load_box_shuffle_file(shuf_file):\r\n if shuf_file is not None:\r\n with open(shuf_file, 'r') as fp:\r\n img_shuf_list = []\r\n box_shuf_list = []\r\n for i in fp:\r\n idx = [int(_) for _ in i.strip().split('\\t')]\r\n img_shuf_list.append(idx[0])\r\n box_shuf_list.append(idx[1])\r\n return [img_shuf_list, box_shuf_list]\r\n return None\r\n\r\n\r\ndef load_from_yaml_file(file_name):\r\n with open(file_name, 'r') as fp:\r\n return yaml.load(fp, Loader=yaml.CLoader)\r\n", "id": "5822679", "language": "Python", "matching_score": 1.1666842699050903, 
"max_stars_count": 135, "path": "metro/utils/dataset_utils.py" }, { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport errno\nimport os\nimport os.path as op\nimport re\nimport logging\nimport numpy as np\nimport torch\nimport random\nimport shutil\nfrom .comm import is_main_process\nimport yaml\n\n\ndef mkdir(path):\n # if it is the current folder, skip.\n # otherwise the original code will raise FileNotFoundError\n if path == '':\n return\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef save_config(cfg, path):\n if is_main_process():\n with open(path, 'w') as f:\n f.write(cfg.dump())\n\n\ndef config_iteration(output_dir, max_iter):\n save_file = os.path.join(output_dir, 'last_checkpoint')\n iteration = -1\n if os.path.exists(save_file):\n with open(save_file, 'r') as f:\n fname = f.read().strip()\n model_name = os.path.basename(fname)\n model_path = os.path.dirname(fname)\n if model_name.startswith('model_') and len(model_name) == 17:\n iteration = int(model_name[-11:-4])\n elif model_name == \"model_final\":\n iteration = max_iter\n elif model_path.startswith('checkpoint-') and len(model_path) == 18:\n iteration = int(model_path.split('-')[-1])\n return iteration\n\n\ndef get_matching_parameters(model, regexp, none_on_empty=True):\n \"\"\"Returns parameters matching regular expression\"\"\"\n if not regexp:\n if none_on_empty:\n return {}\n else:\n return dict(model.named_parameters())\n compiled_pattern = re.compile(regexp)\n params = {}\n for weight_name, weight in model.named_parameters():\n if compiled_pattern.match(weight_name):\n params[weight_name] = weight\n return params\n\n\ndef freeze_weights(model, regexp):\n \"\"\"Freeze weights based on regular expression.\"\"\"\n logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n for weight_name, weight in get_matching_parameters(model, regexp).items():\n weight.requires_grad = False\n logger.info(\"Disabled training of {}\".format(weight_name))\n\n\ndef unfreeze_weights(model, regexp, backbone_freeze_at=-1,\n is_distributed=False):\n \"\"\"Unfreeze weights based on regular expression.\n This is helpful during training to unfreeze freezed weights after\n other unfreezed weights have been trained for some iterations.\n \"\"\"\n logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n for weight_name, weight in get_matching_parameters(model, regexp).items():\n weight.requires_grad = True\n logger.info(\"Enabled training of {}\".format(weight_name))\n if backbone_freeze_at >= 0:\n logger.info(\"Freeze backbone at stage: {}\".format(backbone_freeze_at))\n if is_distributed:\n model.module.backbone.body._freeze_backbone(backbone_freeze_at)\n else:\n model.backbone.body._freeze_backbone(backbone_freeze_at)\n\n\ndef delete_tsv_files(tsvs):\n for t in tsvs:\n if op.isfile(t):\n try_delete(t)\n line = op.splitext(t)[0] + '.lineidx'\n if op.isfile(line):\n try_delete(line)\n\n\ndef concat_files(ins, out):\n mkdir(op.dirname(out))\n out_tmp = out + '.tmp'\n with open(out_tmp, 'wb') as fp_out:\n for i, f in enumerate(ins):\n logging.info('concating {}/{} - {}'.format(i, len(ins), f))\n with open(f, 'rb') as fp_in:\n shutil.copyfileobj(fp_in, fp_out, 1024*1024*10)\n os.rename(out_tmp, out)\n\n\ndef concat_tsv_files(tsvs, out_tsv):\n concat_files(tsvs, out_tsv)\n sizes = [os.stat(t).st_size for t in tsvs]\n sizes = np.cumsum(sizes)\n all_idx = []\n for i, t in enumerate(tsvs):\n for idx in load_list_file(op.splitext(t)[0] + '.lineidx'):\n if i == 0:\n 
all_idx.append(idx)\n else:\n all_idx.append(str(int(idx) + sizes[i - 1]))\n with open(op.splitext(out_tsv)[0] + '.lineidx', 'w') as f:\n f.write('\\n'.join(all_idx))\n\n\ndef load_list_file(fname):\n with open(fname, 'r') as fp:\n lines = fp.readlines()\n result = [line.strip() for line in lines]\n if len(result) > 0 and result[-1] == '':\n result = result[:-1]\n return result\n\n\ndef try_once(func):\n def func_wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n logging.info('ignore error \\n{}'.format(str(e)))\n return func_wrapper\n\n\n@try_once\ndef try_delete(f):\n os.remove(f)\n\n\ndef set_seed(seed, n_gpu):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(seed)\n\n\ndef print_and_run_cmd(cmd):\n print(cmd)\n os.system(cmd)\n\n\ndef write_to_yaml_file(context, file_name):\n with open(file_name, 'w') as fp:\n yaml.dump(context, fp, encoding='utf-8')\n\n\ndef load_from_yaml_file(yaml_file):\n with open(yaml_file, 'r') as fp:\n return yaml.load(fp, Loader=yaml.CLoader)\n\n\n", "id": "8380588", "language": "Python", "matching_score": 2.6031365394592285, "max_stars_count": 135, "path": "metro/utils/miscellaneous.py" }, { "content": "\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nTraining and evaluation codes for \n3D human body mesh reconstruction from an image\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nimport argparse\nimport os\nimport os.path as op\nimport code\nimport json\nimport time\nimport datetime\nimport torch\nimport torchvision.models as models\nfrom torchvision.utils import make_grid\nimport numpy as np\nimport cv2\nfrom metro.modeling.bert import BertConfig, METRO\nfrom metro.modeling.bert import METRO_Body_Network as METRO_Network\nfrom metro.modeling._smpl import SMPL, Mesh\nfrom metro.modeling.hrnet.hrnet_cls_net_featmaps import get_cls_net\nfrom metro.modeling.hrnet.config import config as hrnet_config\nfrom metro.modeling.hrnet.config import update_config as hrnet_update_config\nimport metro.modeling.data.config as cfg\nfrom metro.datasets.build import make_data_loader\n\nfrom metro.utils.logger import setup_logger\nfrom metro.utils.comm import synchronize, is_main_process, get_rank, get_world_size, all_gather\nfrom metro.utils.miscellaneous import mkdir, set_seed\nfrom metro.utils.metric_logger import AverageMeter, EvalMetricsLogger\nfrom metro.utils.renderer import Renderer, visualize_reconstruction, visualize_reconstruction_test\nfrom metro.utils.metric_pampjpe import reconstruction_error\nfrom metro.utils.geometric_layers import orthographic_projection\n\ndef save_checkpoint(model, args, epoch, iteration, num_trial=10):\n checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format(\n epoch, iteration))\n if not is_main_process():\n return checkpoint_dir\n mkdir(checkpoint_dir)\n model_to_save = model.module if hasattr(model, 'module') else model\n for i in range(num_trial):\n try:\n torch.save(model_to_save, op.join(checkpoint_dir, 'model.bin'))\n torch.save(model_to_save.state_dict(), op.join(checkpoint_dir, 'state_dict.bin'))\n torch.save(args, op.join(checkpoint_dir, 'training_args.bin'))\n logger.info(\"Save checkpoint to {}\".format(checkpoint_dir))\n break\n except:\n pass\n else:\n logger.info(\"Failed to save checkpoint after {} trails.\".format(num_trial))\n return checkpoint_dir\n\ndef save_scores(args, split, mpjpe, pampjpe, mpve):\n eval_log = []\n res = {}\n res['mPJPE'] = 
mpjpe\n res['PAmPJPE'] = pampjpe\n res['mPVE'] = mpve\n eval_log.append(res)\n with open(op.join(args.output_dir, split+'_eval_logs.json'), 'w') as f:\n json.dump(eval_log, f)\n logger.info(\"Save eval scores to {}\".format(args.output_dir))\n return\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"\n Sets the learning rate to the initial LR decayed by x every y epochs\n x = 0.1, y = args.num_train_epochs/2.0 = 100\n \"\"\"\n lr = args.lr * (0.1 ** (epoch // (args.num_train_epochs/2.0) ))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef mean_per_joint_position_error(pred, gt, has_3d_joints):\n \"\"\" \n Compute mPJPE\n \"\"\"\n gt = gt[has_3d_joints == 1]\n gt = gt[:, :, :-1]\n pred = pred[has_3d_joints == 1]\n\n with torch.no_grad():\n gt_pelvis = (gt[:, 2,:] + gt[:, 3,:]) / 2\n gt = gt - gt_pelvis[:, None, :]\n pred_pelvis = (pred[:, 2,:] + pred[:, 3,:]) / 2\n pred = pred - pred_pelvis[:, None, :]\n error = torch.sqrt( ((pred - gt) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()\n return error\n\ndef mean_per_vertex_error(pred, gt, has_smpl):\n \"\"\"\n Compute mPVE\n \"\"\"\n pred = pred[has_smpl == 1]\n gt = gt[has_smpl == 1]\n with torch.no_grad():\n error = torch.sqrt( ((pred - gt) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()\n return error\n\ndef keypoint_2d_loss(criterion_keypoints, pred_keypoints_2d, gt_keypoints_2d, has_pose_2d):\n \"\"\"\n Compute 2D reprojection loss if 2D keypoint annotations are available.\n The confidence (conf) is binary and indicates whether the keypoints exist or not.\n \"\"\"\n conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()\n loss = (conf * criterion_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean()\n return loss\n\ndef keypoint_3d_loss(criterion_keypoints, pred_keypoints_3d, gt_keypoints_3d, has_pose_3d, device):\n \"\"\"\n Compute 3D keypoint loss if 3D keypoint annotations are available.\n \"\"\"\n conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()\n gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()\n gt_keypoints_3d = gt_keypoints_3d[has_pose_3d == 1]\n conf = conf[has_pose_3d == 1]\n pred_keypoints_3d = pred_keypoints_3d[has_pose_3d == 1]\n if len(gt_keypoints_3d) > 0:\n gt_pelvis = (gt_keypoints_3d[:, 2,:] + gt_keypoints_3d[:, 3,:]) / 2\n gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]\n pred_pelvis = (pred_keypoints_3d[:, 2,:] + pred_keypoints_3d[:, 3,:]) / 2\n pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]\n return (conf * criterion_keypoints(pred_keypoints_3d, gt_keypoints_3d)).mean()\n else:\n return torch.FloatTensor(1).fill_(0.).to(device) \n\ndef vertices_loss(criterion_vertices, pred_vertices, gt_vertices, has_smpl, device):\n \"\"\"\n Compute per-vertex loss if vertex annotations are available.\n \"\"\"\n pred_vertices_with_shape = pred_vertices[has_smpl == 1]\n gt_vertices_with_shape = gt_vertices[has_smpl == 1]\n if len(gt_vertices_with_shape) > 0:\n return criterion_vertices(pred_vertices_with_shape, gt_vertices_with_shape)\n else:\n return torch.FloatTensor(1).fill_(0.).to(device) \n \ndef rectify_pose(pose):\n pose = pose.copy()\n R_mod = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]\n R_root = cv2.Rodrigues(pose[:3])[0]\n new_root = R_root.dot(R_mod)\n pose[:3] = cv2.Rodrigues(new_root)[0].reshape(3)\n return pose\n\ndef run(args, train_dataloader, val_dataloader, METRO_model, smpl, mesh_sampler, renderer):\n smpl.eval()\n max_iter = len(train_dataloader)\n iters_per_epoch = max_iter // args.num_train_epochs\n if iters_per_epoch<1000:\n 
args.logging_steps = 500\n\n optimizer = torch.optim.Adam(params=list(METRO_model.parameters()),\n lr=args.lr,\n betas=(0.9, 0.999),\n weight_decay=0)\n\n # define loss function (criterion) and optimizer\n criterion_2d_keypoints = torch.nn.MSELoss(reduction='none').cuda(args.device)\n criterion_keypoints = torch.nn.MSELoss(reduction='none').cuda(args.device)\n criterion_vertices = torch.nn.L1Loss().cuda(args.device)\n\n if args.distributed:\n METRO_model = torch.nn.parallel.DistributedDataParallel(\n METRO_model, device_ids=[args.local_rank], \n output_device=args.local_rank,\n find_unused_parameters=True,\n )\n\n logger.info(\n ' '.join(\n ['Local rank: {o}', 'Max iteration: {a}', 'iters_per_epoch: {b}','num_train_epochs: {c}',]\n ).format(o=args.local_rank, a=max_iter, b=iters_per_epoch, c=args.num_train_epochs)\n )\n\n start_training_time = time.time()\n end = time.time()\n METRO_model.train()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n log_losses = AverageMeter()\n log_loss_2djoints = AverageMeter()\n log_loss_3djoints = AverageMeter()\n log_loss_vertices = AverageMeter()\n log_eval_metrics = EvalMetricsLogger()\n\n for iteration, (img_keys, images, annotations) in enumerate(train_dataloader):\n\n METRO_model.train()\n iteration += 1\n epoch = iteration // iters_per_epoch\n batch_size = images.size(0)\n adjust_learning_rate(optimizer, epoch, args)\n data_time.update(time.time() - end)\n\n images = images.cuda(args.device)\n gt_2d_joints = annotations['joints_2d'].cuda(args.device)\n gt_2d_joints = gt_2d_joints[:,cfg.J24_TO_J14,:]\n has_2d_joints = annotations['has_2d_joints'].cuda(args.device)\n\n gt_3d_joints = annotations['joints_3d'].cuda(args.device)\n gt_3d_pelvis = gt_3d_joints[:,cfg.J24_NAME.index('Pelvis'),:3]\n gt_3d_joints = gt_3d_joints[:,cfg.J24_TO_J14,:] \n gt_3d_joints[:,:,:3] = gt_3d_joints[:,:,:3] - gt_3d_pelvis[:, None, :]\n has_3d_joints = annotations['has_3d_joints'].cuda(args.device)\n\n gt_pose = annotations['pose'].cuda(args.device)\n gt_betas = annotations['betas'].cuda(args.device)\n has_smpl = annotations['has_smpl'].cuda(args.device)\n mjm_mask = annotations['mjm_mask'].cuda(args.device)\n mvm_mask = annotations['mvm_mask'].cuda(args.device)\n\n # generate simplified mesh\n gt_vertices = smpl(gt_pose, gt_betas)\n gt_vertices_sub2 = mesh_sampler.downsample(gt_vertices, n1=0, n2=2)\n gt_vertices_sub = mesh_sampler.downsample(gt_vertices)\n\n # normalize gt based on smpl's pelvis \n gt_smpl_3d_joints = smpl.get_h36m_joints(gt_vertices)\n gt_smpl_3d_pelvis = gt_smpl_3d_joints[:,cfg.H36M_J17_NAME.index('Pelvis'),:]\n gt_vertices_sub2 = gt_vertices_sub2 - gt_smpl_3d_pelvis[:, None, :]\n \n # prepare masks for mask vertex/joint modeling\n mjm_mask_ = mjm_mask.expand(-1,-1,2051)\n mvm_mask_ = mvm_mask.expand(-1,-1,2051)\n meta_masks = torch.cat([mjm_mask_, mvm_mask_], dim=1)\n\n # forward-pass\n pred_camera, pred_3d_joints, pred_vertices_sub2, pred_vertices_sub, pred_vertices = METRO_model(images, smpl, mesh_sampler, meta_masks=meta_masks, is_train=True)\n\n # normalize gt based on smpl's pelvis \n gt_vertices_sub = gt_vertices_sub - gt_smpl_3d_pelvis[:, None, :] \n gt_vertices = gt_vertices - gt_smpl_3d_pelvis[:, None, :]\n\n # obtain 3d joints, which are regressed from the full mesh\n pred_3d_joints_from_smpl = smpl.get_h36m_joints(pred_vertices)\n pred_3d_joints_from_smpl = pred_3d_joints_from_smpl[:,cfg.H36M_J17_TO_J14,:]\n\n # obtain 2d joints, which are projected from 3d joints of smpl mesh\n pred_2d_joints_from_smpl = 
orthographic_projection(pred_3d_joints_from_smpl, pred_camera)\n pred_2d_joints = orthographic_projection(pred_3d_joints, pred_camera)\n\n # compute 3d joint loss (where the joints are directly output from transformer)\n loss_3d_joints = keypoint_3d_loss(criterion_keypoints, pred_3d_joints, gt_3d_joints, has_3d_joints, args.device)\n # compute 3d vertex loss\n loss_vertices = ( args.vloss_w_sub2 * vertices_loss(criterion_vertices, pred_vertices_sub2, gt_vertices_sub2, has_smpl, args.device) + \\\n args.vloss_w_sub * vertices_loss(criterion_vertices, pred_vertices_sub, gt_vertices_sub, has_smpl, args.device) + \\\n args.vloss_w_full * vertices_loss(criterion_vertices, pred_vertices, gt_vertices, has_smpl, args.device) )\n # compute 3d joint loss (where the joints are regressed from full mesh)\n loss_reg_3d_joints = keypoint_3d_loss(criterion_keypoints, pred_3d_joints_from_smpl, gt_3d_joints, has_3d_joints, args.device)\n # compute 2d joint loss\n loss_2d_joints = keypoint_2d_loss(criterion_2d_keypoints, pred_2d_joints, gt_2d_joints, has_2d_joints) + \\\n keypoint_2d_loss(criterion_2d_keypoints, pred_2d_joints_from_smpl, gt_2d_joints, has_2d_joints)\n \n loss_3d_joints = loss_3d_joints + loss_reg_3d_joints\n \n # we empirically use hyperparameters to balance difference losses\n loss = args.joints_loss_weight*loss_3d_joints + \\\n args.vertices_loss_weight*loss_vertices + args.vertices_loss_weight*loss_2d_joints\n\n # update logs\n log_loss_2djoints.update(loss_2d_joints.item(), batch_size)\n log_loss_3djoints.update(loss_3d_joints.item(), batch_size)\n log_loss_vertices.update(loss_vertices.item(), batch_size)\n log_losses.update(loss.item(), batch_size)\n\n # back prop\n optimizer.zero_grad()\n loss.backward() \n optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if iteration % args.logging_steps == 0 or iteration == max_iter:\n eta_seconds = batch_time.avg * (max_iter - iteration)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n logger.info(\n ' '.join(\n ['eta: {eta}', 'epoch: {ep}', 'iter: {iter}', 'max mem : {memory:.0f}',]\n ).format(eta=eta_string, ep=epoch, iter=iteration, \n memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0) \n + ' loss: {:.4f}, 2d joint loss: {:.4f}, 3d joint loss: {:.4f}, vertex loss: {:.4f}, compute: {:.4f}, data: {:.4f}, lr: {:.6f}'.format(\n log_losses.avg, log_loss_2djoints.avg, log_loss_3djoints.avg, log_loss_vertices.avg, batch_time.avg, data_time.avg, \n optimizer.param_groups[0]['lr'])\n )\n\n visual_imgs = visualize_mesh( renderer,\n annotations['ori_img'].detach(),\n annotations['joints_2d'].detach(),\n pred_vertices.detach(), \n pred_camera.detach(),\n pred_2d_joints_from_smpl.detach())\n visual_imgs = visual_imgs.transpose(0,1)\n visual_imgs = visual_imgs.transpose(1,2)\n visual_imgs = np.asarray(visual_imgs)\n\n if is_main_process()==True:\n stamp = str(epoch) + '_' + str(iteration)\n temp_fname = args.output_dir + 'visual_' + stamp + '.jpg'\n cv2.imwrite(temp_fname, np.asarray(visual_imgs[:,:,::-1]*255))\n\n if iteration % iters_per_epoch == 0:\n val_mPVE, val_mPJPE, val_PAmPJPE, val_count = run_validate(args, val_dataloader, \n METRO_model, \n criterion_keypoints, \n criterion_vertices, \n epoch, \n smpl,\n mesh_sampler)\n\n logger.info(\n ' '.join(['Validation', 'epoch: {ep}',]).format(ep=epoch) \n + ' mPVE: {:6.2f}, mPJPE: {:6.2f}, PAmPJPE: {:6.2f}, Data Count: {:6.2f}'.format(1000*val_mPVE, 1000*val_mPJPE, 1000*val_PAmPJPE, val_count)\n )\n\n if val_PAmPJPE<log_eval_metrics.PAmPJPE:\n 
checkpoint_dir = save_checkpoint(METRO_model, args, epoch, iteration)\n log_eval_metrics.update(val_mPVE, val_mPJPE, val_PAmPJPE, epoch)\n \n \n total_training_time = time.time() - start_training_time\n total_time_str = str(datetime.timedelta(seconds=total_training_time))\n logger.info('Total training time: {} ({:.4f} s / iter)'.format(\n total_time_str, total_training_time / max_iter)\n )\n checkpoint_dir = save_checkpoint(METRO_model, args, epoch, iteration)\n\n logger.info(\n ' Best Results:'\n + ' mPVE: {:6.2f}, mPJPE: {:6.2f}, PAmPJPE: {:6.2f}, at epoch {:6.2f}'.format(1000*log_eval_metrics.mPVE, 1000*log_eval_metrics.mPJPE, 1000*log_eval_metrics.PAmPJPE, log_eval_metrics.epoch)\n )\n\n\ndef run_eval_general(args, val_dataloader, METRO_model, smpl, mesh_sampler):\n smpl.eval()\n criterion_keypoints = torch.nn.MSELoss(reduction='none').cuda(args.device)\n criterion_vertices = torch.nn.L1Loss().cuda(args.device)\n\n epoch = 0\n if args.distributed:\n METRO_model = torch.nn.parallel.DistributedDataParallel(\n METRO_model, device_ids=[args.local_rank], \n output_device=args.local_rank,\n find_unused_parameters=True,\n )\n METRO_model.eval()\n\n val_mPVE, val_mPJPE, val_PAmPJPE, val_count = run_validate(args, val_dataloader, \n METRO_model, \n criterion_keypoints, \n criterion_vertices, \n epoch, \n smpl,\n mesh_sampler)\n\n logger.info(\n ' '.join(['Validation', 'epoch: {ep}',]).format(ep=epoch) \n + ' mPVE: {:6.2f}, mPJPE: {:6.2f}, PAmPJPE: {:6.2f} '.format(1000*val_mPVE, 1000*val_mPJPE, 1000*val_PAmPJPE)\n )\n # checkpoint_dir = save_checkpoint(METRO_model, args, 0, 0)\n return\n\ndef run_validate(args, val_loader, METRO_model, criterion, criterion_vertices, epoch, smpl, mesh_sampler):\n batch_time = AverageMeter()\n mPVE = AverageMeter()\n mPJPE = AverageMeter()\n PAmPJPE = AverageMeter()\n # switch to evaluate mode\n METRO_model.eval()\n smpl.eval()\n with torch.no_grad():\n # end = time.time()\n for i, (img_keys, images, annotations) in enumerate(val_loader):\n batch_size = images.size(0)\n # compute output\n images = images.cuda(args.device)\n gt_3d_joints = annotations['joints_3d'].cuda(args.device)\n gt_3d_pelvis = gt_3d_joints[:,cfg.J24_NAME.index('Pelvis'),:3]\n gt_3d_joints = gt_3d_joints[:,cfg.J24_TO_J14,:] \n gt_3d_joints[:,:,:3] = gt_3d_joints[:,:,:3] - gt_3d_pelvis[:, None, :]\n has_3d_joints = annotations['has_3d_joints'].cuda(args.device)\n\n gt_pose = annotations['pose'].cuda(args.device)\n gt_betas = annotations['betas'].cuda(args.device)\n has_smpl = annotations['has_smpl'].cuda(args.device)\n\n # generate simplified mesh\n gt_vertices = smpl(gt_pose, gt_betas)\n gt_vertices_sub = mesh_sampler.downsample(gt_vertices)\n gt_vertices_sub2 = mesh_sampler.downsample(gt_vertices_sub, n1=1, n2=2)\n\n # normalize gt based on smpl pelvis \n gt_smpl_3d_joints = smpl.get_h36m_joints(gt_vertices)\n gt_smpl_3d_pelvis = gt_smpl_3d_joints[:,cfg.H36M_J17_NAME.index('Pelvis'),:]\n gt_vertices_sub2 = gt_vertices_sub2 - gt_smpl_3d_pelvis[:, None, :] \n gt_vertices = gt_vertices - gt_smpl_3d_pelvis[:, None, :] \n\n # forward-pass\n pred_camera, pred_3d_joints, pred_vertices_sub2, pred_vertices_sub, pred_vertices = METRO_model(images, smpl, mesh_sampler)\n\n # obtain 3d joints from full mesh\n pred_3d_joints_from_smpl = smpl.get_h36m_joints(pred_vertices)\n\n pred_3d_pelvis = pred_3d_joints_from_smpl[:,cfg.H36M_J17_NAME.index('Pelvis'),:]\n pred_3d_joints_from_smpl = pred_3d_joints_from_smpl[:,cfg.H36M_J17_TO_J14,:]\n pred_3d_joints_from_smpl = pred_3d_joints_from_smpl - pred_3d_pelvis[:, 
None, :]\n pred_vertices = pred_vertices - pred_3d_pelvis[:, None, :]\n\n # measure errors\n error_vertices = mean_per_vertex_error(pred_vertices, gt_vertices, has_smpl)\n error_joints = mean_per_joint_position_error(pred_3d_joints_from_smpl, gt_3d_joints, has_3d_joints)\n error_joints_pa = reconstruction_error(pred_3d_joints_from_smpl.cpu().numpy(), gt_3d_joints[:,:,:3].cpu().numpy(), reduction=None)\n \n if len(error_vertices)>0:\n mPVE.update(np.mean(error_vertices), int(torch.sum(has_smpl)) )\n if len(error_joints)>0:\n mPJPE.update(np.mean(error_joints), int(torch.sum(has_3d_joints)) )\n if len(error_joints_pa)>0:\n PAmPJPE.update(np.mean(error_joints_pa), int(torch.sum(has_3d_joints)) )\n\n val_mPVE = all_gather(float(mPVE.avg))\n val_mPVE = sum(val_mPVE)/len(val_mPVE)\n val_mPJPE = all_gather(float(mPJPE.avg))\n val_mPJPE = sum(val_mPJPE)/len(val_mPJPE)\n\n val_PAmPJPE = all_gather(float(PAmPJPE.avg))\n val_PAmPJPE = sum(val_PAmPJPE)/len(val_PAmPJPE)\n\n val_count = all_gather(float(mPVE.count))\n val_count = sum(val_count)\n\n return val_mPVE, val_mPJPE, val_PAmPJPE, val_count\n\n\ndef visualize_mesh( renderer,\n images,\n gt_keypoints_2d,\n pred_vertices, \n pred_camera,\n pred_keypoints_2d):\n \"\"\"Tensorboard logging.\"\"\"\n gt_keypoints_2d = gt_keypoints_2d.cpu().numpy()\n to_lsp = list(range(14))\n rend_imgs = []\n batch_size = pred_vertices.shape[0]\n # Do visualization for the first 6 images of the batch\n for i in range(min(batch_size, 10)):\n img = images[i].cpu().numpy().transpose(1,2,0)\n # Get LSP keypoints from the full list of keypoints\n gt_keypoints_2d_ = gt_keypoints_2d[i, to_lsp]\n pred_keypoints_2d_ = pred_keypoints_2d.cpu().numpy()[i, to_lsp]\n # Get predict vertices for the particular example\n vertices = pred_vertices[i].cpu().numpy()\n cam = pred_camera[i].cpu().numpy()\n # Visualize reconstruction and detected pose\n rend_img = visualize_reconstruction(img, 224, gt_keypoints_2d_, vertices, pred_keypoints_2d_, cam, renderer)\n rend_img = rend_img.transpose(2,0,1)\n rend_imgs.append(torch.from_numpy(rend_img)) \n rend_imgs = make_grid(rend_imgs, nrow=1)\n return rend_imgs\n\ndef visualize_mesh_test( renderer,\n images,\n gt_keypoints_2d,\n pred_vertices, \n pred_camera,\n pred_keypoints_2d,\n PAmPJPE_h36m_j14):\n \"\"\"Tensorboard logging.\"\"\"\n gt_keypoints_2d = gt_keypoints_2d.cpu().numpy()\n to_lsp = list(range(14))\n rend_imgs = []\n batch_size = pred_vertices.shape[0]\n # Do visualization for the first 6 images of the batch\n for i in range(min(batch_size, 10)):\n img = images[i].cpu().numpy().transpose(1,2,0)\n # Get LSP keypoints from the full list of keypoints\n gt_keypoints_2d_ = gt_keypoints_2d[i, to_lsp]\n pred_keypoints_2d_ = pred_keypoints_2d.cpu().numpy()[i, to_lsp]\n # Get predict vertices for the particular example\n vertices = pred_vertices[i].cpu().numpy()\n cam = pred_camera[i].cpu().numpy()\n score = PAmPJPE_h36m_j14[i]\n # Visualize reconstruction and detected pose\n rend_img = visualize_reconstruction_test(img, 224, gt_keypoints_2d_, vertices, pred_keypoints_2d_, cam, renderer, score)\n rend_img = rend_img.transpose(2,0,1)\n rend_imgs.append(torch.from_numpy(rend_img)) \n rend_imgs = make_grid(rend_imgs, nrow=1)\n return rend_imgs\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n #########################################################\n # Data related arguments\n #########################################################\n parser.add_argument(\"--data_dir\", default='datasets', type=str, required=False,\n help=\"Directory 
with all datasets, each in one subfolder\")\n parser.add_argument(\"--train_yaml\", default='imagenet2012/train.yaml', type=str, required=False,\n help=\"Yaml file with all data for training.\")\n parser.add_argument(\"--val_yaml\", default='imagenet2012/test.yaml', type=str, required=False,\n help=\"Yaml file with all data for validation.\")\n parser.add_argument(\"--num_workers\", default=4, type=int, \n help=\"Workers in dataloader.\")\n parser.add_argument(\"--img_scale_factor\", default=1, type=int, \n help=\"adjust image resolution.\") \n #########################################################\n # Loading/saving checkpoints\n #########################################################\n parser.add_argument(\"--model_name_or_path\", default='metro/modeling/bert/bert-base-uncased/', type=str, required=False,\n help=\"Path to pre-trained transformer model or model type.\")\n parser.add_argument(\"--resume_checkpoint\", default=None, type=str, required=False,\n help=\"Path to specific checkpoint for resume training.\")\n parser.add_argument(\"--output_dir\", default='output/', type=str, required=False,\n help=\"The output directory to save checkpoint and test results.\")\n parser.add_argument(\"--config_name\", default=\"\", type=str, \n help=\"Pretrained config name or path if not the same as model_name.\")\n #########################################################\n # Training parameters\n #########################################################\n parser.add_argument(\"--per_gpu_train_batch_size\", default=30, type=int, \n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=30, type=int, \n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--lr', \"--learning_rate\", default=1e-4, type=float, \n help=\"The initial lr.\")\n parser.add_argument(\"--num_train_epochs\", default=200, type=int, \n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--vertices_loss_weight\", default=100.0, type=float) \n parser.add_argument(\"--joints_loss_weight\", default=1000.0, type=float)\n parser.add_argument(\"--vloss_w_full\", default=0.33, type=float) \n parser.add_argument(\"--vloss_w_sub\", default=0.33, type=float) \n parser.add_argument(\"--vloss_w_sub2\", default=0.33, type=float) \n parser.add_argument(\"--drop_out\", default=0.1, type=float, \n help=\"Drop out ratio in BERT.\")\n #########################################################\n # Model architectures\n #########################################################\n parser.add_argument('-a', '--arch', default='hrnet-w64',\n help='CNN backbone architecture: hrnet-w64, hrnet, resnet50')\n parser.add_argument(\"--num_hidden_layers\", default=4, type=int, required=False, \n help=\"Update model config if given\")\n parser.add_argument(\"--hidden_size\", default=-1, type=int, required=False, \n help=\"Update model config if given\")\n parser.add_argument(\"--num_attention_heads\", default=4, type=int, required=False, \n help=\"Update model config if given. 
Note that the division of \"\n \"hidden_size / num_attention_heads should be in integer.\")\n parser.add_argument(\"--intermediate_size\", default=-1, type=int, required=False, \n help=\"Update model config if given.\")\n parser.add_argument(\"--input_feat_dim\", default='2051,512,128', type=str, \n help=\"The Image Feature Dimension.\") \n parser.add_argument(\"--hidden_feat_dim\", default='1024,256,128', type=str, \n help=\"The Image Feature Dimension.\") \n parser.add_argument(\"--legacy_setting\", default=True, action='store_true',)\n #########################################################\n # Others\n #########################################################\n parser.add_argument(\"--run_eval_only\", default=False, action='store_true',) \n parser.add_argument('--logging_steps', type=int, default=1000, \n help=\"Log every X steps.\")\n parser.add_argument(\"--device\", type=str, default='cuda', \n help=\"cuda or cpu\")\n parser.add_argument('--seed', type=int, default=88, \n help=\"random seed for initialization.\")\n parser.add_argument(\"--local_rank\", type=int, default=0, \n help=\"For distributed training.\")\n\n\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n global logger\n # Setup CUDA, GPU & distributed training\n args.num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n args.distributed = args.num_gpus > 1\n args.device = torch.device(args.device)\n if args.distributed:\n print(\"Init distributed training on local rank {} ({}), rank {}, world size {}\".format(args.local_rank, int(os.environ[\"LOCAL_RANK\"]), int(os.environ[\"NODE_RANK\"]), args.num_gpus))\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend='nccl', init_method='env://'\n )\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n args.device = torch.device(\"cuda\", local_rank)\n synchronize()\n\n mkdir(args.output_dir)\n logger = setup_logger(\"METRO\", args.output_dir, get_rank())\n set_seed(args.seed, args.num_gpus)\n logger.info(\"Using {} GPUs\".format(args.num_gpus))\n\n # Mesh and SMPL utils\n smpl = SMPL().to(args.device)\n mesh_sampler = Mesh()\n\n # Renderer for visualization\n renderer = Renderer(faces=smpl.faces.cpu().numpy())\n\n # Load model\n trans_encoder = []\n\n input_feat_dim = [int(item) for item in args.input_feat_dim.split(',')]\n hidden_feat_dim = [int(item) for item in args.hidden_feat_dim.split(',')]\n output_feat_dim = input_feat_dim[1:] + [3]\n \n if args.run_eval_only==True and args.resume_checkpoint!=None and args.resume_checkpoint!='None' and 'state_dict' not in args.resume_checkpoint:\n # if only run eval, load checkpoint\n logger.info(\"Evaluation: Loading from checkpoint {}\".format(args.resume_checkpoint))\n _metro_network = torch.load(args.resume_checkpoint)\n else:\n # init three transformer-encoder blocks in a loop\n for i in range(len(output_feat_dim)):\n config_class, model_class = BertConfig, METRO\n config = config_class.from_pretrained(args.config_name if args.config_name \\\n else args.model_name_or_path)\n\n config.output_attentions = False\n config.hidden_dropout_prob = args.drop_out\n config.img_feature_dim = input_feat_dim[i] \n config.output_feature_dim = output_feat_dim[i]\n args.hidden_size = hidden_feat_dim[i]\n\n if args.legacy_setting==True:\n # During our paper submission, we were using the original intermediate size, which is 3072 fixed\n # We keep our legacy setting here \n args.intermediate_size = -1\n else:\n # We have recently tried to use an updated intermediate size, which 
is 4*hidden-size.\n # But we didn't find significant performance changes on Human3.6M (~36.7 PA-MPJPE)\n args.intermediate_size = int(args.hidden_size*4)\n\n # update model structure if specified in arguments\n update_params = ['num_hidden_layers', 'hidden_size', 'num_attention_heads', 'intermediate_size']\n\n for idx, param in enumerate(update_params):\n arg_param = getattr(args, param)\n config_param = getattr(config, param)\n if arg_param > 0 and arg_param != config_param:\n logger.info(\"Update config parameter {}: {} -> {}\".format(param, config_param, arg_param))\n setattr(config, param, arg_param)\n\n # init a transformer encoder and append it to a list\n assert config.hidden_size % config.num_attention_heads == 0\n model = model_class(config=config) \n logger.info(\"Init model from scratch.\")\n trans_encoder.append(model)\n\n \n # init ImageNet pre-trained backbone model\n if args.arch=='hrnet':\n hrnet_yaml = 'models/hrnet/cls_hrnet_w40_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'\n hrnet_checkpoint = 'models/hrnet/hrnetv2_w40_imagenet_pretrained.pth'\n hrnet_update_config(hrnet_config, hrnet_yaml)\n backbone = get_cls_net(hrnet_config, pretrained=hrnet_checkpoint)\n logger.info('=> loading hrnet-v2-w40 model')\n elif args.arch=='hrnet-w64':\n hrnet_yaml = 'models/hrnet/cls_hrnet_w64_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'\n hrnet_checkpoint = 'models/hrnet/hrnetv2_w64_imagenet_pretrained.pth'\n hrnet_update_config(hrnet_config, hrnet_yaml)\n backbone = get_cls_net(hrnet_config, pretrained=hrnet_checkpoint)\n logger.info('=> loading hrnet-v2-w64 model')\n else:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n backbone = models.__dict__[args.arch](pretrained=True)\n # remove the last fc layer\n backbone = torch.nn.Sequential(*list(backbone.children())[:-2])\n\n\n trans_encoder = torch.nn.Sequential(*trans_encoder)\n total_params = sum(p.numel() for p in trans_encoder.parameters())\n logger.info('Transformers total parameters: {}'.format(total_params))\n backbone_total_params = sum(p.numel() for p in backbone.parameters())\n logger.info('Backbone total parameters: {}'.format(backbone_total_params))\n\n # build end-to-end METRO network (CNN backbone + multi-layer transformer encoder)\n _metro_network = METRO_Network(args, config, backbone, trans_encoder, mesh_sampler)\n\n if args.resume_checkpoint!=None and args.resume_checkpoint!='None':\n # for fine-tuning or resume training or inference, load weights from checkpoint\n logger.info(\"Loading state dict from checkpoint {}\".format(args.resume_checkpoint))\n cpu_device = torch.device('cpu')\n state_dict = torch.load(args.resume_checkpoint, map_location=cpu_device)\n _metro_network.load_state_dict(state_dict, strict=False)\n del state_dict\n \n _metro_network.to(args.device)\n logger.info(\"Training parameters %s\", args)\n\n if args.run_eval_only==True:\n val_dataloader = make_data_loader(args, args.val_yaml, \n args.distributed, is_train=False, scale_factor=args.img_scale_factor)\n run_eval_general(args, val_dataloader, _metro_network, smpl, mesh_sampler)\n\n else:\n train_dataloader = make_data_loader(args, args.train_yaml, \n args.distributed, is_train=True, scale_factor=args.img_scale_factor)\n val_dataloader = make_data_loader(args, args.val_yaml, \n args.distributed, is_train=False, scale_factor=args.img_scale_factor)\n run(args, train_dataloader, val_dataloader, _metro_network, smpl, mesh_sampler, renderer)\n\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "id": "6886361", "language": "Python", 
"matching_score": 3.8711628913879395, "max_stars_count": 1, "path": "metro/tools/run_metro_bodymesh.py" }, { "content": "__version__ = \"1.0.0\"\n\nfrom .modeling_bert import (BertConfig, BertModel,\n load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)\n\nfrom .modeling_metro import (METRO, METRO_Encoder, METRO_Hand_Network, METRO_Body_Network)\n \nfrom .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, TF_WEIGHTS_NAME,\n PretrainedConfig, PreTrainedModel, prune_layer, Conv1D)\n\nfrom .file_utils import (PYTORCH_PRETRAINED_BERT_CACHE, cached_path)\n", "id": "4668867", "language": "Python", "matching_score": 0.10035473108291626, "max_stars_count": 1, "path": "metro/modeling/bert/__init__.py" }, { "content": "import yfinance as yf\r\nfrom get_stock_name import load_stock_name\r\nfrom pathlib import Path\r\nimport pandas as pd\r\n\r\n# load full datasets of periods and intervals\r\ndef load_stock_data(duration = '2y', interval = '1d'):\r\n current_path = Path().absolute()\r\n csv_name = duration+'_'+interval+'.csv'\r\n data_path = current_path.parent.parent / 'data' / 'stock_history_data' / csv_name\r\n if data_path.exists():\r\n return pd.read_csv(data_path,index_col=0)\r\n \r\n \r\n stock_name = load_stock_name()\r\n stock_data_list = []\r\n stock_name = stock_name[0:len(stock_name):1]\r\n stock_open_data = pd.DataFrame()\r\n for i in range(len(stock_name)):\r\n name = stock_name.iloc[i]\r\n stock_data_i = yf.Ticker(name).history(period = duration, interval = interval)\r\n if stock_data_i.empty or stock_data_i.shape[0] < 10:\r\n continue\r\n stock_data_i.rename({'Open':name},axis=1,inplace=True)\r\n #stock_data_i.reset_index(drop=True)\r\n try:\r\n #stock_open_data = pd.concat([stock_open_data, stock_data_i.loc[:,name]], ignore_index=True,axis=1)\r\n stock_open_data = stock_open_data.join(stock_data_i.loc[:,name],how='outer')\r\n except:\r\n print(name)\r\n #stock_data_list.append(stock_data_i.iloc[:,0])\r\n #stock_open_data = pd.concat(stock_data_list,axis=1)\r\n #stock_open_data.columns = stock_name\r\n stock_open_data = stock_open_data[~stock_open_data.index.duplicated(keep='first')]\r\n stock_open_data.to_csv(data_path, header = 1)#, index = False\r\n\r\n \r\n #print(stock_open_data)\r\n #print(data_path)\r\n #print(data_path.parent.exists())\r\n \r\n return stock_open_data\r\n\r\n#select ROA > 1.4 from sep 2020\r\ndef get_ROA_stock(duration = '2y', interval = '1d'):\r\n current_path = Path().absolute()\r\n csv_name = duration+'_'+interval+'_ROA.csv'\r\n data_path = current_path.parent.parent / 'data' / 'stock_history_data' / csv_name\r\n #if data_path.exists():\r\n #return pd.read_csv(data_path,index_col=0) \r\n stock_data = load_stock_data(duration, interval)\r\n stock_data = stock_data[~stock_data.index.duplicated(keep='first')]\r\n #select ROA (value[-1] > value[0] * 2 || value[-1] > 1.4 * value[1/2])\r\n ROA_stock_num = []\r\n rown, coln = stock_data.shape[0], stock_data.shape[1]\r\n rown_half = (int) (rown / 2)\r\n for i in range(coln):\r\n if (stock_data.iloc[rown-1,i] > 1.4 * stock_data.iloc[rown_half,i] or\r\n stock_data.iloc[rown-1,i] > 2 * stock_data.iloc[0,i]):\r\n ROA_stock_num.append(i)\r\n ROA_stock_data = stock_data.iloc[:,ROA_stock_num]\r\n ROA_stock_data.to_csv(data_path, header = 1)\r\n return ROA_stock_data\r\n #print(data)\r\n\r\ndef clean(data):\r\n rowsum = data.sum(axis=1)\r\n data = data.fillna(-1)\r\n data = data.drop(data.index & rowsum[rowsum<1].index)\r\n \r\n\r\n#get_ROA_stock('2y', '1d')\r\n#data = 
load_stock_data('1mo','1d')\r\n \r\n", "id": "1475900", "language": "Python", "matching_score": 3.8541173934936523, "max_stars_count": 0, "path": "simulator/extract.py" }, { "content": "from pathlib import Path\r\nimport pandas as pd\r\ndef load_stock_name():\r\n current_path = Path().absolute()\r\n data_path = current_path.parent.parent / 'data' / 'info' / 'companylist.csv'\r\n full_data = pd.read_csv(data_path)\r\n company_name = full_data.iloc[:,0]\r\n return company_name\r\n\r\n#cn = load_stock_name()\r\n#print(cn)\r\n", "id": "10391396", "language": "Python", "matching_score": 1.361879587173462, "max_stars_count": 0, "path": "simulator/get_stock_name.py" }, { "content": "import yfinance as yf\r\nimport pandas as pd\r\nfrom pathlib import Path\r\n#from extract import get_ROA_stock\r\n\r\nclass SIM:\r\n\r\n def __init__(self, backlen = 100, freq = 3, stock_num = 5, total_input = 1000, timelen = '2y'):\r\n self.freq = freq\r\n self.stock_num = stock_num\r\n self.initial_input = total_input\r\n self.cur_invest = self.initial_input\r\n self.data_path = Path().absolute().parent / 'data'\r\n self.extract_module_path = Path().absolute().parent / 'simulator'\r\n self.selected_stock = pd.Index\r\n self.invest_ratio = pd.DataFrame()\r\n self.prof_ratio = pd.DataFrame()\r\n self.stock_data = pd.DataFrame()\r\n self.ROA_data = pd.DataFrame()\r\n self.backlen = backlen\r\n self.timelen = timelen\r\n \r\n def one_step(self,t0,method='noob'):\r\n \r\n if method == 'noob':\r\n self.select_stock_noob(t0)\r\n elif method == '3factor':\r\n self.select_stock_3factor(t0)\r\n elif method == '5factor':\r\n self.select_stock_5factor(t0)\r\n elif method == '1934':\r\n self.select_stock_1943(t0)\r\n selected_stock_data = self.ROA_data[self.ROA_data.columns & self.selected_stock]\r\n self.prof_ratio = selected_stock_data.iloc[t0+self.freq]/selected_stock_data.iloc[t0]\r\n self.cur_invest *= sum(self.invest_ratio*self.prof_ratio)\r\n \r\n def clean(self, data):\r\n rowsum = data.sum(axis=1)\r\n data = data.fillna(-1)\r\n data = data.drop(data.index & rowsum[rowsum<1].index)\r\n return data\r\n #data = data.fillna(-1) \r\n \r\n\r\n def load_data(self):\r\n ROA_data_path = self.data_path / 'stock_history_data' / (self.timelen + '_1d_ROA.csv')\r\n #ROA_data = get_ROA_stock()\r\n self.ROA_data = pd.read_csv(self.data_path / 'stock_history_data' / (self.timelen + '_1d_ROA.csv'),index_col=0)\r\n #self.stock_data = pd.read_csv(self.data_path / 'stock_history_data' / '10y_1d.csv',index_col=0)\r\n self.ROA_data = self.clean(self.ROA_data)\r\n #self.clean(self.stock_data)\r\n \r\n\r\n\r\n\r\n def select_stock_noob(self, t0):\r\n #may use caldendar module: https://docs.python.org/3/library/calendar.html\r\n row0, row1 = self.ROA_data.iloc[t0-self.freq], self.ROA_data.iloc[t0]\r\n row0.sort_values()\r\n row1.sort_values()\r\n row0 = row0[row0<1e3]\r\n row0 = row0[row0>1]\r\n row1 = row1[row1<1e3]\r\n row1 = row1[row1>1]\r\n row2 = row1 / row0\r\n row2 = row2.sort_values(ascending=False)\r\n row2 = row2.drop(row2[row2>pow(2,self.freq*4)])\r\n #print(row2)\r\n row2 = row2[0:self.stock_num]\r\n #print(row2)\r\n ratio = row2*0.5 - 0.5\r\n ratio /= row2\r\n s = sum(ratio)\r\n ratio /= s\r\n self.invest_ratio = ratio\r\n self.selected_stock = row2.index\r\n\r\n def select_stock_1943(self, t0):\r\n rown, coln = self.ROA_data.shape[0], self.ROA_data.shape[1]\r\n \r\n y0 = (int) (rown / 2)\r\n y1 = rown - 1\r\n '''\r\n y1 = (int) (rown / 2)\r\n y0 = 10\r\n '''\r\n stock_price_now = self.ROA_data.iloc[t0]\r\n stock_price_last = 
self.ROA_data.iloc[t0-self.backlen]\r\n stock_return = stock_price_now - stock_price_last\r\n pe = stock_return / stock_price_now\r\n #ep = 1.0/pe\r\n pe = pe.fillna(0)\r\n pe_avg = max(0, sum(pe) / coln)\r\n #ep_avg = sum(ep) / coln\r\n double_pe_data = pe\r\n price_data = stock_price_now.sort_values(ascending=False)\r\n price_data = price_data[int(coln*0.33):int(coln*0.9)]\r\n price_data = price_data[price_data < 10]\r\n price_data = price_data[price_data > 1]\r\n\r\n \r\n quality = double_pe_data[price_data.index&double_pe_data.index]\r\n sort_quality = quality.sort_values(ascending=False)\r\n sort_quality = sort_quality[sort_quality>0]\r\n sort_quality = sort_quality[0:self.stock_num]\r\n #print('#####################',sum(pe), coln)\r\n\r\n self.selected_stock = sort_quality.index\r\n self.invest_ratio = sort_quality / sum(sort_quality)\r\n\r\n\r\n \r\n def show_status(self):\r\n print(\"Principle: \\n\", self.cur_invest)\r\n print(\"selected_stock: \\n\", self.selected_stock)\r\n print(\"invest_ratio: \\n\", self.invest_ratio)\r\n print(\"prof_ratio: \\n\", self.prof_ratio)\r\n\r\n def Test_sim(self, method = 'noob',show = 0):\r\n t0 = self.backlen\r\n self.load_data()\r\n #print('simulation started:')\r\n print('freq: ', self.freq, '\\nDate: ', self.ROA_data.index[t0])\r\n #print(\"Initial input: \\n\", self.cur_invest)\r\n while t0 < self.ROA_data.shape[0]- self.freq - 1 and self.cur_invest>10:\r\n self.one_step(t0, method)\r\n if show:\r\n self.show_status()\r\n #print('t0: ',t0)\r\n t0 += self.freq\r\n #\r\n print(\"Principle: \\n\", self.cur_invest)\r\n #print(\"End date: \\n\", self.ROA_data.index[t0])\r\n \r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n", "id": "1124659", "language": "Python", "matching_score": 2.0685532093048096, "max_stars_count": 0, "path": "simulator/simu.py" }, { "content": "from simulator import simu\r\n\r\nif __name__==\"__main__\":\r\n backlen = 250\r\n freq = 10\r\n stocknum = 10\r\n totalinput = 1000\r\n timelen = '2y'\r\n\r\n SIM = simu.SIM(backlen, freq, stocknum,totalinput, timelen)\r\n show = 1\r\n SIM.Test_sim('1934',show)\r\n#30 days as freq\r\n", "id": "9579275", "language": "Python", "matching_score": 0.38955163955688477, "max_stars_count": 0, "path": "main.py" } ]
2.335845
shrutimary15
[ { "content": "import pandas as pd # importing packages required\r\n# saved the input file as input.txt\r\nf = open('input.txt', encoding='utf-8')\r\nh = f.readlines() # solving the problem while reading the file by encoding\r\n# separating the list of questions and answers and saving it in different list\r\ni = 0\r\nque_list = []\r\nans_list = []\r\nl = len(h)-1\r\nque = open('que.txt', 'w')\r\nans = open('ans.txt', 'w')\r\nfor ele in h:\r\n if i != l:\r\n u = len(ele)\r\n tlist = h[i][0]\r\n flist = h[i][u-2]\r\n addlist = h[i][4:8]\r\n if tlist == \"0\" or tlist == \"1\" or tlist == '2' or tlist == '3' or tlist == '4' or tlist == '5' or tlist == '6' or tlist == '7' or tlist == '8' or tlist == '9':\r\n while flist !='?':\r\n que_list = ele\r\n que.write(ele.rstrip()+'\\n')\r\n elif flist == '?':\r\n que_list = ele\r\n que.write(ele.rstrip())\r\n\r\n else:\r\n ans_list = ele\r\n ans.write(ele.rstrip()+\"\\n\")\r\n if ele[0:6] != 'Answer' and ele[0] != '\\n':\r\n que.write(\"\\n\")\r\n i = i+1\r\n\r\nque.close()\r\nans.close()\r\nque = open('que.txt', 'r')\r\nans = open('ans.txt', 'r')\r\n\r\n# Converting each text file into csv\r\nq = pd.read_csv(que, delimiter='\\n', skip_blank_lines=False, names=['Question'])\r\na = pd.read_csv(ans, delimiter='\\n', skip_blank_lines=True, names=['Answer'])\r\na.to_csv('answer.csv')\r\nq.to_csv('question.csv')\r\n\r\n# concat 2 csv to form combined csv\r\ninputs = ['question.csv', 'answer.csv']\r\ndf = (pd.read_csv(f, sep=',') for f in inputs)\r\ncombined = pd.concat(df, axis=1)\r\ncombined.drop([\"Unnamed: 0\"], axis=1, inplace=True)\r\ncombined.to_csv(\"Output.csv\")\r\n", "id": "7185302", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "further.py" } ]
0
skamatala
[ { "content": "count=dict()\ndict=dict()\ndef forwarding_count(destination, to, **kwargs):\n global count\n global dict\n if destination in dict:\n if to not in dict[destination]:\n if to != \"to\":\n dict[destination].append(to)\n count[destination]=len(dict[destination])\n return count[destination]\n else:\n return 0\n else:\n return count[destination]\n else:\n if to != \"to\":\n dict[destination]=[to]\n count[destination]=len(dict[destination])\n return count[destination]\n else:\n return 0\n", "id": "5955678", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "community_supplied/Protocols/Bgp/forwarding_count.py" } ]
0
aahumayed
[ { "content": "#!/usr/bin/env python\nimport sys\nimport time\nimport cancat\nimport struct\nimport threading\nimport json\n\nimport cancat.iso_tp as cisotp\n\nclass J1939:\n def __init__(self, c, verbose=True):\n self.c = c\n self.verbose = verbose\n self.readJ1939DB()\n\n def readJ1939DB(self):\n f=open(\"J1939db.json\", \"r\")\n self.j1939DB = json.load(f)\n# printJ1939Msgs --> reprJ1939Msgs --> filterJ1939Msgs --> genJ1939Msgs --> reprJ1939Msg\n def printJ1939Msgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, priorities=None, pgns=None, sourceAddresses=None, spns=None, ignore=[]):\n '''\n This function decodes CAN messages into J1939 format. Examples of usage:\n 1) Importing and creating a J1939 object\n import j1939\n j=j1939.J1939(c)\n 2) Decode messages without filtering:\n j.printJ1939Msgs()\n 3) Filter by PGN by passing a list of PGNs:\n j.printJ1939Msgs(pgns={61441,0x123})\n 4) Filter by source addresses\n j.printJ1939Msgs(sourceAddresses={1,3,6})\n 5) Filter by Priority:\n j.printJ1939Msgs(priorities={0,7})\n 6) Filter by spns:\n j.printJ1939Msgs(spns={520,190})\n '''\n print self.reprJ1939Msgs(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, priorities, pgns, sourceAddresses, spns, ignore)\n\n def reprJ1939Msgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None,priorities=None, pgns=None, sourceAddresses=None, spns=None, ignore=[]):\n '''\n String representation of a set of CAN Messages.\n These can be filtered by start and stop message indexes, as well as\n use a baseline (defined by start/stop message indexes),\n by a list of \"desired\" arbids as well as a list of\n ignored arbids\n\n Many functions wrap this one.\n '''\n out = []\n\n if start_bkmk != None:\n start_msg = self.c.getMsgIndexFromBookmark(start_bkmk)\n\n if stop_bkmk != None:\n stop_msg = self.c.getMsgIndexFromBookmark(stop_bkmk)\n\n\n\n if start_msg in self.c.bookmarks:\n bkmk = self.c.bookmarks.index(start_msg)\n self.c.out.append(\"starting from bookmark %d: '%s'\" %\n (bkmk,\n self.c.bookmark_info[bkmk].get('name'))\n )\n\n if stop_msg in self.c.bookmarks:\n bkmk = self.c.bookmarks.index(stop_msg)\n self.c.out.append(\"stoppng at bookmark %d: '%s'\" %\n (bkmk,\n self.c.bookmark_info[bkmk].get('name'))\n )\n\n last_msg = None\n next_bkmk = 0\n next_bkmk_idx = 0\n\n msg_count = 0\n last_ts = None\n tot_delta_ts = 0\n counted_msgs = 0 # used for calculating averages, excluding outliers\n\n data_delta = None\n\n\n data_repeat = 0\n data_similar = 0\n\n for idx, ts, arbid, pgns, msg in self.filterJ1939Msgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, priorities=priorities, pgns=pgns,sourceAddresses=sourceAddresses, spns=spns, ignore=ignore):\n diff = []\n\n # insert bookmark names/comments in appropriate places\n while next_bkmk_idx < len(self.c.bookmarks) and idx >= self.c.bookmarks[next_bkmk_idx]:\n out.append(self.c.reprBookmark(next_bkmk_idx))\n next_bkmk_idx += 1\n\n msg_count += 1\n\n # check data\n byte_cnt_diff = 0\n if last_msg != None:\n if len(last_msg) == len(msg):\n for bidx in range(len(msg)):\n if last_msg[bidx] != msg[bidx]:\n byte_cnt_diff += 1\n\n if byte_cnt_diff == 0:\n diff.append(\"REPEAT\")\n data_repeat += 1\n elif byte_cnt_diff <=4:\n diff.append(\"Similar\")\n data_similar += 1\n # FIXME: make some better heuristic to identify \"out of norm\"\n\n # look for 
ASCII data (4+ consecutive bytes)\n if hasAscii(msg):\n diff.append(\"ASCII: %s\" % repr(msg))\n\n # calculate timestamp delta and comment if out of whack\n if last_ts == None:\n last_ts = ts\n\n delta_ts = ts - last_ts\n if counted_msgs:\n avg_delta_ts = tot_delta_ts / counted_msgs\n else:\n avg_delta_ts = delta_ts\n\n\n if abs(delta_ts - avg_delta_ts) <= delta_ts:\n tot_delta_ts += delta_ts\n counted_msgs += 1\n else:\n diff.append(\"TS_delta: %.3f\" % delta_ts)\n\n out.append(self.reprJ1939Msg(idx, ts, arbid, msg, comment='\\t'.join(diff)))\n last_ts = ts\n last_msg = msg\n\n out.append(\"Total Messages: %d (repeat: %d / similar: %d)\" % (msg_count, data_repeat, data_similar))\n\n return \"\\n\".join(out)\n\n def reprJ1939Msg(self, idx, ts, arbid, data, comment=None):\n #TODO: make decoding spns optional\n if comment == None:\n comment = ''\n priority, pgn, pgnName, sourceAddress = self.splitID(arbid)\n spns= self.getSPNs(pgn)\n d = \"\"\n #TODO SPNs are appended to data in a different order than the actual CAN frame, not sure if this is of significance\n if spns != None:\n for spn in spns:\n d+= self.getSpnInfo(spn,int(data.encode('hex'), 16))\n #d+='SPN: ',value,', (',name,') '\n return \"%.8d %8.3f ID: %.3x, Priority: %d, PGN: %d, SA: %d, Len: %.2x, Data: %-18s\\t%s\" % (idx, ts, arbid, priority, pgn, sourceAddress, len(data), d, comment)\n\n def filterJ1939Msgs(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, priorities=None, pgns=None, sourceAddresses=None, spns=None, ignore=[]):\n '''\n returns the received CAN messages between indexes \"start_msg\" and \"stop_msg\"\n but only messages to ID's that *do not* appear in the the baseline indicated\n by \"start_baseline_msg\" and \"stop_baseline_msg\".\n\n for message indexes, you *will* want to look into the bookmarking subsystem!\n '''\n self.c.log(\"starting filtering messages...\")\n if stop_baseline_msg != None:\n self.c.log(\"ignoring arbids from baseline...\")\n # get a list of baseline arbids\n filter_ids = { arbid:1 for ts,arbid,data in self.genJ1939Msgs(start_baseline_msg, stop_baseline_msg)\n }.keys()\n else:\n filter_ids = None\n self.c.log(\"filtering messages...\")\n filteredMsgs = [(idx, ts,arbid, pgn, msg) for idx, ts,arbid, pgn, msg in self.genJ1939Msgs(start_msg, stop_msg, arbids=arbids, priorities=priorities, pgns=pgns,sourceAddresses=sourceAddresses, spns=spns)\n if (type(arbids) == list and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)]\n return filteredMsgs\n\n def getSPNs(self,pgn):\n '''\n Returns a list of spns used in the passed PGN\n '''\n try:\n return self.j1939DB['J1939PGNdb'][str(pgn)]['SPNs']\n except:\n return None\n\n def genJ1939Msgs(self, start=0, stop=None, arbids=None, priorities=None, pgns=None, sourceAddresses=None, spns=None):\n '''\n CAN message generator. 
takes in start/stop indexes as well as a list\n of desired arbids (list)\n '''\n messages = self.c._messages.get(0x30, [])# CMD_CAN_RECV = 0x30\n if stop == None:\n stop = len(messages)\n else:\n stop = stop + 1 # This makes the stop index inclusive if specified\n\n for idx in xrange(start, stop):\n ts, msg = messages[idx]\n\n arbid, data = self._splitCanMsg(msg)\n priority, pgn, pgnName, sourceAddress = self.splitID(arbid)\n currentSPNs=self.getSPNs(pgn)\n\n if arbids != None and arbid not in arbids:\n # allow filtering of arbids\n continue\n if pgns != None and pgn not in pgns:\n # allow filtering of pgns\n continue\n if priorities != None and priority not in priorities:\n # allow filtering of priorities\n continue\n if sourceAddresses != None and sourceAddress not in sourceAddresses:\n # allow filtering of sourceAddresses\n continue\n # spns: associated spns with the PGN. spns1: the acutal values in the data field of the J1939 message\n #if spns != None and currentSPNs != None and not spns.issubset(currentSPNs):\n #continue\n #SPNs filtering needs two steps: 1) filter out frames with empty data fields (i.e., no defined SPNs), and then 2) filter out data fields that do not contain the specified spns\n if spns != None and currentSPNs == None:\n continue\n if currentSPNs != None and spns != None and not any(x in spns for x in currentSPNs):\n continue\n\n yield((idx, ts, arbid, pgn, data))\n\n def _splitCanMsg(self, msg):\n '''\n takes in captured message\n returns arbid and data\n\n does not check msg size. MUST be at least 4 bytes in length as the\n tool should send 4 bytes for the arbid\n '''\n arbid = struct.unpack(\">I\", msg[:4])[0]\n data = msg[4:]\n return arbid, data\n\n def getSpnName(self, spn):\n '''\n This is a helper function where you pass a PGN value and get its name\n '''\n name = self.j1939DB['J1939SPNdb'][str(spn)]['Name']\n #bin(0xf07d84b11200f084>>48 & ((1 << 8))-1)\n print \"SPN %s: (%s) \" % (str(spn),name)\n\n def splitID(self, arbid):\n '''\n This function extracts and returns priority, PGN, and SA from a 29-bit ID\n '''\n priority = arbid >> 26 & 0b111\n pgn = arbid >> 8 & 0b00001111111111111111\n sourceAddress = arbid & 0b00000000000000000000011111111\n pgnName = self.getPgnName(pgn)\n return priority, pgn, pgnName , sourceAddress\n\n def constructID(self, priority, pgn, sourceAddress):\n '''\n It is used in J1939xmit and it constructs a 29-bit ID given the 3 building blocks (priority, pgn, and sourceAddress)\n Example:\n hex(j.constructID(3,61440,0))\n '''\n arbid =0\n arbid |= priority << 26\n arbid |= pgn << 8\n arbid |= sourceAddress\n return arbid\n\n def getPgnName(self, pgn):\n '''\n A utility function that returns a PGN's name by passing it\n Example:\n In [32]: j.getPgnName(61440)\n Out[32]: u'Electronic Retarder Controller 1'\n '''\n try:\n return self.j1939DB['J1939PGNdb'][str(pgn)]['Name']\n except:\n #print \"PGN: \", pgn, \"not found.\\n\"\n pass\n\n def constructSPNs(self, spns):\n '''\n This function receives a dictionary of spn key-value pairs and returns the correcsponding data field that can be transmitted\n Populated spns get they assigned values, whereas the unpopulated get 0xFF.. 
values\n constructSPNs(spns={190:0x6813, 520:0x12})\n IMPORTANT: This function is incompleteself.\n In [26]: hex(j.constructSPNs(spns={190:0x6813}))\n constructSPNs(spns): spns= {190: 0x6813}\n Out[26]: '0x6813000000' ==> spn value starting at the correct startBit position (bytes 4 and 5)\n In [27]: hex(j.constructSPNs(spns={1675:13}))\n constructSPNs(spns): spns= {1675: 13}\n Out[27]: '0xd000000000000' (starting at position 7.1)\n TODO: Instead of returning the spn values, these need to be appended to a data variable until the whole list of spns is constructed\n '''\n data = \"\"\n print \"constructSPNs(spns): spns= \", spns\n print \"type: \", type(spns)\n for k, v in spns.iteritems():\n print k, ': ', v\n startBit= self.j1939DB['J1939SPNdb'][str(k)]['StartBit']\n spnLength= self.j1939DB['J1939SPNdb'][str(k)]['SPNLength']\n msg= v <<startBit\n mask = ((1<<spnLength)-1)<<startBit\n\n return (msg or mask)\n\n #name = self.j1939DB['J1939SPNdb'][str(spn)]['Name']\n #bin(0xf07d84b11200f084>>48 & ((1 << 8))-1)\n return \"%s: %s, \" % (str(spn),str(value))\n return data\n def J1939xmit(self, priority, pgn, sourceAddress, data=None,spns=None, timeout=3, count=1):\n '''\n Transmit a J1939 message on the attached CAN bus\n Currently returns the *last* result\n Examples of usage:\n j.J1939xmit(7,61444,4, data='FFFFFF6813FFFFFF'.decode('hex')) ==> Works as expected\n j.J1939xmit(7,61444,4, spns={190:0x123, 571:0x1122}) ===> Incomplete, please check the \"half-baked\" constructSPNs function\n '''\n extflag = 1 # always 1 because J1939 uses 29-bit IDs\n id= self.constructID(priority,pgn,sourceAddress)\n #TODO spns needs to receive the correct constructed SPNs returned from constructSPNs function\n #spns= self.constructSPNs(spns)# Returns spn-friendly data\n msg = struct.pack('>I',id)+chr(extflag)+data#'FFFFFF6813FFFFFF'.decode('hex')\n print \"msg= \", msg\n for i in range(count):\n self.c._send(0x44, msg)\n ts, result = self.c.recv(0x34, timeout)\n\n if result == None:\n print \"J1939xmit: Return is None!?\"\n resval = ord(result)\n if resval != 0:\n print \"J1939xmit() failed: %s\" % self.c.CAN_RESPS.get(resval)\n\n return resval\n\n def getSpnInfo(self, spn, data):\n #TODO Need to catch an error when spn is not in the passed data\n '''\n A utility function critical in extracting spns' info from data\n Example:\n getSpnInfo(190,0x1245)\n In [7]: j.getSpnInfo(190,0xaabbccddeeff)\n Out[7]: '190: 48076, '\n In [8]: hex(48076)\n Out[8]: '0xbbcc'\n '''\n startBit= self.j1939DB['J1939SPNdb'][str(spn)]['StartBit']\n spnLength= self.j1939DB['J1939SPNdb'][str(spn)]['SPNLength']\n value = (data >> startBit & ((1 << spnLength))-1)\n #name = self.j1939DB['J1939SPNdb'][str(spn)]['Name']\n #bin(0xf07d84b11200f084>>48 & ((1 << 8))-1)\n return \"%s: %s, \" % (str(spn),str(value))\n #return str(value), name\n\ndef hasAscii(msg, minbytes=4, strict=True):\n '''\n if minbytes == -1, every character has to be clean ASCII\n otherwise, look for strings of at least minbytes in length\n '''\n ascii_match = 0\n ascii_count = 0\n for byte in msg:\n if 0x30 <= ord(byte) < 0x7f:\n ascii_count +=1\n if ascii_count >= minbytes:\n ascii_match = 1\n else:\n if strict:\n return 0\n\n ascii_count = 0\n return ascii_match\n", "id": "11501948", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "cancat/j1939.py" }, { "content": "#!/usr/bin/env python2\n\nimport telnetlib\nimport subprocess\nimport signal\nimport time\n\n###############################################################\n# This script will 
automatically start a GDB debug \n# session to the STM32 discovery board using OpenOCD. The working\n# directory is assumed to be the project root\n###############################################################\n\n###############################################################\n# We need to be able to send a SIGTERM (ctrl-c) to GDB\n# without killing openocd or this script. Set up a custom\n# signal handler here that essentially ignores SIGTERM\n###############################################################\ndef signal_handler(signal, frame):\n pass # do nothing\n\n###############################################################\n# Start up the openocd thread\n###############################################################\n\n# We need gdb to respond to a SIGINT (ctrl-c), but by default,\n# that will cause every other child process to die, including \n# openocd. Disable sigint, then re-enable it after the child \n# spawns. The child inherits the current state of signal \n# handlers.\nsignal.signal(signal.SIGINT, signal.SIG_IGN)\nopenocd = subprocess.Popen([\"openocd\", \"-f\", \"board/st_nucleo_h743zi.cfg\"])\ntime.sleep(2) # Wait for this to start up\n\n# Set up a custom signal handler so that SIGINT doesn't kill\n# this script\nsignal.signal(signal.SIGINT, signal_handler)\n\n###############################################################\n# Reset the development board\n###############################################################\n\n# Flash the image\ntn = telnetlib.Telnet(\"127.0.0.1\", \"4444\")\ntn.read_until(\"> \")\ntn.write(\"poll\\n\")\ntn.read_until(\"> \")\ntn.write(\"reset\\n\")\ntn.read_until(\"> \")\ntn.write(\"exit\\n\")\ntn.close()\n\n###############################################################\n# Start the gdb session\n###############################################################\n\ntime.sleep(2)\ngdb_proc = subprocess.Popen([\"arm-none-eabi-gdb\", \"-ex\", \"target remote localhost:3333\", \"build/CANT.elf\", \"-ex\", \"set remote hardware-breakpoint-limit 6\", \"-ex\", \"set remote hardware-watchpoint-limit 4\"])\n\n# Spin until GDB is exited\nwhile gdb_proc.poll() == None:\n time.sleep(1)\n\n# Gracefully exit openocd\nopenocd.terminate()\n\n", "id": "9405283", "language": "Python", "matching_score": 0, "max_stars_count": 20, "path": "scripts/debug.py" } ]
0
martinwenisch
[ { "content": "from os import environ\n\ntoken_string = environ.get(\"MONITORA_TOKEN\")\n\nif not token_string or token_string == '':\n raise Exception(\"Missing monitora token.\")\n", "id": "8961498", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Monitora-backend/app/data/token.py" }, { "content": "\n# for install Request-HTML use command: pip install requests-html\nfrom requests_html import HTMLSession\nimport json\nimport csv\nimport pathlib\n\n# set Google cloud Api_key. For C.D. api_key contact @karmi\napi_key = \"\"\n# set names of places which should be searched\nplaces_names = [\"charita\", \"červený kříž\", \"armáda spásy\", \"materiální sbírka\", \"humanitární pomoc\"]\nfile_name = \"outcome.csv\"\n# Set parameters for detail of the places to be recorded,\n# for more info: https://developers.google.com/maps/documentation/places/web-service/details\nparameters = [\"name\", \"formatted_phone_number\", \"website\", \"formatted_address\", \"opening_hours\"]\n\n# places where to search. Current list Czechia divison + Prague admin areas (1-22)\n# Current list source: https://cs.wikipedia.org/wiki/Administrativn%C3%AD_dělen%C3%AD_Prahy\n# Disclaimer: google places Api returns only 20 result per request, this should be consider when defining places granularity\nplaces = [\"Okres Benešov\", \"Okres Beroun\", \"Okres Blansko\", \"Okres Brno-město\", \"Okres Brno-venkov\", \"Okres Bruntál\",\n \"Okres Břeclav\", \"Okres Česká Lípa\", \"Okres České Budějovice\", \"Okres Český Krumlov\", \"Okres Děčín\",\n \"Okres Domažlice\", \"Okres Frýdek-Místek\", \"Okres H<NAME>\", \"Okres Hodonín\", \"Okres H<NAME>é\",\n \"Okres Cheb\", \"Okres Chomutov\", \"Okres Chrudim\", \"Okres Jablonec nad Nisou\", \"Okres Jeseník\", \"Okres Jičín\",\n \"Okres Jihlava\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>-město\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>-jih\", \"<NAME>-město\", \"Ok<NAME>zeň-sever\", \"Hlavní město Praha\",\n \"<NAME>-východ\", \"<NAME>-západ\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>ychnov nad Kněžnou\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>erské Hradiště\", \"Okres Ústí nad Labem\",\n \"Okres Ústí nad Orlicí\", \"<NAME>\", \"<NAME>\", \"<NAME>\", \"<NAME>\",\n \"Ok<NAME>dář nad sázavou\", \"Praha 1\", \"Praha 2\", \"Praha 3\", \"Praha 4\", \"Praha 5\", \"Praha 6\",\n \"Praha 7\", \"Praha 8\", \"Praha 9\", \"Praha 10\", \"Praha 11\", \"Praha 12\", \"Praha 13\", \"Praha 14\", \"Praha 15\",\n \"Praha 16\",\n \"Praha 17\", \"Praha 18\", \"Praha 19\", \"Praha 20\", \"Praha 21\", \"Praha 22\"]\n\n\ndef get_request_data(request):\n session = HTMLSession()\n request = session.get(request)\n return request.content\n\ndef get_place_details(ids):\n # init list of stored details\n details_list = []\n # prepare request url with all given parameters\n request_url = \"https://maps.googleapis.com/maps/api/place/details/json?fields=name\"\n for parameter in parameters:\n request_url = request_url + \"%2C\" + parameter\n # iterate through all id and get details of the place\n for unique_id in ids:\n request_url_full = request_url + \"&place_id=\" + unique_id + \"&key=\" + api_key\n try:\n data = get_request_data(request_url_full)\n 
json_data = json.loads(data)\n result = json_data[\"result\"]\n record_dict = {}\n # iterate through all parameter and get value with relevant manipulation\n for parameter in parameters:\n if parameter in result:\n if parameter == \"name\":\n record_dict[parameter] = result[\"name\"]\n print(\"Requesting details of \" + result[\"name\"])\n elif parameter == \"opening_hours\":\n opening_hours = result[parameter]\n days = \"\"\n for day in opening_hours[\"weekday_text\"]:\n days = days + day + \" , \"\n record_dict[parameter] = days\n elif parameter == \"formatted_address\":\n record_dict[\"formatted_address\"] = result[parameter]\n # HACK - hardcoded distribution of \"formatted_address\" to street , city and zipcode\n address_list = result[parameter].split(',')\n record_dict[\"street\"] = address_list[0]\n record_dict[\"zipcode\"] = address_list[len(address_list)-2][0:7]\n record_dict[\"city\"] = address_list[len(address_list)-2][7:len(address_list[len(address_list)-2])]\n else:\n record_dict[parameter] = result[parameter]\n else:\n record_dict[parameter] = \"n/a\"\n details_list.append(record_dict)\n\n except get_request_data.exceptions.RequestException as e:\n print(\"error:\" + e)\n\n return details_list\n\n\ndef write_csv(details, parameters_in):\n # writing to csv file\n with open(file_name, 'w') as csvfile:\n # creating a csv writer object\n csv_writer = csv.writer(csvfile)\n # HACK - hardcoded distribution of \"formatted_address\" to street , city and zipcode\n if \"formatted_address\" in parameters_in:\n parameters_in.insert(parameters_in.index(\"formatted_address\")+ 1, \"zipcode\")\n parameters_in.insert(parameters_in.index(\"formatted_address\")+ 1, \"city\")\n parameters_in.insert(parameters_in.index(\"formatted_address\")+ 1, \"street\")\n\n csv_writer.writerow(parameters_in)\n #itirate through all detais and create list for each row\n for detail in details:\n row = []\n for parameter in parameters_in:\n row.append(detail[parameter])\n csv_writer.writerow(row)\n csvfile.close()\n print(\"Saved in \" + file_name + \" in \" + str(pathlib.Path().absolute()))\n\n\ndef search_all(places_in, names_in):\n # int hashset to store unique id places (to avoid duplication)\n set_ids = set()\n # iterate through all names and all places to get complete list\n for name in names_in:\n for place in places_in:\n print(\"Searching name \" + name + \" in \" + place)\n # create request and get data in json format (dirty)\n request_string = \"https://maps.googleapis.com/maps/api/place/textsearch/json?query=\"\n request_string = request_string + name + \"+in+\" + place + \"&key=\" + api_key\n data = get_request_data(request_string)\n json_data = json.loads(data)\n # iterate results and saves only which are not duplicated\n for result in json_data[\"results\"]:\n if not result['place_id'] in set_ids:\n set_ids.add(result['place_id'])\n print(\" Adding id of \" + result['name'])\n return set_ids\n\nif __name__ == '__main__':\n # get non duplicated ids for each place based on search through all place_names and places\n ids = search_all(places, places_names)\n # get details for each place id\n details = get_place_details(ids)\n # save details to csv\n write_csv(details, parameters)\n", "id": "4592634", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "projects/umapa/google_places/main.py" } ]
0
wladimirguerra
[ { "content": "import asyncio\nimport atexit\nimport typing\nfrom typing import Callable\n\n\nclass ExecutorQueue:\n \"\"\"\n An executor queue class that provide a way to call `Callables` at a\n `call_interval`.\n\n The callback are executed in a thread pool (`ThreadPoolExecutor`) so\n it is possible to enqueue\n blocking callbacks too.\n\n Parameters\n ----------\n call_interval: float\n The interval between callbacks execution in seconds.\n callback_queue_size: int\n The maximum number of callback that can reside at the callback\n queue to be called. The minimum value is 10. If a number less\n than 10 is provided it will be coerced to 10.\n \"\"\"\n\n _call_interval: float\n\n # A queue where the str is the Tread name for debug purpose\n _queue: asyncio.Queue[(asyncio.Future, Callable[[], typing.Any], typing.Optional[str])]\n _dispatcher_task: typing.Optional[asyncio.Task] = None\n _loop: typing.Optional[asyncio.AbstractEventLoop] = None\n\n def __init__(self, *, call_interval: float = 0.5, callback_queue_size: int = 30) -> None:\n\n super().__init__()\n self._queue = asyncio.Queue(\n maxsize=max([callback_queue_size, 10]))\n\n if call_interval <= 0:\n raise ValueError(\"call_interval must be greater than zero.\")\n\n self._call_interval = call_interval\n self._loop = asyncio.get_event_loop()\n self._dispatcher_task = asyncio.create_task(self._dispatcher_worker())\n\n def cleanup():\n self._dispatcher_task.cancel()\n\n atexit.register(cleanup)\n\n async def ready(self):\n\n # A recursive test to reduce the waiting time\n async def is_ready(deep=0):\n ready_condition = self._loop is not None \\\n and self._dispatcher_task is not None \\\n and not self._dispatcher_task.done()\n\n if not ready_condition:\n if deep < 5:\n await asyncio.sleep(0.2)\n return await is_ready(deep + 1)\n return False\n return True\n\n return await is_ready()\n\n @property\n def call_interval(self):\n \"\"\"\n The interval to wait between two calls\n\n Returns\n -------\n float\n Interval in seconds\n \"\"\"\n return self._call_interval\n\n @call_interval.setter\n def call_interval(self, interval: float):\n \"\"\"\n The interval to wait between two calls\n\n Parameters\n ----------\n interval\n Interval in seconds\n \"\"\"\n if interval <= 0.5:\n raise ValueError(\"Interval must be greater than 500 ms.\")\n\n if interval is None:\n raise ValueError(\"Interval must not be null\")\n\n self._call_interval = interval\n\n async def _dispatcher_worker(self):\n \"\"\"\n The work dispatcher that runs a worker for each enqueued\n callable.\n \"\"\"\n while True:\n (future, _callback, thread_name) = await self._queue.get()\n\n # Run callback in a thread pool in case of the callback block the event loop\n # See https://docs.python.org/3/library/asyncio-eventloop.html#id14\n #\n # The future.set_result 'release' the enqueue_callback and let it return the future\n # that will have the callback value as it result.\n future.set_result(future.get_loop().run_in_executor(None, _callback))\n\n # Await between calls\n await asyncio.sleep(self._call_interval)\n\n def stop(self):\n \"\"\"\n Stops the queue scan and cancel the pending tasks.\n \"\"\"\n if self._dispatcher_task is not None:\n self._dispatcher_task.cancel()\n\n async def enqueue_callback(self, callback: Callable[[], typing.Any],\n thread_name_prefix: typing.Optional[str] = None,\n timeout: float = None) -> typing.Any:\n \"\"\"\n Enqueue callback to be executed one by one with\n `callback_interval` seconds between executions.\n\n The maximum number of calls that can be 
enqueued is 30 by\n default. If the maximum is reached it will wait for `timeout` to\n put in the queue. If the timeout occurs the ``TimeoutError`` is\n raised.\n\n Parameters\n ----------\n timeout\n The number in seconds to wait to put the callback in the\n queue if it is full. If it is not provided then no timeout\n erro will be raise and the code will be blocked until the\n callback is put in the queue.\n thread_name_prefix\n The Thread name for debug purpose.\n callback\n The callback to be enqueued\n\n Returns\n -------\n typing.Any\n The callback result.\n \"\"\"\n\n future = self._loop.create_future()\n put_task = self._queue.put((future, callback, thread_name_prefix))\n\n if timeout is not None:\n # Await 10 seconds to put item on queue if it is full\n await asyncio.wait_for(put_task, timeout=timeout)\n\n # `future` is a future of a future where will return the callback value.\n # So when `await future` it will return the future that will return the callback value. Thats\n # the reason to await again.\n # The first await will be 'released' when the callback get extracted from the queue.\n return await (await future)\n", "id": "12263619", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/callback_executor/executor.py" }, { "content": "# Copyright (c) 2021 <NAME>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n#\nimport asyncio\nimport logging\nimport math\nimport time\nfrom datetime import datetime\nfrom typing import Awaitable\nfrom unittest import IsolatedAsyncioTestCase\n\nfrom src.callback_executor import ExecutorQueue\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass TestCallbackExecutor(IsolatedAsyncioTestCase):\n\n async def test_start_stop(self):\n exec_queue = ExecutorQueue(call_interval=23)\n self.assertEqual(exec_queue.call_interval, 23)\n self.assertTrue(await exec_queue.ready())\n exec_queue.stop()\n await asyncio.sleep(1)\n self.assertTrue(exec_queue._dispatcher_task.cancelled())\n\n async def test_enqueue_callback(self):\n exec_queue = ExecutorQueue(call_interval=23)\n\n def _callback():\n return 23\n\n return_value = await exec_queue.enqueue_callback(_callback)\n\n self.assertEqual(return_value, 23)\n exec_queue.stop()\n\n async def test_blocking_callbacks(self):\n def first_callback():\n first_call_time = datetime.now()\n print(f'First call at: ${first_call_time.strftime(\"%Y-%M-%D %H:%M:%S\")}')\n time.sleep(3)\n return first_call_time\n\n def second_callback():\n second_call_time = datetime.now()\n print(f'Second call at: ${second_call_time.strftime(\"%Y-%M-%D %H:%M:%S\")}')\n time.sleep(5)\n return second_call_time\n\n call_interval = 1\n\n exec_queue = ExecutorQueue(call_interval=call_interval)\n\n self.assertTrue(await exec_queue.ready())\n\n tasks: list[Awaitable] = [exec_queue.enqueue_callback(first_callback,\n \"first_callback\"),\n exec_queue.enqueue_callback(second_callback,\n \"second_callback\")]\n\n times = await asyncio.gather(*tasks)\n delta_time = (times[1] - times[0]).total_seconds()\n\n # Test if the time between the executions of the two callbacks is equal\n self.assertEqual(math.trunc(delta_time), call_interval)\n\n exec_queue.stop()\n", "id": "1316426", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/test_executor.py" } ]
0
tpape
[ { "content": "\"\"\"\nLDAP Authenticator plugin for JupyterHub\n\"\"\"\n\n# MIT License\n#\n# Copyright (c) 2018 <NAME>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport copy\nimport os\nimport pipes\nimport pwd\nimport re\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT\nfrom jupyterhub.auth import Authenticator\nfrom jupyterhub.traitlets import Command\nimport ldap3\nfrom ldap3.utils.conv import escape_filter_chars\nfrom tornado import gen\nfrom traitlets import Any, Int, Bool, List, Unicode, Union, default, observe\n\n\nclass LDAPAuthenticator(Authenticator):\n \"\"\"\n LDAP Authenticator for Jupyterhub\n \"\"\"\n\n server_hosts = Union(\n [List(), Unicode()],\n config=True,\n help=\"\"\"\n List of Names, IPs, or the complete URLs in the scheme://hostname:hostport\n format of the server (required).\n \"\"\"\n )\n\n server_port = Int(\n allow_none=True,\n default_value=None,\n config=True,\n help=\"\"\"\n The port where the LDAP server is listening. Typically 389, for a\n cleartext connection, and 636 for a secured connection (defaults to None).\n \"\"\"\n )\n\n server_use_ssl = Bool(\n default_value=False,\n config=True,\n help=\"\"\"\n Boolean specifying if the connection is on a secure port (defaults to False).\n \"\"\"\n )\n\n server_connect_timeout = Int(\n allow_none=True,\n default_value=None,\n config=True,\n help=\"\"\"\n Timeout in seconds permitted when establishing an ldap connection before\n raising an exception (defaults to None).\n \"\"\"\n )\n\n server_receive_timeout = Int(\n allow_none=True,\n default_value=None,\n config=True,\n help=\"\"\"\n Timeout in seconds permitted for responses from established ldap\n connections before raising an exception (defaults to None).\n \"\"\"\n )\n\n server_pool_strategy = Unicode(\n default_value='FIRST',\n config=True,\n help=\"\"\"\n Available Pool HA strategies (defaults to 'FIRST').\n\n FIRST: Gets the first server in the pool, if 'server_pool_active' is\n set to True gets the first available server.\n ROUND_ROBIN: Each time the connection is open the subsequent server in\n the pool is used. If 'server_pool_active' is set to True unavailable\n servers will be discarded.\n RANDOM: each time the connection is open a random server is chosen in the\n pool. 
If 'server_pool_active' is set to True unavailable servers\n will be discarded.\n \"\"\"\n )\n\n server_pool_active = Union(\n [Bool(), Int()],\n default_value=True,\n config=True,\n help=\"\"\"\n If True the ServerPool strategy will check for server availability. Set\n to Integer for maximum number of cycles to try before giving up\n (defaults to True).\n \"\"\"\n )\n\n server_pool_exhaust = Union(\n [Bool(), Int()],\n default_value=False,\n config=True,\n help=\"\"\"\n If True, any inactive servers will be removed from the pool. If set to\n an Integer, this will be the number of seconds an unreachable server is\n considered offline. When this timeout expires the server is reinserted\n in the pool and checked again for availability (defaults to False).\n \"\"\"\n )\n\n bind_user_dn = Unicode(\n allow_none=True,\n default_value=None,\n config=True,\n help=\"\"\"\n The account of the user to log in for simple bind (defaults to None).\n \"\"\"\n )\n\n bind_user_password = Unicode(\n allow_none=True,\n default_value=None,\n config=True,\n help=\"\"\"\n The password of the user for simple bind (defaults to None)\n \"\"\"\n )\n\n user_search_base = Unicode(\n config=True,\n help=\"\"\"\n The location in the Directory Information Tree where the user search\n will start.\n \"\"\"\n )\n\n user_search_filter = Unicode(\n config=True,\n help=\"\"\"\n LDAP search filter to validate that the authenticating user exists\n within the organization. Search filters containing '{username}' will\n have that value substituted with the username of the authenticating user.\n \"\"\"\n )\n \n filter_by_group = Bool(\n default_value=True,\n config=True,\n help=\"\"\"\n Boolean specifying if the group membership filtering is enabled or not.\n \"\"\"\n )\n\n user_membership_attribute = Unicode(\n default_value='memberOf',\n config=True,\n help=\"\"\"\n LDAP Attribute used to associate user group membership\n (defaults to 'memberOf').\n \"\"\"\n )\n\n group_search_base = Unicode(\n config=True,\n help=\"\"\"\n The location in the Directory Information Tree where the group search\n will start. Search string containing '{group}' will be substituted\n with entries taken from allow_nested_groups.\n \"\"\"\n )\n\n group_search_filter = Unicode(\n config=True,\n help=\"\"\"\n LDAP search filter to return members of groups defined in the\n allowed_groups parameter. Search filters containing '{group}' will\n have that value substituted with the group dns provided in the\n allowed_groups parameter.\n \"\"\"\n )\n\n allowed_groups = Union(\n [Unicode(), List()],\n config=True,\n help=\"\"\"\n List of LDAP group DNs that users must be a member of in order to be granted\n login.\n \"\"\"\n )\n\n allow_nested_groups = Bool(\n default_value=False,\n config=True,\n help=\"\"\"\n Boolean allowing for recursive search of members within nested groups of\n allowed_groups (defaults to False).\n \"\"\"\n )\n\n username_pattern = Unicode(\n config=True,\n help=\"\"\"\n Regular expression pattern that all valid usernames must match. If a\n username does not match the pattern specified here, authentication will\n not be attempted. 
If not set, allow any username (defaults to None).\n \"\"\"\n )\n\n username_regex = Any(\n help=\"\"\"\n Compiled regex kept in sync with `username_pattern`\n \"\"\"\n )\n\n @observe('username_pattern')\n def _username_pattern_changed(self, change):\n if not change['new']:\n self.username_regex = None\n self.username_regex = re.compile(change['new'])\n\n create_user_home_dir = Bool(\n default_value=False,\n config=True,\n help=\"\"\"\n If set to True, will attempt to create a user's home directory\n locally if that directory does not exist already.\n \"\"\"\n )\n\n create_user_home_dir_cmd = Command(\n config=True,\n help=\"\"\"\n Command to create a users home directory.\n \"\"\"\n )\n @default('create_user_home_dir_cmd')\n def _default_create_user_home_dir_cmd(self):\n if sys.platform == 'linux':\n home_dir_cmd = ['mkhomedir_helper']\n else:\n self.log.debug(\"Not sure how to create a home directory on '%s' system\", sys.platform)\n home_dir_cmd = ['']\n return home_dir_cmd\n\n @gen.coroutine\n def add_user(self, user):\n username = user.name\n user_exists = yield gen.maybe_future(self.user_home_dir_exists(username))\n if not user_exists:\n if self.create_user_home_dir:\n yield gen.maybe_future(self.add_user_home_dir(username))\n else:\n raise KeyError(\"Domain user '%s' does not exists locally.\" % username)\n yield gen.maybe_future(super().add_user(user))\n\n def user_home_dir_exists(self, username):\n \"\"\"\n Verify user home directory exists\n \"\"\"\n user = pwd.getpwnam(username)\n home_dir = user[5]\n return bool(os.path.isdir(home_dir))\n\n def add_user_home_dir(self, username):\n \"\"\"\n Creates user home directory\n \"\"\"\n cmd = [arg.replace('USERNAME', username) for arg in self.create_user_home_dir_cmd] + [username]\n self.log.info(\"Creating '%s' user home directory using command '%s'\", username, ' '.join(map(pipes.quote, cmd)))\n create_dir = Popen(cmd, stdout=PIPE, stderr=STDOUT)\n create_dir.wait()\n if create_dir.returncode:\n err = create_dir.stdout.read().decode('utf8', 'replace')\n raise RuntimeError(\"Failed to create system user %s: %s\" % (username, err))\n\n def normalize_username(self, username):\n \"\"\"\n Normalize username for ldap query\n\n modifications:\n - format to lowercase\n - escape filter characters (ldap3)\n \"\"\"\n username = username.lower()\n username = escape_filter_chars(username)\n return username\n\n def validate_username(self, username):\n \"\"\"\n Validate a normalized username\n Return True if username is valid, False otherwise.\n \"\"\"\n if '/' in username:\n # / is not allowed in usernames\n return False\n if not username:\n # empty usernames are not allowed\n return False\n if not self.username_regex:\n return True\n return bool(self.username_regex.match(username))\n\n def validate_host(self, host):\n \"\"\"\n Validate hostname\n Return True if host is valid, False otherwise.\n \"\"\"\n host_ip_regex = re.compile(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')\n host_name_regex = re.compile(r'^((?!-)[a-z0-9\\-]{1,63}(?<!-)\\.){1,}((?!-)[a-z0-9\\-]{1,63}(?<!-)){1}$')\n host_url_regex = re.compile(r'^(ldaps?://)(((?!-)[a-z0-9\\-]{1,63}(?<!-)\\.){1,}((?!-)[a-z0-9\\-]{1,63}(?<!-)){1}):([0-9]{3})$')\n if bool(host_ip_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(host_name_regex.match(host)):\n # using a hostname address\n valid = True\n elif bool(host_url_regex.match(host)):\n # using host url address\n valid = True\n else:\n # unsupported host 
format\n valid = False\n return valid\n\n def create_ldap_server_pool_obj(self, ldap_servers=None):\n \"\"\"\n Create ldap3 ServerPool Object\n \"\"\"\n server_pool = ldap3.ServerPool(\n ldap_servers,\n pool_strategy=self.server_pool_strategy.upper(),\n active=self.server_pool_active,\n exhaust=self.server_pool_exhaust\n )\n return server_pool\n\n def create_ldap_server_obj(self, host):\n \"\"\"\n Create ldap3 Server Object\n \"\"\"\n server = ldap3.Server(\n host,\n port=self.server_port,\n use_ssl=self.server_use_ssl,\n connect_timeout=self.server_connect_timeout\n )\n return server\n\n def ldap_connection(self, server_pool, username, password):\n \"\"\"\n Create ldaps Connection Object\n \"\"\"\n try:\n conn = ldap3.Connection(\n server_pool,\n user=username,\n password=password,\n auto_bind=ldap3.AUTO_BIND_TLS_BEFORE_BIND,\n read_only=True,\n receive_timeout=self.server_receive_timeout)\n except ldap3.core.exceptions.LDAPBindError as exc:\n msg = '\\n{exc_type}: {exc_msg}'.format(\n exc_type=exc.__class__.__name__,\n exc_msg=exc.args[0] if exc.args else '')\n self.log.error(\"Failed to connect to ldap: %s\", msg)\n return None\n return conn\n\n def get_nested_groups(self, conn, group):\n \"\"\"\n Recursively search group for nested memberships\n \"\"\"\n nested_groups = list()\n conn.search(\n search_base=self.group_search_base,\n search_filter=self.group_search_filter.format(group=group),\n search_scope=ldap3.SUBTREE)\n if conn.response:\n for nested_group in conn.response:\n nested_groups.extend([nested_group['dn']])\n groups = self.get_nested_groups(conn, nested_group['dn'])\n nested_groups.extend(groups)\n nested_groups = list(set(nested_groups))\n return nested_groups\n\n\n @gen.coroutine\n def authenticate(self, handler, data):\n\n # define vars\n username = data['username']\n password = data['password']\n server_pool = self.create_ldap_server_pool_obj()\n conn_servers = list()\n\n # validate credentials\n username = self.normalize_username(username)\n if not self.validate_username(username):\n self.log.error('Unsupported username supplied')\n return None\n if password is None or password.strip() == '':\n self.log.error('Empty password supplied')\n return None\n\n # cast server_hosts to list\n if isinstance(self.server_hosts, str):\n self.server_hosts = self.server_hosts.split()\n\n # validate hosts and populate server_pool object\n for host in self.server_hosts:\n host = host.strip().lower()\n if not self.validate_host(host):\n self.log.warning(\"Host '%s' not supplied in approved format. Removing host from Server Pool\", host)\n break\n server = self.create_ldap_server_obj(host)\n server_pool.add(server)\n conn_servers.extend([host])\n\n # verify ldap connection object parameters are defined\n if len(server_pool.servers) < 1:\n self.log.error(\"No hosts provided. ldap connection requires at least 1 host to connect to.\")\n return None\n if not self.bind_user_dn or self.bind_user_dn.strip() == '':\n self.log.error(\"'bind_user_dn' config value undefined. requried for ldap connection\")\n return None\n if not self.bind_user_password or self.bind_user_password.strip() == '':\n self.log.error(\"'bind_user_password' config value undefined. requried for ldap connection\")\n return None\n\n # verify ldap search object parameters are defined\n if not self.user_search_base or self.user_search_base.strip() == '':\n self.log.error(\"'user_search_base' config value undefined. 
requried for ldap search\")\n return None\n if not self.user_search_filter or self.user_search_filter.strip() == '':\n self.log.error(\"'user_search_filter' config value undefined. requried for ldap search\")\n return None\n\n # open ldap connection and authenticate\n self.log.debug(\"Attempting ldap connection to %s with user '%s'\", conn_servers, self.bind_user_dn)\n conn = self.ldap_connection(\n server_pool,\n self.bind_user_dn,\n self.bind_user_password)\n\n # proceed if connection has been established\n if not conn or not conn.bind():\n self.log.error(\n \"Could not establish ldap connection to %s using '%s' and supplied bind_user_password.\",\n conn_servers, self.bind_user_dn)\n return None\n else:\n self.log.debug(\n \"Successfully established connection to %s with user '%s'\",\n conn_servers, self.bind_user_dn)\n\n # compile list of permitted groups\n permitted_groups = copy.deepcopy(self.allowed_groups)\n if self.allow_nested_groups:\n for group in self.allowed_groups:\n nested_groups = self.get_nested_groups(conn, group)\n permitted_groups.extend(nested_groups)\n\n # format user search filter\n auth_user_search_filter = self.user_search_filter.format(\n username=username)\n\n # search for authenticating user in ldap\n self.log.debug(\"Attempting LDAP search using search_filter '%s'.\", auth_user_search_filter)\n conn.search(\n search_base=self.user_search_base,\n search_filter=auth_user_search_filter,\n search_scope=ldap3.SUBTREE,\n attributes=self.user_membership_attribute,\n paged_size=2)\n\n # handle abnormal search results\n if not conn.response or 'attributes' not in conn.response[0].keys():\n self.log.error(\n \"LDAP search '%s' found %i result(s).\",\n auth_user_search_filter, len(conn.response))\n return None\n elif len(conn.response) > 1:\n self.log.error(\n \"LDAP search '%s' found %i result(s). Please narrow search to 1 result.\",\n auth_user_search_filter, len(conn.response))\n return None\n else:\n self.log.debug(\"LDAP search '%s' found %i result(s).\", auth_user_search_filter, len(conn.response))\n\n # copy response to var\n search_response = copy.deepcopy(conn.response[0])\n\n # get authenticating user's ldap attributes\n if not search_response['dn'] or search_response['dn'].strip == '':\n self.log.error(\n \"Search results for user '%s' returned 'dn' attribute with undefined or null value.\",\n username)\n conn.unbind()\n return None\n else:\n self.log.debug(\n \"Search results for user '%s' returned 'dn' attribute as '%s'\",\n username, search_response['dn'])\n auth_user_dn = search_response['dn']\n if not search_response['attributes'][self.user_membership_attribute]:\n self.log.error(\n \"Search results for user '%s' returned '%s' attribute with undefned or null value.\",\n username, self.user_membership_attribute)\n conn.unbind()\n return None\n else:\n self.log.debug(\n \"Search results for user '%s' returned '%s' attribute as %s\",\n username, self.user_membership_attribute,\n search_response['attributes'][self.user_membership_attribute])\n auth_user_memberships = search_response['attributes'][self.user_membership_attribute]\n\n # is authenticating user a member of permitted_groups\n allowed_memberships = list(set(auth_user_memberships).intersection(permitted_groups))\n if bool(allowed_memberships) or not self.filter_by_group:\n self.log.debug(\n \"User '%s' found in the following allowed ldap groups %s. 
Proceeding with authentication.\",\n username, allowed_memberships)\n\n # rebind ldap connection with authenticating user, gather results, and close connection\n conn.rebind(\n user=auth_user_dn,\n password=password)\n auth_bound = copy.deepcopy(conn.bind())\n conn.unbind()\n if not auth_bound:\n self.log.error(\n \"Could not establish ldap connection to %s using '%s' and supplied bind_user_password.\",\n conn_servers, self.bind_user_dn)\n auth_response = None\n else:\n self.log.info(\"User '%s' sucessfully authenticated against ldap server %r.\", username, conn_servers)\n auth_response = username\n else:\n self.log.error(\"User '%s' is not a member of any permitted groups %s\", username, permitted_groups)\n auth_response = None\n\n permitted_groups = None\n return auth_response\n", "id": "10332477", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ldapauthenticator/ldapauthenticator.py" } ]
0
rkeisling
[ { "content": "import sys\nfrom PIL import Image\nimport time\n\ndef print_ascii(image):\n # pass the image as command line argument\n image_path = image\n img = Image.open(image_path)\n\n # resize the image\n width, height = img.size\n aspect_ratio = height/width\n new_width = 40\n new_height = aspect_ratio * new_width * .35\n img = img.resize((new_width, int(new_height)))\n # new size of image\n # print(img.size)\n\n # convert image to greyscale format\n img = img.convert('L')\n\n pixels = img.getdata()\n\n # replace each pixel with a character from array\n chars = [\"B\",\"S\",\"#\",\"&\",\"@\",\"$\",\"%\",\"*\",\"!\",\":\",\".\"]\n new_pixels = [chars[pixel//25] for pixel in pixels]\n new_pixels = ''.join(new_pixels)\n\n # split string of chars into multiple strings of length equal to new width and create a list\n new_pixels_count = len(new_pixels)\n ascii_image = [new_pixels[index:index + new_width] for index in range(0, new_pixels_count, new_width)]\n ascii_image = \"\\n\".join(ascii_image)\n print(ascii_image)\n\n time.sleep(2)\n", "id": "6385092", "language": "Python", "matching_score": 1.527009129524231, "max_stars_count": 0, "path": "convert_image_to_ascii.py" }, { "content": "import sys, time\nfrom playsound import playsound\nimport random\nfrom threading import Thread\nfrom convert_image_to_ascii import print_ascii\n\ndef main():\n display_starting_junk()\n disply_intro()\n play_success_sound()\n display_vault_boy()\n\n\ndef display_starting_junk():\n for i in range(40):\n print('\\n')\n time.sleep(0.05)\n print_one_by_one_some_sound(make_starting_junk())\n for i in range(40):\n print('\\n')\n time.sleep(0.05)\n\ndef disply_intro():\n lines = ['*************** PIP-OS(R) V1.0 ***************\\n',\n ' \\n', ' \\n', ' \\n',\n 'COPYRIGHT 2075 ROBCO(R)\\n',\n 'LOADER V1.1\\n',\n 'EXEC VERSION 41.10\\n',\n '64k RAM SYSTEM\\n',\n '38911 BYTES FREE\\n',\n 'NO HOLOTAPE FOUND\\n',\n 'LOAD ROM(1): DEITRIX 303\\n']\n\n for line in lines:\n print_one_by_one_with_sound(line, delay=0.1)\n\n for i in range(30):\n print('\\n')\n time.sleep(0.05)\n\ndef display_vault_boy():\n img = 'images/vault_boy.jpeg'\n print_ascii(img)\n\ndef print_one_by_one_with_sound(text, delay=0.25):\n for char in text:\n if char != ' ' and char != '\\n':\n thread = Thread(target=play_random_sound)\n thread.start()\n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(delay)\n\ndef print_one_by_one_some_sound(text, delay=0.001):\n for char in text:\n roll_for_sound = random.randrange(1,51)\n if roll_for_sound == 7:\n thread = Thread(target=play_random_sound)\n thread.start()\n sys.stdout.write(char)\n sys.stdout.flush()\n time.sleep(delay)\n\ndef play_random_sound():\n sounds = ['sounds/short_type.wav', 'sounds/short_type2.wav',\n 'sounds/short_type3.wav', 'sounds/short_type4.wav',\n '', '', '', '', '', '', '']\n sound_to_play = random.choice(sounds)\n if sound_to_play == '':\n return\n playsound(sound_to_play)\n\ndef play_success_sound():\n playsound('sounds/ui_hacking_passgood.wav')\n\ndef make_starting_junk():\n keyphrases = [' start memory discovery', ' CPUO starting cell relocation',\n ' CPUO launch EFIO', ' CPUO starting EFIO']\n middle_pieces = [' 1', ' 0', ' 0x0000A4', ' 0x00000000000000000',\n ' 0x000014', ' 0x000009', ' 0x000000000000E003D']\n # get 3-7 of middle pieces to put in between keyphrases\n # begin large string with *\n huge_string = '*'\n for i in range(70):\n num_middle_pieces = random.randrange(3,8)\n middle_piece = ''\n for i in range(num_middle_pieces):\n middle_piece += 
random.choice(middle_pieces)\n huge_string += middle_piece\n huge_string += random.choice(keyphrases)\n\n return huge_string\n\nif __name__ == '__main__':\n main()\n", "id": "8641036", "language": "Python", "matching_score": 1.310582160949707, "max_stars_count": 0, "path": "run.py" }, { "content": "import npyscreen\nimport random\nimport os\n\nclass VaultApp(npyscreen.NPSAppManaged):\n def onStart(self):\n self.addForm('MAIN', myStartPipScreen, name='New Pip-Boy 3000')\n self.addForm('PIP', pipMainScreen, name='Pip-Boy 3000')\n self.addForm('INV', inventoryPage, name='INVENTORY')\n\n def change_form(self, name):\n self.switchForm(name)\n\nclass myStartPipScreen(npyscreen.ActionForm):\n def afterEditing(self):\n self.parentApp.setNextForm('PIP')\n\n def create(self):\n self.myName = self.add(npyscreen.TitleText,\n name=\"Your name here:\",\n begin_entry_at=16,\n use_two_lines=False)\n self.myVault = self.add(npyscreen.TitleSelectOne,\n max_height=5,\n name='Vault',\n values = ['101', '111', '108', '77', '81'],\n scroll_exit = True)\n def on_ok(self):\n self.userId = random.randrange(1,100000)\n self.userGreeting = \"User \" + str(self.userId)\n npyscreen.notify_wait(\"Welcome to the future, {0}.\".format(self.userGreeting), title=\"WELCOME\")\n npyscreen.notify_confirm(\"Vault-Tec and ROBCO Industries are not responsible for bodily harm resulting from use of the Pip-Boy 3000. Please consult your handbook for all guidelines and warnings.\", editw=1, title=\"WARNING\")\n exiting = npyscreen.notify_yes_no(\"Do you agree to the terms and conditions set forth by the handbook at this time?\", title=\"TERMS & CONDITIONS\", editw=1)\n if (exiting):\n npyscreen.notify_wait(\"Thank you. Proceeding...\")\n else:\n npyscreen.notify_wait(\"The Pip-Boy 3000 will now power off.\")\n\n def on_cancel(self):\n npyscreen.notify_wait(\"The Pip-Boy 3000 will now power off.\")\n\nclass pipMainScreen(npyscreen.ActionFormV2WithMenus):\n\n def create(self):\n # navigationOptions = ['STATUS', 'INVENTORY', 'DATA', 'MAP', 'RADIO']\n # self.option = self.add(npyscreen.TitleSelectOne,\n # name='Options',\n # values = navigationOptions,\n # scroll_exit = True)\n self.invButton = self.add(npyscreen.Button, name='INVENTORY', value_changed_callback=self.invButtonPress)\n\n def invButtonPress(self, widget):\n npyscreen.notify_wait(\"ACCESSING INVENTORY...\")\n self.parentApp.change_form('INV')\n\n def on_ok(self):\n selection = self.option.get_selected_objects()[0]\n if (selection == 'INVENTORY'):\n npyscreen.notify_wait(\"ACCESSING INVENTORY...\")\n self.parentApp.change_form('INV')\n\n else:\n npyscreen.notify_wait(\"I don't know how it got to this.\")\n\n\nclass inventoryPage(npyscreen.FormWithMenus):\n def afterEditing(self):\n self.parentApp.setNextForm('INV')\n\n def create(self):\n self.itemOne = baseballBat()\n self.itemTwo = knife()\n self.items = [self.itemOne, self.itemTwo]\n self.inventoryMenu = self.new_menu(name=\"Inventory\")\n self.createNewMenusForItems()\n\n def selectedMessage(self, nameOfItem):\n npyscreen.notify_confirm(\"You have selected {0}.\".format(nameOfItem), \"{0}\".format(nameOfItem), editw=1)\n\n def exit_form(self):\n self.parentApp.switchForm(None)\n\n def createNewMenusForItems(self):\n item_count = 1\n for item in self.items:\n if (item.count > 0):\n self.item = self.inventoryMenu.addNewSubmenu(item.name, \"{0}\".format(item_count), self.selectedMessage, [item.name])\n # TODO add refresh page to not show these if item.count > 0\n self.item.addItem(\"Equip\", onSelect=item.equip, 
shortcut=\"E\")\n self.item.addItem(\"Repair\", onSelect=item.repair, shortcut=\"R\")\n self.item.addItem(\"Drop\", onSelect=item.drop, shortcut=\"D\")\n item_count += 1\n\n\nclass weapon:\n def drop(self):\n if (self.count > 0):\n self.count -= 1\n npyscreen.notify_wait(\"You have dropped one of {0}.\".format(self.name))\n else:\n npyscreen.notify_wait(\"Failed to drop {0}. Item count too low ({1})\".format(self.name, self.count))\n\n def equip(self):\n if (self.equipped == False):\n self.equipped = True\n npyscreen.notify_wait(\"You have equipped {0}. Condition is {1}.\".format(self.name, self.condition))\n else:\n self.equipped = False\n npyscreen.notify_wait(\"You have unequipped {0}.\".format(self.name))\n\n def repair(self):\n if (self.condition < 100):\n self.condition += 25\n npyscreen.notify_wait(\"You have repaired {0}. Condition is now {1}.\".format(self.name, self.condition))\n else:\n npyscreen.notify_wait(\"{0} already at max condition.\".format(self.name))\n\n def __init__(self):\n self.name = \"\"\n self.count = 0\n self.condition = 0\n self.equipped = False\n\n\nclass baseballBat(weapon):\n def __init__(self):\n self.name = \"Baseball Bat\"\n self.count = 1\n self.condition = 50\n self.equipped = False\n\nclass knife(weapon):\n def __init__(self):\n self.name = \"Knife\"\n self.count = 1\n self.condition = 25\n self.equipped = False\n\n#def pipFunc(*args):\n# F = myStartPipScreen(name = \"New Pip-Boy\")\n# F.edit()\n# return \"Created record for \" + F.myName.value\n\nif __name__ == '__main__':\n TestApp = VaultApp().run()\n", "id": "2473018", "language": "Python", "matching_score": 1.8310774564743042, "max_stars_count": 0, "path": "npyscreen_app.py" }, { "content": "from gladiator import Gladiator\nimport random\nimport time\nnames = ['Maximus', 'Brutus', 'Romulus', 'Remus', 'Aurelius', 'Vulpes']\nweapons = ['Spear', 'Sword', 'Knife', 'Mace']\n\n\ndef main():\n print(\"\"\"\n Welcome to the colloseum!\n Here you will command your champion to fight to the\n death against a gladiator of an opposing nation.\n You may choose your champion's name, weapon, and\n decide what he is to do during the fight.\n Begging for mercy and fleeing are signs of cowardice.\n Begin!\n \"\"\")\n begin = input(\"What is your champion's name? \").strip().lower().capitalize()\n weapon = input(\"\"\"\n Choose your champion's weapon:\n Spear (7-11 damage)\n Sword (4-13 damage)\n Knife (4-7 damage)\n Mace (9 damage)\n Fists (The Weapons of Real Men) (2-4 damage)\n Enter your choice:\n \"\"\").strip().lower().capitalize()\n if weapon == \"Spear\":\n print(\"Pointy and stabby. Nice!\")\n low = 7\n high = 11\n elif weapon == \"Sword\":\n print(\"Boring and original. Fantastic.\")\n low = 4\n high = 13\n elif weapon == \"Knife\":\n print(\"... I mean, if that's what you want.\")\n low = 4\n high = 7\n elif weapon == \"Mace\":\n print(\"Who needs a sharp edge, huh?\")\n low = 9\n high = 10\n elif weapon == \"Fists\":\n print(\"You are a brave soul.\")\n low = 2\n high = 4\n else:\n print(\"Not an option!\")\n begin = Gladiator(begin, 100, 0, low, high)\n name = names[random.randint(0, 5)]\n opp_weapon = weapons[random.randint(0, 3)]\n print(\"Your champion's opponent will be {0}. 
He will wield a {1}.\".format(\n name, opp_weapon))\n if opp_weapon == \"Spear\":\n opp_low = 7\n opp_high = 11\n elif opp_weapon == \"Sword\":\n opp_low = 4\n opp_high = 13\n elif opp_weapon == \"Knife\":\n opp_low = 4\n opp_high = 7\n elif opp_weapon == \"Mace\":\n opp_low = 9\n opp_high = 10\n name = Gladiator(name, 100, 0, opp_low, opp_high)\n\n while True:\n dead_check = begin.isDead(name)\n if dead_check == 1:\n print(\"You have have perished in the fighting pits at the hands of {0}!\".format(\n name.name))\n break\n elif dead_check == 2:\n print(\"You have slain {0} and arise a champion!\".format(name.name))\n break\n time.sleep(1)\n choice = input(\"\"\"\n What would you like your champion to do?\n - Attack\n - Heal\n - Beg\n - Flee\n \"\"\").strip().lower().capitalize()\n if choice == \"Attack\":\n print(begin.attack(name))\n elif choice == \"Heal\":\n print(begin.heal())\n elif choice == \"Beg\":\n print(begin.beg())\n break\n elif choice == \"Flee\":\n print(\"You are stabbed in the back and die as you flee.\")\n break\n print(name.attack(begin))\nif __name__ == '__main__':\n main()\n", "id": "8392484", "language": "Python", "matching_score": 2.471465587615967, "max_stars_count": 0, "path": "glad_game.py" }, { "content": "import random\n\n\nclass Gladiator:\n \"\"\" A gladiator with attributes (health, rage, damage_low, and damage_high)\n that can heal and tells when he is dead. \"\"\"\n\n def __init__(self, name, health, rage, damage_low, damage_high):\n \"\"\" (str, int, int, int, int) -> NoneType\n\n Creates a gladiator with health, rage, damage_low, and damage_high.\n \"\"\"\n self.name = name\n self.health = health\n self.rage = rage\n self.damage_low = damage_low\n self.damage_high = damage_high\n\n def attack(self, target):\n \"\"\" (class, class) -> str\n\n Attacks a target.\n An attack hits, crits, or misses.\n Hit does damage between damage_low and damage_high.\n A miss does 0 damage.\n A crit does double the damage of a hit.\n A deadly crit does triple damage of the hit plus the amount of rage\n the gladiator has.\n Crits occur '<rage>%' of the time.\n If there is a crit, rage is set to 0. 
Otherwise, it is increased by 15.\n \"\"\"\n damage_power = random.randint(self.damage_low, self.damage_high)\n crit_yes_no = random.randint(1, 101)\n miss_yes_no = random.randint(1, 21)\n if miss_yes_no in [1, 5, 10, 20]:\n self.rage = self.rage - 15\n if self.rage < 0:\n self.rage = 0\n return \"{0} missed and was discouraged.\".format(\n self.name)\n if crit_yes_no in range(1, self.rage):\n if crit_yes_no in range(self.rage // 2):\n damage_power = damage_power * 3\n target.health = target.health - damage_power\n return \"{0} got an extreme critical hit on {1} for {2} damage and {1} has {3} health and {4} rage remaining!\".format(\n self.name, target.name, damage_power, target.health, target.rage)\n self.rage -= self.rage\n damage_power = damage_power * 2\n target.health = target.health - damage_power\n return \"{0} got a critical hit on {1} for {2} damage and {1} has {3} health and {4} rage remaining!\".format(\n self.name, target.name, damage_power, target.health, target.rage)\n self.rage += 15\n if self.rage < 0:\n self.rage = 0\n elif self.rage > 100:\n self.rage = 100\n target.health = target.health - damage_power\n return \"{0} was hit for {1} damage and has {2} health and {3} rage remaining.\".format(\n target.name, damage_power, target.health, target.rage)\n\n def heal(self):\n \"\"\" (class) -> str\n\n Increases the gladiators health by 10 and decreases his rage by 10.\n \"\"\"\n self.health += 10\n self.rage -= 10\n if self.rage < 0:\n self.rage = 0\n return \"{0} was healed for 10 points and now has {1} health remaining.\".format(self.name, self.health)\n\n def beg(self):\n \"\"\" (class) -> str\n\n Return the result of begging for your champion's life.\n \"\"\"\n dice_roll = random.randint(0, 101)\n if dice_roll in range(0, 51):\n return \"Your opponent feels no shame and kills you anyway.\"\n elif dice_roll in range(51, 101):\n return \"Your opponent is feeling generous and spares you.\"\n\n def isDead(self, target):\n \"\"\" (class, class) -> int\n\n Returns whether or not each gladiator is alive.\n \"\"\"\n if self.health <= 0:\n return 1\n elif target.health <= 0:\n return 2\n", "id": "9654176", "language": "Python", "matching_score": 1.7073272466659546, "max_stars_count": 0, "path": "gladiator.py" } ]
1.707327
SmileyChris
[ { "content": "import json\n\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\n\nfrom ..constants import TRANSPORT_WS_PROTOCOL, WS_PROTOCOL\nfrom .subscriptions import subscription_server\n\n\nclass GraphQLSubscriptionConsumer(AsyncJsonWebsocketConsumer):\n async def connect(self):\n self.connection_context = None\n found_protocol = None\n for protocol in [WS_PROTOCOL, TRANSPORT_WS_PROTOCOL]:\n if protocol in self.scope[\"subprotocols\"]:\n found_protocol = protocol\n break\n if not found_protocol:\n await self.close()\n return\n self.connection_context = await subscription_server.handle(\n ws=self, request_context=self.scope\n )\n await self.accept(subprotocol=found_protocol)\n\n async def disconnect(self, code):\n if self.connection_context:\n self.connection_context.socket_closed = True\n await subscription_server.on_close(self.connection_context)\n\n async def receive_json(self, content):\n subscription_server.on_message(self.connection_context, content)\n\n @classmethod\n async def encode_json(cls, content):\n return json.dumps(content)\n", "id": "5687788", "language": "Python", "matching_score": 2.508737087249756, "max_stars_count": 7, "path": "graphql_ws/django/consumers.py" }, { "content": "from graphene_django.settings import graphene_settings\nfrom graphql import MiddlewareManager\n\nfrom ..base_async import (BaseAsyncConnectionContext,\n BaseAsyncSubscriptionServer)\nfrom ..observable_aiter import setup_observable_extension\n\nsetup_observable_extension()\n\n\nclass ChannelsConnectionContext(BaseAsyncConnectionContext):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.socket_closed = False\n\n async def send(self, data):\n if self.closed:\n return\n await self.ws.send_json(data)\n\n @property\n def closed(self):\n return self.socket_closed\n\n async def close(self, code):\n await self.ws.close(code=code)\n\n async def receive(self, code):\n \"\"\"\n Unused, as the django consumer handles receiving messages and passes\n them straight to ChannelsSubscriptionServer.on_message.\n \"\"\"\n\n\nclass ChannelsSubscriptionServer(BaseAsyncSubscriptionServer):\n async def handle(self, ws, request_context=None):\n connection_context = ChannelsConnectionContext(ws, request_context)\n await self.on_open(connection_context)\n return connection_context\n\n def get_graphql_params(self, connection_context, payload):\n params = super().get_graphql_params(connection_context, payload)\n middleware = graphene_settings.MIDDLEWARE\n if middleware:\n if not isinstance(middleware, MiddlewareManager):\n middleware = MiddlewareManager(\n *middleware, wrap_in_promise=False\n )\n params[\"middleware\"] = middleware\n return params\n\n\nsubscription_server = ChannelsSubscriptionServer(schema=graphene_settings.SCHEMA)\n", "id": "8985454", "language": "Python", "matching_score": 2.670684337615967, "max_stars_count": 7, "path": "graphql_ws/django/subscriptions.py" }, { "content": "from unittest import mock\n\nimport json\nimport promise\n\nimport pytest\n\nfrom graphql_ws import base, base_async\n\npytestmark = pytest.mark.asyncio\n\n\ntry:\n from unittest.mock import AsyncMock # Python 3.8+\nexcept ImportError:\n from mock import AsyncMock\n\n\nclass TstServer(base_async.BaseAsyncSubscriptionServer):\n def handle(self, *args, **kwargs):\n pass # pragma: no cover\n\n\n@pytest.fixture\ndef server():\n return TstServer(schema=None)\n\n\nasync def test_terminate(server: TstServer):\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n await 
server.on_connection_terminate(connection_context=context, op_id=1)\n context.close.assert_called_with(1011)\n\n\nasync def test_send_error(server: TstServer):\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n await server.send_error(connection_context=context, op_id=1, error=\"test error\")\n context.send.assert_called_with(\n {\"id\": 1, \"type\": \"error\", \"payload\": {\"message\": \"test error\"}}\n )\n\n\nasync def test_message(server: TstServer):\n server.process_message = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n msg = {\"id\": 1, \"type\": base.GQL_CONNECTION_INIT, \"payload\": \"\"}\n await server.on_message(context, msg)\n server.process_message.assert_called_with(context, msg)\n\n\nasync def test_message_str(server: TstServer):\n server.process_message = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n msg = {\"id\": 1, \"type\": base.GQL_CONNECTION_INIT, \"payload\": \"\"}\n await server.on_message(context, json.dumps(msg))\n server.process_message.assert_called_with(context, msg)\n\n\nasync def test_message_invalid(server: TstServer):\n server.send_error = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n await server.on_message(context, message=\"'not-json\")\n assert server.send_error.called\n\n\nasync def test_resolver(server: TstServer):\n server.send_message = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n result = mock.Mock()\n result.data = {\"test\": [1, 2]}\n result.errors = None\n await server.send_execution_result(\n context, op_id=1, execution_result=result\n )\n assert server.send_message.called\n\n\n@pytest.mark.asyncio\nasync def test_resolver_with_promise(server: TstServer):\n server.send_message = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n result = mock.Mock()\n result.data = {\"test\": [1, promise.Promise(lambda resolve, reject: resolve(2))]}\n result.errors = None\n await server.send_execution_result(\n context, op_id=1, execution_result=result\n )\n assert server.send_message.called\n assert result.data == {\"test\": [1, 2]}\n\n\nasync def test_resolver_with_nested_promise(server: TstServer):\n server.send_message = AsyncMock()\n context = AsyncMock(spec=base_async.BaseAsyncConnectionContext)\n result = mock.Mock()\n inner = promise.Promise(lambda resolve, reject: resolve(2))\n outer = promise.Promise(lambda resolve, reject: resolve({\"in\": inner}))\n result.data = {\"test\": [1, outer]}\n result.errors = None\n await server.send_execution_result(\n context, op_id=1, execution_result=result\n )\n assert server.send_message.called\n assert result.data == {\"test\": [1, {\"in\": 2}]}\n", "id": "4393564", "language": "Python", "matching_score": 3.1153948307037354, "max_stars_count": 7, "path": "tests/test_base_async.py" }, { "content": "from collections import OrderedDict\n\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\n\nimport pytest\nfrom graphql.execution.executors.sync import SyncExecutor\n\nfrom graphql_ws import base, base_sync, constants\n\n\n@pytest.fixture\ndef cc():\n cc = base.BaseConnectionContext(ws=None)\n cc.operations = {\"yes\": \"1\"}\n return cc\n\n\n@pytest.fixture\ndef ss():\n return base_sync.BaseSyncSubscriptionServer(schema=None)\n\n\nclass TestConnectionContextOperation:\n def test_no_operations_initially(self):\n cc = base.BaseConnectionContext(ws=None)\n assert not cc.operations\n\n def test_has_operation(self, cc):\n 
assert cc.has_operation(\"yes\")\n\n def test_has_operation_missing(self, cc):\n assert not cc.has_operation(\"no\")\n\n def test_register_operation(self, cc):\n cc.register_operation(\"new\", \"2\")\n assert \"new\" in cc.operations\n\n def test_get_operation(self, cc):\n assert cc.get_operation(\"yes\") == \"1\"\n\n def test_remove_operation(self, cc):\n cc.remove_operation(\"yes\")\n assert not cc.operations\n\n\nclass TestConnectionContextNotImplentedMethods:\n def test_receive(self):\n with pytest.raises(NotImplementedError):\n base.BaseConnectionContext(ws=None).receive()\n\n def test_send(self):\n with pytest.raises(NotImplementedError):\n base.BaseConnectionContext(ws=None).send(\"TEST\")\n\n def test_closed(self):\n with pytest.raises(NotImplementedError):\n base.BaseConnectionContext(ws=None).closed\n\n def test_close(self):\n with pytest.raises(NotImplementedError):\n base.BaseConnectionContext(ws=None).close(123)\n\n\nclass TestProcessMessage:\n def test_init(self, ss, cc):\n ss.on_connection_init = mock.Mock()\n ss.process_message(\n cc, {\"id\": \"1\", \"type\": constants.GQL_CONNECTION_INIT, \"payload\": \"payload\"}\n )\n ss.on_connection_init.assert_called_with(cc, \"1\", \"payload\")\n\n def test_terminate(self, ss, cc):\n ss.on_connection_terminate = mock.Mock()\n ss.process_message(cc, {\"id\": \"1\", \"type\": constants.GQL_CONNECTION_TERMINATE})\n ss.on_connection_terminate.assert_called_with(cc, \"1\")\n\n @pytest.mark.parametrize(\n \"transport_ws_protocol,expected_type\",\n ((False, constants.GQL_START), (True, constants.GQL_SUBSCRIBE)),\n )\n def test_start(self, ss, cc, transport_ws_protocol, expected_type):\n ss.get_graphql_params = mock.Mock()\n ss.get_graphql_params.return_value = {\"params\": True}\n cc.has_operation = mock.Mock()\n cc.has_operation.return_value = False\n cc.transport_ws_protocol = transport_ws_protocol\n ss.unsubscribe = mock.Mock()\n ss.on_start = mock.Mock()\n ss.process_message(\n cc, {\"id\": \"1\", \"type\": expected_type, \"payload\": {\"a\": \"b\"}}\n )\n assert not ss.unsubscribe.called\n ss.on_start.assert_called_with(cc, \"1\", {\"params\": True})\n\n def test_start_existing_op(self, ss, cc):\n ss.get_graphql_params = mock.Mock()\n ss.get_graphql_params.return_value = {\"params\": True}\n cc.has_operation = mock.Mock()\n cc.has_operation.return_value = True\n cc.unsubscribe = mock.Mock()\n ss.execute = mock.Mock()\n ss.send_message = mock.Mock()\n ss.process_message(\n cc, {\"id\": \"1\", \"type\": constants.GQL_START, \"payload\": {\"a\": \"b\"}}\n )\n assert cc.unsubscribe.called\n\n def test_start_bad_graphql_params(self, ss, cc):\n ss.get_graphql_params = mock.Mock()\n ss.get_graphql_params.return_value = None\n cc.has_operation = mock.Mock()\n cc.has_operation.return_value = False\n ss.send_error = mock.Mock()\n ss.unsubscribe = mock.Mock()\n ss.on_start = mock.Mock()\n ss.process_message(cc, {\"id\": \"1\", \"type\": None, \"payload\": {\"a\": \"b\"}})\n assert ss.send_error.called\n assert ss.send_error.call_args[0][:2] == (cc, \"1\")\n assert isinstance(ss.send_error.call_args[0][2], Exception)\n assert not ss.on_start.called\n\n @pytest.mark.parametrize(\n \"transport_ws_protocol,stop_type,invalid_stop_type\",\n (\n (False, constants.GQL_STOP, constants.GQL_COMPLETE),\n (True, constants.GQL_COMPLETE, constants.GQL_STOP),\n ),\n )\n def test_stop(\n self,\n ss,\n cc,\n transport_ws_protocol,\n stop_type,\n invalid_stop_type,\n ):\n ss.on_stop = mock.Mock()\n ss.send_error = mock.Mock()\n cc.transport_ws_protocol = 
transport_ws_protocol\n\n ss.process_message(cc, {\"id\": \"1\", \"type\": invalid_stop_type})\n assert ss.send_error.called\n assert ss.send_error.call_args[0][:2] == (cc, \"1\")\n assert isinstance(ss.send_error.call_args[0][2], Exception)\n assert not ss.on_stop.called\n\n ss.process_message(cc, {\"id\": \"1\", \"type\": stop_type})\n ss.on_stop.assert_called_with(cc, \"1\")\n\n def test_invalid(self, ss, cc):\n ss.send_error = mock.Mock()\n ss.process_message(cc, {\"id\": \"1\", \"type\": \"unknown\"})\n assert ss.send_error.called\n assert ss.send_error.call_args[0][:2] == (cc, \"1\")\n assert isinstance(ss.send_error.call_args[0][2], Exception)\n\n\ndef test_get_graphql_params(ss, cc):\n payload = {\n \"query\": \"req\",\n \"variables\": \"vars\",\n \"operationName\": \"query\",\n \"context\": {},\n }\n params = ss.get_graphql_params(cc, payload)\n assert isinstance(params.pop(\"executor\"), SyncExecutor)\n assert params == {\n \"request_string\": \"req\",\n \"variable_values\": \"vars\",\n \"operation_name\": \"query\",\n \"context_value\": {},\n }\n\n\ndef test_build_message(ss):\n assert ss.build_message(\"1\", \"query\", \"PAYLOAD\") == {\n \"id\": \"1\",\n \"type\": \"query\",\n \"payload\": \"PAYLOAD\",\n }\n\n\ndef test_build_message_partial(ss):\n assert ss.build_message(id=\"1\", op_type=None, payload=None) == {\"id\": \"1\"}\n assert ss.build_message(id=None, op_type=\"query\", payload=None) == {\"type\": \"query\"}\n assert ss.build_message(id=None, op_type=None, payload=\"PAYLOAD\") == {\n \"payload\": \"PAYLOAD\"\n }\n with pytest.raises(AssertionError):\n ss.build_message(id=None, op_type=None, payload=None)\n\n\n@pytest.mark.parametrize(\n \"transport_ws_protocol,expected_type\",\n ((False, constants.GQL_DATA), (True, constants.GQL_NEXT)),\n)\ndef test_send_execution_result(ss, cc, transport_ws_protocol, expected_type):\n cc.transport_ws_protocol = transport_ws_protocol\n ss.execution_result_to_dict = mock.Mock()\n ss.execution_result_to_dict.return_value = {\"res\": \"ult\"}\n ss.send_message = mock.Mock()\n ss.send_message.return_value = \"returned\"\n assert \"returned\" == ss.send_execution_result(cc, \"1\", \"result\")\n ss.send_message.assert_called_with(cc, \"1\", expected_type, {\"res\": \"ult\"})\n\n\ndef test_execution_result_to_dict(ss):\n result = mock.Mock()\n result.data = \"DATA\"\n result.errors = \"ER\"\n result_dict = ss.execution_result_to_dict(result)\n assert isinstance(result_dict, OrderedDict)\n assert result_dict == {\n \"data\": \"DATA\",\n \"errors\": [{\"message\": \"E\"}, {\"message\": \"R\"}],\n }\n\n\ndef test_send_message(ss, cc):\n ss.build_message = mock.Mock()\n ss.build_message.return_value = {\"mess\": \"age\"}\n cc.send = mock.Mock()\n cc.send.return_value = \"returned\"\n assert \"returned\" == ss.send_message(cc)\n cc.send.assert_called_with({\"mess\": \"age\"})\n\n\nclass TestSSNotImplemented:\n def test_handle(self, ss):\n with pytest.raises(NotImplementedError):\n ss.handle(ws=None, request_context=None)\n", "id": "112130", "language": "Python", "matching_score": 4.369595050811768, "max_stars_count": 7, "path": "tests/test_graphql_ws.py" }, { "content": "GRAPHQL_WS = \"graphql-ws\"\nWS_PROTOCOL = GRAPHQL_WS\nTRANSPORT_WS_PROTOCOL = \"graphql-transport-ws\"\n\nGQL_CONNECTION_INIT = \"connection_init\" # Client -> Server\nGQL_CONNECTION_ACK = \"connection_ack\" # Server -> Client\nGQL_CONNECTION_ERROR = \"connection_error\" # Server -> Client\n\n# NOTE: This one here don't follow the standard due to connection 
optimization\nGQL_CONNECTION_TERMINATE = \"connection_terminate\" # Client -> Server\nGQL_CONNECTION_KEEP_ALIVE = \"ka\" # Server -> Client\nGQL_START = \"start\" # Client -> Server (graphql-ws)\nGQL_SUBSCRIBE = \"subscribe\" # Client -> Server (graphql-transport-ws START equivalent)\nGQL_DATA = \"data\" # Server -> Client (graphql-ws)\nGQL_NEXT = \"next\" # Server -> Client (graphql-transport-ws DATA equivalent)\nGQL_ERROR = \"error\" # Server -> Client\nGQL_COMPLETE = \"complete\" # Server -> Client\n# (and Client -> Server for graphql-transport-ws STOP equivalent)\nGQL_STOP = \"stop\" # Client -> Server (graphql-ws only)\n", "id": "1746842", "language": "Python", "matching_score": 0.15567399561405182, "max_stars_count": 7, "path": "graphql_ws/constants.py" }, { "content": "from django.core.cache import cache, caches\n\nfrom .conf import MAIL_TOOLBAR_CACHE_KEY, MAIL_TOOLBAR_TTL\n\n# Use local memory cache if default cache is a DummyCache\nif caches.settings.get('default', {}).get('BACKEND', '').endswith('.DummyCache'):\n caches.settings['mail_panel'] = {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'mail-panel',\n }\n cache = caches.create_connection('mail_panel')\n\n\ndef load_outbox():\n \"\"\"\n Returns a dictionary of cached mail.\n \"\"\"\n return cache.get(MAIL_TOOLBAR_CACHE_KEY, {})\n\ndef save_outbox(outbox):\n \"\"\"\n Saves the dictionary of cached mail and sets expiry.\n \"\"\"\n cache.set(MAIL_TOOLBAR_CACHE_KEY, outbox, MAIL_TOOLBAR_TTL)\n \ndef clear_outbox():\n \"\"\"\n Utility function to clear the dictionary of cached mail. Typical use case: Starting a new real-human test session.\n \"\"\"\n cache.set(MAIL_TOOLBAR_CACHE_KEY, {})\n", "id": "6329345", "language": "Python", "matching_score": 0.7589395046234131, "max_stars_count": 0, "path": "mail_panel/utils.py" }, { "content": "import re\nfrom unittest import TestCase\n\nfrom embed_video.backends import VideoBackend, detect_backend\n\n\nclass CustomBackend(VideoBackend):\n re_detect = re.compile(r'http://myvideo\\.com/[0-9]+')\n re_code = re.compile(r'http://myvideo\\.com/(?P<code>[0-9]+)')\n\n pattern_url = '{protocol}://play.myvideo.com/c/{code}/'\n pattern_thumbnail_url = '{protocol}://thumb.myvideo.com/c/{code}/'\n\n\nclass CustomBackendTestCase(TestCase):\n def setUp(self):\n self.backend = detect_backend('http://myvideo.com/1530')\n\n def test_detect_backend(self):\n self.assertIsInstance(self.backend, CustomBackend)\n\n def test_code(self):\n self.assertEqual(self.backend.code, '1530')\n\n def test_url(self):\n self.assertEqual(self.backend.get_url(),\n 'http://play.myvideo.com/c/1530/')\n\n def test_url_https(self):\n self.backend.is_secure = True\n self.assertEqual(self.backend.get_url(),\n 'https://play.myvideo.com/c/1530/')\n\n def test_thumbnail(self):\n self.assertEqual(self.backend.get_thumbnail_url(),\n 'http://thumb.myvideo.com/c/1530/')\n", "id": "3059315", "language": "Python", "matching_score": 2.0664918422698975, "max_stars_count": 1, "path": "embed_video/tests/backends/tests_custom_backend.py" }, { "content": "from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom django.forms import ValidationError\n\nfrom ..fields import EmbedVideoField, EmbedVideoFormField\nfrom ..backends import UnknownBackendException, UnknownIdException, \\\n YoutubeBackend\n\n\nclass EmbedVideoFieldTestCase(TestCase):\n def setUp(self):\n self.field = EmbedVideoField()\n\n def test_formfield_form_class(self):\n self.assertIsInstance(self.field.formfield(),\n 
EmbedVideoFormField)\n\n\nclass EmbedVideoFormFieldTestCase(TestCase):\n def setUp(self):\n self.formfield = EmbedVideoFormField()\n\n def test_validation_unknown_backend(self):\n with patch('embed_video.fields.detect_backend') as mock_detect_backend:\n mock_detect_backend.return_value = True\n mock_detect_backend.side_effect = UnknownBackendException\n self.assertRaises(ValidationError, self.formfield.validate,\n ('http://youtube.com/v/123/',))\n\n def test_validation_unknown_id(self):\n with patch('embed_video.fields.detect_backend') as mock_detect_backend:\n mock_detect_backend.return_value = True\n mock_detect_backend.side_effect = UnknownIdException\n self.assertRaises(ValidationError, self.formfield.validate,\n ('http://youtube.com/v/123/',))\n\n def test_validation_correct(self):\n url = 'http://www.youtube.com/watch?v=gauN0gzxTcU'\n with patch('embed_video.fields.detect_backend') as mock_detect_backend:\n mock_detect_backend.return_value = YoutubeBackend(url)\n self.assertEqual(url, self.formfield.validate(url))\n\n def test_validation_unknown_code(self):\n url = 'http://www.youtube.com/edit?abcd=abcd'\n self.assertRaises(ValidationError, self.formfield.validate, url)\n\n def test_validation_super(self):\n self.assertRaises(ValidationError, self.formfield.validate, '')\n\n def test_validation_allowed_empty(self):\n formfield = EmbedVideoFormField(required=False)\n self.assertIsNone(formfield.validate(''))\n", "id": "10486998", "language": "Python", "matching_score": 3.3627398014068604, "max_stars_count": 0, "path": "embed_video/tests/tests_fields.py" }, { "content": "from unittest import TestCase\n\nimport embed_video\n\n\nclass EmbedVideoTestCase(TestCase):\n def test_release(self):\n embed_video.VERSION = ('a', 'b', 'c', 'd')\n self.assertEqual('a.b.c-d', embed_video.get_release())\n\n def test_version(self):\n embed_video.VERSION = ('a', 'b', 'c', 'd')\n self.assertEqual('a.b.c', embed_video.get_version())\n", "id": "5308870", "language": "Python", "matching_score": 1.2848296165466309, "max_stars_count": 1, "path": "embed_video/tests/tests_init.py" }, { "content": "from django import template\nfrom django.template.loader import render_to_string\nfrom django.test import TestCase\nfrom django.utils.html import escape\nfrom django_navtag.templatetags.navtag import NavNode\n\nBASIC_TEMPLATE = \"\"\"\n{% load navtag %}\n{% nav \"banana\" %}\n{% if nav.apple %}Apple{% endif %}\n{% if nav.banana %}Banana{% endif %}\n\"\"\"\n\nFOR_TEMPLATE = \"\"\"\n{% load navtag %}\n{% nav \"banana\" for othernav %}\n{% if othernav.apple %}Apple{% endif %}\n{% if othernav.banana %}Banana{% endif %}\n\"\"\"\n\n\nclass NavTagTest(TestCase):\n def test_basic(self):\n t = template.Template(BASIC_TEMPLATE)\n content = t.render(template.Context()).strip()\n self.assertNotIn(\"Apple\", content)\n self.assertIn(\"Banana\", content)\n\n def test_for(self):\n t = template.Template(FOR_TEMPLATE)\n content = t.render(template.Context()).strip()\n self.assertNotIn(\"Apple\", content)\n self.assertIn(\"Banana\", content)\n\n def test_basic_extends(self):\n content = render_to_string(\"navtag_tests/home.txt\").strip()\n self.assertIn(\"- Home (active)\", content)\n self.assertNotIn(\"- Contact (active)\", content)\n\n content = render_to_string(\"navtag_tests/contact.txt\").strip()\n self.assertNotIn(\"- Home (active)\", content)\n self.assertIn(\"- Contact (active)\", content)\n\n def test_unset(self):\n content = render_to_string(\"navtag_tests/home.txt\").strip()\n self.assertIn(\"- Home (active)\", content)\n 
self.assertNotIn(\"- Contact (active)\", content)\n\n content = render_to_string(\"navtag_tests/home-unset.txt\").strip()\n self.assertNotIn(\"- Home (active)\", content)\n self.assertNotIn(\"- Contact (active)\", content)\n\n def test_heirarchical(self):\n content = render_to_string(\"navtag_tests/submenu/home.txt\").strip()\n self.assertIn(\"- Home (active)\", content)\n self.assertNotIn(\"- Fruit (active)\", content)\n self.assertNotIn(\" - Apple (active)\", content)\n self.assertNotIn(\" - Banana (active)\", content)\n\n content = render_to_string(\"navtag_tests/submenu/base_fruit.txt\").strip()\n self.assertNotIn(\"- Home (active)\", content)\n self.assertIn(\"- Fruit (active)\", content)\n self.assertNotIn(\" - Apple (active)\", content)\n self.assertNotIn(\" - Banana (active)\", content)\n\n content = render_to_string(\"navtag_tests/submenu/apple.txt\").strip()\n self.assertNotIn(\"- Home (active)\", content)\n self.assertIn(\"- Fruit (active)\", content)\n self.assertIn(\" - Apple (active)\", content)\n self.assertNotIn(\" - Banana (active)\", content)\n\n content = render_to_string(\"navtag_tests/submenu/banana.txt\").strip()\n self.assertNotIn(\"- Home (active)\", content)\n self.assertIn(\"- Fruit (active)\", content)\n self.assertNotIn(\" - Apple (active)\", content)\n self.assertIn(\" - Banana (active)\", content)\n\n def test_top_context(self):\n content = render_to_string(\"navtag_tests/context/home.txt\").strip()\n self.assertIn(\"- Home (active)\", content)\n self.assertIn(\"HOME\", content)\n\n def test_repr(self):\n node = NavNode()\n self.assertEqual(repr(node), \"<Nav node>\")\n\n def test_invalid_args(self):\n self.assertRaises(\n template.TemplateSyntaxError,\n template.Template,\n \"\"\"{% load navtag %}{% nav 'test' unexpected %}\"\"\",\n )\n\n def test_backwards_compatible_empty_tag(self):\n content = template.Template(\"{% load navtag %}{% nav %}\").render(\n template.Context()\n )\n self.assertEqual(content, \"\")\n\n content = template.Template(\"{% load navtag %}{% nav for sidenav %}\").render(\n template.Context()\n )\n self.assertEqual(content, \"\")\n\n def test_yell_if_context_variable_changed(self):\n t = template.Template('{% load navtag %}{% nav \"test\" %}{{ nav }}')\n c = template.Context({\"nav\": \"anything\"})\n c.update({\"nav\": \"test\"})\n self.assertRaises(template.TemplateSyntaxError, t.render, c)\n\n def test_nav_text(self):\n content = template.Template('{% load navtag %}{% nav text \"THIS\" %}').render(\n template.Context()\n )\n self.assertEqual(content, \"\")\n\n def test_nav_text_none(self):\n content = render_to_string(\"navtag_tests/text/base.txt\").strip()\n self.assertEqual(content, \"- Home\\n- Contact\")\n\n def test_nav_text_set(self):\n content = render_to_string(\"navtag_tests/text/home.txt\").strip()\n self.assertIn(\"Home [is active]\", content)\n self.assertNotIn(\"Contact [is active]\", content)\n\n content = render_to_string(\"navtag_tests/text/contact.txt\").strip()\n self.assertNotIn(\"Home [is active]\", content)\n self.assertIn(\"Contact [is active]\", content)\n\n def test_nav_default_text(self):\n content = (\n template.Template(\n '{% load navtag %}{% nav \"fruit\" %}{{ nav.fruit }}'\n ).render(template.Context(autoescape=False))\n ).strip()\n self.assertEqual(content, \"True\")\n\n content = (\n template.Template(\n '{% load navtag %}{% nav \"fruit.banana\" %}{{ nav.fruit }}'\n ).render(template.Context(autoescape=False))\n ).strip()\n self.assertEqual(content, \"{'banana': True}\")\n\n def test_escaping(self):\n 
content = (\n template.Template(\n \"\"\"{% load navtag %}{% nav text ' class=\"active\"' %}\"\"\"\n \"<p{{ nav }}>{{ name }}</p>\"\n ).render(template.Context({\"name\": \"Mc'D\"}))\n ).strip()\n escaped = escape(\"Mc'D\")\n self.assertEqual(content, \"\"\"<p class=\"active\">%s</p>\"\"\" % escaped)\n", "id": "2739912", "language": "Python", "matching_score": 2.432030439376831, "max_stars_count": 17, "path": "django_navtag/tests/test_navtag.py" }, { "content": "import re\nimport urllib.parse as urlparse\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom django.template import TemplateSyntaxError\nfrom django.http import HttpRequest\nfrom django.template.base import Template\nfrom django.template.context import RequestContext\nfrom django.test.client import RequestFactory\n\nfrom embed_video.templatetags.embed_video_tags import VideoNode\n\nURL_PATTERN = re.compile(r'src=\"?\\'?([^\"\\'>]*)\"')\n\n\nclass EmbedTestCase(TestCase):\n def render_template(self, template_string, context=None):\n response = RequestContext(HttpRequest(), context)\n return Template(template_string).render(response).strip()\n\n def assertRenderedTemplate(self, template_string, output, context=None):\n rendered_output = self.render_template(template_string, context=context)\n self.assertEqual(rendered_output, output.strip())\n\n def url_dict(self, url):\n \"\"\"\n Parse the URL into a format suitable for comparison, ignoring the query\n parameter order.\n \"\"\"\n\n parsed = urlparse.urlparse(url)\n query = urlparse.parse_qs(parsed.query)\n\n return {\n 'scheme': parsed.scheme,\n 'netloc': parsed.netloc,\n 'path': parsed.path,\n 'params': parsed.params,\n 'query': query,\n 'fragment': parsed.fragment,\n }\n\n def assertUrlEqual(self, actual, expected, msg=None):\n \"\"\"Assert two URLs are equal, ignoring the query parameter order.\"\"\"\n actual_dict = self.url_dict(actual)\n expected_dict = self.url_dict(expected)\n\n self.assertEqual(actual_dict, expected_dict, msg=msg)\n\n def test_embed(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' as ytb %}\n {% video ytb 'large' %}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"960\" height=\"720\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_embed_invalid_url(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/edit?abcd=efgh' as ytb %}\n {{ ytb.url }}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(template, '')\n\n def test_embed_with_none_instance(self):\n template = \"\"\"\n {% with None as my_video %}\n {% load embed_video_tags %}\n {% video my_video %}{% endwith %}\n \"\"\"\n self.assertRenderedTemplate(template, '')\n\n def test_embed_empty_string(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video '' 'large' %}\n \"\"\"\n self.assertRenderedTemplate(template, '')\n\n def test_direct_embed_tag(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video \"http://www.youtube.com/watch?v=jsrRJyHBvzw\" \"large\" %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"960\" height=\"720\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_direct_embed_tag_with_default_size(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 
\"http://www.youtube.com/watch?v=jsrRJyHBvzw\" %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"480\" height=\"360\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_direct_embed_invalid_url(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video \"https://soundcloud.com/xyz/foo\" %}\n \"\"\"\n self.assertRenderedTemplate(template, '')\n\n def test_user_size(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' as ytb %}\n {% video ytb '800x800' %}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"800\" height=\"800\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_wrong_size(self):\n template = Template(\"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' 'so x huge' %}\n \"\"\")\n request = RequestContext(HttpRequest())\n self.assertRaises(TemplateSyntaxError, template.render, request)\n\n def test_tag_youtube(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' as ytb %}\n {{ ytb.url }} {{ ytb.backend }}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n 'https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque '\n 'YoutubeBackend'\n )\n\n def test_tag_vimeo(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'https://vimeo.com/72304002' as vimeo %}\n {{ vimeo.url }} {{ vimeo.backend }} {{ vimeo.info.duration }}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(\n template, 'https://player.vimeo.com/video/72304002 VimeoBackend 176'\n )\n\n def test_tag_soundcloud(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'https://soundcloud.com/community/soundcloud-case-study-wildlife' as soundcloud %}\n {{ soundcloud.url }} {{ soundcloud.backend }}\n {% endvideo %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n 'https://w.soundcloud.com/player/?visual=true&amp;url=https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F82244706&amp;show_artwork=true '\n 'SoundCloudBackend'\n )\n\n @patch('embed_video.backends.EMBED_VIDEO_TIMEOUT', 0.000001)\n @patch('urllib3.connectionpool.log')\n @patch('embed_video.templatetags.embed_video_tags.logger')\n def test_empty_if_timeout(self, embed_video_logger, urllib_logger):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video \"http://vimeo.com/72304002\" as my_video %}\n {{ my_video.thumbnail }}\n {% endvideo %}\n \"\"\"\n\n self.assertRenderedTemplate(template, '')\n\n urllib_logger.debug.assert_called_with(\n 'Starting new HTTPS connection (%d): %s:%s',\n 1, 'vimeo.com', 443,\n )\n\n embed_video_logger.exception.assert_called_with(\n 'Timeout reached during rendering embed video (`http://vimeo.com/72304002`)',\n )\n\n def test_relative_size(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video \"http://vimeo.com/72304002\" \"80%x30%\" %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"80%\" height=\"30%\" '\n 'src=\"https://player.vimeo.com/video/72304002\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_allow_spaces_in_size(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video \"http://vimeo.com/72304002\" \"80% x 300\" %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"80%\" height=\"300\" '\n 
'src=\"https://player.vimeo.com/video/72304002\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_embed_with_query(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' query=\"rel=1&wmode=transparent\" as ytb %}\n {{ ytb.url }}\n {% endvideo %}\n \"\"\"\n\n output = self.render_template(template)\n self.assertUrlEqual(\n output,\n 'https://www.youtube.com/embed/jsrRJyHBvzw?rel=1&wmode=transparent'\n )\n\n def test_direct_embed_with_query(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' query=\"rel=1&wmode=transparent\" %}\n \"\"\"\n\n output = self.render_template(template)\n\n # The order of query parameters in the URL might change between Python\n # versions. Compare the URL and the outer part separately.\n\n url_pattern = re.compile(r'http[^\"]+')\n url = url_pattern.search(output).group(0)\n\n self.assertUrlEqual(\n url,\n 'https://www.youtube.com/embed/jsrRJyHBvzw?rel=1&wmode=transparent'\n )\n\n output_without_url = url_pattern.sub('URL', output)\n\n self.assertEqual(\n output_without_url,\n '<iframe width=\"480\" height=\"360\" '\n 'src=\"URL\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_set_options(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' \"300x200\" is_secure=True query=\"rel=1\" %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"300\" height=\"200\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?rel=1\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n def test_size_as_variable(self):\n template = \"\"\"\n {% load embed_video_tags %}\n {% with size=\"500x200\" %}\n {% video 'http://www.youtube.com/watch?v=jsrRJyHBvzw' size %}\n {% endwith %}\n \"\"\"\n self.assertRenderedTemplate(\n template,\n '<iframe width=\"500\" height=\"200\" '\n 'src=\"https://www.youtube.com/embed/jsrRJyHBvzw?wmode=opaque\" '\n 'frameborder=\"0\" allowfullscreen></iframe>'\n )\n\n\nclass EmbedVideoNodeTestCase(TestCase):\n def setUp(self):\n self.parser = Mock()\n self.token = Mock(methods=['split_contents'])\n\n def test_repr(self):\n self.token.split_contents.return_value = (\n 'video', 'http://youtu.be/v/1234', 'as', 'myvideo'\n )\n self.parser.compile_filter.return_value = u'some_url'\n\n node = VideoNode(self.parser, self.token)\n self.assertEqual(str(node), '<VideoNode \"some_url\">')\n\n def test_videonode_iter(self):\n out = ['a', 'b', 'c', 'd']\n\n class FooNode(VideoNode):\n nodelist_file = out\n\n def __init__(self):\n pass\n\n node = FooNode()\n self.assertEqual(out, [x for x in node])\n\n def test_get_backend_secure(self):\n class SecureRequest(RequestFactory):\n is_secure = lambda x: True\n\n context = {'request': SecureRequest()}\n backend = VideoNode.get_backend('http://www.youtube.com/watch?v=jsrRJyHBvzw', context)\n self.assertTrue(backend.is_secure)\n\n def test_get_backend_insecure(self):\n class InsecureRequest(RequestFactory):\n is_secure = lambda x: False\n\n context = {'request': InsecureRequest()}\n backend = VideoNode.get_backend('http://www.youtube.com/watch?v=jsrRJyHBvzw', context)\n self.assertFalse(backend.is_secure)\n", "id": "8425663", "language": "Python", "matching_score": 4.239843845367432, "max_stars_count": 0, "path": "embed_video/tests/templatetags/tests_embed_video_tags.py" }, { "content": "from django.conf import settings\n\n\nEMBED_VIDEO_BACKENDS = getattr(settings, 'EMBED_VIDEO_BACKENDS', (\n 
'embed_video.backends.YoutubeBackend',\n 'embed_video.backends.VimeoBackend',\n 'embed_video.backends.SoundCloudBackend',\n))\n\"\"\" :type: tuple[str] \"\"\"\n\nEMBED_VIDEO_TIMEOUT = getattr(settings, 'EMBED_VIDEO_TIMEOUT', 10)\n\"\"\" :type: int \"\"\"\n\nEMBED_VIDEO_YOUTUBE_DEFAULT_QUERY = \\\n getattr(settings, 'EMBED_VIDEO_YOUTUBE_DEFAULT_QUERY', 'wmode=opaque')\n\"\"\" :type: django.db.models.QuerySet | str \"\"\"\n", "id": "3185572", "language": "Python", "matching_score": 1.0140340328216553, "max_stars_count": 0, "path": "embed_video/settings.py" }, { "content": "import pytest\nimport datetime\nfrom decimal import Decimal\nfrom unittest.mock import patch\n\nfrom django.db.models import Manager\nfrom django.db.models.signals import m2m_changed\n\nfrom model_bakery import baker\nfrom model_bakery import random_gen\nfrom model_bakery.exceptions import (\n ModelNotFound,\n AmbiguousModelName,\n InvalidQuantityException,\n)\nfrom model_bakery.timezone import smart_datetime\n\nfrom tests.generic import models\nfrom tests.generic.forms import DummyGenericIPAddressFieldForm\n\n\nclass TestsModelFinder:\n def test_unicode_regression(self):\n obj = baker.prepare(\"generic.Person\")\n assert isinstance(obj, models.Person)\n\n def test_model_class(self):\n obj = baker.prepare(models.Person)\n assert isinstance(obj, models.Person)\n\n def test_app_model_string(self):\n obj = baker.prepare(\"generic.Person\")\n assert isinstance(obj, models.Person)\n\n def test_model_string(self):\n obj = baker.prepare(\"Person\")\n assert isinstance(obj, models.Person)\n\n def test_raise_on_ambiguous_model_string(self):\n with pytest.raises(AmbiguousModelName):\n baker.prepare(\"Ambiguous\")\n\n def test_raise_model_not_found(self):\n with pytest.raises(ModelNotFound):\n baker.Baker(\"non_existing.Model\")\n\n with pytest.raises(ModelNotFound):\n baker.Baker(\"NonExistingModel\")\n\n\n@pytest.mark.django_db\nclass TestsBakerCreatesSimpleModel:\n def test_consider_real_django_fields_only(self):\n id_ = models.ModelWithImpostorField._meta.get_field(\"id\")\n with patch.object(baker.Baker, \"get_fields\") as mock:\n f = Manager()\n f.name = \"foo\"\n mock.return_value = [id_, f]\n try:\n baker.make(models.ModelWithImpostorField)\n except TypeError:\n assert False, \"TypeError raised\"\n\n def test_make_should_create_one_object(self):\n person = baker.make(models.Person)\n assert isinstance(person, models.Person)\n\n # makes sure it is the person we created\n assert models.Person.objects.filter(id=person.id).exists()\n\n def test_prepare_should_not_persist_one_object(self):\n person = baker.prepare(models.Person)\n assert isinstance(person, models.Person)\n\n # makes sure database is clean\n assert not models.Person.objects.all().exists()\n assert person.id is None\n\n def test_non_abstract_model_creation(self):\n person = baker.make(models.NonAbstractPerson, name=\"bob\", happy=False)\n assert isinstance(person, models.NonAbstractPerson)\n assert \"bob\" == person.name\n assert person.happy is False\n\n def test_abstract_model_subclass_creation(self):\n instance = baker.make(models.SubclassOfAbstract)\n assert isinstance(instance, models.SubclassOfAbstract)\n assert isinstance(instance, models.AbstractModel)\n assert isinstance(instance.name, type(u\"\"))\n assert len(instance.name) == 30\n assert isinstance(instance.height, int)\n\n def test_multiple_inheritance_creation(self):\n multiple = baker.make(models.DummyMultipleInheritanceModel)\n assert isinstance(multiple, models.DummyMultipleInheritanceModel)\n 
assert models.Person.objects.filter(id=multiple.id).exists()\n assert models.DummyDefaultFieldsModel.objects.filter(\n default_id=multiple.default_id\n ).exists()\n\n\n@pytest.mark.django_db\nclass TestsBakerRepeatedCreatesSimpleModel:\n def test_make_should_create_objects_respecting_quantity_parameter(self):\n people = baker.make(models.Person, _quantity=5)\n assert models.Person.objects.count() == 5\n\n people = baker.make(models.Person, _quantity=5, name=\"<NAME>\")\n assert all(p.name == \"<NAME>\" for p in people)\n\n def test_make_raises_correct_exception_if_invalid_quantity(self):\n with pytest.raises(InvalidQuantityException):\n baker.make(_model=models.Person, _quantity=\"hi\")\n with pytest.raises(InvalidQuantityException):\n baker.make(_model=models.Person, _quantity=-1)\n with pytest.raises(InvalidQuantityException):\n baker.make(_model=models.Person, _quantity=0)\n\n def test_prepare_should_create_objects_respecting_quantity_parameter(self):\n people = baker.prepare(models.Person, _quantity=5)\n assert len(people) == 5\n assert all(not p.id for p in people)\n\n people = baker.prepare(models.Person, _quantity=5, name=\"<NAME>\")\n assert all(p.name == \"<NAME>\" for p in people)\n\n def test_prepare_raises_correct_exception_if_invalid_quantity(self):\n with pytest.raises(InvalidQuantityException):\n baker.prepare(_model=models.Person, _quantity=\"hi\")\n with pytest.raises(InvalidQuantityException):\n baker.prepare(_model=models.Person, _quantity=-1)\n with pytest.raises(InvalidQuantityException):\n baker.prepare(_model=models.Person, _quantity=0)\n\n\n@pytest.mark.django_db\nclass TestBakerPrepareSavingRelatedInstances:\n def test_default_behaviour_for_and_fk(self):\n dog = baker.prepare(models.Dog)\n\n assert dog.pk is None\n assert dog.owner.pk is None\n with pytest.raises(ValueError):\n dog.friends_with\n\n def test_create_fk_instances(self):\n dog = baker.prepare(models.Dog, _save_related=True)\n\n assert dog.pk is None\n assert dog.owner.pk\n with pytest.raises(ValueError):\n dog.friends_with\n\n def test_create_fk_instances_with_quantity(self):\n dog1, dog2 = baker.prepare(models.Dog, _save_related=True, _quantity=2)\n\n assert dog1.pk is None\n assert dog1.owner.pk\n with pytest.raises(ValueError):\n dog1.friends_with\n\n assert dog2.pk is None\n assert dog2.owner.pk\n with pytest.raises(ValueError):\n dog2.friends_with\n\n def test_create_one_to_one(self):\n lonely_person = baker.prepare(models.LonelyPerson, _save_related=True)\n\n assert lonely_person.pk is None\n assert lonely_person.only_friend.pk\n\n\n@pytest.mark.django_db\nclass TestBakerCreatesAssociatedModels:\n def test_dependent_models_with_ForeignKey(self):\n dog = baker.make(models.Dog)\n assert isinstance(dog.owner, models.Person)\n\n def test_foreign_key_on_parent_should_create_one_object(self):\n person_count = models.Person.objects.count()\n baker.make(models.GuardDog)\n assert models.Person.objects.count() == person_count + 1\n\n def test_foreign_key_on_parent_is_not_created(self):\n \"\"\"Foreign key on parent doesn't get created using owner.\"\"\"\n owner = baker.make(models.Person)\n person_count = models.Person.objects.count()\n dog = baker.make(models.GuardDog, owner=owner)\n assert models.Person.objects.count() == person_count\n assert dog.owner == owner\n\n def test_foreign_key_on_parent_id_is_not_created(self):\n \"\"\"Foreign key on parent doesn't get created using owner_id.\"\"\"\n owner = baker.make(models.Person)\n person_count = models.Person.objects.count()\n dog = 
baker.make(models.GuardDog, owner_id=owner.id)\n assert models.Person.objects.count() == person_count\n assert models.GuardDog.objects.get(pk=dog.pk).owner == owner\n\n def test_auto_now_add_on_parent_should_work(self):\n person_count = models.Person.objects.count()\n dog = baker.make(models.GuardDog)\n assert models.Person.objects.count() == person_count + 1\n assert dog.created\n\n def test_attrs_on_related_model_through_parent(self):\n baker.make(models.GuardDog, owner__name=\"john\")\n for person in models.Person.objects.all():\n assert person.name == \"john\"\n\n def test_access_related_name_of_m2m(self):\n try:\n baker.make(models.Person, classroom_set=[baker.make(models.Classroom)])\n except TypeError:\n assert False, \"type error raised\"\n\n def test_save_object_instances_when_handling_one_to_many_relations(self):\n owner = baker.make(models.Person)\n dogs_set = baker.prepare(models.Dog, owner=owner, _quantity=2,)\n\n assert 0 == models.Dog.objects.count() # ensure there're no dogs in our db\n home = baker.make(models.Home, owner=owner, dogs=dogs_set,)\n assert home.dogs.count() == 2\n assert 2 == models.Dog.objects.count() # dogs in dogs_set were created\n\n def test_prepare_fk(self):\n dog = baker.prepare(models.Dog)\n assert isinstance(dog, models.Dog)\n assert isinstance(dog.owner, models.Person)\n\n assert models.Person.objects.all().count() == 0\n assert models.Dog.objects.all().count() == 0\n\n def test_create_one_to_one(self):\n lonely_person = baker.make(models.LonelyPerson)\n\n assert models.LonelyPerson.objects.all().count() == 1\n assert isinstance(lonely_person.only_friend, models.Person)\n assert models.Person.objects.all().count() == 1\n\n def test_create_many_to_many_if_flagged(self):\n store = baker.make(models.Store, make_m2m=True)\n assert store.employees.count() == 5\n assert store.customers.count() == 5\n\n def test_regresstion_many_to_many_field_is_accepted_as_kwargs(self):\n employees = baker.make(models.Person, _quantity=3)\n customers = baker.make(models.Person, _quantity=3)\n\n store = baker.make(models.Store, employees=employees, customers=customers)\n\n assert store.employees.count() == 3\n assert store.customers.count() == 3\n assert models.Person.objects.count() == 6\n\n def test_create_many_to_many_with_set_default_quantity(self):\n store = baker.make(models.Store, make_m2m=True)\n assert store.employees.count() == baker.MAX_MANY_QUANTITY\n assert store.customers.count() == baker.MAX_MANY_QUANTITY\n\n def test_create_many_to_many_with_through_option(self):\n # School student's attr is a m2m relationship with a model through\n school = baker.make(models.School, make_m2m=True)\n assert models.School.objects.count() == 1\n assert school.students.count() == baker.MAX_MANY_QUANTITY\n assert models.SchoolEnrollment.objects.count() == baker.MAX_MANY_QUANTITY\n assert models.Person.objects.count() == baker.MAX_MANY_QUANTITY\n\n def test_does_not_create_many_to_many_as_default(self):\n store = baker.make(models.Store)\n assert store.employees.count() == 0\n assert store.customers.count() == 0\n\n def test_does_not_create_nullable_many_to_many_for_relations(self):\n classroom = baker.make(models.Classroom, make_m2m=False)\n assert classroom.students.count() == 0\n\n def test_nullable_many_to_many_is_not_created_even_if_flagged(self):\n classroom = baker.make(models.Classroom, make_m2m=True)\n assert not classroom.students.count()\n\n def test_m2m_changed_signal_is_fired(self):\n # TODO: Use object attrs instead of mocks for Django 1.4 compat\n 
self.m2m_changed_fired = False\n\n def test_m2m_changed(*args, **kwargs):\n self.m2m_changed_fired = True\n\n m2m_changed.connect(test_m2m_changed, dispatch_uid=\"test_m2m_changed\")\n baker.make(models.Store, make_m2m=True)\n assert self.m2m_changed_fired\n\n def test_simple_creating_person_with_parameters(self):\n kid = baker.make(models.Person, happy=True, age=10, name=\"Mike\")\n assert kid.age == 10\n assert kid.happy is True\n assert kid.name == \"Mike\"\n\n def test_creating_person_from_factory_using_paramters(self):\n person_baker_ = baker.Baker(models.Person)\n person = person_baker_.make(happy=False, age=20, gender=\"M\", name=\"John\")\n assert person.age == 20\n assert person.happy is False\n assert person.name == \"John\"\n assert person.gender == \"M\"\n\n def test_ForeignKey_model_field_population(self):\n dog = baker.make(models.Dog, breed=\"X1\", owner__name=\"Bob\")\n assert \"X1\" == dog.breed\n assert \"Bob\" == dog.owner.name\n\n def test_ForeignKey_model_field_population_should_work_with_prepare(self):\n dog = baker.prepare(models.Dog, breed=\"X1\", owner__name=\"Bob\")\n assert \"X1\" == dog.breed\n assert \"Bob\" == dog.owner.name\n\n def test_ForeignKey_model_field_population_for_not_required_fk(self):\n user = baker.make(models.User, profile__email=\"<EMAIL>\")\n assert \"<EMAIL>\" == user.profile.email\n\n def test_does_not_creates_null_ForeignKey(self):\n user = baker.make(models.User)\n assert not user.profile\n\n def test_passing_m2m_value(self):\n store = baker.make(models.Store, customers=[baker.make(models.Person)])\n assert store.customers.count() == 1\n\n def test_ensure_recursive_ForeignKey_population(self):\n bill = baker.make(models.PaymentBill, user__profile__email=\"<EMAIL>\")\n assert \"<EMAIL>\" == bill.user.profile.email\n\n def test_field_lookup_for_m2m_relationship(self):\n store = baker.make(models.Store, suppliers__gender=\"M\")\n suppliers = store.suppliers.all()\n assert suppliers\n for supplier in suppliers:\n assert \"M\" == supplier.gender\n\n def test_field_lookup_for_one_to_one_relationship(self):\n lonely_person = baker.make(models.LonelyPerson, only_friend__name=\"Bob\")\n assert \"Bob\" == lonely_person.only_friend.name\n\n def test_allow_create_fkey_related_model(self):\n try:\n person = baker.make(\n models.Person, dog_set=[baker.make(models.Dog), baker.make(models.Dog)]\n )\n except TypeError:\n assert False, \"type error raised\"\n\n assert person.dog_set.count() == 2\n\n def test_field_lookup_for_related_field(self):\n person = baker.make(\n models.Person, one_related__name=\"Foo\", fk_related__name=\"Bar\",\n )\n\n assert person.pk\n assert person.one_related.pk\n assert 1, person.fk_related.count()\n assert \"Foo\" == person.one_related.name\n assert \"Bar\" == person.fk_related.get().name\n\n def test_field_lookup_for_related_field_does_not_work_with_prepare(self):\n person = baker.prepare(\n models.Person, one_related__name=\"Foo\", fk_related__name=\"Bar\",\n )\n\n assert not person.pk\n assert 0 == models.RelatedNamesModel.objects.count()\n\n\n@pytest.mark.django_db\nclass TestHandlingUnsupportedModels:\n def test_unsupported_model_raises_an_explanatory_exception(self):\n try:\n baker.make(models.UnsupportedModel)\n assert False, \"Should have raised a TypeError\"\n except TypeError as e:\n assert \"not supported\" in repr(e)\n\n\n@pytest.mark.django_db\nclass TestHandlingModelsWithGenericRelationFields:\n def test_create_model_with_generic_relation(self):\n dummy = baker.make(models.DummyGenericRelationModel)\n assert 
isinstance(dummy, models.DummyGenericRelationModel)\n\n\n@pytest.mark.django_db\nclass TestHandlingContentTypeField:\n def test_create_model_with_contenttype_field(self):\n dummy = baker.make(models.DummyGenericForeignKeyModel)\n assert isinstance(dummy, models.DummyGenericForeignKeyModel)\n\n\n@pytest.mark.django_db\nclass TestHandlingContentTypeFieldNoQueries:\n def test_create_model_with_contenttype_field(self):\n dummy = baker.prepare(models.DummyGenericForeignKeyModel)\n assert isinstance(dummy, models.DummyGenericForeignKeyModel)\n\n\n@pytest.mark.django_db\nclass TestSkipNullsTestCase:\n def test_skip_null(self):\n dummy = baker.make(models.DummyNullFieldsModel)\n assert dummy.null_foreign_key is None\n assert dummy.null_integer_field is None\n\n\n@pytest.mark.django_db\nclass TestFillNullsTestCase:\n def test_create_nullable_many_to_many_if_flagged_and_fill_field_optional(self):\n classroom = baker.make(\n models.Classroom, make_m2m=True, _fill_optional=[\"students\"]\n )\n assert classroom.students.count() == 5\n\n def test_create_nullable_many_to_many_if_flagged_and_fill_optional(self):\n classroom = baker.make(models.Classroom, make_m2m=True, _fill_optional=True)\n assert classroom.students.count() == 5\n\n def test_nullable_many_to_many_is_not_created_if_not_flagged_and_fill_optional(\n self,\n ):\n classroom = baker.make(models.Classroom, make_m2m=False, _fill_optional=True)\n assert classroom.students.count() == 0\n\n\n@pytest.mark.django_db\nclass TestSkipBlanksTestCase:\n def test_skip_blank(self):\n dummy = baker.make(models.DummyBlankFieldsModel)\n assert dummy.blank_char_field == \"\"\n assert dummy.blank_text_field == \"\"\n\n\n@pytest.mark.django_db\nclass TestFillBlanksTestCase:\n def test_fill_field_optional(self):\n dummy = baker.make(\n models.DummyBlankFieldsModel, _fill_optional=[\"blank_char_field\"]\n )\n assert len(dummy.blank_char_field) == 50\n\n def test_fill_wrong_field(self):\n with pytest.raises(AttributeError) as exc_info:\n baker.make(\n models.DummyBlankFieldsModel,\n _fill_optional=[\"blank_char_field\", \"wrong\"],\n )\n\n msg = \"_fill_optional field(s) ['wrong'] are not related to model DummyBlankFieldsModel\"\n assert msg in str(exc_info.value)\n\n def test_fill_wrong_fields_with_parent(self):\n with pytest.raises(AttributeError):\n baker.make(models.SubclassOfAbstract, _fill_optional=[\"name\", \"wrong\"])\n\n def test_fill_many_optional(self):\n dummy = baker.make(\n models.DummyBlankFieldsModel,\n _fill_optional=[\"blank_char_field\", \"blank_text_field\"],\n )\n assert len(dummy.blank_text_field) == 300\n\n def test_fill_all_optional(self):\n dummy = baker.make(models.DummyBlankFieldsModel, _fill_optional=True)\n assert len(dummy.blank_char_field) == 50\n assert len(dummy.blank_text_field) == 300\n\n def test_fill_optional_with_integer(self):\n with pytest.raises(TypeError):\n baker.make(models.DummyBlankFieldsModel, _fill_optional=1)\n\n\n@pytest.mark.django_db\nclass TestFillAutoFieldsTestCase:\n def test_fill_autofields_with_provided_value(self):\n baker.make(models.DummyEmptyModel, id=237)\n saved_dummy = models.DummyEmptyModel.objects.get()\n assert saved_dummy.id == 237\n\n def test_keeps_prepare_autovalues(self):\n dummy = baker.prepare(models.DummyEmptyModel, id=543)\n assert dummy.id == 543\n dummy.save()\n saved_dummy = models.DummyEmptyModel.objects.get()\n assert saved_dummy.id == 543\n\n\n@pytest.mark.django_db\nclass TestSkipDefaultsTestCase:\n def test_skip_fields_with_default(self):\n dummy = 
baker.make(models.DummyDefaultFieldsModel)\n assert dummy.default_char_field == \"default\"\n assert dummy.default_text_field == \"default\"\n assert dummy.default_int_field == 123\n assert dummy.default_float_field == 123.0\n assert dummy.default_date_field == \"2012-01-01\"\n assert dummy.default_date_time_field == smart_datetime(2012, 1, 1)\n assert dummy.default_time_field == \"00:00:00\"\n assert dummy.default_decimal_field == Decimal(\"0\")\n assert dummy.default_email_field == \"<EMAIL>\"\n assert dummy.default_slug_field == \"a-slug\"\n\n\n@pytest.mark.django_db\nclass TestBakerHandlesModelWithNext:\n def test_creates_instance_for_model_with_next(self):\n instance = baker.make(\n models.BaseModelForNext, fk=baker.make(models.ModelWithNext),\n )\n\n assert instance.id\n assert instance.fk.id\n assert instance.fk.attr\n assert \"foo\" == instance.fk.next()\n\n\n@pytest.mark.django_db\nclass TestBakerHandlesModelWithList:\n def test_creates_instance_for_model_with_list(self):\n instance = baker.make(models.BaseModelForList, fk=[\"foo\"])\n\n assert instance.id\n assert [\"foo\"] == instance.fk\n\n\n@pytest.mark.django_db\nclass TestBakerGeneratesIPAdresses:\n def test_create_model_with_valid_ips(self):\n form_data = {\n \"ipv4_field\": random_gen.gen_ipv4(),\n \"ipv6_field\": random_gen.gen_ipv6(),\n \"ipv46_field\": random_gen.gen_ipv46(),\n }\n assert DummyGenericIPAddressFieldForm(form_data).is_valid()\n\n\n@pytest.mark.django_db\nclass TestBakerAllowsSaveParameters:\n def test_allows_save_kwargs_on_baker_make(self):\n owner = baker.make(models.Person)\n dog = baker.make(models.ModelWithOverridedSave, _save_kwargs={\"owner\": owner})\n assert owner == dog.owner\n\n dog1, dog2 = baker.make(\n models.ModelWithOverridedSave, _save_kwargs={\"owner\": owner}, _quantity=2\n )\n assert owner == dog1.owner\n assert owner == dog2.owner\n\n\n@pytest.mark.django_db\nclass TestBakerAutomaticallyRefreshFromDB:\n def test_refresh_from_db_if_true(self):\n person = baker.make(\n models.Person, birthday=\"2017-02-01\", _refresh_after_create=True\n )\n\n assert person.birthday == datetime.date(2017, 2, 1)\n\n def test_do_not_refresh_from_db_if_false(self):\n person = baker.make(\n models.Person, birthday=\"2017-02-01\", _refresh_after_create=False\n )\n\n assert person.birthday == \"2017-02-01\"\n assert person.birthday != datetime.date(2017, 2, 1)\n\n def test_do_not_refresh_from_db_by_default(self):\n person = baker.make(models.Person, birthday=\"2017-02-01\")\n\n assert person.birthday == \"2017-02-01\"\n assert person.birthday != datetime.date(2017, 2, 1)\n\n\n@pytest.mark.django_db\nclass TestBakerMakeCanFetchInstanceFromDefaultManager:\n def test_annotation_within_manager_get_queryset_are_run_on_make(self):\n \"\"\"A custom model Manager can be used within make().\n\n Passing ``_from_manager='objects'`` will force ``baker.make()``\n to return an instance that has been going through a given\n Manager, thus calling its ``get_queryset()`` method and associated\n code, like default annotations. 
As such the instance will have\n the same fields as one created in the application.\n\n \"\"\"\n movie = baker.make(models.MovieWithAnnotation)\n with pytest.raises(AttributeError):\n movie.name\n\n movie = baker.make(\n models.MovieWithAnnotation, title=\"Old Boy\", _from_manager=\"objects\",\n )\n assert movie.title == movie.name\n", "id": "12678303", "language": "Python", "matching_score": 2.6747610569000244, "max_stars_count": 0, "path": "tests/test_baker.py" }, { "content": "from django import forms\nfrom django.utils.safestring import mark_safe\n\nfrom .backends import detect_backend, UnknownBackendException, \\\n VideoDoesntExistException\nfrom .fields import EmbedVideoField\n\n\nclass AdminVideoWidget(forms.TextInput):\n \"\"\"\n Widget for video input in administration. If empty it works just like\n :py:class:`django.forms.TextInput`. Otherwise it renders embedded video\n together with input field.\n\n .. todo::\n\n Django 1.6 provides better parent for this widget -\n :py:class:`django.forms.URLInput`.\n\n \"\"\"\n\n output_format = u'<div style=\"float:left\" class=\"video\">' \\\n u'{video}<br />{input}</div>' \\\n u'<hr style=\"visibility: hidden; clear:both\">'\n\n def __init__(self, attrs=None):\n \"\"\"\n :type attrs: dict\n \"\"\"\n default_attrs = {'size': '40'}\n\n if attrs:\n default_attrs.update(attrs)\n\n super().__init__(default_attrs)\n\n def render(self, name, value='', attrs=None, size=(420, 315), renderer=None):\n \"\"\"\n :type name: str\n :type attrs: dict\n \"\"\"\n\n output = super().render(name, value, attrs, renderer)\n\n if not value:\n return output\n\n try:\n backend = detect_backend(value)\n return mark_safe(self.output_format.format(\n video=backend.get_embed_code(*size),\n input=output,\n ))\n except (UnknownBackendException, VideoDoesntExistException):\n return output\n\n\nclass AdminVideoMixin:\n \"\"\"\n Mixin using :py:class:`AdminVideoWidget` for fields with\n :py:class:`~embed_video.fields.EmbedVideoField`.\n\n Usage::\n\n from django.contrib import admin\n from embed_video.admin import AdminVideoMixin\n from .models import MyModel\n\n class MyModelAdmin(AdminVideoMixin, admin.ModelAdmin):\n pass\n\n admin.site.register(MyModel, MyModelAdmin)\n\n \"\"\"\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n \"\"\"\n :type db_field: str\n \"\"\"\n if isinstance(db_field, EmbedVideoField):\n return db_field.formfield(widget=AdminVideoWidget)\n\n return super().formfield_for_dbfield(db_field, **kwargs)\n", "id": "12036060", "language": "Python", "matching_score": 3.117006301879883, "max_stars_count": 0, "path": "embed_video/admin.py" }, { "content": "from django.db import models\nfrom django.core.urlresolvers import reverse\n\nfrom embed_video.fields import EmbedVideoField\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=50)\n video = EmbedVideoField(verbose_name='My video',\n help_text='This is a help text')\n\n def __unicode__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('posts:detail', kwargs={'pk': self.pk})\n", "id": "12646554", "language": "Python", "matching_score": 1.8019989728927612, "max_stars_count": 1, "path": "example_project/posts/models.py" }, { "content": "from django.conf.urls import url\n\nfrom .views import PostListView, PostDetailView\n\nurlpatterns = [\n url(r'(?P<pk>\\d+)/$', PostDetailView.as_view(), name='detail'),\n url(r'$', PostListView.as_view(), name='list'),\n]\n", "id": "9137649", "language": "Python", "matching_score": 0.2574909031391144, "max_stars_count": 0, 
"path": "example_project/posts/urls.py" }, { "content": "from __future__ import annotations\n\nimport atexit\nimport contextlib\nimport functools\nimport io\nimport logging\nimport os\nimport sys\nfrom itertools import zip_longest\nfrom tempfile import mktemp\nfrom typing import Any, Callable, Iterator, List, Optional, Sequence, Union\n\nimport click\nfrom click._compat import strip_ansi\n\nfrom pdm._vendor import halo\nfrom pdm._vendor.log_symbols.symbols import is_supported as supports_unicode\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.NullHandler())\n\n\ndef ljust(text: str, length: int) -> str:\n \"\"\"Like str.ljust() but ignore all ANSI controlling characters.\"\"\"\n return text + \" \" * (length - len(strip_ansi(text)))\n\n\ndef rjust(text: str, length: int) -> str:\n \"\"\"Like str.rjust() but ignore all ANSI controlling characters.\"\"\"\n return \" \" * (length - len(strip_ansi(text))) + text\n\n\ndef centerize(text: str, length: int) -> str:\n \"\"\"Centerize the text while ignoring ANSI controlling characters.\"\"\"\n space_num = length - len(strip_ansi(text))\n left_space = space_num // 2\n return \" \" * left_space + text + \" \" * (space_num - left_space)\n\n\ndef supports_ansi() -> bool:\n \"\"\"Check if the current environment supports ANSI colors\"\"\"\n if os.getenv(\"CI\"):\n return False\n stream = sys.stdout\n if not hasattr(stream, \"fileno\"):\n return False\n try:\n return os.isatty(stream.fileno()) # type: ignore\n except io.UnsupportedOperation:\n return False\n\n\n# Export some style shortcut helpers\ngreen = functools.partial(click.style, fg=\"green\")\nred = functools.partial(click.style, fg=\"red\")\nyellow = functools.partial(click.style, fg=\"yellow\")\ncyan = functools.partial(click.style, fg=\"cyan\")\nblue = functools.partial(click.style, fg=\"blue\")\nbold = functools.partial(click.style, bold=True)\n\n# Verbosity levels\nNORMAL = 0\nDETAIL = 1\nDEBUG = 2\n\n\nclass DummySpinner:\n \"\"\"A dummy spinner class implementing needed interfaces.\n But only display text onto screen.\n \"\"\"\n\n def start(self, text: str) -> None:\n click.echo(text)\n\n def stop_and_persist(self, symbol: str = \" \", text: Optional[str] = None) -> None:\n click.echo(symbol + \" \" + (text or \"\"))\n\n succeed = fail = start\n\n text = property(lambda self: \"\", start)\n\n def __enter__(self) -> DummySpinner:\n return self\n\n def __exit__(self, *args: Any) -> None:\n pass\n\n\nclass UI:\n \"\"\"Terminal UI object\"\"\"\n\n def __init__(self, verbosity: int = NORMAL, no_ansi: Optional[bool] = None) -> None:\n self.verbosity = verbosity\n self._indent = \"\"\n self.supports_ansi = not no_ansi if no_ansi is not None else supports_ansi()\n\n def set_verbosity(self, verbosity: int) -> None:\n self.verbosity = verbosity\n\n def echo(\n self,\n message: str = \"\",\n err: bool = False,\n verbosity: int = NORMAL,\n **kwargs: Any,\n ) -> None:\n if self.verbosity >= verbosity:\n click.secho(\n self._indent + str(message), err=err, color=self.supports_ansi, **kwargs\n )\n\n def display_columns(\n self, rows: Sequence[Sequence[str]], header: Optional[List[str]] = None\n ) -> None:\n \"\"\"Print rows in aligned columns.\n\n :param rows: a rows of data to be displayed.\n :param header: a list of header strings.\n \"\"\"\n\n def get_aligner(align: str) -> Callable:\n if align == \">\":\n return rjust\n if align == \"^\":\n return centerize\n else:\n return ljust\n\n sizes = list(\n map(\n lambda column: max(map(lambda x: 
len(strip_ansi(x)), column)),\n zip_longest(header or [], *rows, fillvalue=\"\"),\n )\n )\n\n aligners = [ljust] * len(sizes)\n if header:\n aligners = []\n for i, head in enumerate(header):\n aligners.append(get_aligner(head[0]))\n if head[0] in (\">\", \"^\", \"<\"):\n header[i] = head[1:]\n self.echo(\n \" \".join(\n aligner(head, size)\n for aligner, head, size in zip(aligners, header, sizes)\n )\n )\n # Print a separator\n self.echo(\" \".join(\"-\" * size for size in sizes))\n for row in rows:\n self.echo(\n \" \".join(\n aligner(item, size)\n for aligner, item, size in zip(aligners, row, sizes)\n )\n )\n\n @contextlib.contextmanager\n def indent(self, prefix: str) -> Iterator[None]:\n \"\"\"Indent the following lines with a prefix.\"\"\"\n _indent = self._indent\n self._indent += prefix\n yield\n self._indent = _indent\n\n @contextlib.contextmanager\n def logging(self, type_: str = \"install\") -> Iterator[logging.Logger]:\n \"\"\"A context manager that opens a file for logging when verbosity is NORMAL or\n print to the stdout otherwise.\n \"\"\"\n file_name = mktemp(\".log\", f\"pdm-{type_}-\")\n\n if self.verbosity >= DETAIL:\n handler = logging.StreamHandler()\n else:\n handler = logging.FileHandler(file_name, encoding=\"utf-8\")\n handler.setLevel(logging.DEBUG)\n logger.handlers[1:] = [handler]\n pip_logger = logging.getLogger(\"pip.subprocessor\")\n pip_logger.handlers[:] = [handler]\n\n def cleanup() -> None:\n try:\n os.unlink(file_name)\n except OSError:\n pass\n\n try:\n yield logger\n except Exception:\n if self.verbosity < DETAIL:\n logger.exception(\"Error occurs\")\n self.echo(yellow(f\"See {file_name} for detailed debug log.\"), err=True)\n raise\n else:\n atexit.register(cleanup)\n finally:\n logger.handlers.remove(handler)\n pip_logger.handlers.remove(handler)\n\n def open_spinner(\n self, title: str, spinner: str = \"dots\"\n ) -> Union[DummySpinner, halo.Halo]:\n \"\"\"Open a spinner as a context manager.\"\"\"\n if self.verbosity >= DETAIL or not self.supports_ansi:\n return DummySpinner()\n else:\n return halo.Halo( # type: ignore\n title, spinner=spinner, indent=self._indent\n )\n\n\nclass Emoji:\n \"\"\"A collection of emoji characters used in terminal output\"\"\"\n\n if supports_unicode(): # type: ignore\n SUCC = \"🎉\"\n LOCK = \"🔒\"\n else:\n SUCC = \"\"\n LOCK = \"\"\n", "id": "7091819", "language": "Python", "matching_score": 0.8941715955734253, "max_stars_count": 0, "path": "pdm/termui.py" }, { "content": "from importlib import import_module\n\nfrom django.core.exceptions import ImproperlyConfigured\n\n\ndef import_by_path(dotted_path, error_prefix=''):\n \"\"\"\n Import a dotted module path and return the attribute/class designated by\n the last name in the path. Raise ImproperlyConfigured if something goes\n wrong.\n\n .. warning::\n .. 
deprecated:: Django 1.6\n\n Function :py:func:`django.utils.module_loading.import_by_path` has\n been added in Django 1.6.\n\n :param dotted_path: Path to imported attribute or class\n :type dotted_path: str\n\n :return: imported attribute or class\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError:\n raise ImproperlyConfigured(\"%s%s doesn't look like a module path\" % (\n error_prefix, dotted_path))\n try:\n module = import_module(module_path)\n except ImportError as e:\n msg = '%sError importing module %s: \"%s\"' % (\n error_prefix, module_path, e)\n raise ImproperlyConfigured(msg)\n try:\n attr = getattr(module, class_name)\n except AttributeError:\n raise ImproperlyConfigured('%sModule \"%s\" does not define a \"%s\" \\\n attribute/class' %\n (error_prefix, module_path, class_name))\n return attr\n", "id": "7678457", "language": "Python", "matching_score": 0.2456737607717514, "max_stars_count": 1, "path": "embed_video/utils.py" }, { "content": "from setuptools import setup, find_packages\n\nimport os\n\nembed_video = __import__('embed_video')\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nREADME = read('README.rst')\nCHANGES = read('CHANGES.rst')\n\n\nsetup(\n name='django-embed-video',\n packages=find_packages(),\n package_data={'embed_video': ['templates/embed_video/*.html']},\n version=embed_video.get_version(),\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/jazzband/django-embed-video',\n description=embed_video.__doc__.strip(),\n long_description='\\n\\n'.join([README, CHANGES]),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Framework :: Django',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n keywords=['youtube', 'vimeo', 'video', 'soundcloud'],\n install_requires=['requests >= 2.19', 'Django >= 1.11'],\n setup_requires=['nose', 'readme'],\n tests_require=['Django', 'requests >= 2.19', 'coverage'],\n test_suite='nose.collector',\n)\n", "id": "6831209", "language": "Python", "matching_score": 1.8406286239624023, "max_stars_count": 0, "path": "setup.py" }, { "content": "import os\nimport django\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'embed_video.tests.django_settings'\n\ndjango.setup()\n", "id": "3307806", "language": "Python", "matching_score": 1.1913291215896606, "max_stars_count": 0, "path": "embed_video/tests/__init__.py" } ]
1.95356
Lurker-Coding
[ { "content": "from discord.ext import commands\n\n\nasync def check_permissions(ctx, perms, *, check=all):\n is_owner = await ctx.bot.is_owner(ctx.author)\n if is_owner:\n return True\n\n resolved = ctx.channel.permissions_for(ctx.author)\n return check(getattr(resolved, name, None) == value for name, value in perms.items())\n\n\ndef has_permissions(*, check=all, **perms):\n async def pred(ctx):\n return await check_permissions(ctx, perms, check=check)\n return commands.check(pred)\n\n\nasync def check_guild_permissions(ctx, perms, *, check=all):\n is_owner = await ctx.bot.is_owner(ctx.author)\n if is_owner:\n return True\n\n if ctx.guild is None:\n return False\n\n resolved = ctx.author.guild_permissions\n return check(getattr(resolved, name, None) == value for name, value in perms.items())\n\n\ndef has_guild_permissions(*, check=all, **perms):\n async def pred(ctx):\n return await check_guild_permissions(ctx, perms, check=check)\n return commands.check(pred)\n\n\ndef is_mod():\n async def pred(ctx):\n return await check_guild_permissions(ctx, {'manage_guild': True})\n return commands.check(pred)\n\n\ndef is_admin():\n async def pred(ctx):\n return await check_guild_permissions(ctx, {'administrator': True})\n return commands.check(pred)\n\n\ndef mod_or_permissions(**perms):\n perms['manage_guild'] = True\n\n async def predicate(ctx):\n return await check_guild_permissions(ctx, perms, check=any)\n return commands.check(predicate)\n\n\ndef admin_or_permissions(**perms):\n perms['administrator'] = True\n\n async def predicate(ctx):\n return await check_guild_permissions(ctx, perms, check=any)\n return commands.check(predicate)\n\n\ndef is_in_guilds(*guild_ids):\n def predicate(ctx):\n guild = ctx.guild\n if guild is None:\n return False\n return guild.id in guild_ids\n return commands.check(predicate)\n", "id": "1609876", "language": "Python", "matching_score": 0.46101486682891846, "max_stars_count": 2, "path": "modules/utils/checks.py" }, { "content": "import discord\nimport math\nimport datetime\nimport time\nimport rethinkdb as r\n\nfrom discord.ext import commands\nfrom config import prefixes\n\n\nclass Economy:\n\n def __init__(self, bot):\n self.bot = bot\n\n def _required_exp(self, level: int):\n if level < 0:\n return 0\n\n return 139 * level + 65\n\n def _level_exp(self, level: int):\n return level * 65 + 139 * level * (level - 1) // 2\n\n def _find_level(self, total_exp):\n return int((1 / 278) * (9 + math.sqrt(81 + 1112 * total_exp)))\n\n async def __has_account(self, user: int):\n if await r.table(\"economy\").get(str(user)).run(self.bot.r_conn):\n return True\n else:\n return False\n\n async def __get_balance(self, user: int):\n balance = await r.table(\"economy\").get(str(user)).run(self.bot.r_conn)\n\n return int(balance[\"balance\"])\n\n async def __has_level_account(self, user: int):\n if await r.table(\"levels\").get(str(user)).run(self.bot.r_conn):\n return True\n else:\n return False\n\n async def __create_level_account(self, user: int):\n data = {\n \"id\": str(user),\n \"title\": \"\",\n \"description\": \"\"\n }\n\n await r.table(\"levels\").insert(data).run(self.bot.r_conn)\n\n async def __check_level_account(self, user: int):\n if not await self.__has_level_account(user):\n await self.__create_level_account(user)\n\n async def __update_balance(self, user: int, amount: int):\n await r.table(\"economy\").get(str(user)).update({\n \"balance\": int(amount)\n }).run(self.bot.r_conn)\n\n async def __update_payday_time(self, user: int):\n await 
r.table(\"economy\").get(str(user)).update({\n \"lastpayday\": str(int(time.time()))\n }).run(self.bot.r_conn)\n\n async def __is_frozen(self, user: int):\n data = await r.table(\"economy\").get(str(user)).run(self.bot.r_conn)\n frozen = data.get(\"frozen\", False)\n if frozen:\n return True\n else:\n return False\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def register(self, ctx):\n \"\"\"Register an account.\"\"\"\n user = ctx.author\n await self.__has_level_account(user.id)\n\n if await self.__has_account(user.id):\n await ctx.send(\"You already have an account.\")\n else:\n data = {\n \"id\": str(user.id),\n \"balance\": 0,\n \"lastpayday\": \"0\",\n \"frozen\": False\n }\n\n await r.table(\"economy\").insert(data).run(self.bot.r_conn)\n await ctx.send(\"I hope you enjoy your new account!\")\n\n @commands.command(aliases=[\"bal\", \"money\", \"$\"])\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def balance(self, ctx, user:discord.Member=None):\n \"\"\"Shows yours or another users balance.\"\"\"\n if not user:\n user = ctx.author\n\n await self.__check_level_account(user.id)\n\n if await self.__has_account(user.id):\n balance = await self.__get_balance(user.id)\n\n await ctx.send(f\"Balance: **${balance}**\")\n else:\n await ctx.send(\"Balance: **$0**\")\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def daily(self, ctx):\n \"\"\"Grab yourself some free money.\"\"\"\n user = ctx.author\n await self.__check_level_account(user.id)\n\n if not await self.__has_account(user.id):\n return await ctx.send(f\"You do not have an account made. Please make one with `{prefixes[0]}register`.\")\n\n user_data = await r.table(\"economy\").get(str(user.id)).run(self.bot.r_conn)\n last_payday = user_data[\"lastpayday\"]\n user_balance = int(user_data[\"balance\"])\n\n if await self.__is_frozen(user.id):\n return await ctx.send(\":x: This account is frozen.\")\n\n tn = int(time.time())\n st = int(last_payday)\n tl = tn - st\n\n if not tl >= 86400:\n i = datetime.timedelta(seconds=86400 - tl)\n d = datetime.datetime(1, 1, 1) + i\n return await ctx.send(f\":x: You have **{d.strftime('%H:%M:&S')} until your next daily.**\")\n\n # change this so it changes amount if user has donated (at some point)\n await ctx.send(\"You have received **$500**!\")\n await self.__update_payday_time(user.id)\n await self.__update_balance(user.id, user_balance + 500)\n\n\ndef setup(bot):\n bot.add_cog(\n Economy(bot)\n )\n", "id": "4351033", "language": "Python", "matching_score": 0.9782357811927795, "max_stars_count": 2, "path": "modules/economy.py" }, { "content": "import discord\nimport aiohttp\n\nfrom discord.ext import commands\n\n\nclass Image:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def neko(self, ctx):\n \"\"\"Sends a pic of a neko.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://nekos.life/api/neko\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"neko\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def dog(self, ctx):\n \"\"\"Sends a pic of a dog.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://random.dog/woof.json\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"url\"])\n await 
ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def cat(self, ctx):\n \"\"\"Sends a pic of a cat.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://aws.random.cat/meow\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"file\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def lizard(self, ctx):\n \"\"\"Sends a pic of a lizard.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://nekos.life/api/lizard\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"url\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def duck(self, ctx):\n \"\"\"Sends a pic of a duck.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://random-d.uk/api/v1/random\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"url\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def panda(self, ctx):\n \"\"\"Sends a pic of a panda.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://animals.anidiots.guide/panda\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"link\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def penguin(self, ctx):\n \"\"\"Sends a pic of a penguin.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://animals.anidiots.guide/penguin\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"link\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def tiger(self, ctx):\n \"\"\"Sends a pic of a tiger.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://animals.anidiots.guide/tiger\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"link\"])\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def lion(self, ctx):\n \"\"\"Sends a pic of a lion.\"\"\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(\"https://animals.anidiots.guide/lion\") as r:\n res = await r.json()\n\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=res[\"link\"])\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(\n Image(bot)\n )\n", "id": "2321936", "language": "Python", "matching_score": 1.1763722896575928, "max_stars_count": 2, "path": "modules/image.py" }, { "content": "import discord\nimport traceback\nimport io\nimport textwrap\nimport aiohttp\nimport subprocess\nimport re\nimport rethinkdb as r\n\nfrom discord.ext import commands\nfrom config import webhooks, prefixes\nfrom contextlib import redirect_stdout\nfrom .utils.chat_formatting import pagify, box\n\n\nclass Developer:\n\n def __init__(self, bot):\n self.bot = bot\n self._last_result = None\n\n def cleanup_code(self, content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n if content.startswith(\"```\") and content.endswith(\"```\"):\n return '\\n'.join(content.split(\"\\n\")[1:-1])\n return 
content.strip(\"` \\n\")\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def load(self, ctx, *, module):\n \"\"\"Loads a module.\"\"\"\n try:\n self.bot.load_extension(f\"modules.{module}\")\n except Exception:\n for page in pagify(traceback.format_exc()):\n await ctx.send(box(text=page, lang=\"py\"))\n else:\n await ctx.send(f\"Loaded module: {module}\"\n .replace(\"modules.\", \"\"))\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def unload(self, ctx, *, module):\n \"\"\"Unload a module.\"\"\"\n try:\n self.bot.unload_extension(f\"modules.{module}\")\n except Exception:\n for page in pagify(traceback.format_exc()):\n await ctx.send(box(text=page, lang=\"py\"))\n else:\n await ctx.send(f\"Unloaded module: {module}\"\n .replace(\"modules.\", \"\"))\n\n @commands.command(name=\"reload\", hidden=True)\n @commands.is_owner()\n async def _reload(self, ctx, *, module):\n \"\"\"Reload a module.\"\"\"\n try:\n self.bot.unload_extension(f\"modules.{module}\")\n self.bot.load_extension(f\"modules.{module}\")\n except Exception:\n for page in pagify(traceback.format_exc()):\n await ctx.send(box(text=page, lang=\"py\"))\n else:\n await ctx.send(f\"Reloaded module: {module}\"\n .replace(\"modules.\", \"\"))\n\n @commands.command(hidden=True, aliases=[\"off\", \"shutoff\"])\n @commands.is_owner()\n async def shutdown(self, ctx):\n \"\"\"Shutdown the bot.\"\"\"\n await ctx.send(\"Shutting off...\")\n await self.bot.close()\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def speedtest(self, ctx):\n \"\"\"Return the vps' speedtest results\"\"\"\n await ctx.trigger_typing()\n\n data = subprocess.getoutput(\"speedtest --share --simple\")\n data = re.search(\"(?P<url>https?://[^\\s]+)\", str(data)).group(\"url\")\n embed = discord.Embed(colour=3553599)\n embed.set_image(url=data)\n\n await ctx.send(embed=embed)\n\n @commands.command(name=\"eval\", hidden=True, aliases=[\"ev\"])\n @commands.is_owner()\n async def _eval(self, ctx, *, code: str):\n \"\"\"Evaluate Python Code.\"\"\"\n env = {\n 'self': self,\n 'bot': self.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n '_': self._last_result,\n 'r': r,\n 'r_conn': self.bot.r_conn\n }\n\n env.update(globals())\n code = self.cleanup_code(code)\n stdout = io.StringIO()\n to_compile = f'async def func():\\n{textwrap.indent(code, \" \")}'\n\n try:\n exec(to_compile, env)\n except Exception as e:\n return await ctx.send(f'`{e.__class__.__name__}`\\n```py\\n{e}\\n```')\n\n func = env['func']\n\n try:\n with redirect_stdout(stdout):\n ret = await func()\n except Exception:\n value = stdout.getvalue()\n await ctx.send(f'```py\\n{value}{traceback.format_exc()}\\n```')\n else:\n value = stdout.getvalue()\n try:\n await ctx.message.add_reaction('\\u2705')\n except:\n pass\n\n if ret is None:\n if value:\n await ctx.send(f'```py\\n{value}\\n```')\n else:\n self._last_result = ret\n await ctx.send(f'```py\\n{value}{ret}\\n```')\n\n @commands.command(hidden=True, aliases=[\"tc\"])\n @commands.is_owner()\n async def traceback(self, ctx, public: bool=False):\n \"\"\"\n Sends the last command exception.\n\n public: default False\n \"\"\"\n if not public:\n destination = ctx.author\n else:\n destination = ctx.channel\n\n if self.bot._last_exception:\n for page in pagify(self.bot._last_exception):\n await destination.send(box(text=page, lang=\"py\"))\n else:\n await ctx.send(\"No exception has yet to occur.\")\n\n @commands.command(hidden=True)\n 
@commands.is_owner()\n async def force(self, ctx, user: discord.Member, *, command):\n \"\"\"Forces a user to run the specified command.\"\"\"\n message = ctx.message\n message.author = user\n message.content = f\"{prefixes[0]}{''.join(command)}\"\n\n await self.bot.process_commands(message)\n await ctx.message.add_reaction(u\"\\U0001F44C\")\n\n async def on_guild_join(self, guild):\n embed = discord.Embed(colour=3553599, title=\"Guild Joined\")\n embed.add_field(\n name=\"Name:\",\n value=\"\",\n inline=True\n )\n embed.add_field(\n name=\"Members:\",\n value=\"\",\n inline=True\n )\n embed.add_field(\n name=\"Owner:\",\n value=f\"{guild.owner.name}#{guild.owner.discriminator} ({guild.owner.id})\",\n inline=True\n )\n try:\n embed.set_thumbnail(url=guild.icon_url)\n except: pass\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"guildjoin\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(embed=embed)\n\n async def on_guild_remove(self, guild):\n embed = discord.Embed(colour=3553599, title=\"Guild Left\")\n embed.add_field(\n name=\"Name:\",\n value=\"\",\n inline=True\n )\n embed.add_field(\n name=\"Members:\",\n value=\"\",\n inline=True\n )\n embed.add_field(\n name=\"Owner:\",\n value=f\"{guild.owner.name}#{guild.owner.discriminator} ({guild.owner.id})\",\n inline=True\n )\n try:\n embed.set_thumbnail(url=guild.icon_url)\n except: pass\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"guildleave\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(\n Developer(bot)\n )\n", "id": "2559141", "language": "Python", "matching_score": 3.3926703929901123, "max_stars_count": 2, "path": "modules/developer.py" }, { "content": "import discord\n\nfrom discord.ext import commands\nfrom .utils.paginator import HelpPaginator\n\n\nclass Help:\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(aliases=[\"cmds\", \"cmd\", \"commands\", \"command\", \"helpme\"])\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def help(self, ctx, command: str=None):\n \"\"\"Gives you a list of all of the commands\"\"\"\n if command:\n entity = self.bot.get_cog(command) or self.bot.get_command(command)\n\n if entity is None:\n clean = command.replace('@', '@\\u200b')\n\n return await ctx.send(f\"Command or category \\\"{clean}\\\" not found.\")\n elif isinstance(entity, commands.Command):\n p = await HelpPaginator.from_command(ctx, entity)\n else:\n p = await HelpPaginator.from_cog(ctx, entity)\n\n return await p.paginate()\n\n try:\n embed = discord.Embed(colour=3553599)\n embed.set_author(name=f\"{self.bot.user.name} Commands!\", icon_url=self.bot.user.avatar_url)\n embed.set_footer(text=f\"{len(self.bot.commands)} Total Commands\")\n\n try:\n embed.add_field(\n name=\"Informational\",\n value=\", \".join([f\"`{i.name}`\" for i in self.bot.commands if i.cog_name == \"Informational\" and not i.hidden]),\n inline=False\n )\n except:\n pass\n\n try:\n embed.add_field(\n name=\"Image\",\n value=\", \".join([f\"`{i.name}`\" for i in self.bot.commands if i.cog_name == \"Image\" and not i.hidden]),\n inline=False\n )\n except:\n pass\n\n try:\n embed.add_field(\n name=\"Economy\",\n value=\", \".join([f\"`{i.name}`\" for i in self.bot.commands if i.cog_name == \"Economy\" and not i.hidden]),\n inline=False\n )\n except:\n pass\n\n try:\n if ctx.author.id == 227110473466773504:\n embed.add_field(\n 
name=\"Developer\",\n value=\", \".join([f\"`{i.name}`\" for i in self.bot.commands if i.cog_name == \"Developer\"]),\n inline=False\n )\n except:\n pass\n\n return await ctx.send(embed=embed)\n except discord.HTTPException:\n return await ctx.send(\":x: I cannot send embeds here!\")\n except:\n pass\n\n\ndef setup(bot):\n bot.remove_command('help')\n bot.add_cog(\n Help(bot)\n )\n", "id": "9824087", "language": "Python", "matching_score": 3.3786792755126953, "max_stars_count": 2, "path": "modules/_help.py" }, { "content": "import discord\nimport aiohttp\nimport math\nimport psutil\n\nfrom discord.ext import commands\nfrom datetime import datetime\nfrom config import webhooks, twitch_clientID\n\n\nclass Informational:\n\n def __init__(self, bot):\n self.bot = bot\n\n def millify(self, n):\n millnames = ['', 'k', 'M', ' Billion']\n n = float(n)\n millidx = max(0, min(len(millnames) - 1, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))\n\n return '{:.0f}{}'.format(n / 10 ** (3 * millidx), millnames[millidx])\n\n def delta(self, time):\n delta = time.days\n years, remainder = divmod(int(delta), 365)\n months, days = divmod(int(remainder), 30.4167)\n\n return f\"{int(years)} years, {int(months)} months, {int(days)} days\"\n\n @commands.command()\n @commands.cooldown(1, 14400, commands.BucketType.guild)\n async def contact(self, ctx, *message):\n \"\"\"Report an error to the developers.\"\"\"\n if ctx.guild.id == 472302438490046494:\n return await ctx.send(\"Please do not use contact in the support server.\")\n\n inv = await ctx.channel.create_invite(\n max_uses=3,\n reason=\"contact command in channel {} by user {}\".format(ctx.channel.name, ctx.author.name)\n )\n\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"contact\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n\n await webhook.send(\"<@&472304841528573962> Help has been requested in **{}** in the channel **{}** by **{}**.\\n\"\n \"**{}** said: **__\\\"{}\\\"__**\\n\\nHere is the invite: {}\"\n .format(ctx.guild.name, ctx.channel.name, ctx.author.name, ctx.author.name, ' '.join(message), inv))\n await ctx.send(\"Your issue has been sent. Please be patient for support.\\n\\ndiscord.gg/mqzcMca\")\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def ping(self, ctx):\n \"\"\"Ping... 
PONG!\"\"\"\n pingmsg = await ctx.send(\"Pinging so fast you won't even see this!!\")\n await pingmsg.edit(content=f\"Ping => {round(self.bot.latency * 1000, 2)} ms\")\n\n @commands.command(aliases=[\"server\", \"sinfo\", \"guildinfo\", \"ginfo\", \"guild\", \"si\", \"gi\"])\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def serverinfo(self, ctx):\n \"\"\"Get information on the server.\"\"\"\n region = {\n \"us-west\": \"US West\",\n \"us-east\": \"US East\",\n \"us-south\": \"US South\",\n \"us-central\": \"US Central\",\n \"eu-west\": \"EU West\",\n \"eu-central\": \"EU Central\",\n \"singapore\": \"Singapore\",\n \"london\": \"London\",\n \"sydney\": \"Sydney\",\n \"amsterdam\": \"Amsterdam\",\n \"frankfurt\": \"Frankfurt\",\n \"brazil\": \"Brazil\",\n \"hongkong\": \"Hongkong\",\n \"russia\": \"Russia\",\n \"vip-us-east\": \"[VIP] US East\",\n \"vip-us-west\": \"[VIP] US West\",\n \"vip-amsterdam\": \"[VIP] Amsterdam\"\n }\n\n verification = {\n 0: \"None\",\n 1: \"1 - Must have a verified email\",\n 2: \"2 - Must also be registered for more than 5 minutes\",\n 3: \"3 - Must also be member of the server for more than 10 minutes\",\n 4: \"4 - Must have a verified phone number\"\n }\n\n mfa = {\n 0: \"Disabled\",\n 1: \"Enabled\"\n }\n\n online = 0\n onlineEmoji = discord.utils.get(ctx.bot.emojis, name=\"UniOnline\")\n\n idle = 0\n idleEmoji = discord.utils.get(ctx.bot.emojis, name=\"UniIdle\")\n\n offline = 0\n offlineEmoji = discord.utils.get(ctx.bot.emojis, name=\"UniOffline\")\n\n dnd = 0\n dndEmoji = discord.utils.get(ctx.bot.emojis, name=\"UniDND\")\n\n for m in ctx.guild.members:\n if m.status == discord.Status.online:\n online = online + 1\n elif m.status == discord.Status.idle:\n idle = idle + 1\n elif m.status == discord.Status.offline:\n offline = offline + 1\n elif m.status == discord.Status.dnd:\n dnd = dnd + 1\n\n embed = discord.Embed(colour=3553599)\n embed.set_author(name=f\"{ctx.guild.name} ({ctx.guild.id})\", icon_url=ctx.guild.icon_url)\n embed.set_thumbnail(url=ctx.guild.icon_url)\n embed.add_field(\n name=\"Owner\",\n value=f\"{ctx.guild.owner.name}#{ctx.guild.owner.discriminator}\",\n inline=False\n )\n embed.add_field(\n name=\"Verification\",\n value=verification[int(ctx.guild.verification_level)],\n inline=False\n )\n embed.add_field(\n name=\"Region\",\n value=region[str(ctx.guild.region)],\n inline=True\n )\n embed.add_field(\n name=\"Two-Factor Authentication\",\n value=mfa[int(ctx.guild.mfa_level)],\n inline=True\n )\n embed.add_field(\n name=f\"{len(ctx.guild.channels)} Channels\",\n value=f\"{len(ctx.guild.text_channels)} Text Channels\\n{len(ctx.guild.voice_channels)} Voice Channels\",\n inline=False\n )\n embed.add_field(\n name=f\"{len(ctx.guild.roles) - 1} Roles\",\n value=\", \".join([str(x) for x in ctx.guild.roles][1:]),\n inline=False\n )\n embed.add_field(\n name=f\"{len(ctx.guild.emojis)} Emojis\",\n value=\" \".join([str(x) for x in ctx.guild.emojis]),\n inline=False\n )\n embed.add_field(\n name=f\"{ctx.guild.member_count} Members\",\n value=f\"{len([m for m in ctx.guild.members if not m.bot])} Humans\\n\"\n f\"{len([m for m in ctx.guild.members if m.bot])} Bots\",\n inline=True\n )\n embed.add_field(\n name=\"Member Status\\'\",\n value=f\"{onlineEmoji} {online}\\n\"\n f\"{idleEmoji} {idle}\\n\"\n f\"{dndEmoji} {dnd}\\n\"\n f\"{offlineEmoji} {offline}\",\n inline=True\n )\n\n await ctx.send(embed=embed)\n\n @commands.command(aliases=[\"uinfo\", \"user\", \"ui\"])\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def 
userinfo(self, ctx, user: discord.Member=None):\n \"\"\"Shows information on a user or yourself.\"\"\"\n if user is None:\n user = ctx.author\n\n try:\n playing = user.activity.name\n except:\n playing = \"None\"\n\n Status = {\n \"online\": \"Online\",\n \"idle\": \"Idle\",\n \"dnd\": \"Do Not Disturb\",\n \"offline\": \"Offline/Invisible\"\n }\n\n if user.roles[1:]:\n roles = \", \".join(sorted([x.name for x in user.roles][1:], key=[x.name for x in ctx.guild.roles[::-1][:-1]].index))\n else:\n roles = \"No Roles\"\n\n embed = discord.Embed(colour=3553599)\n embed.set_author(name=f\"{user} ({user.id})\", icon_url=user.avatar_url)\n embed.set_thumbnail(url=user.avatar_url)\n embed.add_field(\n name=\"Nickname\",\n value=user.nick if user.nick else \"None\",\n inline=True\n )\n embed.add_field(\n name=\"Is Bot?\",\n value=str(user.bot),\n inline=True\n )\n embed.add_field(\n name=\"Status\",\n value=Status[str(user.status)],\n inline=True\n )\n embed.add_field(\n name=\"Playing\",\n value=playing,\n inline=True\n )\n embed.add_field(\n name=\"Mutual Servers\",\n value=str(len(list(filter(lambda u: u.id == user.id, self.bot.get_all_members())))),\n inline=True\n )\n embed.add_field(\n name=\"Joined at\",\n value=f\"{user.joined_at.strftime('%-I:%M %p %d/%m/%Y')}\\n{self.delta(datetime.utcnow() - user.joined_at)} ago\",\n inline=False\n )\n embed.add_field(\n name=\"Created at\",\n value=f\"{user.created_at.strftime('%-I:%M %p %d/%m/%Y')}\\n{self.delta(datetime.utcnow() - user.created_at)} ago\",\n inline=False\n )\n embed.add_field(\n name=f\"{len(user.roles[1:])} Roles\",\n value=roles,\n inline=False\n )\n\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def avatar(self, ctx, user: discord.Member=None):\n \"\"\"Sends avatar of the user mentioned or you.\"\"\"\n if user is None:\n user = ctx.author\n if user.avatar_url is None:\n return await ctx.send(\"User has no avatar.\")\n embed = discord.Embed(title=f\"{user.name}'s avatar:\", colour=3553599)\n embed.set_image(url=user.avatar_url)\n await ctx.send(embed=embed)\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def invite(self, ctx):\n \"\"\"Sends the bot's invite and an invite to the support server.\"\"\"\n if ctx.guild.id == 472302438490046494:\n return await ctx.send(f\"<https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&scope=bot&permissions=0>\\n\"\n f\"\\nHere is my invite, make sure to give it the right permissions for what you need to use it for \"\n f\"or else it may not work properly. :heart:\")\n else:\n return await ctx.send(f\"<https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&scope=bot&permissions=0>\\n\"\n f\"\\nHere is my invite, make sure to give it the right permissions or else it may not work properly. 
\"\n f\":heart:\\nAlso here's the support server if you have any questions or suggestions: \"\n f\"https://discord.gg/mqzcMca\")\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def twitch(self, ctx, account):\n \"\"\"Returns information on the Twitch account you want information on.\"\"\"\n try:\n async with aiohttp.ClientSession() as cs:\n async with cs.get(f\"https://api.twitch.tv/kraken/channels/{account}?client_id={twitch_clientID}\") as r:\n res = await r.json()\n creationdate = datetime.strptime(str(res['created_at']).split('.')[0], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%m-%d-%Y\")\n\n embed = discord.Embed(colour=3553599)\n embed.set_author(name=res[\"display_name\"], icon_url=\"https://i.imgur.com/OQwQ8z0.jpg\", url=res[\"url\"])\n embed.add_field(\n name=\"Account ID\",\n value=res[\"_id\"],\n inline=True\n )\n embed.add_field(\n name=\"\\u200B\",\n value=\"\\u200B\",\n inline=True\n )\n embed.add_field(\n name=\"Followers\",\n value=res[\"followers\"],\n inline=True\n )\n embed.add_field(\n name=\"Created On\",\n value=creationdate,\n inline=True\n )\n embed.add_field(\n name=\"\\u200B\",\n value=\"\\u200B\",\n inline=True\n )\n embed.add_field(\n name=\"Channel Views\",\n value=res[\"views\"],\n inline=True\n )\n\n await ctx.send(embed=embed)\n except Exception:\n await ctx.send(f\"Unable to find account: {account}. Are you sure you spelt it correctly?\")\n\n @commands.command(aliases=[\"botstats\"])\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def stats(self, ctx):\n \"\"\"Gives you stats on the bot.\"\"\"\n memory = psutil.virtual_memory().total >> 20\n mem_usage = psutil.virtual_memory().used >> 20\n\n embed = discord.Embed(colour=3553599)\n embed.set_author(name=f\"{self.bot.user.name}'s Statistics\", icon_url=self.bot.user.avatar_url)\n embed.add_field(\n name=\"Server Count\",\n value=f\"{self.millify(len(self.bot.guilds))} ({str(len(self.bot.guilds))})\"\n )\n embed.add_field(\n name=\"User Count\",\n value=f\"{self.millify(len(self.bot.users))} ({str(len(self.bot.users))})\"\n )\n embed.add_field(\n name=\"Shard Count\",\n value=self.bot.shard_count\n )\n try:\n embed.add_field(\n name=\"Most Used Command\",\n value=self.bot.command_usage.most_common(1)[0][0],\n inline=False\n )\n except:\n embed.add_field(\n name=\"Most Used Command\",\n value=\"stats\",\n inline=False\n )\n embed.add_field(\n name=\"RAM Usage\",\n value=f\"{mem_usage}/{memory} MB ({int(memory - mem_usage)} MB free)\",\n inline=False\n )\n embed.add_field(\n name=\"Uptime\",\n value=self.bot.bot_uptime(),\n inline=False\n )\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(\n Informational(bot)\n )\n", "id": "1328317", "language": "Python", "matching_score": 4.257155418395996, "max_stars_count": 2, "path": "modules/informational.py" }, { "content": "import os\nimport discord\nimport aiohttp\nimport random\nimport time\nimport rethinkdb as r\n\nfrom discord.ext import commands\nfrom collections import Counter\nfrom datetime import datetime\nfrom pyfiglet import Figlet\nfrom config import database, prefixes, token, webhooks\n\n\ndef _prefixes(bot, msg):\n return commands.when_mentioned_or(*prefixes)(bot, msg)\n\n\nclass UniversalBot(commands.AutoShardedBot):\n\n def __init__(self):\n super().__init__(\n command_prefix=_prefixes,\n description=\"bad bot\",\n status=discord.Status.dnd,\n activity=discord.Game(name=\"Starting up...\"),\n pm_help=False,\n help_attrs={\n \"hidden\": True\n }\n )\n\n self._last_exception = None\n self.counter = 
Counter()\n self.command_usage = Counter()\n\n async def _init_rethink():\n r.set_loop_type(\"asyncio\")\n self.r_conn = await r.connect(\n host=database[\"host\"],\n port=database[\"port\"],\n db=database[\"db\"],\n user=database[\"user\"],\n password=database[\"password\"]\n )\n\n self.loop.create_task(_init_rethink())\n\n for file in os.listdir(\"modules\"):\n if file.endswith(\".py\"):\n name = file[:-3]\n try:\n self.load_extension(f\"modules.{name}\")\n except Exception as e:\n print(f\"Failed to load {name}: {e}\")\n\n async def on_command_error(self, context, exception):\n if isinstance(exception, commands.CommandNotFound):\n return\n\n async def on_command(self, ctx):\n try:\n if ctx.author.id not in [<PASSWORD>, 302523498226647041]:\n self.command_usage[str(ctx.command)] += 1\n except:\n pass\n try:\n if ctx.author.id not in [<PASSWORD>, 302523498226647041]:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"command\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"[`{datetime.utcnow().strftime('%m-%d-%Y %H:%M:%S')}`] [`{ctx.guild.name} \"\n f\"({ctx.guild.id})`] User **{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})** \"\n f\"ran the command **{ctx.command.name}**.\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"command\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Command Logger Failed:\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n async def send_cmd_help(self, ctx):\n if ctx.invoked_subcommand:\n pages = await self.formatter.format_help_for(ctx, ctx.invoked_subcommand)\n for page in pages:\n await ctx.send(page)\n else:\n pages = await self.formatter.format_help_for(ctx, ctx.command)\n for page in pages:\n await ctx.send(page)\n\n async def __level_handler(self, message):\n if not isinstance(message.channel, discord.TextChannel):\n return\n if message.content == \"\" or not len(message.content) > 5:\n return\n\n if random.randint(1, 10) == 1:\n author = message.author\n level_system = await r.table(\"levelSystem\").get(str(author.id)).run(self.r_conn)\n guildXP = await r.table(\"guildXP\").get(str(author.id)).run(self.r_conn)\n\n if not guildXP or not guildXP.get(str(message.author.id)):\n data = {\n str(message.author.id): {\n \"lastxp\": str(int(time.time())),\n \"xp\": 0\n }\n }\n\n if not guildXP:\n data[\"id\"] = str(message.guild.id)\n\n return await r.table(\"guildXP\").get(str(message.guild.id)).update(data).run(self.r_conn)\n\n if (int(time.time()) - int(guildXP.get(str(message.author.id))[\"lastxp\"])) >= 120:\n xp = guildXP.get(str(message.author.id))[\"xp\"] + random.randint(10, 40)\n data = {\n str(message.author.id): {\n \"xp\": xp,\n \"lastxp\": str(int(time.time()))\n }\n }\n\n await r.table(\"guildXP\").get(str(message.guild.id)).update(data).run(self.r_conn)\n\n if not level_system:\n data = {\n \"id\": str(author.id),\n \"xp\": 0,\n \"lastxp\": \"0\",\n \"blacklisted\": False,\n \"lastxptimes\": []\n }\n\n return await r.table(\"levelSystem\").insert(data).run(self.r_conn)\n\n if level_system.get(\"blacklisted\", False):\n return\n\n if (int(time.time()) - int(level_system[\"lastxp\"])) >= 120:\n lastxptimes = level_system[\"lastxptimes\"]\n lastxptimes.append(str(int(time.time())))\n\n xp = level_system[\"xp\"] + random.randint(10, 40)\n data = {\n \"xp\": xp,\n \"lastxp\": str(int(time.time())),\n \"lastxptimes\": lastxptimes\n }\n\n await 
r.table(\"levelSystem\").get(str(author.id)).update(data).run(self.r_conn)\n\n async def on_message(self, message):\n self.counter[\"messages_read\"] += 1\n\n if message.author.bot:\n return\n\n await self.process_commands(message)\n await self.__level_handler(message)\n\n async def close(self):\n self.r_conn.close()\n # self.redis.close()\n await super().close()\n\n async def on_shard_ready(self, shard_id):\n print(f\"Shard {shard_id} Connected.\")\n\n async def on_ready(self):\n if not hasattr(self, \"uptime\"):\n self.uptime = datetime.utcnow()\n\n print(Figlet().renderText(\"UniversalBot\"))\n print(f\"Shards: {self.shard_count}\")\n print(f\"Servers: {len(self.guilds)}\")\n print(f\"Users: {len(set(self.get_all_members()))}\")\n await self.change_presence(\n status=discord.Status.online,\n activity=discord.Game(f\"{prefixes[0]}help | {self.shard_count} Shards\")\n )\n\n def bot_uptime(self):\n now = datetime.utcnow()\n delta = now - self.uptime\n hours, remainder = divmod(int(delta.total_seconds()), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n fmt = \"{h} hours, {m} minutes, and {s} seconds\"\n\n if days:\n fmt = \"{d} days, \" + fmt\n\n return fmt.format(d=days, h=hours, m=minutes, s=seconds)\n\n def run(self):\n super().run(token)\n\n\nif __name__ == \"__main__\":\n UniversalBot().run()\n", "id": "5764198", "language": "Python", "matching_score": 4.477137565612793, "max_stars_count": 2, "path": "bot.py" }, { "content": "import discord\nimport aiohttp\nimport traceback\n\nfrom discord.ext import commands\nfrom config import webhooks\n\n\nclass ErrorHandler:\n\n def __init__(self, bot):\n self.bot = bot\n\n async def send_cmd_help(self, ctx):\n if ctx.invoked_subcommand:\n pages = await self.bot.formatter.format_help_for(ctx, ctx.invoked_subcommand)\n for page in pages:\n await ctx.send(page)\n else:\n pages = await self.bot.formatter.format_help_for(ctx, ctx.command)\n for page in pages:\n await ctx.send(page)\n\n async def on_command_error(self, ctx, exception):\n error = getattr(exception, \"original\", exception)\n\n if isinstance(error, discord.NotFound):\n return\n elif isinstance(error, discord.Forbidden):\n return\n elif isinstance(error, discord.HTTPException) or isinstance(error, aiohttp.ClientConnectionError):\n log = f\"HTTPException or ClientConnectionError in command {ctx.command.qualified_name}\\n\"\n log += \"\".join(traceback.format_exception(type(exception), exception, exception.__traceback__))\n self.bot._last_exception = log\n\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"error\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n embed = discord.Embed(colour=discord.Colour.red())\n embed.title = f\"Error in command {ctx.command.qualified_name}\"\n embed.description = \"HTTPException\"\n\n await webhook.send(embed=embed)\n if isinstance(exception, commands.NoPrivateMessage):\n return\n elif isinstance(exception, commands.DisabledCommand):\n await ctx.send(f\"{ctx.command.qualified_name} is currently disabled. 
Please try again at a later time.\")\n elif isinstance(exception, commands.CommandInvokeError):\n log = f\"HTTPException or ClientConnectionError in command {ctx.command.qualified_name}\\n\"\n log += \"\".join(traceback.format_exception(type(exception), exception, exception.__traceback__))\n self.bot._last_exception = log\n\n embed = discord.Embed(\n colour=discord.Colour.red(),\n title=f\"Error in command: **{ctx.command.qualified_name}**\",\n url=\"https://discord.gg/mqzcMca\",\n description=f\"If this error continues please tell the developers in the [Support Server](https://discord.gg/mqzcMca).\"\n f\"\\n```py\\n{exception}\\n```\"\n )\n await ctx.send(embed=embed)\n\n async with aiohttp.ClientSession() as cs:\n embed = discord.Embed(\n colour=discord.Colour.red(),\n title=f\"Error in Command: **{ctx.command.qualified_name}**\",\n description=f\"Error created by user: `{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})`\"\n f\"\\n```py\\n{exception}\\n```\"\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"error\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(embed=embed)\n elif isinstance(exception, commands.BadArgument):\n await self.send_cmd_help(ctx)\n elif isinstance(exception, commands.MissingRequiredArgument):\n await self.send_cmd_help(ctx)\n elif isinstance(exception, commands.CheckFailure):\n await ctx.send(\"You are not allowed to use that command.\")\n elif isinstance(exception, commands.CommandOnCooldown):\n await ctx.send(':x: Command is on cooldown... {:.2f} seconds left.'.format(exception.retry_after), delete_after=10)\n elif isinstance(exception, commands.NotOwner):\n await ctx.send(\":x: This command is for the owner of the bot, so please do not try to use it again.\")\n elif isinstance(exception, commands.BotMissingPermissions):\n await ctx.send(\"The bot is missing the permissions needed for this command.\")\n elif isinstance(exception, commands.CommandNotFound):\n return\n else:\n return\n\n\ndef setup(bot):\n bot.add_cog(\n ErrorHandler(bot)\n )\n", "id": "9286298", "language": "Python", "matching_score": 2.0779964923858643, "max_stars_count": 2, "path": "modules/error_handler.py" }, { "content": "import asyncio\nimport aiohttp\nimport discord\nfrom config import botlists, webhooks\n\n\nclass BotList:\n\n def __init__(self, bot):\n self.bot = bot\n self.guildcount = len(bot.guilds)\n self.botid = bot.user.id\n\n async def updatebotlists(self):\n while True:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n\n await webhook.send(f\"Attempting to push guild count of {len(self.bot.guilds)} to each botlist.\")\n\n # discordbotlabs.com is possibly shutting down February 1\n # I'm commenting this out then deleting it if they do shut down\n\n # try:\n # url = f\"https://discordbotlabs.com/api/\" # discordbotlabs.com does not have its api finished yet\n # payload = {\n # \"shard_id\": self.bot.shard_count,\n # \"guilds\": int(self.guildcount)\n # }\n # headers = {\n # \"Authorization\": \"Bot \" + botlists[\"discordbotlabscom\"],\n # \"Content-Type\": \"application/json\"\n # }\n #\n # async with aiohttp.ClientSession() as cs:\n # await cs.post(\n # url=url,\n # json=payload,\n # headers=headers\n # )\n # webhook = discord.Webhook.from_url(\n # url=webhooks[\"botlist\"],\n # adapter=discord.AsyncWebhookAdapter(cs)\n # )\n # await webhook.send(f\"Posted guild count of {self.guildcount} for botlist discordbotlabs.com\")\n # 
except Exception as e:\n # async with aiohttp.ClientSession() as cs:\n # webhook = discord.Webhook.from_url(\n # url=webhooks[\"botlist\"],\n # adapter=discord.AsyncWebhookAdapter(cs)\n # )\n # webhook.send(f\"Failed to post guild count to discordbotlabs.com\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n try:\n url = f\"https://discordbotlist.com/api/bots/{self.botid}/stats\"\n payload = {\n \"guilds\": int(self.guildcount),\n \"users\": len(set(self.bot.get_all_members()))\n }\n headers = {\n \"Authorization\": botlists[\"discordbotlistcom\"],\n \"Content-Type\": \"application/json\"\n }\n\n async with aiohttp.ClientSession() as cs:\n await cs.post(\n url=url,\n json=payload,\n headers=headers\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Posted guild count of {self.guildcount} for botlist discordbotlist.com\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n webhook.send(f\"Failed to post guild count to discordbotlist.com\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n try:\n url = f\"https://discord.boats/api/bot/{self.bot.id}\"\n payload = {\n \"guilds\": int(self.guildcount)\n }\n headers = {\n \"Authorization\": botlists[\"discordboats\"],\n \"Content-Type\": \"application/json\"\n }\n\n async with aiohttp.ClientSession() as cs:\n await cs.post(\n url=url,\n json=payload,\n headers=headers\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Posted guild count of {self.guildcount} for botlist discord.boats\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n webhook.send(f\"Failed to post guild count to discord.boats\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n try:\n url = f\"https://discordbots.org/api/bots/{self.botid}/stats\"\n payload = {\n \"server_count\": int(self.guildcount),\n \"shard_count\": self.bot.shard_count\n }\n headers = {\n \"Authorization\": botlists[\"discordbotsorg\"]\n }\n\n async with aiohttp.ClientSession() as cs:\n await cs.post(\n url=url,\n json=payload,\n headers=headers\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Posted guild count of {self.guildcount} for botlist discordbots.org\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n webhook.send(f\"Failed to post guild count to discordbots.org\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n try:\n url = f\"https://bots.discord.pw/api/bots/{self.botid}/stats\"\n payload = {\n \"server_count\": int(self.guildcount),\n \"shard_count\": self.bot.shard_count\n }\n headers = {\n \"Authorization\": botlists[\"botsdiscordpw\"]\n }\n\n async with aiohttp.ClientSession() as cs:\n await cs.post(\n url=url,\n json=payload,\n headers=headers\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Posted guild count of {self.guildcount} for botlist bots.discord.pw\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = 
discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n webhook.send(f\"Failed to post guild count to bots.discord.pw\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n try:\n url = f\"https://botlist.space/api/bots/{self.botid}\"\n payload = {\n \"server_count\": int(self.guildcount)\n }\n headers = {\n \"Authorization\": botlists[\"botlistspace\"],\n \"Content-Type\": \"application/json\"\n }\n\n async with aiohttp.ClientSession() as cs:\n await cs.post(\n url=url,\n json=payload,\n headers=headers\n )\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n await webhook.send(f\"Posted guild count of {self.guildcount} for botlist botlist.space\")\n except Exception as e:\n async with aiohttp.ClientSession() as cs:\n webhook = discord.Webhook.from_url(\n url=webhooks[\"botlist\"],\n adapter=discord.AsyncWebhookAdapter(cs)\n )\n webhook.send(f\"Failed to post guild count to botlist.space\\n`{type(e).__name__}`\\n```py\\n{e}\\n```\")\n\n await asyncio.sleep(43200)\n\n # async def on_ready(self):\n # await self.updatebotlists()\n\n\ndef setup(bot):\n bot.add_cog(\n BotList(bot)\n )\n", "id": "3864011", "language": "Python", "matching_score": 2.2810375690460205, "max_stars_count": 2, "path": "modules/botlist.py" }, { "content": "import aiohttp\nfrom json import dumps\n\n\nasync def post(content):\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://server.cwelch.me/documents\", data=str(content).encode('utf-8')) as response:\n res = await response.json()\n return f\"http://server.cwelch.me/{res['key']}\"\n\n\nasync def formatted_post(content):\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://server.cwelch.me/documents\", data=str(dumps(content, indent=4)).encode('utf-8')) as response:\n res = await response.json()\n return f\"http://server.cwelch.me/{res['key']}\"\n", "id": "3727981", "language": "Python", "matching_score": 0.44681575894355774, "max_stars_count": 2, "path": "modules/utils/hastebin.py" } ]
2.179517
tongserenity
[ { "content": "\"\"\"\r\n2017 - 4 - 10 neko34\r\nThis module is mainly responsible for fetching the data and exposing an interface that passes it to Main.\r\nThe concrete implementation has not been written yet.\r\n\"\"\"\r\n\r\n\r\ndef getData():\r\n data = open(\"SampleData.txt\", \"r\").read()\r\n return data\r\n", "id": "4845756", "language": "Python", "matching_score": 1.676189661026001, "max_stars_count": 3, "path": "GUIdesign/DeriveData.py" }, { "content": "import tkinter as tk\r\nimport GUIdesign.DeriveData as GetData\r\n\r\n\"\"\"\r\n2017 - 4 - 10 neko34\r\nDesign our user interface first.\r\nDeriveData exists to support this interface.\r\n\"\"\"\r\n# Implement the GUI program\r\n\r\ntop = tk.Tk()\r\n\r\ndata = GetData.getData()\r\n\r\n", "id": "3952874", "language": "Python", "matching_score": 0.8075336217880249, "max_stars_count": 3, "path": "GUIdesign/Main.py" }, { "content": "\"\"\"\r\n2017 - 4 - 10 neko34\r\nWrite the acquired data into the database; for now it is temporarily written to SampleData.\r\n\"\"\"\r\n\r\nimport WebAPI.GetAPIData as gad\r\n\r\n# URL for fetching PM2.5 data\r\npm2_5Url = \"\"\r\n\r\n# Fetch the page for urlString via the GetAPIData module\r\ndef getData(urlString):\r\n htmlAns = gad.openUrl(urlString)\r\n\r\n return htmlAns\r\n\r\n# Write into data storage, which for now is SampleData.txt\r\ndef writeToDB(dataToStore, addrStore):\r\n myfile = open(addrStore, \"w+\")\r\n myfile.write(dataToStore)", "id": "8230030", "language": "Python", "matching_score": 2.2803633213043213, "max_stars_count": 3, "path": "WebAPI/WriteToDB.py" }, { "content": "import urllib.request as ur\r\nimport urllib\r\n\"\"\"\r\n2017 - 4 - 10 neko34\r\nFetch the corresponding data from the network by calling the corresponding API.\r\n\"\"\"\r\n\r\n\r\ndef openUrl(urlString):\r\n html = ur.urlopen(urlString).read()\r\n\r\n return html\r\n\r\n", "id": "12749781", "language": "Python", "matching_score": 2.0805320739746094, "max_stars_count": 3, "path": "WebAPI/GetAPIData.py" } ]
1.878361
CasparovJR
[ { "content": "import statistics as s\ncrabList = [int(x) for x in open(\"Chal07.txt\", 'r').read().strip().split(',')]\nmean = int(s.mean(crabList))\nfuel = sum(abs(x-mean) * (abs(x-mean)+1)//2 for x in crabList)\nprint(fuel)", "id": "1213834", "language": "Python", "matching_score": 2.1324872970581055, "max_stars_count": 0, "path": "Chal07/Chal07Part2.py" }, { "content": "import statistics as s\nimport math\ndef sumCalc(mean): return sum(abs(x-mean) * (abs(x-mean)+1)//2 for x in crabList)\ncrabList = [int(x) for x in open(\"Chal07.txt\", 'r').read().strip().split(',')]\nprint(min(sumCalc(math.floor(s.mean(crabList))), sumCalc(math.ceil(s.mean(crabList)))))", "id": "4404932", "language": "Python", "matching_score": 2.3786873817443848, "max_stars_count": 0, "path": "Chal07/Chal07Part2(reliable).py" }, { "content": "import statistics as s\ncrabList = [int(x) for x in open(\"Chal07.txt\", 'r').read().strip().split(',')]\nmedian = s.median(crabList)\nfuel = sum(abs(median - pos) for pos in crabList)\nprint(int(fuel))", "id": "2724422", "language": "Python", "matching_score": 0.838320791721344, "max_stars_count": 0, "path": "Chal07/Chal07Part1.py" }, { "content": "inp = [[x for x in line] for line in open('Chal10.txt').read().strip().split()]\n\nscores = {')': 1, ']': 2, '}': 3, '>': 4}\ndic2 = {'(': ')', '[': ']', '{': '}', '<': '>'}\ndic = {')': '(', ']': '[', '}': '{', '>': '<'}\nscoreList = []\n\nfor i in inp:\n lst = []\n flag = False\n s = ''\n score = 0\n for j in i:\n lst.append(j)\n if j in scores.keys() and dic[j] == lst[-2]:\n lst.pop()\n lst.pop()\n elif j in scores.keys() and dic[j] != lst[-2]:\n flag = True\n break # discard corrupted\n\n if not flag:\n for i in lst:\n s += dic2[i]\n\n for i in list(reversed(s)):\n score = (score * 5) + scores[i]\n\n scoreList.append(score)\n\n\nprint(sorted(scoreList)[int((len(scoreList)-1)/2)])", "id": "5240193", "language": "Python", "matching_score": 1.9670192003250122, "max_stars_count": 0, "path": "Chal10/Chal10Part2.py" }, { "content": "inp = [[x for x in line] for line in open('Chal10.txt').read().strip().split()]\n\nscores = {')': 3, ']': 57, '}': 1197, '>': 25137}\ndic = {')': '(', ']': '[', '}': '{', '>': '<'}\nlst = []\ns = 0\nfor i in inp:\n for j in i:\n lst.append(j)\n if j in scores.keys() and dic[j] == lst[-2]:\n lst.pop()\n lst.pop()\n elif j in scores.keys() and dic[j] != lst[-2]:\n s += scores[j]\n break # find first one\nprint(s)", "id": "1151607", "language": "Python", "matching_score": 0.9021334052085876, "max_stars_count": 0, "path": "Chal10/Chal10Part1.py" }, { "content": "inp = [x.split('-') for x in open('Chal12.txt').read().strip().split()]\ndic = {}\nfor i in [x.split('-') for x in open('Chal12.txt').read().strip().split()]:\n dic[i[0]] = [] if i[0] not in dic.keys() else dic[i[0]]\n dic[i[1]] = [] if i[1] not in dic.keys() else dic[i[1]]\n dic[i[0]].append(i[1])\n dic[i[1]].append(i[0])\n\ndef DFS(curPos, dest, visited, path, allPaths):\n path = list(path)\n index = list(dic.keys()).index(curPos)\n\n if curPos.islower():\n visited[index] = True\n\n path.append(curPos)\n\n if curPos == dest:\n allPaths.append(path)\n else:\n for i in dic[curPos]:\n index2 = list(dic.keys()).index(i)\n if visited[index2] == False:\n DFS(i, dest, visited, path, allPaths)\n\n path.pop()\n visited[index] = False\n\nallPaths = []\nDFS('start', 'end', [False]*len(dic.keys()), [], allPaths)\nprint(len(allPaths))\n", "id": "1632963", "language": "Python", "matching_score": 1.41617751121521, "max_stars_count": 0, "path": 
"Chal12/Chal12Part1.py" }, { "content": "import pprint\n\ninp = [[int(x) for x in line] for line in open('Chal11.txt').read().strip().split()]\nstep = 0\nflashes = 0\n\ndef dfs(i, j, inp, flashes, visited):\n for k in [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1), (-1, 1), (1, -1)]:\n if (0 <= i+k[0] < len(inp) and 0 <= j+k[1] < len(inp[0])) and [i+k[0], j+k[1]] not in visited:\n inp[i+k[0]][j+k[1]] += 1\n\n if inp[i+k[0]][j+k[1]] > 9:\n visited.append([i+k[0], j+k[1]])\n inp, flashes, visited = dfs(i+k[0], j+k[1], inp, flashes, visited)\n inp[i+k[0]][j+k[1]] = 0\n flashes += 1\n\n return inp, flashes, visited\n\nwhile step < 100:\n visited = []\n #INITIAL\n for i in range(len(inp)):\n for j in range(len(inp[i])):\n inp[i][j] += 1\n\n #DFS\n for i in range(len(inp)):\n for j in range(len(inp[i])):\n if inp[i][j] > 9:\n visited.append([i, j])\n inp, flashes, visited = dfs(i, j, inp, flashes, visited)\n inp[i][j] = 0\n flashes += 1\n\n #CLEAN-UP\n for i in range(len(inp)):\n for j in range(len(inp[i])): \n if inp[i][j] > 9:\n inp[i][j] = 0\n\n step += 1\n \nprint(flashes)\n", "id": "6318209", "language": "Python", "matching_score": 1.2851536273956299, "max_stars_count": 0, "path": "Chal11/Chal11Part1.py" }, { "content": "from functools import reduce\ninp = [[int(x) for x in line] for line in open('Chal09.txt').read().strip().split()]\nbasins = []\n\ndef dfs(inp, i, j, s, visited):\n for k in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if (0 <= i+k[0] < len(inp) and 0 <= j+k[1] < len(inp[0])):\n if int(inp[i+k[0]][j+k[1]]) != 9 and [i+k[0], j+k[1]] not in visited:\n visited.append([i+k[0], j+k[1]])\n s = dfs(inp, i+k[0], j+k[1], s+1, visited)\n return s\n\n\nfor i in range(len(inp)):\n for j in range(len(inp[i])):\n flag = False\n for k in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if (0 <= i+k[0] < len(inp) and 0 <= j+k[1] < len(inp[0])):\n if inp[i][j] >= inp[i+k[0]][j+k[1]]:\n flag = True\n break\n\n if not flag:\n basinSize = dfs(inp, i, j, 0, [])\n basins.append(basinSize)\n\nbasins.sort()\nprint(reduce(lambda x, y: x*y, basins[-3:]))", "id": "11191934", "language": "Python", "matching_score": 1.3053929805755615, "max_stars_count": 0, "path": "Chal09/Chal09Part2.py" }, { "content": "inp = [[int(x) for x in line] for line in open('Chal09.txt').read().strip().split()]\ns = 0\nfor i in range(len(inp)):\n for j in range(len(inp[i])):\n flag = False\n for k in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if (0 <= i+k[0] < len(inp) and 0 <= j+k[1] < len(inp[0])):\n if inp[i][j] >= inp[i+k[0]][j+k[1]]:\n flag = True\n break\n\n if not flag:\n s += 1+int(inp[i][j])\nprint(s)", "id": "8388809", "language": "Python", "matching_score": 0.8975435495376587, "max_stars_count": 0, "path": "Chal09/Chal09Part1.py" }, { "content": "# Credit Leijurv and u/Waste_Willingness723 for the idea of a set\ninp = set()\nwith open('Chal13.txt', 'r') as f:\n for i in f.readlines():\n if i != '\\n':\n try:\n i = i.split(',')\n inp.add((int(i[0]), int(i[1])))\n except:\n i = i[0].split(' ')[2].split('=')\n if i[0] == 'x':\n for j in list(inp):\n if int(j[0]) > int(i[1]):\n dif = int(j[0]) - int(i[1])\n newCoord = int(i[1]) - dif\n inp.remove(j)\n inp.add((newCoord, j[1]))\n\n elif i[0] == 'y':\n for j in list(inp):\n if int(j[1]) > int(i[1]):\n dif = int(j[1]) - int(i[1])\n newCoord = int(i[1]) - dif\n inp.remove(j)\n inp.add((j[0], newCoord))\n\ninp = list(inp)\ninp.sort()\n\npaper = [['.' 
for _ in range(inp[-1][0]+1)] for _ in range(inp[-1][1]+1)]\nfor i in inp:\n paper[i[1]][i[0]] = '#'\nfor i in paper:\n print(*i)\n", "id": "8300424", "language": "Python", "matching_score": 2.194485664367676, "max_stars_count": 0, "path": "Chal13/improved.py" }, { "content": "import itertools\ninp = []\nwith open('Chal13.txt', 'r') as f:\n for i in f.readlines():\n if i != '\\n':\n try:\n i = i.split(',')\n inp.append([int(i[0]), int(i[1])])\n except:\n i = i[0].split(' ')[2].split('=')\n if i[0] == 'x':\n for j in range(len(inp)):\n if int(inp[j][0]) > int(i[1]):\n dif = int(inp[j][0]) - int(i[1])\n newCoord = int(i[1]) - dif\n inp[j][0] = newCoord\n\n break # just the first fold, so break afterwards\n\n inp.sort()\n print(len(list(k for k,_ in itertools.groupby(inp))))", "id": "403705", "language": "Python", "matching_score": 0.6483895182609558, "max_stars_count": 0, "path": "Chal13/Chal13Part1.py" }, { "content": "ventDiagram = []\nlargestX = 0\nlargestY = 0\n\nwith open(\"Chal05.txt\", 'r') as f:\n for i in f.readlines():\n coords = i.strip().split(' -> ')\n coords = [coords[0].split(','), coords[1].split(',')]\n #coords = [j for i in coords for j in i]\n maximumX = max(list(map(lambda x: int(x[0]), coords)))\n maximumY = max(list(map(lambda x: int(x[1]), coords)))\n\n if maximumX >= largestX:\n largestX = maximumX\n if maximumY >= largestY:\n largestY = maximumY\n\n ventDiagram = [[0 for _ in range(largestX+1)] for _ in range(largestY+1)]\n\nwith open(\"Chal05.txt\", 'r') as f:\n overlap = 0\n for i in f.readlines():\n coords = i.strip().split(' -> ')\n coords = [coords[0].split(','), coords[1].split(',')]\n x = list(map(lambda k: int(k[0]), coords))\n y = list(map(lambda k: int(k[1]), coords))\n minX, maxX = min(x[0],x[1]), max(x[0],x[1])\n minY, maxY = min(y[0],y[1]), max(y[0],y[1])\n \n dx = 1 if x[0]-x[1] < 0 else -1 if x[0]-x[1] > 0 else 0\n dy = 1 if y[0]-y[1] < 0 else -1 if y[0]-y[1] > 0 else 0\n\n # generate points in the line\n while x[0] != x[1] or y[0] != y[1]:\n ventDiagram[y[0]][x[0]] += 1\n\n if ventDiagram[y[0]][x[0]] == 2:\n overlap += 1\n\n x[0] += dx\n y[0] += dy\n\n if x[0] == x[1] and y[0] == y[1]:\n ventDiagram[y[0]][x[0]] += 1\n if ventDiagram[y[0]][x[0]] == 2:\n overlap += 1\n \n print(overlap)\n\n\n", "id": "4037390", "language": "Python", "matching_score": 3.7599198818206787, "max_stars_count": 0, "path": "Chal05/Chal05Part2.py" }, { "content": "ventDiagram = []\nlargestX = 0\nlargestY = 0\n\nwith open(\"Chal05.txt\", 'r') as f:\n for i in f.readlines():\n coords = i.strip().split(' -> ')\n coords = [coords[0].split(','), coords[1].split(',')]\n #coords = [j for i in coords for j in i]\n maximumX = max(list(map(lambda x: int(x[0]), coords)))\n maximumY = max(list(map(lambda x: int(x[1]), coords)))\n\n if maximumX >= largestX:\n largestX = maximumX\n if maximumY >= largestY:\n largestY = maximumY\n\n ventDiagram = [[0 for _ in range(largestX+1)] for _ in range(largestY+1)]\n\nwith open(\"Chal05.txt\", 'r') as f:\n overlap = 0\n for i in f.readlines():\n coords = i.strip().split(' -> ')\n coords = [coords[0].split(','), coords[1].split(',')]\n x = list(map(lambda k: int(k[0]), coords))\n y = list(map(lambda k: int(k[1]), coords))\n minX, maxX = min(x[0],x[1]), max(x[0],x[1])\n minY, maxY = min(y[0],y[1]), max(y[0],y[1])\n\n # generate points in the line\n if x[0]==x[1] or y[0]==y[1]:\n for x in range(minX, maxX+1):\n for y in range(minY, maxY+1):\n ventDiagram[y][x] += 1\n if ventDiagram[y][x] == 2:\n overlap += 1\n \n print(overlap)\n\n\n", "id": "4075064", 
"language": "Python", "matching_score": 0.0013936326140537858, "max_stars_count": 0, "path": "Chal05/Chal05Part1.py" }, { "content": "bingoNum = [int(x) for x in open('Chal04.txt').readline().strip().split(',')]\ncurBoard = []\nearliestBingo = len(bingoNum)\nearliestBingoBoard = []\n\ndef winCon(curBoard, earliestBingo, earliestBingoBoard):\n for i in curBoard:\n for j in i:\n if j not in bingoNum:\n break\n \n rightMost = -1\n for j in i:\n if rightMost < bingoNum.index(j):\n rightMost = bingoNum.index(j)\n\n if rightMost < earliestBingo:\n earliestBingo = rightMost\n earliestBingoBoard = curBoard\n\n return earliestBingo, earliestBingoBoard\n\nwith open('Chal04.txt', 'r') as f:\n for i, x in enumerate(f.readlines()[2:]):\n if (i+1)%6 > 0:\n l = x.strip().split(' ')\n l = [int(i) for i in l if i]\n curBoard.append(l)\n else:\n earliestBingo, earliestBingoBoard = winCon(curBoard, earliestBingo, earliestBingoBoard)\n earliestBingo, earliestBingoBoard = winCon(list(map(list, zip(*curBoard))), earliestBingo, earliestBingoBoard)\n\n curBoard = []\n\n s = 0\n for i in earliestBingoBoard:\n for j in i:\n if j not in bingoNum[:earliestBingo+1]:\n s+=j\n\n print(s*bingoNum[earliestBingo])", "id": "6871068", "language": "Python", "matching_score": 3.6946375370025635, "max_stars_count": 0, "path": "Chal04/Chal04Part1.py" }, { "content": "bingoNum = [int(x) for x in open('Chal04.txt').readline().strip().split(',')]\ncurBoard = []\nearliestBingos = []\n\ndef winCon(curBoard, curEarliestBingo, curEarliestBingoBoard):\n curEarliestBingo = len(bingoNum)\n curEarliestBingoBoard = []\n\n for i in curBoard:\n for j in i:\n if j not in bingoNum:\n break\n \n rightMost = -1\n for j in i:\n if rightMost < bingoNum.index(j):\n rightMost = bingoNum.index(j)\n\n if rightMost < curEarliestBingo:\n curEarliestBingo = rightMost\n curEarliestBingoBoard = curBoard\n\n return curEarliestBingo, curEarliestBingoBoard\n\nwith open('Chal04.txt', 'r') as f:\n for i, x in enumerate(f.readlines()[2:]):\n if (i+1)%6 > 0:\n l = x.strip().split(' ')\n l = [int(i) for i in l if i]\n curBoard.append(l)\n else:\n curEarliestBingo, curEarliestBingoBoard = winCon(curBoard, 0, [])\n curEarliestBingo, curEarliestBingoBoard = winCon(list(map(list, zip(*curBoard))), curEarliestBingo, curEarliestBingoBoard)\n earliestBingos.append([curEarliestBingo, curEarliestBingoBoard])\n curBoard = []\n\n latestBingo = -1\n latestBingoBoard = []\n for x in earliestBingos:\n if x[0] > latestBingo:\n latestBingo = x[0]\n latestBingoBoard = x[1]\n\n s = 0\n for i in latestBingoBoard:\n for j in i:\n if j not in bingoNum[:latestBingo+1]:\n s+=j\n\n print(s*bingoNum[latestBingo])", "id": "11374668", "language": "Python", "matching_score": 0.016744360327720642, "max_stars_count": 0, "path": "Chal04/Chal04Part2.py" }, { "content": "with open(\"./Chal01.txt\", \"r\") as f:\n l = []\n s = 0\n\n before = 0\n after = 0\n increased = 0\n\n for i in f.readlines():\n s = int(i.split()[0])\n l.append(s)\n\n if len(l) == 3:\n after = sum(l)\n l.pop(0)\n\n if after > before and before != 0:\n increased += 1\n\n before = after\n \n print(increased)", "id": "6624034", "language": "Python", "matching_score": 1.6582098007202148, "max_stars_count": 0, "path": "Chal01/Chal01Part2.py" }, { "content": "with open(\"./Chal01.txt\", \"r\") as f:\n before = 0\n after = 0\n increased = 0\n \n for i in f.readlines():\n after = int(i.split()[0])\n if after > before and before != 0:\n increased += 1\n before = after\n\n print(increased)", "id": "10831975", "language": "Python", 
"matching_score": 0.3577178716659546, "max_stars_count": 0, "path": "Chal01/Chal01Part1.py" }, { "content": "with open('Chal08.txt','r') as f:\n sum = 0\n for i in f.readlines():\n i = i.strip().split(' | ')\n output = i[1].split()\n for i in output:\n if len(i) == 2 or len(i) == 4 or len(i) == 3 or len(i) == 7:\n sum += 1\n print(sum)\n", "id": "12633974", "language": "Python", "matching_score": 1.632588267326355, "max_stars_count": 0, "path": "Chal08/Chal08Part1.py" }, { "content": "with open('Chal08.txt','r') as f:\n su = 0\n for i in f.readlines():\n segments = ['','','','','','','','','','']\n i = i.strip().split(' | ')\n input = i[0].split()\n output = i[1].split()\n # compare 1 with 4, get possible middle and top left segments -- known = [1, 4, 7, 8]\n middleBottomSeg = ''\n for i in input:\n if len(i) == 2:\n segments[1] = ''.join(sorted(i))\n if len(i) == 4:\n segments[4] = ''.join(sorted(i))\n if len(i) == 3:\n segments[7] = ''.join(sorted(i))\n if len(i) == 7:\n segments[8] = ''.join(sorted(i))\n for i in segments[4]:\n if i not in segments[1]:\n middleBottomSeg += i\n # compare this possible middle and top left segments with len(5) words, if both of those are in the word, then that word is 5 -- known = [1, 4, 5, 7, 8]\n for i in input:\n if len(i) == 5:\n if all(s in i for s in middleBottomSeg):\n segments[5] = ''.join(sorted(i))\n # now compare 5 with 6, 0 and 9, find the differences, if there are 3 differences then the word is 0, otherwise if the word contains all of 1 then its 9\n # otherwise its 6 -- known = [0, 1, 4, 5, 6, 7, 8, 9]\n for i in input:\n if len(i) == 6:\n count = 0\n for j in i:\n if j not in segments[5]:\n count+=1\n \n if count == 2:\n segments[0] = ''.join(sorted(i))\n else:\n count = 0\n for j in segments[1]:\n if j in i:\n count += 1\n if count == 2:\n segments[9] = ''.join(sorted(i))\n else:\n segments[6] = ''.join(sorted(i))\n # now compare 4 with the other len(5) words, if theres 3 differences, then its 3, otherwise its 2 -- known = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n for i in input:\n if len(i) == 5 and ''.join(sorted(i)) not in segments:\n count = 0\n for j in i:\n if j not in segments[4]:\n count+=1\n if count == 3:\n segments[2] = ''.join(sorted(i))\n else:\n segments[3] = ''.join(sorted(i))\n # now get the number\n s = ''\n for i in output:\n if ''.join(sorted(i)) in segments:\n s += str(segments.index(''.join(sorted(i))))\n su+=int(s)\n\n print(su)\n\n\n", "id": "2739492", "language": "Python", "matching_score": 0.09088808298110962, "max_stars_count": 0, "path": "Chal08/Chal08Part2.py" }, { "content": "lant = []\nwith open('Chal06.txt','r') as f:\n for i in f.readlines():\n lant = i.strip().split(',')\n lant = list(map(int, lant))\n\nsim = 0\nwhile sim < 80:\n\n for i in range(len(lant)):\n if lant[i] >= 0:\n lant[i] -= 1\n\n if lant[i] == -1:\n lant[i] = 6\n lant.append(8)\n\n sim += 1\n print(sim)\n\nprint(len(lant))", "id": "2962393", "language": "Python", "matching_score": 1.3015962839126587, "max_stars_count": 0, "path": "Chal06/Chal06Part1.py" }, { "content": "from collections import Counter\ndic = Counter({-1:0, 0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0})\ndic.update(int(x) for x in open('Chal06.txt').read().strip().split(','))\n\nsim = 0\nwhile sim < 256:\n for i in dic:\n if i < 8:\n dic[i] = dic[i+1]\n if i == 7:\n dic[i+1] = dic[-1]\n\n dic[6] += dic[-1]\n dic[-1] = 0\n sim+=1\n\nprint(sum(dic.values()))", "id": "8861742", "language": "Python", "matching_score": 1.0570216178894043, "max_stars_count": 0, "path": "Chal06/Chal06Part2.py" 
}, { "content": "from z3 import *\ndef gen():\n inp = [x.strip().split() for x in open('Chal24.txt').readlines()]\n counter = 1\n majorCounter = 0\n l = []\n stack = []\n\n for i, x in enumerate(inp):\n if i % 18 == 0:\n if len(l) == 2:\n stack.append(l)\n majorCounter += 1\n l = []\n counter = 1\n\n if (counter == 6):\n l.append(x[2])\n\n if (counter == 16) and len(l) == 1:\n l.append(x[2])\n\n counter+=1\n\n counter = 0\n stack2 = []\n finalInputs = []\n while counter < 14:\n if int(stack[counter][0]) > 0:\n stack2.append([counter, stack[counter][1]])\n elif int(stack[counter][0]) < 0:\n popped_value = stack2.pop()\n finalInputs.append(f\"in{popped_value[0]} + {int(stack[counter][0])+int(popped_value[1])} == in{counter}\")\n\n counter+=1\n\n return finalInputs\n\n\nmaxSol = [0 for _ in range(14)]\nminSol = [0 for _ in range(14)]\ndef opti(s, ss, x, y):\n mmx = s.maximize(globals()[x])\n mmy = s.maximize(globals()[y])\n minx = ss.minimize(globals()[x])\n miny = ss.minimize(globals()[y])\n\n while s.check() == sat:\n maxSol[int(x.split('in')[1])] = str(mmx.value())\n maxSol[int(y.split('in')[1])] = str(mmy.value())\n\n while ss.check() == sat:\n minSol[int(x.split('in')[1])] = str(minx.value())\n minSol[int(y.split('in')[1])] = str(miny.value())\n\ndef main():\n finalInputs = gen()\n for z in finalInputs:\n s = Optimize()\n ss = Optimize()\n x = z.split(' + ')[0]\n y = z.split(' == ')[1]\n globals()[x], globals()[y] = Ints(f'{x}, {y}')\n s.set(priority='pareto')\n s.add(eval(z), globals()[x] <= 9, globals()[y] <= 9, globals()[x] >= 1, globals()[y] >= 1)\n ss.set(priority='pareto')\n ss.add(eval(z), globals()[x] <= 9, globals()[y] <= 9, globals()[x] >= 1, globals()[y] >= 1)\n\n opti(s, ss, x, y)\n \n print(''.join(maxSol), ''.join(minSol))\n\nmain()", "id": "12149797", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "Chal24/Chal24Part1and2.py" }, { "content": "with open(\"./Chal02.txt\", \"r\") as f:\n h = 0\n d = 0\n\n for i in f.readlines():\n command, amount = i.split()[0], int(i.split()[1])\n if command == \"forward\":\n h += amount\n elif command == \"down\":\n d += amount\n else:\n d -= amount\n \n print(h*d)", "id": "11418284", "language": "Python", "matching_score": 0.0795605257153511, "max_stars_count": 0, "path": "Chal02/Chal02Part1.py" }, { "content": "with open(\"Chal03.txt\", 'r') as f:\n bitCount = 0\n m = []\n l = []\n for x in f.readlines():\n x = x.split()[0]\n m.append(x)\n l.append(x)\n\n while bitCount < 12:\n zeroesCountj = 0\n onesCountj = 0\n\n zeroesCountk = 0\n onesCountk = 0\n\n for j in l:\n if j[bitCount] == \"1\":\n onesCountj += 1\n if j[bitCount] == \"0\":\n zeroesCountj += 1\n\n for k in m:\n if k[bitCount] == \"1\":\n onesCountk += 1\n if k[bitCount] == \"0\":\n zeroesCountk += 1\n\n if zeroesCountj > onesCountj:\n # zeroes more common, ones least common\n if len(l) > 1:\n l = list(filter(lambda a: a[bitCount] != '0', l))\n\n elif onesCountj >= zeroesCountj:\n if len(l) > 1:\n l = list(filter(lambda a: a[bitCount] != '1', l))\n\n if zeroesCountk > onesCountk:\n # Ones more common, ones least common\n if len(m) > 1:\n m = list(filter(lambda a: a[bitCount] != '1', m))\n\n elif onesCountk >= zeroesCountk:\n if len(m) > 1:\n m = list(filter(lambda a: a[bitCount] != '0', m))\n\n bitCount += 1\n\n print(int(l[0], 2) * int(m[0], 2))", "id": "9196935", "language": "Python", "matching_score": 1.7477680444717407, "max_stars_count": 0, "path": "Chal03/Chal03Part2.py" }, { "content": "with open(\"Chal03.txt\", 'r') as f:\n lZeroes = [0]*12\n 
lOnes = [0]*12\n for x in f.readlines():\n x = x.split()[0]\n for j, i in enumerate(x):\n if i == \"1\":\n lOnes[j] += 1\n if i == \"0\":\n lZeroes[j] += 1\n \n sCommon = \"\"\n sLeast = \"\"\n for k in range(len(lZeroes)):\n if lZeroes[k] > lOnes[k]:\n sCommon += \"0\"\n sLeast += \"1\"\n elif lOnes[k] > lZeroes[k]:\n sCommon += \"1\"\n sLeast += \"0\"\n\n print(int(sCommon, 2) * int(sLeast, 2))\n \n", "id": "5398635", "language": "Python", "matching_score": 1.717684268951416, "max_stars_count": 0, "path": "Chal03/Chal03Part1.py" } ]
1.301596
nextflow
[ { "content": "from ucloud.core import auth\n\n\ndef main():\n cred = auth.Credential(\n \"<EMAIL>20854146120\",\n \"<KEY>\",\n )\n d = {\"Action\": \"DescribeUHostInstance\", \"Region\": \"cn-bj2\", \"Limit\": 10}\n print(cred.verify_ac(d))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3555371", "language": "Python", "matching_score": 0.5762367248535156, "max_stars_count": 37, "path": "examples/auth/main.py" }, { "content": "import typing\nimport logging\nimport json as json_mod\n\nfrom ucloud.core import exc\nfrom ucloud.core.transport import utils\nfrom ucloud.core.utils.compat import str\n\nlogger = logging.getLogger(__name__)\n\n\nclass Request:\n def __init__(\n self,\n url: str,\n method: str = \"GET\",\n params: dict = None,\n data: dict = None,\n json: dict = None,\n headers: dict = None,\n **kwargs\n ):\n self.url = url\n self.method = method\n self.params = params\n self.data = data\n self.json = json\n self.headers = headers\n self.request_time = 0\n\n def payload(self):\n payload = (self.params or {}).copy()\n payload.update(self.data or {})\n payload.update(self.json or {})\n return payload\n\n\nREQUEST_UUID_HEADER_KEY = \"X-UCLOUD-REQUEST-UUID\"\n\n\nclass Response:\n def __init__(\n self,\n url: str,\n method: str,\n request: Request = None,\n status_code: int = None,\n reason: str = None,\n headers: dict = None,\n content: bytes = None,\n encoding: str = None,\n **kwargs\n ):\n self.url = url\n self.method = method\n self.request = request\n self.status_code = status_code\n self.reason = reason\n self.content = content\n self.encoding = encoding\n self.response_time = 0\n self.headers = headers or {}\n self.request_uuid = self.headers.get(REQUEST_UUID_HEADER_KEY)\n\n def json(self, **kwargs) -> typing.Optional[dict]:\n \"\"\" json will return the bytes of content\n \"\"\"\n if not self.content:\n return None\n\n try:\n return self._decode_json(**kwargs)\n except Exception as e:\n raise exc.InvalidResponseException(\n self.content, str(e), request_uuid=self.request_uuid\n )\n\n @property\n def text(self):\n \"\"\" text will return the unicode string of content,\n see `requests.Response.text`\n \"\"\"\n if not self.content:\n return str(\"\")\n\n # Decode unicode from given encoding.\n try:\n content = str(self.content, self.encoding, errors=\"replace\")\n except (LookupError, TypeError):\n content = str(self.content, errors=\"replace\")\n return content\n\n def _decode_json(self, **kwargs):\n encoding = utils.guess_json_utf(self.content)\n if encoding is not None:\n try:\n return json_mod.loads(self.content.decode(encoding), **kwargs)\n except UnicodeDecodeError:\n pass\n return json_mod.loads(self.text, **kwargs)\n\n\nclass SSLOption:\n def __init__(\n self,\n ssl_verify: bool = True,\n ssl_cacert: str = None,\n ssl_cert: str = None,\n ssl_key: str = None,\n ):\n self.ssl_verify = ssl_verify\n self.ssl_cacert = ssl_cacert\n self.ssl_cert = ssl_cert\n self.ssl_key = ssl_key\n\n\nclass Transport:\n \"\"\" the abstract class of transport implementation \"\"\"\n\n def send(self, req: Request, **options: typing.Any) -> Response:\n raise NotImplementedError\n", "id": "141632", "language": "Python", "matching_score": 1.8570036888122559, "max_stars_count": 0, "path": "ucloud/core/transport/http.py" }, { "content": "import json\nimport uuid\n\nimport pytest\nimport logging\nimport requests_mock\nfrom collections import Counter\n\nfrom tests.test_unit.test_core.consts import TEST_URL\nfrom ucloud.core import exc\nfrom ucloud.core.transport import (\n RequestsTransport,\n 
Request,\n Response,\n utils,\n http,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(name=\"transport\", scope=\"function\", autouse=True)\ndef transport_factory():\n return RequestsTransport()\n\n\n@pytest.mark.parametrize(\n argnames=(\"status_code\", \"content\", \"expect\", \"expect_exc\", \"retryable\"),\n argvalues=(\n (\n 200,\n '{\"Action\": \"Mock\", \"RetCode\": 0}',\n {\"Action\": \"Mock\", \"RetCode\": 0},\n None,\n False,\n ),\n (500, \"{}\", None, exc.HTTPStatusException, False),\n (429, \"{}\", None, exc.HTTPStatusException, True),\n (500, \"x\", None, exc.HTTPStatusException, False),\n (200, \"x\", None, exc.InvalidResponseException, False),\n ),\n)\ndef test_transport(\n transport, status_code, content, expect, expect_exc, retryable\n):\n with requests_mock.Mocker() as m:\n m.post(TEST_URL, text=content, status_code=status_code)\n\n got_exc = None\n try:\n resp = transport.send(Request(url=TEST_URL, method=\"post\", json={}))\n assert resp.json() == expect\n except Exception as e:\n got_exc = e\n\n if expect_exc:\n assert str(got_exc)\n assert got_exc.retryable == retryable\n assert isinstance(got_exc, expect_exc)\n\n\ndef test_transport_handler(transport):\n req_key, resp_key, exc_key = \"req\", \"resp\", \"exc\"\n counter = Counter({req_key: 0, resp_key: 0, exc_key: 0})\n\n def request_handler(r):\n counter[req_key] += 1\n return r\n\n def response_handler(r):\n counter[resp_key] += 1\n return r\n\n def exception_handler(r):\n counter[exc_key] += 1\n return r\n\n transport.middleware.request(handler=request_handler)\n transport.middleware.response(handler=response_handler)\n transport.middleware.exception(handler=exception_handler)\n\n expect = {\"foo\": \"bar\"}\n req = Request(url=TEST_URL, method=\"post\", json=expect)\n\n with requests_mock.Mocker() as m:\n request_uuid = str(uuid.uuid4())\n m.post(\n TEST_URL,\n text=json.dumps(expect),\n status_code=200,\n headers={http.REQUEST_UUID_HEADER_KEY: request_uuid},\n )\n resp = transport.send(req)\n assert resp.text\n assert resp.json() == expect\n assert resp.request_uuid == request_uuid\n\n with pytest.raises(Exception):\n transport.send(Request(url=\"/\"))\n\n assert counter[req_key] == 2\n assert counter[resp_key] == 1\n assert counter[exc_key] == 1\n\n\ndef test_guess_json_utf():\n encodings = [\n \"utf-32\",\n \"utf-8-sig\",\n \"utf-16\",\n \"utf-8\",\n \"utf-16-be\",\n \"utf-16-le\",\n \"utf-32-be\",\n \"utf-32-le\",\n ]\n for e in encodings:\n s = json.dumps(\"表意字符\").encode(e)\n assert utils.guess_json_utf(s) == e\n\n\ndef test_request_methods():\n req = Request(\n TEST_URL, data={\"foo\": 42}, json={\"bar\": 42}, params={\"q\": \"search\"}\n )\n assert req.payload() == {\"foo\": 42, \"bar\": 42, \"q\": \"search\"}\n\n\ndef test_response_methods():\n r = Response(TEST_URL, \"post\")\n assert not r.text\n assert r.json() is None\n\n r = Response(TEST_URL, \"post\", content=b\"\\xd6\", encoding=\"utf-8\")\n with pytest.raises(exc.InvalidResponseException):\n assert r.json() is None\n", "id": "7151504", "language": "Python", "matching_score": 4.013066291809082, "max_stars_count": 37, "path": "tests/test_unit/test_core/test_transport.py" }, { "content": "import json\nimport uuid\n\nimport pytest\nimport logging\nimport collections\nimport requests_mock\n\nfrom ucloud.client import Client\nfrom ucloud.core import exc\nfrom ucloud.core.transport import RequestsTransport, http\nfrom ucloud.testing.mock import MockedTransport\n\nfrom tests.test_unit.test_core import consts\n\nlogger = 
logging.getLogger(__name__)\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef client():\n return Client(\n {\n \"region\": \"cn-bj2\",\n \"public_key\": \"foo\",\n \"private_key\": \"foo\",\n \"timeout\": 10,\n \"max_retries\": 3,\n \"ssl_verify\": False,\n }\n )\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef transport():\n return MockedTransport()\n\n\ndef test_client_invoke(client):\n expected = {\"RetCode\": 0, \"Action\": \"Foo\"}\n with requests_mock.Mocker() as m:\n m.post(\n consts.TEST_URL,\n text=json.dumps(expected),\n headers={http.REQUEST_UUID_HEADER_KEY: str(uuid.uuid4())},\n )\n assert client.invoke(\"Foo\") == expected\n\n\ndef test_client_invoke_code_error(client):\n expected = {\"RetCode\": 171, \"Action\": \"Foo\", \"Message\": \"签名错误\"}\n\n with requests_mock.Mocker() as m:\n m.post(\n consts.TEST_URL,\n text=json.dumps(expected),\n headers={http.REQUEST_UUID_HEADER_KEY: str(uuid.uuid4())},\n )\n\n with pytest.raises(exc.RetCodeException):\n try:\n client.invoke(\"Foo\")\n except exc.RetCodeException as e:\n assert e.retryable is False\n assert e.json() == {\n \"RetCode\": 171,\n \"Action\": \"Foo\",\n \"Message\": \"签名错误\",\n }\n raise e\n\n\ndef test_client_invoke_with_retryable_error(client):\n # RetCodeError is retryable when code is greater than 2000\n with requests_mock.Mocker() as m:\n m.post(\n consts.TEST_URL,\n text=json.dumps({\"RetCode\": 10000, \"Action\": \"Foo\"}),\n )\n with pytest.raises(exc.RetCodeException):\n client.invoke(\"Foo\")\n\n\ndef test_client_invoke_with_unexpected_error(client):\n def raise_error(_):\n raise ValueError(\"temporary error\")\n\n transport = RequestsTransport()\n transport.middleware.request(raise_error)\n client.transport = transport\n\n with pytest.raises(ValueError):\n client.invoke(\"Foo\")\n\n\ndef test_client_try_import(client):\n for name in dir(client):\n if name.startswith(\"_\") or name in [\n \"invoke\",\n \"logged_request_handler\",\n \"logged_response_handler\",\n \"logged_exception_handler\",\n ]:\n continue\n\n client_factory = getattr(client, name)\n if isinstance(client_factory, collections.Callable):\n print(client_factory())\n", "id": "1777049", "language": "Python", "matching_score": 2.244948387145996, "max_stars_count": 37, "path": "tests/test_unit/test_core/test_client.py" }, { "content": "TEST_URL = \"https://api.ucloud.cn/\"\n", "id": "6033251", "language": "Python", "matching_score": 0, "max_stars_count": 37, "path": "tests/test_unit/test_core/consts.py" }, { "content": "\"\"\" Code is generated by ucloud-model, DO NOT EDIT IT. 
\"\"\"\n\nfrom ucloud.core.typesystem import schema, fields\n\n\nclass UHostImageSetSchema(schema.ResponseSchema):\n \"\"\" UHostImageSet - DescribeImage\n \"\"\"\n\n fields = {\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"Features\": fields.List(fields.Str()),\n \"FuncType\": fields.Str(required=False, load_from=\"FuncType\"),\n \"ImageDescription\": fields.Str(\n required=False, load_from=\"ImageDescription\"\n ),\n \"ImageId\": fields.Str(required=False, load_from=\"ImageId\"),\n \"ImageName\": fields.Str(required=False, load_from=\"ImageName\"),\n \"ImageSize\": fields.Int(required=False, load_from=\"ImageSize\"),\n \"ImageType\": fields.Str(required=False, load_from=\"ImageType\"),\n \"IntegratedSoftware\": fields.Str(\n required=False, load_from=\"IntegratedSoftware\"\n ),\n \"Links\": fields.Str(required=False, load_from=\"Links\"),\n \"MinimalCPU\": fields.Str(required=False, load_from=\"MinimalCPU\"),\n \"OsName\": fields.Str(required=False, load_from=\"OsName\"),\n \"OsType\": fields.Str(required=False, load_from=\"OsType\"),\n \"State\": fields.Str(required=False, load_from=\"State\"),\n \"Vendor\": fields.Str(required=False, load_from=\"Vendor\"),\n \"Zone\": fields.Str(required=False, load_from=\"Zone\"),\n }\n\n\nclass SpreadInfoSchema(schema.ResponseSchema):\n \"\"\" SpreadInfo - 每个可用区中硬件隔离组信息\n \"\"\"\n\n fields = {\n \"UHostCount\": fields.Int(required=False, load_from=\"UHostCount\"),\n \"Zone\": fields.Str(required=False, load_from=\"Zone\"),\n }\n\n\nclass IsolationGroupSchema(schema.ResponseSchema):\n \"\"\" IsolationGroup - 硬件隔离组信息\n \"\"\"\n\n fields = {\n \"GroupId\": fields.Str(required=False, load_from=\"GroupId\"),\n \"GroupName\": fields.Str(required=False, load_from=\"GroupName\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"SpreadInfoSet\": fields.List(SpreadInfoSchema()),\n }\n\n\nclass UHostDiskSetSchema(schema.ResponseSchema):\n \"\"\" UHostDiskSet - DescribeUHostInstance\n \"\"\"\n\n fields = {\n \"BackupType\": fields.Str(required=False, load_from=\"BackupType\"),\n \"DiskId\": fields.Str(required=False, load_from=\"DiskId\"),\n \"DiskType\": fields.Str(required=True, load_from=\"DiskType\"),\n \"Drive\": fields.Str(required=False, load_from=\"Drive\"),\n \"Encrypted\": fields.Bool(required=False, load_from=\"Encrypted\"),\n \"IsBoot\": fields.Str(required=True, load_from=\"IsBoot\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"Size\": fields.Int(required=False, load_from=\"Size\"),\n \"Type\": fields.Str(required=False, load_from=\"Type\"),\n }\n\n\nclass UHostIPSetSchema(schema.ResponseSchema):\n \"\"\" UHostIPSet - DescribeUHostInstance\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=False, load_from=\"Bandwidth\"),\n \"Default\": fields.Str(required=True, load_from=\"Default\"),\n \"IP\": fields.Str(required=False, load_from=\"IP\"),\n \"IPId\": fields.Str(required=False, load_from=\"IPId\"),\n \"Mac\": fields.Str(required=True, load_from=\"Mac\"),\n \"SubnetId\": fields.Str(required=False, load_from=\"SubnetId\"),\n \"Type\": fields.Str(required=False, load_from=\"Type\"),\n \"VPCId\": fields.Str(required=False, load_from=\"VPCId\"),\n \"Weight\": fields.Int(required=True, load_from=\"Weight\"),\n }\n\n\nclass UHostInstanceSetSchema(schema.ResponseSchema):\n \"\"\" UHostInstanceSet - DescribeUHostInstance\n \"\"\"\n\n fields = {\n \"AutoRenew\": fields.Str(required=False, load_from=\"AutoRenew\"),\n \"BasicImageId\": fields.Str(required=False, 
load_from=\"BasicImageId\"),\n \"BasicImageName\": fields.Str(\n required=False, load_from=\"BasicImageName\"\n ),\n \"BootDiskState\": fields.Str(required=False, load_from=\"BootDiskState\"),\n \"CPU\": fields.Int(required=False, load_from=\"CPU\"),\n \"ChargeType\": fields.Str(required=False, load_from=\"ChargeType\"),\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"DiskSet\": fields.List(UHostDiskSetSchema()),\n \"ExpireTime\": fields.Int(required=False, load_from=\"ExpireTime\"),\n \"GPU\": fields.Int(required=False, load_from=\"GPU\"),\n \"HostType\": fields.Str(required=False, load_from=\"HostType\"),\n \"HotplugFeature\": fields.Bool(\n required=False, load_from=\"HotplugFeature\"\n ),\n \"IPSet\": fields.List(UHostIPSetSchema()),\n \"ImageId\": fields.Str(required=False, load_from=\"ImageId\"),\n \"IsolationGroup\": fields.Str(\n required=False, load_from=\"IsolationGroup\"\n ),\n \"LifeCycle\": fields.Str(required=False, load_from=\"LifeCycle\"),\n \"MachineType\": fields.Str(required=False, load_from=\"MachineType\"),\n \"Memory\": fields.Int(required=False, load_from=\"Memory\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"NetCapability\": fields.Str(required=False, load_from=\"NetCapability\"),\n \"NetworkState\": fields.Str(required=False, load_from=\"NetworkState\"),\n \"OsName\": fields.Str(required=False, load_from=\"OsName\"),\n \"OsType\": fields.Str(required=False, load_from=\"OsType\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"State\": fields.Str(required=False, load_from=\"State\"),\n \"StorageType\": fields.Str(required=False, load_from=\"StorageType\"),\n \"SubnetType\": fields.Str(required=False, load_from=\"SubnetType\"),\n \"Tag\": fields.Str(required=False, load_from=\"Tag\"),\n \"TimemachineFeature\": fields.Str(\n required=False, load_from=\"TimemachineFeature\"\n ),\n \"TotalDiskSpace\": fields.Int(\n required=False, load_from=\"TotalDiskSpace\"\n ),\n \"UHostId\": fields.Str(required=False, load_from=\"UHostId\"),\n \"UHostType\": fields.Str(required=False, load_from=\"UHostType\"),\n \"Zone\": fields.Str(required=False, load_from=\"Zone\"),\n }\n\n\nclass UHostSnapshotSetSchema(schema.ResponseSchema):\n \"\"\" UHostSnapshotSet - DescribeUHostInstanceSnapshot\n \"\"\"\n\n fields = {\n \"SnapshotName\": fields.Str(required=False, load_from=\"SnapshotName\"),\n \"SnapshotState\": fields.Str(required=False, load_from=\"SnapshotState\"),\n \"SnapshotTime\": fields.Str(required=False, load_from=\"SnapshotTime\"),\n }\n\n\nclass UHostTagSetSchema(schema.ResponseSchema):\n \"\"\" UHostTagSet - DescribeUHostTags\n \"\"\"\n\n fields = {\n \"Tag\": fields.Str(required=False, load_from=\"Tag\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n \"Zone\": fields.Str(required=False, load_from=\"Zone\"),\n }\n\n\nclass UHostPriceSetSchema(schema.ResponseSchema):\n \"\"\" UHostPriceSet - 主机价格\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=True, load_from=\"ChargeType\"),\n \"Price\": fields.Float(required=True, load_from=\"Price\"),\n }\n", "id": "5353985", "language": "Python", "matching_score": 3.8191463947296143, "max_stars_count": 1, "path": "ucloud/services/uhost/schemas/models.py" }, { "content": "\"\"\" Code is generated by ucloud-model, DO NOT EDIT IT. 
\"\"\"\n\nfrom ucloud.core.typesystem import schema, fields\n\n\nclass UnetEIPAddrSetSchema(schema.ResponseSchema):\n \"\"\" UnetEIPAddrSet - DescribeEIP\n \"\"\"\n\n fields = {\n \"IP\": fields.Str(required=False, load_from=\"IP\"),\n \"OperatorName\": fields.Str(required=False, load_from=\"OperatorName\"),\n }\n\n\nclass UnetAllocateEIPSetSchema(schema.ResponseSchema):\n \"\"\" UnetAllocateEIPSet - AllocateEIP\n \"\"\"\n\n fields = {\n \"EIPAddr\": fields.List(UnetEIPAddrSetSchema()),\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n }\n\n\nclass VIPSetSchema(schema.ResponseSchema):\n \"\"\" VIPSet - VIPSet\n \"\"\"\n\n fields = {\n \"VIP\": fields.Str(required=False, load_from=\"VIP\"),\n \"VIPId\": fields.Str(required=False, load_from=\"VIPId\"),\n \"VPCId\": fields.Str(required=False, load_from=\"VPCId\"),\n }\n\n\nclass EIPAddrSetSchema(schema.ResponseSchema):\n \"\"\" EIPAddrSet - DescribeShareBandwidth\n \"\"\"\n\n fields = {\n \"IP\": fields.Str(required=False, load_from=\"IP\"),\n \"OperatorName\": fields.Str(required=False, load_from=\"OperatorName\"),\n }\n\n\nclass UnetBandwidthPackageSetSchema(schema.ResponseSchema):\n \"\"\" UnetBandwidthPackageSet - DescribeBandwidthPackage\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=False, load_from=\"Bandwidth\"),\n \"BandwidthPackageId\": fields.Str(\n required=False, load_from=\"BandwidthPackageId\"\n ),\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"DisableTime\": fields.Int(required=False, load_from=\"DisableTime\"),\n \"EIPAddr\": fields.List(EIPAddrSetSchema()),\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n \"EnableTime\": fields.Int(required=False, load_from=\"EnableTime\"),\n }\n\n\nclass UnetBandwidthUsageEIPSetSchema(schema.ResponseSchema):\n \"\"\" UnetBandwidthUsageEIPSet - DescribeBandwidthUsage\n \"\"\"\n\n fields = {\n \"CurBandwidth\": fields.Float(required=False, load_from=\"CurBandwidth\"),\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n }\n\n\nclass ShareBandwidthSetSchema(schema.ResponseSchema):\n \"\"\" ShareBandwidthSet - DescribeEIP\n \"\"\"\n\n fields = {\n \"ShareBandwidth\": fields.Int(\n required=False, load_from=\"ShareBandwidth\"\n ),\n \"ShareBandwidthId\": fields.Str(\n required=False, load_from=\"ShareBandwidthId\"\n ),\n \"ShareBandwidthName\": fields.Str(\n required=False, load_from=\"ShareBandwidthName\"\n ),\n }\n\n\nclass UnetEIPResourceSetSchema(schema.ResponseSchema):\n \"\"\" UnetEIPResourceSet - DescribeEIP\n \"\"\"\n\n fields = {\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n \"ResourceId\": fields.Str(required=False, load_from=\"ResourceId\"),\n \"ResourceName\": fields.Str(required=False, load_from=\"ResourceName\"),\n \"ResourceType\": fields.Str(required=False, load_from=\"ResourceType\"),\n \"SubResourceId\": fields.Str(required=False, load_from=\"SubResourceId\"),\n \"SubResourceName\": fields.Str(\n required=False, load_from=\"SubResourceName\"\n ),\n \"SubResourceType\": fields.Str(\n required=False, load_from=\"SubResourceType\"\n ),\n }\n\n\nclass UnetEIPSetSchema(schema.ResponseSchema):\n \"\"\" UnetEIPSet - DescribeEIP\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=False, load_from=\"Bandwidth\"),\n \"BandwidthType\": fields.Int(required=False, load_from=\"BandwidthType\"),\n \"ChargeType\": fields.Str(required=False, load_from=\"ChargeType\"),\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"EIPAddr\": 
fields.List(UnetEIPAddrSetSchema()),\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n \"Expire\": fields.Bool(required=False, load_from=\"Expire\"),\n \"ExpireTime\": fields.Int(required=False, load_from=\"ExpireTime\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"PayMode\": fields.Str(required=False, load_from=\"PayMode\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"Resource\": UnetEIPResourceSetSchema(),\n \"ShareBandwidthSet\": ShareBandwidthSetSchema(),\n \"Status\": fields.Str(required=False, load_from=\"Status\"),\n \"Tag\": fields.Str(required=False, load_from=\"Tag\"),\n \"Weight\": fields.Int(required=False, load_from=\"Weight\"),\n }\n\n\nclass FirewallRuleSetSchema(schema.ResponseSchema):\n \"\"\" FirewallRuleSet - DescribeFirewall\n \"\"\"\n\n fields = {\n \"DstPort\": fields.Str(required=False, load_from=\"DstPort\"),\n \"Priority\": fields.Str(required=False, load_from=\"Priority\"),\n \"ProtocolType\": fields.Str(required=False, load_from=\"ProtocolType\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"RuleAction\": fields.Str(required=False, load_from=\"RuleAction\"),\n \"SrcIP\": fields.Str(required=False, load_from=\"SrcIP\"),\n }\n\n\nclass FirewallDataSetSchema(schema.ResponseSchema):\n \"\"\" FirewallDataSet - DescribeFirewall\n \"\"\"\n\n fields = {\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"FWId\": fields.Str(required=True, load_from=\"FWId\"),\n \"GroupId\": fields.Str(required=True, load_from=\"GroupId\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"ResourceCount\": fields.Int(required=False, load_from=\"ResourceCount\"),\n \"Rule\": fields.List(FirewallRuleSetSchema()),\n \"Tag\": fields.Str(required=False, load_from=\"Tag\"),\n \"Type\": fields.Str(required=False, load_from=\"Type\"),\n }\n\n\nclass ResourceSetSchema(schema.ResponseSchema):\n \"\"\" ResourceSet - 资源信息\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"PrivateIP\": fields.Str(required=False, load_from=\"PrivateIP\"),\n \"Remark\": fields.Str(required=False, load_from=\"Remark\"),\n \"ResourceID\": fields.Str(required=False, load_from=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=False, load_from=\"ResourceType\"),\n \"Status\": fields.Int(required=False, load_from=\"Status\"),\n \"Tag\": fields.Str(required=False, load_from=\"Tag\"),\n \"Zone\": fields.Int(required=False, load_from=\"Zone\"),\n }\n\n\nclass EIPSetDataSchema(schema.ResponseSchema):\n \"\"\" EIPSetData - describeShareBandwidth\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=False, load_from=\"Bandwidth\"),\n \"EIPAddr\": fields.List(EIPAddrSetSchema()),\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n }\n\n\nclass UnetShareBandwidthSetSchema(schema.ResponseSchema):\n \"\"\" UnetShareBandwidthSet - DescribeShareBandwidth\n \"\"\"\n\n fields = {\n \"BandwidthGuarantee\": fields.Int(\n required=False, load_from=\"BandwidthGuarantee\"\n ),\n \"ChargeType\": fields.Str(required=False, load_from=\"ChargeType\"),\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"EIPSet\": fields.List(EIPSetDataSchema()),\n \"ExpireTime\": fields.Int(required=False, load_from=\"ExpireTime\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"PostPayStartTime\": fields.Int(\n required=False, load_from=\"PostPayStartTime\"\n ),\n \"ShareBandwidth\": 
fields.Int(\n required=False, load_from=\"ShareBandwidth\"\n ),\n \"ShareBandwidthId\": fields.Str(\n required=False, load_from=\"ShareBandwidthId\"\n ),\n }\n\n\nclass VIPDetailSetSchema(schema.ResponseSchema):\n \"\"\" VIPDetailSet - VIPDetailSet\n \"\"\"\n\n fields = {\n \"CreateTime\": fields.Int(required=False, load_from=\"CreateTime\"),\n \"Name\": fields.Str(required=False, load_from=\"Name\"),\n \"RealIp\": fields.Str(required=False, load_from=\"RealIp\"),\n \"SubnetId\": fields.Str(required=False, load_from=\"SubnetId\"),\n \"VIP\": fields.Str(required=False, load_from=\"VIP\"),\n \"VIPId\": fields.Str(required=False, load_from=\"VIPId\"),\n \"VPCId\": fields.Str(required=False, load_from=\"VPCId\"),\n \"Zone\": fields.Str(required=False, load_from=\"Zone\"),\n }\n\n\nclass EIPPayModeSetSchema(schema.ResponseSchema):\n \"\"\" EIPPayModeSet - GetEIPPayModeEIP\n \"\"\"\n\n fields = {\n \"EIPId\": fields.Str(required=False, load_from=\"EIPId\"),\n \"EIPPayMode\": fields.Str(required=False, load_from=\"EIPPayMode\"),\n }\n\n\nclass EIPPriceDetailSetSchema(schema.ResponseSchema):\n \"\"\" EIPPriceDetailSet - GetEIPPrice\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=False, load_from=\"ChargeType\"),\n \"Price\": fields.Float(required=False, load_from=\"Price\"),\n \"PurchaseValue\": fields.Int(required=False, load_from=\"PurchaseValue\"),\n }\n", "id": "11152027", "language": "Python", "matching_score": 1.3277466297149658, "max_stars_count": 1, "path": "ucloud/services/unet/schemas/models.py" }, { "content": "import base64\nimport typing\nimport collections\n\nfrom ucloud.core.typesystem import abstract\nfrom ucloud.core.exc import ValidationException\nfrom ucloud.core.utils.compat import str\n\n\nclass List(abstract.Field):\n \"\"\" array param is the custom field to parse custom param such as:\n\n - IP.N\n - UDisk.N.Size\n - NetInterface.N.EIP.Bandwidth\n \"\"\"\n\n def __init__(\n self,\n item: typing.Union[abstract.Field, abstract.Schema],\n default=list,\n **kwargs\n ):\n super(List, self).__init__(default=default, **kwargs)\n self.item = item\n\n def dumps(self, value, name=None, **kwargs):\n if not isinstance(value, collections.Iterable):\n raise ValidationException(\n \"invalid field {}, expect list, got {}\".format(\n name, type(value)\n )\n )\n\n errors = []\n values = []\n for each in value:\n try:\n v = self.item.dumps(each)\n except ValidationException as e:\n errors.extend(e.errors)\n else:\n values.append(v)\n\n if len(errors) > 0:\n raise ValidationException(errors)\n\n return values\n\n def loads(self, value, name=None, **kwargs):\n if not isinstance(value, collections.Iterable):\n raise ValidationException(\n \"invalid field {}, expect list, got {}\".format(\n name, type(value)\n )\n )\n\n errors = []\n values = []\n for each in value:\n try:\n v = self.item.loads(each)\n except ValidationException as e:\n errors.extend(e.errors)\n else:\n values.append(v)\n\n if len(errors) > 0:\n raise ValidationException(errors)\n\n return values\n\n\nclass Str(abstract.Field):\n def dumps(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def loads(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def _convert(self, value, name=None):\n if self.strict and not isinstance(value, str):\n self.fail(name, \"str\", type(value))\n\n return str(value)\n\n\nclass Base64(Str):\n def dumps(self, value, name=None, **kwargs):\n s = super(Base64, self).dumps(value, name)\n return base64.b64encode(s.encode()).decode()\n\n def 
loads(self, value, name=None, **kwargs):\n s = super(Base64, self).loads(value, name)\n return base64.b64decode(s.encode()).decode()\n\n\nclass Int(abstract.Field):\n def dumps(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def loads(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def _convert(self, value, name=None):\n if self.strict and not isinstance(value, int):\n self.fail(name, \"int\", type(value))\n\n try:\n return int(value)\n except ValueError:\n self.fail(name, \"int\", type(value))\n\n\nclass Float(abstract.Field):\n def dumps(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def loads(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def _convert(self, value, name=None):\n if self.strict and not isinstance(value, float):\n self.fail(name, \"float\", type(value))\n\n try:\n return float(value)\n except ValueError:\n self.fail(name, \"float\", type(value))\n\n\nclass Bool(abstract.Field):\n def dumps(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def loads(self, value, name=None, **kwargs):\n return self._convert(value, name)\n\n def _convert(self, value, name=None):\n if self.strict and not isinstance(value, bool):\n self.fail(name, \"bool\", type(value))\n\n if value == \"true\" or value is True:\n return True\n\n if value == \"false\" or value is False:\n return False\n\n self.fail(name, \"bool\", type(value))\n", "id": "6050961", "language": "Python", "matching_score": 2.0805370807647705, "max_stars_count": 1, "path": "ucloud/core/typesystem/fields.py" }, { "content": "\"\"\" Code is generated by ucloud-model, DO NOT EDIT IT. \"\"\"\n\n\nfrom ucloud.core.typesystem import schema, fields\nfrom ucloud.services.ucloudstack.schemas import models\n\n\"\"\" UCloudStack API Schema\n\"\"\"\n\n\n\"\"\"\nAPI: AllocateEIP\n\n申请外网IP\n\"\"\"\n\n\nclass AllocateEIPRequestSchema(schema.RequestSchema):\n \"\"\" AllocateEIP - 申请外网IP\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=True, dump_to=\"Bandwidth\"),\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"IP\": fields.Str(required=False, dump_to=\"IP\"),\n \"IPVersion\": fields.Str(required=False, dump_to=\"IPVersion\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"OperatorName\": fields.Str(required=True, dump_to=\"OperatorName\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass AllocateEIPResponseSchema(schema.ResponseSchema):\n \"\"\" AllocateEIP - 申请外网IP\n \"\"\"\n\n fields = {\n \"EIPID\": fields.Str(required=True, load_from=\"EIPID\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: AttachDisk\n\n绑定硬盘\n\"\"\"\n\n\nclass AttachDiskRequestSchema(schema.RequestSchema):\n \"\"\" AttachDisk - 绑定硬盘\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass AttachDiskResponseSchema(schema.ResponseSchema):\n \"\"\" AttachDisk - 绑定硬盘\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: 
AttachNIC\n\n绑定UCloudStack网卡\n\"\"\"\n\n\nclass AttachNICRequestSchema(schema.RequestSchema):\n \"\"\" AttachNIC - 绑定UCloudStack网卡\n \"\"\"\n\n fields = {\n \"NICID\": fields.Str(required=True, dump_to=\"NICID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass AttachNICResponseSchema(schema.ResponseSchema):\n \"\"\" AttachNIC - 绑定UCloudStack网卡\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: BindAlarmTemplate\n\n绑定告警模板\n\"\"\"\n\n\nclass BindAlarmTemplateRequestSchema(schema.RequestSchema):\n \"\"\" BindAlarmTemplate - 绑定告警模板\n \"\"\"\n\n fields = {\n \"AlarmTemplateID\": fields.Str(required=True, dump_to=\"AlarmTemplateID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceIDs\": fields.List(fields.Str()),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass BindAlarmTemplateResponseSchema(schema.ResponseSchema):\n \"\"\" BindAlarmTemplate - 绑定告警模板\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: BindEIP\n\n绑定外网 IP\n\"\"\"\n\n\nclass BindEIPRequestSchema(schema.RequestSchema):\n \"\"\" BindEIP - 绑定外网 IP\n \"\"\"\n\n fields = {\n \"EIPID\": fields.Str(required=True, dump_to=\"EIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass BindEIPResponseSchema(schema.ResponseSchema):\n \"\"\" BindEIP - 绑定外网 IP\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: BindPhysicalIP\n\n绑定物理 IP ,被绑定的资源必须处于运行中或有效状态。\n\"\"\"\n\n\nclass BindPhysicalIPRequestSchema(schema.RequestSchema):\n \"\"\" BindPhysicalIP - 绑定物理 IP ,被绑定的资源必须处于运行中或有效状态。\n \"\"\"\n\n fields = {\n \"PhysicalIPID\": fields.Str(required=True, dump_to=\"PhysicalIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass BindPhysicalIPResponseSchema(schema.ResponseSchema):\n \"\"\" BindPhysicalIP - 绑定物理 IP ,被绑定的资源必须处于运行中或有效状态。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: BindSecurityGroup\n\n绑定安全组\n\"\"\"\n\n\nclass BindSecurityGroupRequestSchema(schema.RequestSchema):\n \"\"\" BindSecurityGroup - 绑定安全组\n \"\"\"\n\n fields = {\n \"NICType\": fields.Str(required=False, dump_to=\"NICType\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass BindSecurityGroupResponseSchema(schema.ResponseSchema):\n \"\"\" BindSecurityGroup - 绑定安全组\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CloneDisk\n\n克隆硬盘\n\"\"\"\n\n\nclass CloneDiskRequestSchema(schema.RequestSchema):\n \"\"\" CloneDisk - 克隆硬盘\n \"\"\"\n\n fields = {\n 
\"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SrcID\": fields.Str(required=True, dump_to=\"SrcID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CloneDiskResponseSchema(schema.ResponseSchema):\n \"\"\" CloneDisk - 克隆硬盘\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, load_from=\"DiskID\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CreateCertificate\n\n创建证书\n\"\"\"\n\n\nclass CreateCertificateRequestSchema(schema.RequestSchema):\n \"\"\" CreateCertificate - 创建证书\n \"\"\"\n\n fields = {\n \"Certificate\": fields.Str(required=True, dump_to=\"Certificate\"),\n \"CertificateType\": fields.Str(required=True, dump_to=\"CertificateType\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"PrivateKey\": fields.Str(required=False, dump_to=\"PrivateKey\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateCertificateResponseSchema(schema.ResponseSchema):\n \"\"\" CreateCertificate - 创建证书\n \"\"\"\n\n fields = {\n \"CertificateID\": fields.Str(required=True, load_from=\"CertificateID\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CreateCustomImage\n\n创建自制镜像\n\"\"\"\n\n\nclass CreateCustomImageRequestSchema(schema.RequestSchema):\n \"\"\" CreateCustomImage - 创建自制镜像\n \"\"\"\n\n fields = {\n \"ImageDescription\": fields.Str(\n required=False, dump_to=\"ImageDescription\"\n ),\n \"ImageName\": fields.Str(required=True, dump_to=\"ImageName\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateCustomImageResponseSchema(schema.ResponseSchema):\n \"\"\" CreateCustomImage - 创建自制镜像\n \"\"\"\n\n fields = {\n \"ImageID\": fields.Str(required=True, load_from=\"ImageID\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CreateDisk\n\n创建硬盘\n\"\"\"\n\n\nclass CreateDiskRequestSchema(schema.RequestSchema):\n \"\"\" CreateDisk - 创建硬盘\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"DiskSpace\": fields.Int(required=True, dump_to=\"DiskSpace\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SetType\": fields.Str(required=True, dump_to=\"SetType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateDiskResponseSchema(schema.ResponseSchema):\n \"\"\" CreateDisk - 创建硬盘\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, load_from=\"DiskID\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CreateLB\n\n创建负载均衡\n\"\"\"\n\n\nclass CreateLBRequestSchema(schema.RequestSchema):\n \"\"\" CreateLB - 创建负载均衡\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"EIPID\": fields.Str(required=False, dump_to=\"EIPID\"),\n \"LBType\": fields.Str(required=True, dump_to=\"LBType\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Quantity\": 
fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"SGID\": fields.Str(required=False, dump_to=\"SGID\"),\n \"SubnetID\": fields.Str(required=True, dump_to=\"SubnetID\"),\n \"VMType\": fields.Str(required=True, dump_to=\"VMType\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateLBResponseSchema(schema.ResponseSchema):\n \"\"\" CreateLB - 创建负载均衡\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=False, load_from=\"LBID\"),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: CreateNATGW\n\n创建NAT网关\n\"\"\"\n\n\nclass CreateNATGWRequestSchema(schema.RequestSchema):\n \"\"\" CreateNATGW - 创建NAT网关\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"EIPID\": fields.Str(required=True, dump_to=\"EIPID\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"SubnetID\": fields.Str(required=True, dump_to=\"SubnetID\"),\n \"VMType\": fields.Str(required=True, dump_to=\"VMType\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateNATGWResponseSchema(schema.ResponseSchema):\n \"\"\" CreateNATGW - 创建NAT网关\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"NATGWID\": fields.Str(required=False, load_from=\"NATGWID\"),\n }\n\n\n\"\"\"\nAPI: CreateNATGWRule\n\n添加NAT网关白名单\n\"\"\"\n\n\nclass CreateNATGWRuleRequestSchema(schema.RequestSchema):\n \"\"\" CreateNATGWRule - 添加NAT网关白名单\n \"\"\"\n\n fields = {\n \"BindResourceID\": fields.Str(required=True, dump_to=\"BindResourceID\"),\n \"NATGWID\": fields.Str(required=True, dump_to=\"NATGWID\"),\n \"NATGWType\": fields.Str(required=True, dump_to=\"NATGWType\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateNATGWRuleResponseSchema(schema.ResponseSchema):\n \"\"\" CreateNATGWRule - 添加NAT网关白名单\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"RuleID\": fields.Str(required=False, load_from=\"RuleID\"),\n }\n\n\n\"\"\"\nAPI: CreateNIC\n\n创建网卡\n\"\"\"\n\n\nclass CreateNICRequestSchema(schema.RequestSchema):\n \"\"\" CreateNIC - 创建网卡\n \"\"\"\n\n fields = {\n \"IP\": fields.Str(required=False, dump_to=\"IP\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SGID\": fields.Str(required=False, dump_to=\"SGID\"),\n \"SubnetID\": fields.Str(required=True, dump_to=\"SubnetID\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateNICResponseSchema(schema.ResponseSchema):\n \"\"\" CreateNIC - 创建网卡\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"NICID\": fields.Str(required=True, load_from=\"NICID\"),\n }\n\n\n\"\"\"\nAPI: CreatePhysicalIP\n\n创建物理 IP ,需确保平台已配置物理 IP 线路相关信息及物理网络联通性。\n\"\"\"\n\n\nclass CreatePhysicalIPRequestSchema(schema.RequestSchema):\n \"\"\" 
CreatePhysicalIP - 创建物理 IP ,需确保平台已配置物理 IP 线路相关信息及物理网络联通性。\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"OperatorName\": fields.Str(required=True, dump_to=\"OperatorName\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreatePhysicalIPResponseSchema(schema.ResponseSchema):\n \"\"\" CreatePhysicalIP - 创建物理 IP ,需确保平台已配置物理 IP 线路相关信息及物理网络联通性。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"PhysicalIPID\": fields.Str(required=True, load_from=\"PhysicalIPID\"),\n }\n\n\n\"\"\"\nAPI: CreateRS\n\n为负载均衡的 VServer 添加后端服务节点。\n\"\"\"\n\n\nclass CreateRSRequestSchema(schema.RequestSchema):\n \"\"\" CreateRS - 为负载均衡的 VServer 添加后端服务节点。\n \"\"\"\n\n fields = {\n \"BindResourceID\": fields.Str(required=True, dump_to=\"BindResourceID\"),\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Port\": fields.Int(required=True, dump_to=\"Port\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Weight\": fields.Int(required=True, dump_to=\"Weight\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateRSResponseSchema(schema.ResponseSchema):\n \"\"\" CreateRS - 为负载均衡的 VServer 添加后端服务节点。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"RSID\": fields.Str(required=False, load_from=\"RSID\"),\n }\n\n\n\"\"\"\nAPI: CreateSecurityGroup\n\n创建安全组\n\"\"\"\n\n\nclass CreateSecurityGroupRequestSchema(schema.RequestSchema):\n \"\"\" CreateSecurityGroup - 创建安全组\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"Rule\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateSecurityGroupResponseSchema(schema.ResponseSchema):\n \"\"\" CreateSecurityGroup - 创建安全组\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"SGID\": fields.Str(required=False, load_from=\"SGID\"),\n }\n\n\n\"\"\"\nAPI: CreateSecurityGroupRule\n\n创建安全组规则\n\"\"\"\n\n\nclass CreateSecurityGroupRuleRequestSchema(schema.RequestSchema):\n \"\"\" CreateSecurityGroupRule - 创建安全组规则\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Rules\": fields.List(fields.Str()),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateSecurityGroupRuleResponseSchema(schema.ResponseSchema):\n \"\"\" CreateSecurityGroupRule - 创建安全组规则\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"SGRuleID\": fields.Str(required=False, load_from=\"SGRuleID\"),\n }\n\n\n\"\"\"\nAPI: CreateSnapshot\n\n创建硬盘快照\n\"\"\"\n\n\nclass CreateSnapshotRequestSchema(schema.RequestSchema):\n \"\"\" CreateSnapshot - 创建硬盘快照\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateSnapshotResponseSchema(schema.ResponseSchema):\n \"\"\" CreateSnapshot - 
创建硬盘快照\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"SnapshotID\": fields.Str(required=True, load_from=\"SnapshotID\"),\n }\n\n\n\"\"\"\nAPI: CreateSubnet\n\n创建子网\n\"\"\"\n\n\nclass CreateSubnetRequestSchema(schema.RequestSchema):\n \"\"\" CreateSubnet - 创建子网\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Network\": fields.Str(required=True, dump_to=\"Network\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateSubnetResponseSchema(schema.ResponseSchema):\n \"\"\" CreateSubnet - 创建子网\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"SubnetID\": fields.Str(required=False, load_from=\"SubnetID\"),\n }\n\n\n\"\"\"\nAPI: CreateUser\n\n管理员添加账号\n\"\"\"\n\n\nclass CreateUserRequestSchema(schema.RequestSchema):\n \"\"\" CreateUser - 管理员添加账号\n \"\"\"\n\n fields = {\n \"PassWord\": fields.Str(required=True, dump_to=\"PassWord\"),\n \"UserEmail\": fields.Str(required=True, dump_to=\"UserEmail\"),\n }\n\n\nclass CreateUserResponseSchema(schema.ResponseSchema):\n \"\"\" CreateUser - 管理员添加账号\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"UserID\": fields.Int(required=False, load_from=\"UserID\"),\n }\n\n\n\"\"\"\nAPI: CreateVMInstance\n\n创建虚拟机\n\"\"\"\n\n\nclass CreateVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" CreateVMInstance - 创建虚拟机\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Str(required=False, dump_to=\"Bandwidth\"),\n \"BootDiskSetType\": fields.Str(required=True, dump_to=\"BootDiskSetType\"),\n \"CPU\": fields.Int(required=True, dump_to=\"CPU\"),\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"DataDiskSetType\": fields.Str(required=True, dump_to=\"DataDiskSetType\"),\n \"DataDiskSpace\": fields.Int(required=False, dump_to=\"DataDiskSpace\"),\n \"GPU\": fields.Int(required=False, dump_to=\"GPU\"),\n \"IPVersion\": fields.Str(required=False, dump_to=\"IPVersion\"),\n \"ImageID\": fields.Str(required=True, dump_to=\"ImageID\"),\n \"InternalIP\": fields.Str(required=False, dump_to=\"InternalIP\"),\n \"InternetIP\": fields.Str(required=False, dump_to=\"InternetIP\"),\n \"LANSGID\": fields.Str(required=False, dump_to=\"LANSGID\"),\n \"Memory\": fields.Int(required=True, dump_to=\"Memory\"),\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"OperatorName\": fields.Str(required=False, dump_to=\"OperatorName\"),\n \"Password\": fields.Str(required=True, dump_to=\"Password\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SubnetID\": fields.Str(required=True, dump_to=\"SubnetID\"),\n \"VMType\": fields.Str(required=True, dump_to=\"VMType\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"WANSGID\": fields.Str(required=True, dump_to=\"WANSGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" CreateVMInstance - 创建虚拟机\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=False, load_from=\"DiskID\"),\n \"EIPID\": fields.Str(required=False, load_from=\"EIPID\"),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"VMID\": fields.Str(required=False, load_from=\"VMID\"),\n 
}\n\n\n\"\"\"\nAPI: CreateVPC\n\n创建VPC\n\"\"\"\n\n\nclass CreateVPCRequestSchema(schema.RequestSchema):\n \"\"\" CreateVPC - 创建VPC\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Network\": fields.Str(required=True, dump_to=\"Network\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateVPCResponseSchema(schema.ResponseSchema):\n \"\"\" CreateVPC - 创建VPC\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"VPCID\": fields.Str(required=False, load_from=\"VPCID\"),\n }\n\n\n\"\"\"\nAPI: CreateVS\n\n创建负载均衡VServer\n\"\"\"\n\n\nclass CreateVSRequestSchema(schema.RequestSchema):\n \"\"\" CreateVS - 创建负载均衡VServer\n \"\"\"\n\n fields = {\n \"CACertificateID\": fields.Str(\n required=False, dump_to=\"CACertificateID\"\n ),\n \"Domain\": fields.Str(required=False, dump_to=\"Domain\"),\n \"HealthcheckType\": fields.Str(required=True, dump_to=\"HealthcheckType\"),\n \"KeepaliveTimeout\": fields.Int(\n required=False, dump_to=\"KeepaliveTimeout\"\n ),\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Path\": fields.Str(required=False, dump_to=\"Path\"),\n \"PersistenceKey\": fields.Str(required=False, dump_to=\"PersistenceKey\"),\n \"PersistenceType\": fields.Str(\n required=False, dump_to=\"PersistenceType\"\n ),\n \"Port\": fields.Int(required=True, dump_to=\"Port\"),\n \"Protocol\": fields.Str(required=True, dump_to=\"Protocol\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SSLMode\": fields.Str(required=False, dump_to=\"SSLMode\"),\n \"Scheduler\": fields.Str(required=True, dump_to=\"Scheduler\"),\n \"ServerCertificateID\": fields.Str(\n required=False, dump_to=\"ServerCertificateID\"\n ),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateVSResponseSchema(schema.ResponseSchema):\n \"\"\" CreateVS - 创建负载均衡VServer\n \"\"\"\n\n fields = {\n \"Action\": fields.Str(required=True, load_from=\"Action\"),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"RetCode\": fields.Int(required=True, load_from=\"RetCode\"),\n \"VSID\": fields.Str(required=False, load_from=\"VSID\"),\n }\n\n\n\"\"\"\nAPI: CreateVSPolicy\n\n创建七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n\"\"\"\n\n\nclass CreateVSPolicyRequestSchema(schema.RequestSchema):\n \"\"\" CreateVSPolicy - 创建七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Domain\": fields.Str(required=False, dump_to=\"Domain\"),\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Path\": fields.Str(required=False, dump_to=\"Path\"),\n \"RSIDs\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass CreateVSPolicyResponseSchema(schema.ResponseSchema):\n \"\"\" CreateVSPolicy - 创建七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"PolicyID\": fields.Str(required=False, load_from=\"PolicyID\"),\n }\n\n\n\"\"\"\nAPI: DeleteCertificate\n\n删除证书\n\"\"\"\n\n\nclass DeleteCertificateRequestSchema(schema.RequestSchema):\n \"\"\" DeleteCertificate - 删除证书\n \"\"\"\n\n fields = {\n \"CertificateID\": fields.Str(required=True, dump_to=\"CertificateID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n 
\"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteCertificateResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteCertificate - 删除证书\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteCustomImage\n\n删除自制镜像\n\"\"\"\n\n\nclass DeleteCustomImageRequestSchema(schema.RequestSchema):\n \"\"\" DeleteCustomImage - 删除自制镜像\n \"\"\"\n\n fields = {\n \"ImageID\": fields.Str(required=True, dump_to=\"ImageID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteCustomImageResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteCustomImage - 删除自制镜像\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteDisk\n\n删除硬盘\n\"\"\"\n\n\nclass DeleteDiskRequestSchema(schema.RequestSchema):\n \"\"\" DeleteDisk - 删除硬盘\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteDiskResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteDisk - 删除硬盘\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteLB\n\n删除负载均衡\n\"\"\"\n\n\nclass DeleteLBRequestSchema(schema.RequestSchema):\n \"\"\" DeleteLB - 删除负载均衡\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteLBResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteLB - 删除负载均衡\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteNATGW\n\n删除NAT网关\n\"\"\"\n\n\nclass DeleteNATGWRequestSchema(schema.RequestSchema):\n \"\"\" DeleteNATGW - 删除NAT网关\n \"\"\"\n\n fields = {\n \"NATGWID\": fields.Str(required=True, dump_to=\"NATGWID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteNATGWResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteNATGW - 删除NAT网关\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteNATGWRule\n\n删除NAT网关白名单\n\"\"\"\n\n\nclass DeleteNATGWRuleRequestSchema(schema.RequestSchema):\n \"\"\" DeleteNATGWRule - 删除NAT网关白名单\n \"\"\"\n\n fields = {\n \"NATGWID\": fields.Str(required=True, dump_to=\"NATGWID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"RuleID\": fields.Str(required=True, dump_to=\"RuleID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteNATGWRuleResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteNATGWRule - 删除NAT网关白名单\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteNIC\n\n删除网卡\n\"\"\"\n\n\nclass DeleteNICRequestSchema(schema.RequestSchema):\n \"\"\" DeleteNIC - 删除网卡\n \"\"\"\n\n fields = {\n \"NICID\": fields.Str(required=True, dump_to=\"NICID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteNICResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteNIC - 删除网卡\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: 
DeletePhysicalIP\n\n删除物理IP\n\"\"\"\n\n\nclass DeletePhysicalIPRequestSchema(schema.RequestSchema):\n \"\"\" DeletePhysicalIP - 删除物理IP\n \"\"\"\n\n fields = {\n \"PhysicalIPID\": fields.Str(required=True, dump_to=\"PhysicalIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeletePhysicalIPResponseSchema(schema.ResponseSchema):\n \"\"\" DeletePhysicalIP - 删除物理IP\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteRS\n\n移除负载均衡的单个服务节点\n\"\"\"\n\n\nclass DeleteRSRequestSchema(schema.RequestSchema):\n \"\"\" DeleteRS - 移除负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"RSID\": fields.Str(required=True, dump_to=\"RSID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteRSResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteRS - 移除负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteSecurityGroup\n\n删除安全组\n\"\"\"\n\n\nclass DeleteSecurityGroupRequestSchema(schema.RequestSchema):\n \"\"\" DeleteSecurityGroup - 删除安全组\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteSecurityGroupResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteSecurityGroup - 删除安全组\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteSecurityGroupRule\n\n删除安全组规则\n\"\"\"\n\n\nclass DeleteSecurityGroupRuleRequestSchema(schema.RequestSchema):\n \"\"\" DeleteSecurityGroupRule - 删除安全组规则\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"SGRuleID\": fields.Str(required=True, dump_to=\"SGRuleID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteSecurityGroupRuleResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteSecurityGroupRule - 删除安全组规则\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteSnapshot\n\n删除快照,仅支持状态为正常的快照进行删除操作。\n\"\"\"\n\n\nclass DeleteSnapshotRequestSchema(schema.RequestSchema):\n \"\"\" DeleteSnapshot - 删除快照,仅支持状态为正常的快照进行删除操作。\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SnapshotID\": fields.Str(required=True, dump_to=\"SnapshotID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteSnapshotResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteSnapshot - 删除快照,仅支持状态为正常的快照进行删除操作。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteSubnet\n\n删除子网\n\"\"\"\n\n\nclass DeleteSubnetRequestSchema(schema.RequestSchema):\n \"\"\" DeleteSubnet - 删除子网\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SubnetID\": fields.Str(required=True, dump_to=\"SubnetID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteSubnetResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteSubnet - 删除子网\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: 
DeleteVMInstance\n\n删除虚拟机\n\"\"\"\n\n\nclass DeleteVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" DeleteVMInstance - 删除虚拟机\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteVMInstance - 删除虚拟机\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteVPC\n\n删除VPC\n\"\"\"\n\n\nclass DeleteVPCRequestSchema(schema.RequestSchema):\n \"\"\" DeleteVPC - 删除VPC\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VPCID\": fields.Str(required=True, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteVPCResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteVPC - 删除VPC\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteVS\n\n删除VServer\n\"\"\"\n\n\nclass DeleteVSRequestSchema(schema.RequestSchema):\n \"\"\" DeleteVS - 删除VServer\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteVSResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteVS - 删除VServer\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DeleteVSPolicy\n\n删除七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n\"\"\"\n\n\nclass DeleteVSPolicyRequestSchema(schema.RequestSchema):\n \"\"\" DeleteVSPolicy - 删除七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"PolicyID\": fields.Str(required=True, dump_to=\"PolicyID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DeleteVSPolicyResponseSchema(schema.ResponseSchema):\n \"\"\" DeleteVSPolicy - 删除七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DescribeCertificate\n\n查询证书\n\"\"\"\n\n\nclass DescribeCertificateRequestSchema(schema.RequestSchema):\n \"\"\" DescribeCertificate - 查询证书\n \"\"\"\n\n fields = {\n \"CertificateIDs\": fields.List(fields.Str()),\n \"CertificateType\": fields.Str(\n required=False, dump_to=\"CertificateType\"\n ),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeCertificateResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeCertificate - 查询证书\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.CertificateInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeDisk\n\n获取硬盘信息\n\"\"\"\n\n\nclass DescribeDiskRequestSchema(schema.RequestSchema):\n \"\"\" DescribeDisk - 获取硬盘信息\n \"\"\"\n\n fields = {\n \"DiskIDs\": fields.List(fields.Str()),\n \"DiskType\": 
fields.Str(required=False, dump_to=\"DiskType\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeDiskResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeDisk - 获取硬盘信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.DiskInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeEIP\n\n获取外网IP的信息\n\"\"\"\n\n\nclass DescribeEIPRequestSchema(schema.RequestSchema):\n \"\"\" DescribeEIP - 获取外网IP的信息\n \"\"\"\n\n fields = {\n \"BindResourceID\": fields.Str(required=False, dump_to=\"BindResourceID\"),\n \"EIPIDs\": fields.List(fields.Str()),\n \"IPVersion\": fields.Str(required=False, dump_to=\"IPVersion\"),\n \"Limit\": fields.Str(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Str(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeEIPResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeEIP - 获取外网IP的信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.EIPInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"Totalcount\": fields.Int(required=False, load_from=\"Totalcount\"),\n }\n\n\n\"\"\"\nAPI: DescribeImage\n\n获取镜像信息,包括默认镜像和自制镜像。\n\"\"\"\n\n\nclass DescribeImageRequestSchema(schema.RequestSchema):\n \"\"\" DescribeImage - 获取镜像信息,包括默认镜像和自制镜像。\n \"\"\"\n\n fields = {\n \"ImageIDs\": fields.List(fields.Str()),\n \"ImageType\": fields.Str(required=False, dump_to=\"ImageType\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeImageResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeImage - 获取镜像信息,包括默认镜像和自制镜像。\n \"\"\"\n\n fields = {\n \"Action\": fields.Str(required=True, load_from=\"Action\"),\n \"Infos\": fields.List(\n models.ImageInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"RetCode\": fields.Int(required=True, load_from=\"RetCode\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeLB\n\n获取负载均衡信息\n\"\"\"\n\n\nclass DescribeLBRequestSchema(schema.RequestSchema):\n \"\"\" DescribeLB - 获取负载均衡信息\n \"\"\"\n\n fields = {\n \"LBIDs\": fields.List(fields.Str()),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SubnetID\": fields.Str(required=False, dump_to=\"SubnetID\"),\n \"VPCID\": fields.Str(required=False, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeLBResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeLB - 获取负载均衡信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.LBInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n 
}\n\n\n\"\"\"\nAPI: DescribeMetric\n\n获取资源监控信息\n\"\"\"\n\n\nclass DescribeMetricRequestSchema(schema.RequestSchema):\n \"\"\" DescribeMetric - 获取资源监控信息\n \"\"\"\n\n fields = {\n \"BeginTime\": fields.Str(required=True, dump_to=\"BeginTime\"),\n \"EndTime\": fields.Str(required=True, dump_to=\"EndTime\"),\n \"MetricName\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeMetricResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeMetric - 获取资源监控信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.MetricInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeNATGW\n\n获取NAT网关信息\n\"\"\"\n\n\nclass DescribeNATGWRequestSchema(schema.RequestSchema):\n \"\"\" DescribeNATGW - 获取NAT网关信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"NATGWIDs\": fields.List(fields.Str()),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeNATGWResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeNATGW - 获取NAT网关信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.NATGWInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeNATGWRule\n\n获取NAT网关白名单信息 \n\"\"\"\n\n\nclass DescribeNATGWRuleRequestSchema(schema.RequestSchema):\n \"\"\" DescribeNATGWRule - 获取NAT网关白名单信息 \n \"\"\"\n\n fields = {\n \"BindResourceIDs\": fields.List(fields.Str()),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"NATGWID\": fields.Str(required=True, dump_to=\"NATGWID\"),\n \"NATGWType\": fields.Str(required=True, dump_to=\"NATGWType\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"RuleIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeNATGWRuleResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeNATGWRule - 获取NAT网关白名单信息 \n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.NATGWRuleInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeNIC\n\n获取网卡信息\n\"\"\"\n\n\nclass DescribeNICRequestSchema(schema.RequestSchema):\n \"\"\" DescribeNIC - 获取网卡信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"NICIDs\": fields.List(fields.Str()),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeNICResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeNIC - 获取网卡信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.NICInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n 
\"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeOPLogs\n\n查询操作日志\n\"\"\"\n\n\nclass DescribeOPLogsRequestSchema(schema.RequestSchema):\n \"\"\" DescribeOPLogs - 查询操作日志\n \"\"\"\n\n fields = {\n \"BeginTime\": fields.Int(required=True, dump_to=\"BeginTime\"),\n \"EndTime\": fields.Int(required=True, dump_to=\"EndTime\"),\n \"IsSuccess\": fields.Str(required=False, dump_to=\"IsSuccess\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=False, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=False, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeOPLogsResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeOPLogs - 查询操作日志\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.OPLogInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribePhysicalIP\n\n获取物理IP信息 \n\"\"\"\n\n\nclass DescribePhysicalIPRequestSchema(schema.RequestSchema):\n \"\"\" DescribePhysicalIP - 获取物理IP信息 \n \"\"\"\n\n fields = {\n \"Limit\": fields.Str(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Str(required=False, dump_to=\"Offset\"),\n \"PhysicalIPIDs\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribePhysicalIPResponseSchema(schema.ResponseSchema):\n \"\"\" DescribePhysicalIP - 获取物理IP信息 \n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.PhysicalIPInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeRS\n\n获取负载均衡服务的服务节点信息\n\"\"\"\n\n\nclass DescribeRSRequestSchema(schema.RequestSchema):\n \"\"\" DescribeRS - 获取负载均衡服务的服务节点信息\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"RSIDs\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=False, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeRSResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeRS - 获取负载均衡服务的服务节点信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.RSInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeRecycledResource\n\n查询回收站资源\n\"\"\"\n\n\nclass DescribeRecycledResourceRequestSchema(schema.RequestSchema):\n \"\"\" DescribeRecycledResource - 查询回收站资源\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeRecycledResourceResponseSchema(schema.ResponseSchema):\n \"\"\" 
DescribeRecycledResource - 查询回收站资源\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.RecycledResourceInfoSchema(),\n required=True,\n load_from=\"Infos\",\n ),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeSecurityGroup\n\n查询安全组信息\n\"\"\"\n\n\nclass DescribeSecurityGroupRequestSchema(schema.RequestSchema):\n \"\"\" DescribeSecurityGroup - 查询安全组信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SGIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeSecurityGroupResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeSecurityGroup - 查询安全组信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.SGInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeSecurityGroupResource\n\n查询安全组绑定的资源信息\n\"\"\"\n\n\nclass DescribeSecurityGroupResourceRequestSchema(schema.RequestSchema):\n \"\"\" DescribeSecurityGroupResource - 查询安全组绑定的资源信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeSecurityGroupResourceResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeSecurityGroupResource - 查询安全组绑定的资源信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.SGResourceInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeSnapshot\n\n查询硬盘快照信息\n\"\"\"\n\n\nclass DescribeSnapshotRequestSchema(schema.RequestSchema):\n \"\"\" DescribeSnapshot - 查询硬盘快照信息\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=False, dump_to=\"DiskID\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SnapshotIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeSnapshotResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeSnapshot - 查询硬盘快照信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.SnapshotInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeStorageType\n\n查询存储类型\n\"\"\"\n\n\nclass DescribeStorageTypeRequestSchema(schema.RequestSchema):\n \"\"\" DescribeStorageType - 查询存储类型\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeStorageTypeResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeStorageType - 查询存储类型\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.StorageTypeInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": 
fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeSubnet\n\n查询子网信息\n\"\"\"\n\n\nclass DescribeSubnetRequestSchema(schema.RequestSchema):\n \"\"\" DescribeSubnet - 查询子网信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SubnetIDs\": fields.List(fields.Str()),\n \"VPCID\": fields.Str(required=False, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeSubnetResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeSubnet - 查询子网信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.SubnetInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeUser\n\n查询租户信息\n\"\"\"\n\n\nclass DescribeUserRequestSchema(schema.RequestSchema):\n \"\"\" DescribeUser - 查询租户信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"UserIDs\": fields.List(fields.Int()),\n }\n\n\nclass DescribeUserResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeUser - 查询租户信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.UserInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeVMInstance\n\n查询虚拟机\n\"\"\"\n\n\nclass DescribeVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" DescribeVMInstance - 查询虚拟机\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SubnetID\": fields.Str(required=False, dump_to=\"SubnetID\"),\n \"VMIDs\": fields.List(fields.Str()),\n \"VPCID\": fields.Str(required=False, dump_to=\"VPCID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeVMInstance - 查询虚拟机\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.VMInstanceInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeVMType\n\n查询主机机型\n\"\"\"\n\n\nclass DescribeVMTypeRequestSchema(schema.RequestSchema):\n \"\"\" DescribeVMType - 查询主机机型\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeVMTypeResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeVMType - 查询主机机型\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.VMTypeInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeVPC\n\n查询VPC信息\n\"\"\"\n\n\nclass DescribeVPCRequestSchema(schema.RequestSchema):\n \"\"\" DescribeVPC - 查询VPC信息\n \"\"\"\n\n fields = {\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, 
dump_to=\"Region\"),\n \"VPCIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeVPCResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeVPC - 查询VPC信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.VPCInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=False, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeVS\n\n获取负载均衡 VServer 信息\n\"\"\"\n\n\nclass DescribeVSRequestSchema(schema.RequestSchema):\n \"\"\" DescribeVS - 获取负载均衡 VServer 信息\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSIDs\": fields.List(fields.Str()),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeVSResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeVS - 获取负载均衡 VServer 信息\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.VSInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DescribeVSPolicy\n\n获取七层负载均衡内容转发规则信息,仅当 VServer 的监听协议为 HTTP 时有效。\n\"\"\"\n\n\nclass DescribeVSPolicyRequestSchema(schema.RequestSchema):\n \"\"\" DescribeVSPolicy - 获取七层负载均衡内容转发规则信息,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Limit\": fields.Int(required=False, dump_to=\"Limit\"),\n \"Offset\": fields.Int(required=False, dump_to=\"Offset\"),\n \"PolicyIDs\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=False, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DescribeVSPolicyResponseSchema(schema.ResponseSchema):\n \"\"\" DescribeVSPolicy - 获取七层负载均衡内容转发规则信息,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.VSPolicyInfoSchema(), required=True, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"TotalCount\": fields.Int(required=True, load_from=\"TotalCount\"),\n }\n\n\n\"\"\"\nAPI: DetachDisk\n\n解绑硬盘\n\"\"\"\n\n\nclass DetachDiskRequestSchema(schema.RequestSchema):\n \"\"\" DetachDisk - 解绑硬盘\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DetachDiskResponseSchema(schema.ResponseSchema):\n \"\"\" DetachDisk - 解绑硬盘\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DetachNIC\n\n解绑UClouStack网卡\n\"\"\"\n\n\nclass DetachNICRequestSchema(schema.RequestSchema):\n \"\"\" DetachNIC - 解绑UClouStack网卡\n \"\"\"\n\n fields = {\n \"NICID\": fields.Str(required=True, dump_to=\"NICID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DetachNICResponseSchema(schema.ResponseSchema):\n \"\"\" DetachNIC - 解绑UClouStack网卡\n \"\"\"\n\n fields = {\n \"Message\": 
fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: DisableRS\n\n禁用负载均衡的单个服务节点\n\"\"\"\n\n\nclass DisableRSRequestSchema(schema.RequestSchema):\n \"\"\" DisableRS - 禁用负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"RSID\": fields.Str(required=True, dump_to=\"RSID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass DisableRSResponseSchema(schema.ResponseSchema):\n \"\"\" DisableRS - 禁用负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: EnableRS\n\n启用负载均衡的单个服务节点\n\"\"\"\n\n\nclass EnableRSRequestSchema(schema.RequestSchema):\n \"\"\" EnableRS - 启用负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"RSID\": fields.Str(required=True, dump_to=\"RSID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass EnableRSResponseSchema(schema.ResponseSchema):\n \"\"\" EnableRS - 启用负载均衡的单个服务节点\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: GetDiskPrice\n\n获取硬盘价格\n\"\"\"\n\n\nclass GetDiskPriceRequestSchema(schema.RequestSchema):\n \"\"\" GetDiskPrice - 获取硬盘价格\n \"\"\"\n\n fields = {\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"DiskSpace\": fields.Int(required=True, dump_to=\"DiskSpace\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SetType\": fields.Str(required=True, dump_to=\"SetType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass GetDiskPriceResponseSchema(schema.ResponseSchema):\n \"\"\" GetDiskPrice - 获取硬盘价格\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.PriceInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: GetEIPPrice\n\n获取外网IP价格\n\"\"\"\n\n\nclass GetEIPPriceRequestSchema(schema.RequestSchema):\n \"\"\" GetEIPPrice - 获取外网IP价格\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=True, dump_to=\"Bandwidth\"),\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"OpertatorName\": fields.Str(required=True, dump_to=\"OpertatorName\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass GetEIPPriceResponseSchema(schema.ResponseSchema):\n \"\"\" GetEIPPrice - 获取外网IP价格\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.PriceInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: GetVMInstancePrice\n\n获取虚拟机价格\n\"\"\"\n\n\nclass GetVMInstancePriceRequestSchema(schema.RequestSchema):\n \"\"\" GetVMInstancePrice - 获取虚拟机价格\n \"\"\"\n\n fields = {\n \"BootDiskSetType\": fields.Str(required=True, dump_to=\"BootDiskSetType\"),\n \"CPU\": fields.Int(required=True, dump_to=\"CPU\"),\n \"ChargeType\": fields.Str(required=True, dump_to=\"ChargeType\"),\n \"DataDiskSetType\": fields.Str(required=True, dump_to=\"DataDiskSetType\"),\n \"DataDiskSpace\": fields.Int(required=True, 
dump_to=\"DataDiskSpace\"),\n \"GPU\": fields.Int(required=False, dump_to=\"GPU\"),\n \"ImageID\": fields.Str(required=True, dump_to=\"ImageID\"),\n \"Memory\": fields.Int(required=True, dump_to=\"Memory\"),\n \"OSType\": fields.Str(required=True, dump_to=\"OSType\"),\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMType\": fields.Str(required=True, dump_to=\"VMType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass GetVMInstancePriceResponseSchema(schema.ResponseSchema):\n \"\"\" GetVMInstancePrice - 获取虚拟机价格\n \"\"\"\n\n fields = {\n \"Infos\": fields.List(\n models.PriceInfoSchema(), required=False, load_from=\"Infos\"\n ),\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ModifyEIPBandwidth\n\n调整外网IP带宽\n\"\"\"\n\n\nclass ModifyEIPBandwidthRequestSchema(schema.RequestSchema):\n \"\"\" ModifyEIPBandwidth - 调整外网IP带宽\n \"\"\"\n\n fields = {\n \"Bandwidth\": fields.Int(required=True, dump_to=\"Bandwidth\"),\n \"EIPID\": fields.Str(required=True, dump_to=\"EIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ModifyEIPBandwidthResponseSchema(schema.ResponseSchema):\n \"\"\" ModifyEIPBandwidth - 调整外网IP带宽\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ModifyNameAndRemark\n\n修改资源名称和备注\n\"\"\"\n\n\nclass ModifyNameAndRemarkRequestSchema(schema.RequestSchema):\n \"\"\" ModifyNameAndRemark - 修改资源名称和备注\n \"\"\"\n\n fields = {\n \"Name\": fields.Str(required=True, dump_to=\"Name\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Remark\": fields.Str(required=False, dump_to=\"Remark\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ModifyNameAndRemarkResponseSchema(schema.ResponseSchema):\n \"\"\" ModifyNameAndRemark - 修改资源名称和备注\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: PoweroffVMInstance\n\n断电虚拟机,可能导致丢失数据甚至损坏操作系统,仅适用于虚拟机死机及级端测试场景。\n\"\"\"\n\n\nclass PoweroffVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" PoweroffVMInstance - 断电虚拟机,可能导致丢失数据甚至损坏操作系统,仅适用于虚拟机死机及级端测试场景。\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass PoweroffVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" PoweroffVMInstance - 断电虚拟机,可能导致丢失数据甚至损坏操作系统,仅适用于虚拟机死机及级端测试场景。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: Recharge\n\n管理员给租户充值\n\"\"\"\n\n\nclass RechargeRequestSchema(schema.RequestSchema):\n \"\"\" Recharge - 管理员给租户充值\n \"\"\"\n\n fields = {\n \"Amount\": fields.Int(required=True, dump_to=\"Amount\"),\n \"FromType\": fields.Str(required=True, dump_to=\"FromType\"),\n \"SerialNo\": fields.Str(required=True, dump_to=\"SerialNo\"),\n \"UserID\": fields.Int(required=True, dump_to=\"UserID\"),\n }\n\n\nclass RechargeResponseSchema(schema.ResponseSchema):\n \"\"\" Recharge - 管理员给租户充值\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ReinstallVMInstance\n\n重装系统,关机的虚拟机才可以重装系统\n\"\"\"\n\n\nclass ReinstallVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" 
ReinstallVMInstance - 重装系统,关机的虚拟机才可以重装系统\n \"\"\"\n\n fields = {\n \"ImageID\": fields.Str(required=True, dump_to=\"ImageID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ReinstallVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" ReinstallVMInstance - 重装系统,关机的虚拟机才可以重装系统\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ReleaseEIP\n\n删除外网IP\n\"\"\"\n\n\nclass ReleaseEIPRequestSchema(schema.RequestSchema):\n \"\"\" ReleaseEIP - 删除外网IP\n \"\"\"\n\n fields = {\n \"EIPID\": fields.Str(required=True, dump_to=\"EIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ReleaseEIPResponseSchema(schema.ResponseSchema):\n \"\"\" ReleaseEIP - 删除外网IP\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: RenewResource\n\n续费回收站资源 \n\"\"\"\n\n\nclass RenewResourceRequestSchema(schema.RequestSchema):\n \"\"\" RenewResource - 续费回收站资源 \n \"\"\"\n\n fields = {\n \"Quantity\": fields.Int(required=False, dump_to=\"Quantity\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass RenewResourceResponseSchema(schema.ResponseSchema):\n \"\"\" RenewResource - 续费回收站资源 \n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ResetVMInstancePassword\n\n重置虚拟机密码,主机必须开机才可以重置密码\n\"\"\"\n\n\nclass ResetVMInstancePasswordRequestSchema(schema.RequestSchema):\n \"\"\" ResetVMInstancePassword - 重置虚拟机密码,主机必须开机才可以重置密码\n \"\"\"\n\n fields = {\n \"Password\": fields.Str(required=True, dump_to=\"Password\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ResetVMInstancePasswordResponseSchema(schema.ResponseSchema):\n \"\"\" ResetVMInstancePassword - 重置虚拟机密码,主机必须开机才可以重置密码\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: ResizeVMConfig\n\n修改虚拟机配置\n\"\"\"\n\n\nclass ResizeVMConfigRequestSchema(schema.RequestSchema):\n \"\"\" ResizeVMConfig - 修改虚拟机配置\n \"\"\"\n\n fields = {\n \"CPU\": fields.Int(required=True, dump_to=\"CPU\"),\n \"Memory\": fields.Int(required=True, dump_to=\"Memory\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass ResizeVMConfigResponseSchema(schema.ResponseSchema):\n \"\"\" ResizeVMConfig - 修改虚拟机配置\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: RestartVMInstance\n\n重启虚拟机\n\"\"\"\n\n\nclass RestartVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" RestartVMInstance - 重启虚拟机\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass RestartVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" RestartVMInstance - 重启虚拟机\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, 
load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: RollbackResource\n\n恢复回收站资源\n\"\"\"\n\n\nclass RollbackResourceRequestSchema(schema.RequestSchema):\n \"\"\" RollbackResource - 恢复回收站资源\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass RollbackResourceResponseSchema(schema.ResponseSchema):\n \"\"\" RollbackResource - 恢复回收站资源\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: RollbackSnapshot\n\n将某个快照内的数据回滚到原云硬盘,仅支持正常状态的快照进行回滚操作,回滚时硬盘必须处于未绑定或其挂载的主机为关机状态。\n\"\"\"\n\n\nclass RollbackSnapshotRequestSchema(schema.RequestSchema):\n \"\"\" RollbackSnapshot - 将某个快照内的数据回滚到原云硬盘,仅支持正常状态的快照进行回滚操作,回滚时硬盘必须处于未绑定或其挂载的主机为关机状态。\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SnapshotID\": fields.Str(required=True, dump_to=\"SnapshotID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass RollbackSnapshotResponseSchema(schema.ResponseSchema):\n \"\"\" RollbackSnapshot - 将某个快照内的数据回滚到原云硬盘,仅支持正常状态的快照进行回滚操作,回滚时硬盘必须处于未绑定或其挂载的主机为关机状态。\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: StartVMInstance\n\n开启虚拟机\n\"\"\"\n\n\nclass StartVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" StartVMInstance - 开启虚拟机\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass StartVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" StartVMInstance - 开启虚拟机\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: StopVMInstance\n\n关闭虚拟机\n\"\"\"\n\n\nclass StopVMInstanceRequestSchema(schema.RequestSchema):\n \"\"\" StopVMInstance - 关闭虚拟机\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VMID\": fields.Str(required=True, dump_to=\"VMID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass StopVMInstanceResponseSchema(schema.ResponseSchema):\n \"\"\" StopVMInstance - 关闭虚拟机\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n \"VMID\": fields.Str(required=False, load_from=\"VMID\"),\n }\n\n\n\"\"\"\nAPI: TerminateResource\n\n销毁资源\n\"\"\"\n\n\nclass TerminateResourceRequestSchema(schema.RequestSchema):\n \"\"\" TerminateResource - 销毁资源\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass TerminateResourceResponseSchema(schema.ResponseSchema):\n \"\"\" TerminateResource - 销毁资源\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UnBindEIP\n\n解绑外网IP\n\"\"\"\n\n\nclass UnBindEIPRequestSchema(schema.RequestSchema):\n \"\"\" UnBindEIP - 解绑外网IP\n \"\"\"\n\n fields = {\n \"EIPID\": fields.Str(required=True, dump_to=\"EIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n 
}\n\n\nclass UnBindEIPResponseSchema(schema.ResponseSchema):\n \"\"\" UnBindEIP - 解绑外网IP\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UnBindSecurityGroup\n\n解绑安全组\n\"\"\"\n\n\nclass UnBindSecurityGroupRequestSchema(schema.RequestSchema):\n \"\"\" UnBindSecurityGroup - 解绑安全组\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UnBindSecurityGroupResponseSchema(schema.ResponseSchema):\n \"\"\" UnBindSecurityGroup - 解绑安全组\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UnbindAlarmTemplate\n\n解绑告警模板\n\"\"\"\n\n\nclass UnbindAlarmTemplateRequestSchema(schema.RequestSchema):\n \"\"\" UnbindAlarmTemplate - 解绑告警模板\n \"\"\"\n\n fields = {\n \"AlarmTemplateID\": fields.Str(required=True, dump_to=\"AlarmTemplateID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceIDs\": fields.List(fields.Str()),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UnbindAlarmTemplateResponseSchema(schema.ResponseSchema):\n \"\"\" UnbindAlarmTemplate - 解绑告警模板\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UnbindPhysicalIP\n\n解绑物理IP\n\"\"\"\n\n\nclass UnbindPhysicalIPRequestSchema(schema.RequestSchema):\n \"\"\" UnbindPhysicalIP - 解绑物理IP\n \"\"\"\n\n fields = {\n \"PhysicalIPID\": fields.Str(required=True, dump_to=\"PhysicalIPID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceID\": fields.Str(required=True, dump_to=\"ResourceID\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UnbindPhysicalIPResponseSchema(schema.ResponseSchema):\n \"\"\" UnbindPhysicalIP - 解绑物理IP\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpdateAlarmTemplateRule\n\n更新告警模板规则\n\"\"\"\n\n\nclass UpdateAlarmTemplateRuleRequestSchema(schema.RequestSchema):\n \"\"\" UpdateAlarmTemplateRule - 更新告警模板规则\n \"\"\"\n\n fields = {\n \"AlarmStrategy\": fields.Str(required=True, dump_to=\"AlarmStrategy\"),\n \"AlarmTemplateID\": fields.Str(required=True, dump_to=\"AlarmTemplateID\"),\n \"AlarmTemplateRuleID\": fields.Str(\n required=True, dump_to=\"AlarmTemplateRuleID\"\n ),\n \"Compare\": fields.Str(required=True, dump_to=\"Compare\"),\n \"ContactGroupID\": fields.Str(required=True, dump_to=\"ContactGroupID\"),\n \"MetricName\": fields.Str(required=True, dump_to=\"MetricName\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"ResourceType\": fields.Str(required=True, dump_to=\"ResourceType\"),\n \"Threshold\": fields.Str(required=True, dump_to=\"Threshold\"),\n \"TriggerCount\": fields.Str(required=True, dump_to=\"TriggerCount\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpdateAlarmTemplateRuleResponseSchema(schema.ResponseSchema):\n \"\"\" UpdateAlarmTemplateRule - 更新告警模板规则\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpdateRS\n\n修改负载均衡的服务节点\n\"\"\"\n\n\nclass UpdateRSRequestSchema(schema.RequestSchema):\n \"\"\" UpdateRS - 
修改负载均衡的服务节点\n \"\"\"\n\n fields = {\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Port\": fields.Int(required=False, dump_to=\"Port\"),\n \"RSID\": fields.Str(required=True, dump_to=\"RSID\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Weight\": fields.Int(required=False, dump_to=\"Weight\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpdateRSResponseSchema(schema.ResponseSchema):\n \"\"\" UpdateRS - 修改负载均衡的服务节点\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpdateSecurityGroupRule\n\n修改安全组规则\n\"\"\"\n\n\nclass UpdateSecurityGroupRuleRequestSchema(schema.RequestSchema):\n \"\"\" UpdateSecurityGroupRule - 修改安全组规则\n \"\"\"\n\n fields = {\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Rules\": fields.List(fields.Str()),\n \"SGID\": fields.Str(required=True, dump_to=\"SGID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpdateSecurityGroupRuleResponseSchema(schema.ResponseSchema):\n \"\"\" UpdateSecurityGroupRule - 修改安全组规则\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpdateVS\n\n修改负载均衡VServer\n\"\"\"\n\n\nclass UpdateVSRequestSchema(schema.RequestSchema):\n \"\"\" UpdateVS - 修改负载均衡VServer\n \"\"\"\n\n fields = {\n \"CACertificateID\": fields.Str(\n required=False, dump_to=\"CACertificateID\"\n ),\n \"Domain\": fields.Str(required=False, dump_to=\"Domain\"),\n \"HealthcheckType\": fields.Str(\n required=False, dump_to=\"HealthcheckType\"\n ),\n \"KeepaliveTimeout\": fields.Int(\n required=False, dump_to=\"KeepaliveTimeout\"\n ),\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Path\": fields.Str(required=False, dump_to=\"Path\"),\n \"PersistenceKey\": fields.Str(required=False, dump_to=\"PersistenceKey\"),\n \"PersistenceType\": fields.Str(\n required=False, dump_to=\"PersistenceType\"\n ),\n \"Port\": fields.Int(required=False, dump_to=\"Port\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"SSLMode\": fields.Str(required=False, dump_to=\"SSLMode\"),\n \"Scheduler\": fields.Str(required=False, dump_to=\"Scheduler\"),\n \"ServerCertificateID\": fields.Str(\n required=False, dump_to=\"ServerCertificateID\"\n ),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpdateVSResponseSchema(schema.ResponseSchema):\n \"\"\" UpdateVS - 修改负载均衡VServer\n \"\"\"\n\n fields = {\n \"Message\": fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpdateVSPolicy\n\n更新七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n\"\"\"\n\n\nclass UpdateVSPolicyRequestSchema(schema.RequestSchema):\n \"\"\" UpdateVSPolicy - 更新七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Domain\": fields.Str(required=False, dump_to=\"Domain\"),\n \"LBID\": fields.Str(required=True, dump_to=\"LBID\"),\n \"Path\": fields.Str(required=False, dump_to=\"Path\"),\n \"PolicyID\": fields.Str(required=True, dump_to=\"PolicyID\"),\n \"RSIDs\": fields.List(fields.Str()),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"VSID\": fields.Str(required=True, dump_to=\"VSID\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpdateVSPolicyResponseSchema(schema.ResponseSchema):\n \"\"\" UpdateVSPolicy - 更新七层负载均衡内容转发规则,仅当 VServer 的监听协议为 HTTP 时有效。\n \"\"\"\n\n fields = {\n \"Message\": 
fields.Str(required=False, load_from=\"Message\"),\n }\n\n\n\"\"\"\nAPI: UpgradeDisk\n\n扩容硬盘,为保证数据完整性,容量扩容前建议暂停对当前硬盘的所有文件系统读写操作,并进入操作系统进行 `umount ` 或`脱机` 操作。\n\"\"\"\n\n\nclass UpgradeDiskRequestSchema(schema.RequestSchema):\n \"\"\" UpgradeDisk - 扩容硬盘,为保证数据完整性,容量扩容前建议暂停对当前硬盘的所有文件系统读写操作,并进入操作系统进行 `umount ` 或`脱机` 操作。\n \"\"\"\n\n fields = {\n \"DiskID\": fields.Str(required=True, dump_to=\"DiskID\"),\n \"DiskSpace\": fields.Int(required=True, dump_to=\"DiskSpace\"),\n \"Region\": fields.Str(required=True, dump_to=\"Region\"),\n \"Zone\": fields.Str(required=True, dump_to=\"Zone\"),\n }\n\n\nclass UpgradeDiskResponseSchema(schema.ResponseSchema):\n \"\"\" UpgradeDisk - 扩容硬盘,为保证数据完整性,容量扩容前建议暂停对当前硬盘的所有文件系统读写操作,并进入操作系统进行 `umount ` 或`脱机` 操作。\n \"\"\"\n\n fields = {\n \"Action\": fields.Str(required=True, load_from=\"Action\"),\n \"Message\": fields.Str(required=True, load_from=\"Message\"),\n \"RetCode\": fields.Int(required=True, load_from=\"RetCode\"),\n }\n", "id": "3165243", "language": "Python", "matching_score": 3.6542394161224365, "max_stars_count": 0, "path": "ucloud/services/ucloudstack/schemas/apis.py" }, { "content": "\"\"\" Code is generated by ucloud-model, DO NOT EDIT IT. \"\"\"\n\nimport typing\n\n\nfrom ucloud.core.client import Client\nfrom ucloud.services.vpc.schemas import apis\n\n\nclass VPCClient(Client):\n def __init__(\n self, config: dict, transport=None, middleware=None, logger=None\n ):\n super(VPCClient, self).__init__(config, transport, middleware, logger)\n\n def add_vpc_network(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" AddVPCNetwork - 添加VPC网段\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **Network** (list) - (Required) 增加网段\n - **VPCId** (str) - (Required) 源VPC短ID\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.AddVPCNetworkRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"AddVPCNetwork\", d, **kwargs)\n return apis.AddVPCNetworkResponseSchema().loads(resp)\n\n def associate_route_table(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" AssociateRouteTable - 绑定子网的路由表\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **RouteTableId** (str) - (Required) 路由表ID,仅限自定义路由表\n - **SubnetId** (str) - (Required) 子网ID\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.AssociateRouteTableRequestSchema().dumps(d)\n\n resp = self.invoke(\"AssociateRouteTable\", d, **kwargs)\n return apis.AssociateRouteTableResponseSchema().loads(resp)\n\n def clone_route_table(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" CloneRouteTable - 根据一张现有路由表复制一张新的路由表\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 
<https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **RouteTableId** (str) - (Required) 被克隆的路由表ID\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CloneRouteTableRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"CloneRouteTable\", d, **kwargs)\n return apis.CloneRouteTableResponseSchema().loads(resp)\n\n def create_route_table(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" CreateRouteTable - 创建路由表\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **VPCId** (str) - (Required) VPC ID\n - **Name** (str) - 路由表名称 Default RouteTable\n - **Remark** (str) - 备注\n - **Tag** (str) - 业务组\n\n **Response**\n\n - **RouteTableId** (str) - 路由表ID\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CreateRouteTableRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"CreateRouteTable\", d, **kwargs)\n return apis.CreateRouteTableResponseSchema().loads(resp)\n\n def create_subnet(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" CreateSubnet - 创建子网\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **Subnet** (str) - (Required) 子网网络地址,例如192.168.0.0\n - **VPCId** (str) - (Required) VPC资源ID\n - **Netmask** (int) - 子网网络号位数,默认为24\n - **Remark** (str) - 备注\n - **SubnetName** (str) - 子网名称,默认为Subnet\n - **Tag** (str) - 业务组名称,默认为Default\n\n **Response**\n\n - **SubnetId** (str) - 子网ID\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CreateSubnetRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"CreateSubnet\", d, **kwargs)\n return apis.CreateSubnetResponseSchema().loads(resp)\n\n def create_vpc(self, req: typing.Optional[dict] = None, **kwargs) -> dict:\n \"\"\" CreateVPC - 创建VPC\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **Name** (str) - (Required) VPC名称\n - **Network** (list) - (Required) VPC网段\n - **Remark** (str) - 备注\n - **Tag** (str) - 业务组名称\n - **Type** (int) - VPC类型\n\n **Response**\n\n - **VPCId** (str) - VPC资源Id\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CreateVPCRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"CreateVPC\", d, **kwargs)\n return apis.CreateVPCResponseSchema().loads(resp)\n\n def create_vpc_intercom(\n self, req: typing.Optional[dict] = None, 
**kwargs\n ) -> dict:\n \"\"\" CreateVPCIntercom - 新建VPC互通关系\n\n **Request**\n\n - **ProjectId** (str) - (Config) 源VPC所在项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 源VPC所在地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **DstVPCId** (str) - (Required) 目的VPC短ID\n - **VPCId** (str) - (Required) 源VPC短ID\n - **DstProjectId** (str) - 目的VPC项目ID。默认与源VPC同项目。\n - **DstRegion** (str) - 目的VPC所在地域,默认与源VPC同地域。\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CreateVPCIntercomRequestSchema().dumps(d)\n\n # build options\n kwargs[\"max_retries\"] = 0 # ignore retry when api is not idempotent\n\n resp = self.invoke(\"CreateVPCIntercom\", d, **kwargs)\n return apis.CreateVPCIntercomResponseSchema().loads(resp)\n\n def delete_route_table(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DeleteRouteTable - 删除自定义路由表\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **RouteTableId** (str) - (Required) 路由ID\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DeleteRouteTableRequestSchema().dumps(d)\n\n resp = self.invoke(\"DeleteRouteTable\", d, **kwargs)\n return apis.DeleteRouteTableResponseSchema().loads(resp)\n\n def delete_subnet(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DeleteSubnet - 删除子网\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **SubnetId** (str) - (Required) 子网ID\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DeleteSubnetRequestSchema().dumps(d)\n\n resp = self.invoke(\"DeleteSubnet\", d, **kwargs)\n return apis.DeleteSubnetResponseSchema().loads(resp)\n\n def delete_vpc(self, req: typing.Optional[dict] = None, **kwargs) -> dict:\n \"\"\" DeleteVPC - 删除VPC\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **VPCId** (str) - (Required) VPC资源Id\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DeleteVPCRequestSchema().dumps(d)\n\n resp = self.invoke(\"DeleteVPC\", d, **kwargs)\n return apis.DeleteVPCResponseSchema().loads(resp)\n\n def delete_vpc_intercom(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DeleteVPCIntercom - 删除VPC互通关系\n\n **Request**\n\n - **ProjectId** (str) - (Config) 源VPC所在项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 源VPC所在地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **DstVPCId** 
(str) - (Required) 目的VPC短ID\n - **VPCId** (str) - (Required) 源VPC短ID\n - **DstProjectId** (str) - 目的VPC所在项目ID,默认为源VPC所在项目ID\n - **DstRegion** (str) - 目的VPC所在地域,默认为源VPC所在地域\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DeleteVPCIntercomRequestSchema().dumps(d)\n\n resp = self.invoke(\"DeleteVPCIntercom\", d, **kwargs)\n return apis.DeleteVPCIntercomResponseSchema().loads(resp)\n\n def describe_route_table(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DescribeRouteTable - 获取路由表详细信息(包括路由策略)\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **BusinessId** (str) - 业务组ID\n - **Limit** (int) - Limit\n - **OffSet** (int) - OffSet\n - **RouteTableId** (str) - 路由表ID\n - **VPCId** (str) - VPC ID\n\n **Response**\n\n - **RouteTables** (list) - 见 **RouteTableInfo** 模型定义\n - **TotalCount** (int) - RouteTables字段的数量\n\n **Response Model**\n\n **RouteRuleInfo**\n\n - **DstAddr** (str) - 目的地址,比如10.10.8/24\n - **NexthopId** (str) - 路由下一跳ID,比如uvnet-3eljvj\n - **NexthopType** (str) - 下一跳类型,比如local、vnet\n - **Remark** (str) - 路由规则备注\n - **RouteRuleId** (str) - 规则ID\n - **RuleType** (int) - 路由规则类型(0表示系统路由,1表示自定义路由)\n\n **RouteTableInfo**\n\n - **CreateTime** (int) - 创建时间戳\n - **Remark** (str) - 路由表备注\n - **RouteRules** (list) - 见 **RouteRuleInfo** 模型定义\n - **RouteTableId** (str) - 路由表ID\n - **RouteTableType** (int) - 路由表类型,1为默认,0为自定义\n - **SubnetCount** (str) - 绑定了该路由表的子网数量\n - **Tag** (str) - 业务组\n - **VPCId** (str) - 路由表所属vpc\n - **VPCName** (str) - vpc名称\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DescribeRouteTableRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeRouteTable\", d, **kwargs)\n return apis.DescribeRouteTableResponseSchema().loads(resp)\n\n def describe_subnet(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DescribeSubnet - 获取子网信息\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **BusinessId** (str) - 业务组\n - **Limit** (int) - 列表长度,默认为20\n - **Offset** (int) - 偏移量,默认为0\n - **RouteTableId** (str) - 路由表Id\n - **ShowAvailableIPs** (bool) - 是否返回子网的可用IP数,true为是,false为否,默认不返回\n - **SubnetId** (str) - 子网id,适用于一次查询一个子网信息\n - **SubnetIds** (list) - 子网id数组,适用于一次查询多个子网信息\n - **Tag** (str) - 业务组名称,默认为Default\n - **VPCId** (str) - VPC资源id\n\n **Response**\n\n - **DataSet** (list) - 见 **SubnetInfo** 模型定义\n - **TotalCount** (int) - 子网总数量\n\n **Response Model**\n\n **SubnetInfo**\n\n - **AvailableIPs** (int) - 可用IP数量\n - **CreateTime** (int) - 创建时间\n - **Gateway** (str) - 子网网关\n - **HasNATGW** (bool) - 是否有natgw\n - **IPv6Network** (str) - 子网关联的IPv6网段\n - **Netmask** (str) - 子网掩码\n - **Remark** (str) - 备注\n - **RouteTableId** (str) - 路由表Id\n - **Subnet** (str) - 子网网段\n - **SubnetId** (str) - 子网Id\n - **SubnetName** (str) - 子网名称\n - **SubnetType** (int) - 子网类型\n - **Tag** (str) - 业务组\n - **VPCId** (str) - VPCId\n - **VPCName** (str) - VPC名称\n - **Zone** (str) - 可用区名称\n\n \"\"\"\n # build request\n d = {\n 
\"ProjectId\": self.config.project_id,\n \"Region\": self.config.region,\n }\n req and d.update(req)\n d = apis.DescribeSubnetRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeSubnet\", d, **kwargs)\n return apis.DescribeSubnetResponseSchema().loads(resp)\n\n def describe_subnet_resource(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DescribeSubnetResource - 展示子网资源\n **Request**\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **SubnetId** (str) - (Required) 子网id\n - **Limit** (int) - 单页返回数据长度,默认为20\n - **Offset** (int) - 列表起始位置偏移量,默认为0\n - **ResourceType** (str) - 资源类型,默认为全部资源类型。枚举值为:UHOST,云主机;PHOST,物理云主机;ULB,负载均衡;UHADOOP_HOST,hadoop节点;UFORTRESS_HOST,堡垒机;UNATGW,NAT网关;UKAFKA,Kafka消息队列;UMEM,内存存储;DOCKER,容器集群;UDB,数据库;UDW,数据仓库;VIP,内网VIP.\n\n **Response**\n - **DataSet** (list) - 见 **SubnetResource** 模型定义\n - **TotalCount** (int) - 总数\n\n **Response Model**\n\n **SubnetResource**\n\n - **IP** (str) - 资源ip\n - **IPv6Address** (str) - 资源的IPv6地址\n - **Name** (str) - 资源名称\n - **ResourceId** (str) - 资源Id\n - **ResourceType** (str) - 资源类型。对应的资源类型:UHOST,云主机;PHOST,物理云主机;ULB,负载均衡;UHADOOP_HOST,hadoop节点;UFORTRESS_HOST,堡垒机;UNATGW,NAT网关;UKAFKA,Kafka消息队列;UMEM,内存存储;DOCKER,容器集群;UDB,数据库;UDW,数据仓库;VIP,内网VIP.\n - **SubResourceId** (str) - 资源绑定的虚拟网卡的实例ID\n - **SubResourceName** (str) - 资源绑定的虚拟网卡的实例名称\n - **SubResourceType** (str) - 资源绑定的虚拟网卡的类型\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DescribeSubnetResourceRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeSubnetResource\", d, **kwargs)\n return apis.DescribeSubnetResourceResponseSchema().loads(resp)\n\n def describe_vpc(self, req: typing.Optional[dict] = None, **kwargs) -> dict:\n \"\"\" DescribeVPC - 获取VPC信息\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **Limit** (int) -\n - **Offset** (int) -\n - **Tag** (str) - 业务组名称\n - **VPCIds** (list) - VPCId\n\n **Response**\n\n - **DataSet** (list) - 见 **VPCInfo** 模型定义\n\n **Response Model**\n\n **VPCNetworkInfo**\n\n - **Network** (str) - vpc地址空间\n - **SubnetCount** (int) - 地址空间中子网数量\n\n **VPCInfo**\n\n - **CreateTime** (int) -\n - **IPv6Network** (str) - VPC关联的IPv6网段\n - **Name** (str) -\n - **Network** (list) -\n - **NetworkInfo** (list) - 见 **VPCNetworkInfo** 模型定义\n - **OperatorName** (str) - VPC关联的IPv6网段所属运营商\n - **SubnetCount** (int) -\n - **Tag** (str) -\n - **UpdateTime** (int) -\n - **VPCId** (str) - VPCId\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DescribeVPCRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeVPC\", d, **kwargs)\n return apis.DescribeVPCResponseSchema().loads(resp)\n\n def describe_vpc_intercom(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" DescribeVPCIntercom - 获取VPC互通信息\n\n **Request**\n\n - **ProjectId** (str) - (Config) 源VPC所在项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 源VPC所在地域。 参见 `地域和可用区列表 
<https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **VPCId** (str) - (Required) VPC短ID\n - **DstProjectId** (str) - 目的项目ID,默认为全部项目\n - **DstRegion** (str) - 目的VPC所在地域,默认为全部地域\n\n **Response**\n\n - **DataSet** (list) - 见 **VPCIntercomInfo** 模型定义\n\n **Response Model**\n\n **VPCIntercomInfo**\n\n - **DstRegion** (str) - 所属地域\n - **Name** (str) - VPC名字\n - **Network** (list) - VPC的地址空间\n - **ProjectId** (str) - 项目Id\n - **Tag** (str) - 业务组(未分组显示为 Default)\n - **VPCId** (str) - VPCId\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DescribeVPCIntercomRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeVPCIntercom\", d, **kwargs)\n return apis.DescribeVPCIntercomResponseSchema().loads(resp)\n\n def modify_route_rule(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" ModifyRouteRule - 路由策略增、删、改\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **RouteRule** (list) - (Required) 格式: RouteRuleId | 目的网段 | 下一跳类型 | 下一跳 |优先级| 备注 | 增、删、改标志 (下一跳类型为instance或者vip,下一跳为云主机id或者vip的id,优先级使用0,动作标志为add/delete/update) 。\"添加\"示例: test_id | 10.8.0.0/16 | instance | uhost-xd8ja | 0 | Default Route Rule| add (添加的RouteRuleId填任意非空字符串) 。\"删除\"示例: routerule-xk3jxa | 10.8.0.0/16 | instance | uhost-xd8ja | 0 | Default Route Rule| delete (RouteRuleId来自DescribeRouteTable中) 。“修改”示例: routerule-xk3jxa | 10.8.0.0/16 | instance | uhost-cjksa2 | 0 | Default Route Rule| update (RouteRuleId来自DescribeRouteTable中)\n - **RouteTableId** (str) - (Required) 通过DescribeRouteTable拿到\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.ModifyRouteRuleRequestSchema().dumps(d)\n\n resp = self.invoke(\"ModifyRouteRule\", d, **kwargs)\n return apis.ModifyRouteRuleResponseSchema().loads(resp)\n\n def update_route_table_attribute(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" UpdateRouteTableAttribute - 更新路由表基本信息\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **RouteTableId** (str) - (Required) 路由表ID\n - **Name** (str) - 名称\n - **Remark** (str) - 备注\n - **Tag** (str) - 业务组名称\n\n **Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.UpdateRouteTableAttributeRequestSchema().dumps(d)\n\n resp = self.invoke(\"UpdateRouteTableAttribute\", d, **kwargs)\n return apis.UpdateRouteTableAttributeResponseSchema().loads(resp)\n\n def update_subnet_attribute(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" UpdateSubnetAttribute - 更新子网信息\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **SubnetId** (str) - (Required) 子网ID\n - **Name** (str) - 子网名称(如果Name不填写,Tag必须填写)\n - **Tag** (str) - 业务组名称(如果Tag不填写,Name必须填写)\n\n 
**Response**\n\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.UpdateSubnetAttributeRequestSchema().dumps(d)\n\n resp = self.invoke(\"UpdateSubnetAttribute\", d, **kwargs)\n return apis.UpdateSubnetAttributeResponseSchema().loads(resp)\n\n def update_vpc_network(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\" UpdateVPCNetwork - 更新VPC网段\n\n **Request**\n\n - **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_\n - **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_\n - **Network** (list) - (Required) 需要保留的VPC网段。当前仅支持删除VPC网段,添加网段请参考 `AddVPCNetwork <https://docs.ucloud.cn/api/vpc2.0-api/add_vpc_network>`_\n - **VPCId** (str) - (Required) VPC的ID\n\n **Response**\n\n - **Message** (str) - 错误信息\n\n \"\"\"\n # build request\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.UpdateVPCNetworkRequestSchema().dumps(d)\n\n resp = self.invoke(\"UpdateVPCNetwork\", d, **kwargs)\n return apis.UpdateVPCNetworkResponseSchema().loads(resp)\n\n def describe_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DescribeNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"DescribeNetworkInterface\", d, **kwargs)\n return apis.DescribeNetworkInterfaceResponseSchema().loads(resp)\n\n def create_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.CreateNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"CreateNetworkInterface\", d, **kwargs)\n return apis.CreateNetworkInterfaceResponseSchema().loads(resp)\n\n def modify_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.ModifyNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"ModifyNetworkInterface\", d, **kwargs)\n return apis.ModifyNetworkInterfaceResponseSchema().loads(resp)\n\n def delete_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DeleteNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"DeleteNetworkInterface\", d, **kwargs)\n return apis.DeleteNetworkInterfaceResponseSchema().loads(resp)\n\n def attach_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.AttachNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"AttachNetworkInterface\", d, **kwargs)\n return apis.AttachNetworkInterfaceResponseSchema().loads(resp)\n\n def detach_network_interface(\n self, req: typing.Optional[dict] = None, **kwargs\n ) -> dict:\n d = {\"ProjectId\": self.config.project_id, \"Region\": self.config.region}\n req and d.update(req)\n d = apis.DetachNetworkInterfaceRequestSchema().dumps(d)\n\n resp = self.invoke(\"DetachNetworkInterface\", d, **kwargs)\n return 
apis.DetachNetworkInterfaceResponseSchema().loads(resp)\n", "id": "10360118", "language": "Python", "matching_score": 3.319485664367676, "max_stars_count": 0, "path": "ucloud/services/vpc/client.py" }, { "content": "\"\"\" Code is generated by ucloud-model, DO NOT EDIT IT. \"\"\"\n\n\nimport pytest\n\nfrom ucloud.core import exc\nfrom ucloud.testing import funcs, op, env, utest\nfrom ucloud.testing.driver import spec\n\n\nscenario = spec.scenario(5183, \"UCloudStack_02\", owners=[\"<EMAIL>\"])\n\n\n@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())\ndef test_scenario_5183(ustack_client, variables):\n scenario.store.update(variables)\n\n scenario.store[\"Region\"] = \"cn\"\n scenario.store[\"Zone\"] = \"zone-01\"\n scenario.store[\"BeginTime\"] = funcs.get_timestamp(10,) - 3600\n scenario.store[\"EndTime\"] = funcs.get_timestamp(10,)\n\n # 查询VPC信息\n describe_vpc_00(ustack_client)\n\n # 查询安全组信息\n describe_security_group_01(ustack_client)\n\n # 查询存储类型\n describe_storage_type_02(ustack_client)\n\n # 查询主机机型\n describe_vm_type_03(ustack_client)\n\n # 获取镜像信息,包括默认镜像和自制镜像。\n describe_image_04(ustack_client)\n\n # 获取镜像信息,包括默认镜像和自制镜像。\n describe_image_05(ustack_client)\n\n # 创建虚拟机\n create_vm_instance_06(ustack_client)\n\n # 查询虚拟机\n describe_vm_instance_07(ustack_client)\n\n # 申请外网IP\n allocate_eip_08(ustack_client)\n\n # 创建负载均衡\n create_lb_09(ustack_client)\n\n # 获取负载均衡信息\n describe_lb_10(ustack_client)\n\n # 创建负载均衡VServer\n create_vs_11(ustack_client)\n\n # 修改负载均衡VServer\n update_vs_12(ustack_client)\n\n # 添加服务节点\n create_rs_13(ustack_client)\n\n # 修改服务节点\n update_rs_14(ustack_client)\n\n # 获取服务节点信息\n describe_rs_15(ustack_client)\n\n # 禁用服务节点\n disable_rs_16(ustack_client)\n\n # 获取服务节点信息\n describe_rs_17(ustack_client)\n\n # 启用服务节点\n enable_rs_18(ustack_client)\n\n # 获取服务节点信息\n describe_rs_19(ustack_client)\n\n # 创建内容转发规则\n create_vs_policy_20(ustack_client)\n\n # 获取内容转发规则信息\n describe_vs_policy_21(ustack_client)\n\n # 更新内容转发规则\n update_vs_policy_22(ustack_client)\n\n # 删除内容转发规则\n delete_vs_policy_23(ustack_client)\n\n # 移除服务节点\n delete_rs_24(ustack_client)\n\n # 断电虚拟机\n poweroff_vm_instance_25(ustack_client)\n\n # 删除虚拟机\n delete_vm_instance_26(ustack_client)\n\n # 删除负载均衡\n delete_lb_27(ustack_client)\n\n\n@scenario.api(\n title=\"查询VPC信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeVPCResponse\"),\n ],\n action=\"DescribeVPC\",\n)\ndef describe_vpc_00(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_vpc(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"VPCID\"] = utest.value_at_path(resp, \"Infos.0.VPCID\")\n variables[\"SubnetID\"] = utest.value_at_path(\n resp, \"Infos.0.SubnetInfos.0.SubnetID\"\n )\n return resp\n\n\n@scenario.api(\n title=\"查询安全组信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeSecurityGroupResponse\"),\n ],\n action=\"DescribeSecurityGroup\",\n)\ndef describe_security_group_01(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_security_group(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"SGID\"] = 
utest.value_at_path(resp, \"Infos.0.SGID\")\n return resp\n\n\n@scenario.api(\n title=\"查询存储类型\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeStorageTypeResponse\"),\n ],\n action=\"DescribeStorageType\",\n)\ndef describe_storage_type_02(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_storage_type(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"StorageType\"] = utest.value_at_path(resp, \"Infos.0.StorageType\")\n return resp\n\n\n@scenario.api(\n title=\"查询主机机型\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeVMTypeResponse\"),\n ],\n action=\"DescribeVMType\",\n)\ndef describe_vm_type_03(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_vm_type(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"VMType\"] = \"ComputeSetBBBB\"\n return resp\n\n\n@scenario.api(\n title=\"获取镜像信息,包括默认镜像和自制镜像。\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeImageResponse\"),\n ],\n action=\"DescribeImage\",\n)\ndef describe_image_04(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_image(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"Infos\"] = utest.value_at_path(resp, \"Infos\")\n return resp\n\n\n@scenario.api(\n title=\"获取镜像信息,包括默认镜像和自制镜像。\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeImageResponse\"),\n ],\n action=\"DescribeImage\",\n)\ndef describe_image_05(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n \"ImageIDs\": [\n funcs.search_value(\n variables.get(\"Infos\"),\n \"OSName\",\n \"CentOS 6.5 x86_64\",\n \"ImageID\",\n ),\n ],\n }\n\n try:\n resp = client.ucloudstack().describe_image(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"ImageID\"] = utest.value_at_path(resp, \"Infos.0.ImageID\")\n return resp\n\n\n@scenario.api(\n title=\"创建虚拟机\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"CreateVMInstanceResponse\"),\n ],\n action=\"CreateVMInstance\",\n)\ndef create_vm_instance_06(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"WANSGID\": variables.get(\"SGID\"),\n \"VPCID\": variables.get(\"VPCID\"),\n \"VMType\": variables.get(\"VMType\"),\n \"SubnetID\": variables.get(\"SubnetID\"),\n \"Region\": variables.get(\"Region\"),\n \"Quantity\": 1,\n \"Password\": \"<PASSWORD>\",\n \"Name\": \"host_test\",\n \"Memory\": 2048,\n \"ImageID\": variables.get(\"ImageID\"),\n \"DataDiskSpace\": 10,\n \"DataDiskSetType\": \"StorageSetBBBB\",\n \"ChargeType\": 
\"Month\",\n \"CPU\": 1,\n \"BootDiskSetType\": \"StorageSetBBBB\",\n }\n\n try:\n resp = client.ucloudstack().create_vm_instance(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"VMID\"] = utest.value_at_path(resp, \"VMID\")\n return resp\n\n\n@scenario.api(\n title=\"查询虚拟机\",\n max_retries=3,\n retry_interval=1,\n startup_delay=90,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeVMInstanceResponse\"),\n (\"str_eq\", \"Infos.0.State\", \"Running\"),\n ],\n action=\"DescribeVMInstance\",\n)\ndef describe_vm_instance_07(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VMIDs\": [variables.get(\"VMID\"),],\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().describe_vm_instance(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"InterfaceID\"] = utest.value_at_path(\n resp, \"Infos.0.IPInfos.0.InterfaceID\"\n )\n variables[\"DiskID\"] = utest.value_at_path(\n resp, \"Infos.0.DiskInfos.0.DiskID\"\n )\n return resp\n\n\n@scenario.api(\n title=\"申请外网IP\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"AllocateEIPResponse\"),\n ],\n action=\"AllocateEIP\",\n)\ndef allocate_eip_08(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n \"OperatorName\": \"Bgp\",\n \"Name\": \"test_eip\",\n \"ChargeType\": \"Month\",\n \"Bandwidth\": 2,\n }\n\n try:\n resp = client.ucloudstack().allocate_eip(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"EIPID\"] = utest.value_at_path(resp, \"EIPID\")\n return resp\n\n\n@scenario.api(\n title=\"创建负载均衡\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"CreateLBResponse\"),\n ],\n action=\"CreateLB\",\n)\ndef create_lb_09(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VPCID\": variables.get(\"VPCID\"),\n \"VMType\": variables.get(\"VMType\"),\n \"SubnetID\": variables.get(\"SubnetID\"),\n \"SGID\": variables.get(\"SGID\"),\n \"Region\": variables.get(\"Region\"),\n \"Name\": \"lb_test1\",\n \"LBType\": \"WAN\",\n \"EIPID\": variables.get(\"EIPID\"),\n \"ChargeType\": \"Month\",\n }\n\n try:\n resp = client.ucloudstack().create_lb(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"LBID\"] = utest.value_at_path(resp, \"LBID\")\n return resp\n\n\n@scenario.api(\n title=\"获取负载均衡信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=60,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeLBResponse\"),\n (\"str_eq\", \"Infos.0.LBStatus\", \"Running\"),\n ],\n action=\"DescribeLB\",\n)\ndef describe_lb_10(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n \"LBIDs\": [variables.get(\"LBID\"),],\n }\n\n try:\n resp = client.ucloudstack().describe_lb(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"创建负载均衡VServer\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", 
\"CreateVSResponse\"),\n ],\n action=\"CreateVS\",\n)\ndef create_vs_11(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Scheduler\": \"wrr\",\n \"Region\": variables.get(\"Region\"),\n \"Protocol\": \"HTTP\",\n \"Port\": 123,\n \"LBID\": variables.get(\"LBID\"),\n \"HealthcheckType\": \"Port\",\n }\n\n try:\n resp = client.ucloudstack().create_vs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"VSID\"] = utest.value_at_path(resp, \"VSID\")\n return resp\n\n\n@scenario.api(\n title=\"修改负载均衡VServer\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"UpdateVSResponse\"),\n ],\n action=\"UpdateVS\",\n)\ndef update_vs_12(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Scheduler\": \"ip_hash\",\n \"Region\": variables.get(\"Region\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().update_vs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"添加服务节点\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"CreateRSResponse\"),\n ],\n action=\"CreateRS\",\n)\ndef create_rs_13(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Weight\": 1,\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"Port\": 112,\n \"LBID\": variables.get(\"LBID\"),\n \"BindResourceID\": variables.get(\"VMID\"),\n }\n\n try:\n resp = client.ucloudstack().create_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"RSID\"] = utest.value_at_path(resp, \"RSID\")\n return resp\n\n\n@scenario.api(\n title=\"修改服务节点\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"UpdateRSResponse\"),\n ],\n action=\"UpdateRS\",\n)\ndef update_rs_14(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Weight\": 55,\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSID\": variables.get(\"RSID\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().update_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"获取服务节点信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=30,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeRSResponse\"),\n ],\n action=\"DescribeRS\",\n)\ndef describe_rs_15(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().describe_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"RSID_1\"] = utest.value_at_path(resp, \"Infos.0.RSID\")\n return resp\n\n\n@scenario.api(\n title=\"禁用服务节点\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DisableRSResponse\"),\n ],\n action=\"DisableRS\",\n)\ndef 
disable_rs_16(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSID\": variables.get(\"RSID\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().disable_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"获取服务节点信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=30,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeRSResponse\"),\n (\"str_eq\", \"Infos.0.RSMode\", \"Disable\"),\n ],\n action=\"DescribeRS\",\n)\ndef describe_rs_17(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSIDs\": [variables.get(\"RSID\"),],\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().describe_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"启用服务节点\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"EnableRSResponse\"),\n ],\n action=\"EnableRS\",\n)\ndef enable_rs_18(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSID\": variables.get(\"RSID\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().enable_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"获取服务节点信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=30,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeRSResponse\"),\n (\"str_eq\", \"Infos.0.RSMode\", \"Enable\"),\n ],\n action=\"DescribeRS\",\n)\ndef describe_rs_19(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSIDs\": [variables.get(\"RSID\"),],\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().describe_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"创建内容转发规则\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"CreateVSPolicyResponse\"),\n ],\n action=\"CreateVSPolicy\",\n)\ndef create_vs_policy_20(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSIDs\": [variables.get(\"RSID\"),],\n \"Path\": \"/test12321\",\n \"LBID\": variables.get(\"LBID\"),\n \"Domain\": \"test.com11\",\n }\n\n try:\n resp = client.ucloudstack().create_vs_policy(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n variables[\"PolicyID\"] = utest.value_at_path(resp, \"PolicyID\")\n return resp\n\n\n@scenario.api(\n title=\"获取内容转发规则信息\",\n max_retries=3,\n retry_interval=1,\n startup_delay=20,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DescribeVSPolicyResponse\"),\n ],\n action=\"DescribeVSPolicy\",\n)\ndef 
describe_vs_policy_21(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"PolicyIDs\": [variables.get(\"PolicyID\"),],\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().describe_vs_policy(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"更新内容转发规则\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"UpdateVSPolicyResponse\"),\n ],\n action=\"UpdateVSPolicy\",\n)\ndef update_vs_policy_22(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"PolicyID\": variables.get(\"PolicyID\"),\n \"Path\": \"/testnew\",\n \"LBID\": variables.get(\"LBID\"),\n \"Domain\": \"test.comnew\",\n }\n\n try:\n resp = client.ucloudstack().update_vs_policy(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"删除内容转发规则\",\n max_retries=3,\n retry_interval=1,\n startup_delay=10,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DeleteVSPolicyResponse\"),\n ],\n action=\"DeleteVSPolicy\",\n)\ndef delete_vs_policy_23(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"PolicyID\": variables.get(\"PolicyID\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().delete_vs_policy(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"移除服务节点\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DeleteRSResponse\"),\n ],\n action=\"DeleteRS\",\n)\ndef delete_rs_24(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VSID\": variables.get(\"VSID\"),\n \"Region\": variables.get(\"Region\"),\n \"RSID\": variables.get(\"RSID\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().delete_rs(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"断电虚拟机\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"PoweroffVMInstanceResponse\"),\n ],\n action=\"PoweroffVMInstance\",\n)\ndef poweroff_vm_instance_25(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VMID\": variables.get(\"VMID\"),\n \"Region\": variables.get(\"Region\"),\n }\n\n try:\n resp = client.ucloudstack().poweroff_vm_instance(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"删除虚拟机\",\n max_retries=3,\n retry_interval=1,\n startup_delay=60,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DeleteVMInstanceResponse\"),\n ],\n action=\"DeleteVMInstance\",\n)\ndef delete_vm_instance_26(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"VMID\": variables.get(\"VMID\"),\n \"Region\": variables.get(\"Region\"),\n 
}\n\n try:\n resp = client.ucloudstack().delete_vm_instance(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n\n\n@scenario.api(\n title=\"删除负载均衡\",\n max_retries=3,\n retry_interval=1,\n startup_delay=0,\n fast_fail=False,\n validators=lambda variables: [\n (\"str_eq\", \"RetCode\", 0),\n (\"str_eq\", \"Action\", \"DeleteLBResponse\"),\n ],\n action=\"DeleteLB\",\n)\ndef delete_lb_27(step, client):\n variables = step.store\n\n d = {\n \"Zone\": variables.get(\"Zone\"),\n \"Region\": variables.get(\"Region\"),\n \"LBID\": variables.get(\"LBID\"),\n }\n\n try:\n resp = client.ucloudstack().delete_lb(d)\n except exc.RetCodeException as e:\n resp = e.json()\n\n return resp\n", "id": "69784", "language": "Python", "matching_score": 1.3137656450271606, "max_stars_count": 0, "path": "tests/test_acceptance/test_scenario_5183.py" }, { "content": "import collections\n\nfrom ucloud.core.utils import compat\n\n\nclass UCloudException(Exception):\n @property\n def retryable(self):\n return False\n\n\nMAX_COMMON_RET_CODE = 2000\n\n\nclass TransportException(UCloudException):\n pass\n\n\nclass HTTPStatusException(TransportException):\n def __init__(self, status_code: int, request_uuid: str = None):\n self.status_code = status_code\n self.request_uuid = request_uuid\n\n @property\n def retryable(self):\n return self.status_code in [429, 502, 503, 504]\n\n def __str__(self):\n return \"[{uuid}] {self.status_code} http status error\".format(\n self=self, uuid=self.request_uuid or \"*\"\n )\n\n\nclass InvalidResponseException(TransportException):\n def __init__(self, content: bytes, message: str, request_uuid: str = None):\n self.content = content\n self.message = message\n self.request_uuid = request_uuid\n\n @property\n def retryable(self):\n return False\n\n def __str__(self):\n return \"[{uuid}] {self.message}: {self.content}\".format(\n self=self, uuid=self.request_uuid or \"*\"\n )\n\n\nclass RetCodeException(UCloudException):\n def __init__(\n self, action: str, code: int, message: str, request_uuid: str = None\n ):\n self.action = action\n self.code = code\n self.message = message\n self.request_uuid = request_uuid\n\n @property\n def retryable(self):\n return self.code > MAX_COMMON_RET_CODE\n\n def __str__(self):\n return \"[{uuid}] {self.action} - {self.code}: {self.message}\".format(\n self=self, uuid=self.request_uuid or \"*\"\n )\n\n def json(self):\n return {\n \"RetCode\": self.code,\n \"Message\": self.message or \"\",\n \"Action\": self.action or \"\",\n }\n\n\nclass RetryTimeoutException(UCloudException):\n pass\n\n\nclass ValidationException(UCloudException):\n def __init__(self, e=None):\n if isinstance(e, compat.string_types):\n self.errors = [e]\n elif isinstance(e, collections.Iterable):\n self.errors = e or []\n else:\n self.errors = [e]\n\n @property\n def retryable(self):\n return False\n\n def __str__(self):\n return str([str(e) for e in self.errors])\n", "id": "9255034", "language": "Python", "matching_score": 5.144997596740723, "max_stars_count": 37, "path": "ucloud/core/exc/_exc.py" }, { "content": "from ucloud.core.exc._exc import (\n UCloudException,\n ValidationException,\n RetCodeException,\n RetryTimeoutException,\n TransportException,\n HTTPStatusException,\n InvalidResponseException,\n)\n", "id": "1404232", "language": "Python", "matching_score": 1.9862245321273804, "max_stars_count": 37, "path": "ucloud/core/exc/__init__.py" } ]
2.080537
stripes39
[ { "content": "\"\"\"Kestrel Client\"\"\"\n\nfrom client import Client", "id": "4322247", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "kestrel/__init__.py" } ]
0
IgnacioBarroso
[ { "content": "from django import forms\nfrom .models import *\nfrom django.contrib.auth.forms import UserCreationForm\n\nclass CustomUserCreationForm(UserCreationForm):\n pass\n\nclass TaskForm(forms.ModelForm):\n \n class Meta:\n model = Task\n fields = '__all__'\n\n", "id": "11872991", "language": "Python", "matching_score": 2.081418752670288, "max_stars_count": 0, "path": "app/forms.py" }, { "content": "from django.shortcuts import render, redirect\nfrom .models import *\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\n\ndef register(request):\n data = {\n 'form': CustomUserCreationForm()\n }\n \n if request.method == 'POST':\n form = CustomUserCreationForm(data=request.POST)\n if form.is_valid():\n form.save()\n user = authenticate(username=request.POST['username'], password=request.POST['<PASSWORD>'])\n login(request, user)\n return redirect('index')\n else:\n data['form'] = form\n \n return render(request, 'registration/register.html', data)\n\n@login_required\ndef index(request):\n tasks = Task.objects.all()\n \n if request.method == 'POST':\n form = TaskForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/') \n \n if request.method == 'GET':\n date = request.GET.get('date')\n word = request.GET.get('word')\n \n if word and date:\n tasks = tasks.filter(created_at=date, title__contains=word)\n \n if date:\n tasks = tasks.filter(created_at=date)\n \n if word:\n tasks = tasks.filter(title__contains=word)\n \n context = {'tasks': tasks} \n return render(request, 'index.html', context)\n\n@login_required\ndef delete(request, id):\n Task.objects.filter(id=id).delete()\n return redirect('/')\n\n@login_required\ndef update_status(request, id):\n task_status = (Task.objects.filter(id=id))[0].completed\n task = Task.objects.filter(id=id)\n if task_status == True:\n task.update(completed=False)\n else:\n task.update(completed=True)\n return redirect('/')", "id": "5803552", "language": "Python", "matching_score": 2.0492470264434814, "max_stars_count": 0, "path": "app/views.py" }, { "content": "from django.urls import path\nfrom django.urls.conf import include\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('delete/<int:id>', views.delete, name='delete'),\n path('update_status/<int:id>', views.update_status, name='update_status'),\n path('register', views.register, name='register'),\n]\n", "id": "10532438", "language": "Python", "matching_score": 0.5499436259269714, "max_stars_count": 0, "path": "app/urls.py" } ]
2.049247
pascalberski
[ { "content": "\"\"\"\nConstants for NiceHash\n\"\"\"\n# Base component constants\nNAME = \"NiceHash\"\nDOMAIN = \"nicehash\"\nDOMAIN_DATA = f\"{DOMAIN}_data\"\nVERSION = \"0.1.0\"\n\nISSUE_URL = \"https://github.com/brianberg/ha-nicehash/issues\"\n\n# Icons\nICON_CURRENCY_BTC = \"mdi:currency-btc\"\nICON_CURRENCY_EUR = \"mdi:currency-eur\"\nICON_CURRENCY_USD = \"mdi:currency-usd\"\nICON_EXCAVATOR = \"mdi:excavator\"\nICON_MEMORY = \"mdi:memory\"\nICON_PICKAXE = \"mdi:pickaxe\"\nICON_PULSE = \"mdi:pulse\"\nICON_THERMOMETER = \"mdi:thermometer\"\nICON_SPEEDOMETER = \"mdi:speedometer\"\nICON_POWER = \"mdi:power-plug\"\n\n# Platforms\nSENSOR = \"sensor\"\nPLATFORMS = [SENSOR]\n\n\n# Configuration and options\nCONF_API_KEY = \"api_key\"\nCONF_API_SECRET = \"api_secret\"\nCONF_ORGANIZATION_ID = \"organization_id\"\nCONF_CURRENCY = \"currency\"\nCONF_BALANCES_ENABLED = \"balances\"\nCONF_RIGS_ENABLED = \"rigs\"\nCONF_DEVICES_ENABLED = \"devices\"\nCONF_PAYOUTS_ENABLED = \"payouts\"\n\n# Defaults\nDEFAULT_NAME = NAME\nFORMAT_DATETIME = \"%d-%m-%Y %H:%M\"\n\n# Startup\nSTARTUP_MESSAGE = f\"\"\"\n-------------------------------------------------------------------\n{NAME}\nVersion: {VERSION}\nThis is a custom integration!\nIf you have any issues with this you need to open an issue here:\n{ISSUE_URL}\n-------------------------------------------------------------------\n\"\"\"\n\n# NiceHash\nNICEHASH_API_URL = \"https://api2.nicehash.com\"\nNICEHASH_ATTRIBUTION = \"Data provided by NiceHash\"\n# Currency\nCURRENCY_BTC = \"BTC\"\nCURRENCY_USD = \"USD\"\nCURRENCY_EUR = \"EUR\"\n# Balance type\nBALANCE_TYPE_AVAILABLE = \"available\"\nBALANCE_TYPE_PENDING = \"pending\"\nBALANCE_TYPE_TOTAL = \"total\"\n# Device status\nDEVICE_STATUS_UNKNOWN = \"UNKNOWN\"\nDEVICE_STATUS_DISABLED = \"DISABLED\"\nDEVICE_STATUS_INACTIVE = \"INACTIVE\"\nDEVICE_STATUS_MINING = \"MINING\"\nDEVICE_STATUS_BENCHMARKING = \"BENCHMARKING\"\nDEVICE_STATUS_ERROR = \"ERROR\"\nDEVICE_STATUS_PENDING = \"PENDING\"\nDEVICE_STATUS_OFFLINE = \"OFFLINE\"\n# Device stat\nDEVICE_SPEED_RATE = \"device-speed-rate\"\nDEVICE_SPEED_ALGORITHM = \"device-speed-algorithm\"\nDEVICE_LOAD = \"device-load\"\nDEVICE_RPM = \"device-rpm\"\n# Payout types\nPAYOUT_USER = \"USER\"\n# Magic numbers\nMAX_TWO_BYTES = 65536\n", "id": "4323200", "language": "Python", "matching_score": 4.522286415100098, "max_stars_count": 1, "path": "custom_components/nicehash/const.py" }, { "content": "\"\"\"\nConstants for go-echarger\n\"\"\"\n# Base component constants\nfrom homeassistant.const import CONF_NAME\n\n\nNAME = \"go-eCharger Integration\"\nDOMAIN = \"goecharger\"\nDOMAIN_DATA = f\"{DOMAIN}_data\"\nVERSION = \"0.0.2\"\nDEFAULT_NAME = \"goecharger\"\n\nISSUE_URL = \"https://github.com/pascalberski/ha-goecharger/issues\"\n\n# Icons\nICON_BATTERY = \"mdi:battery-50\"\nICON_PLUG = \"mdi:ev-plug-type2\"\nICON_ALLOW = \"mdi:check-bold\"\nICON_ENERGY = \"mdi:lightning-bolt-circle\"\nICON_CURRENT = \"mdi:lightning-bolt-outline\"\nICON_VOLTAGE = \"mdi:lightning-bolt\"\nICON_POWER = \"mdi:car-electric\"\n\n# Platforms\nSENSOR = \"sensor\"\nPLATFORMS = [SENSOR]\n\n# Configuration and options\nCONF_HOST = \"host\"\nCONF_NAME = \"name\"\nCONF_CHARGERS = \"chargers\"\nCONF_ID = \"id\"\n\n# Startup\nSTARTUP_MESSAGE = f\"\"\"\n-------------------------------------------------------------------\n{NAME} by <NAME> (@pascalberski)\nVersion: {VERSION}\nThis is a custom integration!\nIf you have any issues with this you need to open an issue 
here:\n{ISSUE_URL}\n-------------------------------------------------------------------\n\"\"\"\n", "id": "9307444", "language": "Python", "matching_score": 4.529603004455566, "max_stars_count": 1, "path": "custom_components/goecharger/const.py" }, { "content": "\"\"\"\nConstants for nhqm-octune\n\"\"\"\n\n\n\nNAME = \"NiceHash QuickMiner OCTune\"\nDOMAIN = \"octune\"\nDOMAIN_DATA = f\"{DOMAIN}_data\"\nVERSION = \"0.0.4\"\nDEFAULT_NAME = \"octune\"\n\nISSUE_URL = \"https://github.com/pascalberski/ha-nhqm-octune/issues\"\n\nATTRIBUTION = \"Integration by <NAME>\"\n\n# Icons\nICON_HASHRATE = \"mdi:speedometer\"\nICON_FAN = \"mdi:fan\"\nICON_TEMP_HOTSPOT = \"mdi:thermometer-high\"\nICON_TEMP_VRAM = \"mdi:thermometer-lines\"\nICON_TEMP = \"mdi:thermometer\"\nICON_POWER = \"mdi:power-plug\"\nICON_OVERHEATING = \"mdi:fire\"\n\n# Platforms\nSENSOR = \"sensor\"\nPLATFORMS = [SENSOR]\n\n# Configuration and options\nCONF_MINERS = \"miners\"\nCONF_HOST = \"host\"\nCONF_PORT = \"port\"\nCONF_AUTH = \"auth\"\nCONF_NAME = \"name\"\n\nREFRESH_INTERVAL = \"refreshinterval\"\n\n# Startup\nSTARTUP_MESSAGE = f\"\"\"\n-------------------------------------------------------------------\n{NAME} by <NAME> (@pascalberski)\nVersion: {VERSION}\nThis is a custom integration!\nIf you have any issues with this you need to open an issue here:\n{ISSUE_URL}\n-------------------------------------------------------------------\n\"\"\"\n", "id": "5743424", "language": "Python", "matching_score": 0.7996183633804321, "max_stars_count": 0, "path": "custom_components/octune/const.py" }, { "content": "\"\"\"\nSensors\n\"\"\"\nimport logging\nfrom time import sleep\nfrom unittest import result\n\nfrom homeassistant.helpers.entity import Entity\n\nfrom custom_components.octune.api import OCTuneApiClient\n\nfrom .const import (\n ATTRIBUTION,\n ICON_FAN,\n ICON_HASHRATE,\n ICON_OVERHEATING,\n ICON_POWER,\n ICON_TEMP,\n ICON_TEMP_HOTSPOT,\n ICON_TEMP_VRAM,\n)\n\nfrom .coordinators import SensorDataUpdateCoordinator\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Sensor(Entity):\n \"\"\"\n Status Api Sensor\n \"\"\"\n\n def __init__(self, coordinator: SensorDataUpdateCoordinator, device=None):\n \"\"\"Initialize the sensor\"\"\"\n self.coordinator = coordinator\n self.host = coordinator.host\n self.port = coordinator.port\n self.auth = coordinator.auth\n self.minername = coordinator.minername\n self.device = device\n\n @property\n def name(self):\n \"\"\"Device name\"\"\"\n return \"Device\"\n\n @property\n def should_poll(self):\n \"\"\"No need to poll, Coordinator notifies entity of updates\"\"\"\n return False\n\n @property\n def available(self):\n \"\"\"Whether sensor is available\"\"\"\n return self.coordinator.last_update_success\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_HASHRATE\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return None\n\n async def async_added_to_hass(self):\n \"\"\"Connect to dispatcher listening for entity data notifications\"\"\"\n self.async_on_remove(\n self.coordinator.async_add_listener(self.async_write_ha_state)\n )\n\n async def async_update(self):\n \"\"\"Update entity\"\"\"\n await self.coordinator.async_request_refresh()\n\n def _get_data(self):\n try:\n #_LOGGER.debug(\"coordinator.data: %s\", self.coordinator.data)\n devices = self.coordinator.data\n\n for device in devices:\n #_LOGGER.debug(\"comapre %s == %s = %s\", device.get(\"uuid\"), self.device.get(\"uuid\"), (device.get(\"uuid\") == 
self.device.get(\"uuid\")))\n if device.get(\"uuid\") == self.device.get(\"uuid\"):\n #_LOGGER.debug(\"device: %s\", device)\n return device\n except Exception as exc:\n _LOGGER.error(\"Unable to get api data\\n%s\", exc)\n return None\n\n def log_updates(self, value):\n \"\"\" Log new values \"\"\"\n _LOGGER.debug(\"%s (%s, %s, %s): %s\", str(type(self)), self.minername, self.device.get(\"uuid\"), self.device.get(\"name\"), value)\n\n def _get_default_attributes(self, device_type):\n results = {\n \"attribution\": ATTRIBUTION,\n }\n\n if device_type == \"GPU\" or device_type == \"RIG\":\n results[\"rig\"] = self.minername\n results[\"host\"] = self.host\n if device_type == \"GPU\":\n results[\"uuid\"] = self.device.get(\"uuid\")\n\n return results\n\n\nclass TemperatureSensor(Sensor):\n \"\"\"\n displays GPU temperature\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Temperature\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:temperature\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n #_LOGGER.debug(\"data type: %s\", str(type(self._get_data())))\n self._state = float(self._get_data().get(\"gpu_temp\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"°C\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_TEMP\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"temperature\"] = self._state\n return results\n\n\nclass VramTemperatureSensor(Sensor):\n \"\"\"\n displays GPU vram temperature\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} VRAM Temperature\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:vramtemperature\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = float(self._get_data().get(\"__vram_temp\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"°C\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_TEMP_VRAM\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"vram temperature\"] = self._state\n return results\n\n\nclass HotspotTemperatureSensor(Sensor):\n \"\"\"\n displays GPU hotspot temperature\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Hotspot Temperature\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:hotspottemperature\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = float(self._get_data().get(\"__hotspot_temp\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"°C\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return 
ICON_TEMP_HOTSPOT\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"hotspot temperature\"] = self._state\n return results\n\n\nclass HashrateSensor(Sensor):\n \"\"\"\n displays hashrate\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n if (self.device is None):\n return f\"{self.minername} Hashrate\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Hashrate\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n if (self.device is None):\n return f\"octune:{self.minername}:hashrate\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:hashrate\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = 0\n try:\n self._state = round(float(self._get_data().get(\"algorithms\")[0].get(\"speed\"))/1000000, 2)\n except TypeError:\n _LOGGER.debug(\"device not mining\")\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"MH/s\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_HASHRATE\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = None\n if (self.device is None):\n results = self._get_default_attributes(\"RIG\")\n else:\n results = self._get_default_attributes(\"GPU\")\n results[\"hashrate\"] = self._state\n return results\n\nclass FanRpmSensor(Sensor):\n \"\"\"\n displays fan rpm\n \"\"\"\n\n def __init__(self, coordinator: SensorDataUpdateCoordinator, fanid: int, device=None):\n super().__init__(coordinator, device)\n self.fanid = fanid\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Fan {self.fanid} RPM\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:fanrpm:{self.fanid}\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = float(self._get_data().get(\"fans\")[self.fanid].get(\"current_rpm\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"RPM\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_FAN\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"rpm\"] = self._state\n return results\n\nclass FanSensor(Sensor):\n \"\"\"\n displays fan speed in percent\n \"\"\"\n\n def __init__(self, coordinator: SensorDataUpdateCoordinator, fanid: int, device=None):\n super().__init__(coordinator, device)\n self.fanid = fanid\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Fan {self.fanid} Speed\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:fanspeed:{self.fanid}\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = float(self._get_data().get(\"fans\")[self.fanid].get(\"current_level\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"%\"\n\n @property\n 
def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_FAN\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"speed\"] = self._state\n return results\n\nclass PowerSensor(Sensor):\n \"\"\"\n displays power usage in watt\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Power\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:power\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = float(self._get_data().get(\"gpu_power_usage\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"W\"\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_POWER\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"power\"] = self._state\n return results\n\nclass OverheatingSensor(Sensor):\n \"\"\"\n displays if a gpu overheats\n \"\"\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n device_name = self.device.get(\"name\")\n return f\"{self.minername} {device_name} Overheating\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n device_uuid = self.device.get(\"uuid\")\n return f\"octune:{device_uuid}:overheating\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n self._state = bool(self._get_data().get(\"too_hot\"))\n self.log_updates(self._state)\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return None\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_OVERHEATING\n\n @property\n def device_state_attributes(self):\n \"\"\"Sensor device state attributes\"\"\"\n results = self._get_default_attributes(\"GPU\")\n results[\"overheating\"] = self._state\n return results\n", "id": "9835389", "language": "Python", "matching_score": 4.969995021820068, "max_stars_count": 0, "path": "custom_components/octune/devicesensors.py" }, { "content": "\"\"\"\nSensor platform for Charger\n\"\"\"\nimport logging\n\nfrom homeassistant.core import Config, HomeAssistant\n\nfrom custom_components.octune.api import OCTuneApiClient\n\nfrom .devicesensors import (\n FanRpmSensor,\n FanSensor,\n HashrateSensor,\n HotspotTemperatureSensor,\n OverheatingSensor,\n PowerSensor,\n TemperatureSensor,\n VramTemperatureSensor,\n)\n\nfrom .const import (\n DOMAIN,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(\n hass: HomeAssistant, config: Config, async_add_entities, discovery_info=None\n):\n \"\"\"Setup charger sensor platform\"\"\"\n _LOGGER.debug(\"Creating new sensor components\")\n\n data = hass.data[DOMAIN]\n # Configuration\n # host = data.get(\"host\")\n # client = data.get(\"client\")\n\n # charger sensors\n sensor_coordinators = data.get(\"sensor_coordinators\")\n for sensor_coordinator in sensor_coordinators:\n sensors = await create_miner_sensors(sensor_coordinator)\n async_add_entities(sensors, True)\n\n\nasync def create_miner_sensors(coordinator):\n \"\"\" create sensor for a mining rig \"\"\"\n sensors = [\n #HashrateSensor(coordinator, coordinator.host, coordinator.port, coordinator.auth)\n ]\n\n _client = 
OCTuneApiClient(coordinator.host, coordinator.port, coordinator.auth)\n\n devices = (await _client.get_devices())\n for device in devices:\n sensors.extend(create_device_sensors(coordinator, device))\n\n return sensors\n\ndef create_device_sensors(coordinator, device):\n \"\"\" create sensor for a single GPU \"\"\"\n sensors = [\n #HashrateSensor(coordinator, coordinator.host, coordinator.port, coordinator.auth, device)\n TemperatureSensor(coordinator, device),\n VramTemperatureSensor(coordinator, device),\n HotspotTemperatureSensor(coordinator, device),\n HashrateSensor(coordinator, device),\n PowerSensor(coordinator, device),\n OverheatingSensor(coordinator, device)\n ]\n\n fans_len = len(device.get(\"fans\"))\n for i in range(fans_len):\n sensors.append(FanRpmSensor(coordinator, i, device))\n sensors.append(FanSensor(coordinator, i, device))\n\n return sensors\n", "id": "10843339", "language": "Python", "matching_score": 3.2463228702545166, "max_stars_count": 0, "path": "custom_components/octune/sensor.py" }, { "content": "\"\"\"\nSensor platform for Charger\n\"\"\"\nimport logging\n\nfrom homeassistant.core import Config, HomeAssistant\n\nfrom .charger_sensors import (\n CurrentSensor,\n PowerSensor,\n StateSensor,\n AllowSensor,\n TotalEnergySensor,\n TotalPowerSensor,\n VoltageSensor,\n)\n\nfrom .const import (\n DOMAIN,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(\n hass: HomeAssistant, config: Config, async_add_entities, discovery_info=None\n):\n \"\"\"Setup charger sensor platform\"\"\"\n _LOGGER.debug(\"Creating new charger sensor components\")\n\n data = hass.data[DOMAIN]\n # Configuration\n # host = data.get(\"host\")\n # client = data.get(\"client\")\n\n # charger sensors\n sensor_coordinators = data.get(\"sensor_coordinators\")\n for sensor_coordinator in sensor_coordinators:\n charger_sensors = create_charger_sensors(sensor_coordinator)\n async_add_entities(charger_sensors, True)\n\n\ndef create_charger_sensors(coordinator):\n charger_sensors = [\n StateSensor(coordinator, coordinator.charger_id, coordinator.charger_name),\n AllowSensor(coordinator, coordinator.charger_id, coordinator.charger_name),\n TotalEnergySensor(\n coordinator, coordinator.charger_id, coordinator.charger_name\n ),\n TotalPowerSensor(coordinator, coordinator.charger_id, coordinator.charger_name),\n ]\n\n for i in range(1, 4):\n charger_sensors.append(\n VoltageSensor(\n coordinator, coordinator.charger_id, coordinator.charger_name, i\n )\n )\n charger_sensors.append(\n CurrentSensor(\n coordinator, coordinator.charger_id, coordinator.charger_name, i\n )\n )\n charger_sensors.append(\n PowerSensor(\n coordinator, coordinator.charger_id, coordinator.charger_name, i\n )\n )\n\n return charger_sensors\n", "id": "1983833", "language": "Python", "matching_score": 2.322267770767212, "max_stars_count": 1, "path": "custom_components/goecharger/sensor.py" }, { "content": "\"\"\"\nCharger Sensors\n\"\"\"\nimport logging\n\nfrom homeassistant.helpers.entity import Entity\n\nfrom .const import (\n ICON_ALLOW,\n ICON_CURRENT,\n ICON_PLUG,\n ICON_ENERGY,\n ICON_POWER,\n ICON_VOLTAGE,\n)\n\nfrom .coordinators import SensorDataUpdateCoordinator\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ApiSensor(Entity):\n \"\"\"\n Status Api Sensor\n \"\"\"\n\n def __init__(self, coordinator: SensorDataUpdateCoordinator, charger_id: int, charger_name: str, phase=0):\n \"\"\"Initialize the sensor\"\"\"\n self.coordinator = coordinator\n self.charger_id = charger_id\n self.charger_name = 
charger_name\n self.phase = phase\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return \"Charger\"\n\n @property\n def should_poll(self):\n \"\"\"No need to poll, Coordinator notifies entity of updates\"\"\"\n return False\n\n @property\n def available(self):\n \"\"\"Whether sensor is available\"\"\"\n return self.coordinator.last_update_success\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_PLUG\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return None\n\n async def async_added_to_hass(self):\n \"\"\"Connect to dispatcher listening for entity data notifications\"\"\"\n self.async_on_remove(\n self.coordinator.async_add_listener(self.async_write_ha_state)\n )\n\n async def async_update(self):\n \"\"\"Update entity\"\"\"\n await self.coordinator.async_request_refresh()\n\n def _get_status(self):\n try:\n return self.coordinator.data\n except Exception as exc:\n _LOGGER.error(\"Unable to get api data\\n%s\", exc)\n return None\n\n\nclass StateSensor(ApiSensor):\n \"\"\"\n Displays car attribute [Ready|Charging|Waiting|Finished]\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} State\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:state\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n car = self._get_status().get(\"car\")\n _LOGGER.debug(\"status (car): %s\", car)\n if car:\n if car == \"1\":\n self._state = \"Ready\"\n if car == \"2\":\n self._state = \"Charging\"\n if car == \"3\":\n self._state = \"Waiting\"\n if car == \"4\":\n self._state = \"Finished\"\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_PLUG\n\n\nclass AllowSensor(ApiSensor):\n \"\"\"\n Displays alw sensor [True|False]\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Allow\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:allow\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n alw = self._get_status().get(\"alw\")\n _LOGGER.debug(\"allow (alw): %s\", alw)\n if alw:\n if alw == \"1\":\n self._state = True\n if alw == \"0\":\n self._state = False\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_ALLOW\n\n\nclass TotalEnergySensor(ApiSensor):\n \"\"\"\n Displays eto sensor\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Total Energy\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:total_energy\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n eto = self._get_status().get(\"eto\")\n _LOGGER.debug(\"total energy (eto): %s\", eto)\n if eto:\n self._state = float(eto) / 10\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_ENERGY\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"kWh\"\n\n\nclass VoltageSensor(ApiSensor):\n \"\"\"\n Displays nrg sensor (voltage)\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Voltage L{self.phase}\"\n\n 
@property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:voltage_L{self.phase}\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n nrg = self._get_status().get(\"nrg\")\n volt = 0\n _LOGGER.debug(\"energy (nrg): %s\", nrg)\n if nrg:\n volt = nrg[self.phase - 1]\n self._state = int(volt)\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_VOLTAGE\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"V\"\n\n\nclass CurrentSensor(ApiSensor):\n \"\"\"\n Displays nrg sensor (current)\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Current L{self.phase}\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:current_L{self.phase}\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n nrg = self._get_status().get(\"nrg\")\n current = -1\n _LOGGER.debug(\"energy (nrg): %s\", nrg)\n if nrg:\n current = nrg[self.phase + 3]\n self._state = float(current) / 10\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_CURRENT\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"A\"\n\n\nclass PowerSensor(ApiSensor):\n \"\"\"\n Displays nrg sensor (power)\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Power L{self.phase}\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:power_L{self.phase}\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n nrg = self._get_status().get(\"nrg\")\n power = -1\n _LOGGER.debug(\"energy (nrg): %s\", nrg)\n if nrg:\n power = nrg[self.phase + 6]\n self._state = float(power) / 10\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_POWER\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"kW\"\n\n\nclass TotalPowerSensor(ApiSensor):\n \"\"\"\n Displays nrg sensor (total_power)\n \"\"\"\n\n _state = \"Unknown\"\n\n @property\n def name(self):\n \"\"\"Sensor name\"\"\"\n return f\"{self.charger_name} Total Power\"\n\n @property\n def unique_id(self):\n \"\"\"Unique entity id\"\"\"\n return f\"goecharger:{self.charger_id}:total_power\"\n\n @property\n def state(self):\n \"\"\"Sensor state\"\"\"\n nrg = self._get_status().get(\"nrg\")\n power = -1\n _LOGGER.debug(\"energy (nrg): %s\", nrg)\n if nrg:\n power = nrg[11]\n self._state = float(power) / 100\n else:\n self._state = \"Unknown\"\n\n return self._state\n\n @property\n def icon(self):\n \"\"\"Sensor icon\"\"\"\n return ICON_POWER\n\n @property\n def unit_of_measurement(self):\n \"\"\"Sensor unit of measurement\"\"\"\n return \"kW\"\n", "id": "9237693", "language": "Python", "matching_score": 3.0567615032196045, "max_stars_count": 1, "path": "custom_components/goecharger/charger_sensors.py" }, { "content": "\"\"\"\ngo-eCharger sensor data update coordinator\n\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.update_coordinator import (\n DataUpdateCoordinator,\n UpdateFailed,\n)\n\nfrom .const import (\n DOMAIN,\n)\nfrom .charger import 
ChargerApiClient\n\nREFRESH_INTERVAL_STATUS = timedelta(minutes=1)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SensorDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Manages fetching Status Data from charger api\"\"\"\n\n def __init__(self, hass: HomeAssistant, client: ChargerApiClient, charger_id: int, charger_name: str):\n \"\"\"Initialize\"\"\"\n self.name = f\"{DOMAIN}_{charger_id}_sensor_coordinator\"\n self._client = client\n self.charger_id = charger_id\n self.charger_name = charger_name\n\n super().__init__(\n hass, _LOGGER, name=self.name, update_interval=REFRESH_INTERVAL_STATUS\n )\n\n async def _async_update_data(self):\n \"\"\"Update charger sensors\"\"\"\n try:\n return await self._client.get_status()\n except Exception as exc:\n raise UpdateFailed from exc\n", "id": "8587957", "language": "Python", "matching_score": 4.8318281173706055, "max_stars_count": 1, "path": "custom_components/goecharger/coordinators.py" }, { "content": "\"\"\"\nsensor data update coordinator\n\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.update_coordinator import (\n DataUpdateCoordinator,\n UpdateFailed,\n)\n\nfrom .const import (\n DOMAIN,\n REFRESH_INTERVAL,\n)\nfrom .api import OCTuneApiClient\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SensorDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Manages fetching Status Data from octune api\"\"\"\n\n def __init__(self, hass: HomeAssistant, client: OCTuneApiClient, host: str, port: int, auth: str, minername: str, refresh_interval:timedelta):\n \"\"\"Initialize\"\"\"\n self.name = f\"{DOMAIN}_{host}_sensor_coordinator\"\n self._client = client\n self.host = host\n self.port = port\n self.auth = auth\n self.minername = minername\n\n super().__init__(\n hass, _LOGGER, name=self.name, update_interval=refresh_interval\n )\n\n async def _async_update_data(self):\n \"\"\"Update sensors\"\"\"\n try:\n _LOGGER.debug('raise update')\n return await self._client.get_devices()\n except Exception as exc:\n raise UpdateFailed from exc\n", "id": "1472154", "language": "Python", "matching_score": 1.5958694219589233, "max_stars_count": 0, "path": "custom_components/octune/coordinators.py" }, { "content": "\"\"\"\nIntegrates your go-eCharger with Home Assistant\n\nFor more details about this integration, please refer to\nhttps://github.com/pascalberski/ha-goecharger\n\"\"\"\nimport logging\nfrom homeassistant.const import CONF_ID\n\nfrom homeassistant.core import Config, HomeAssistant\nfrom homeassistant.helpers import discovery\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.exceptions import PlatformNotReady\n\nfrom .const import CONF_HOST, CONF_NAME, CONF_CHARGERS\n\nfrom .const import (\n DOMAIN,\n STARTUP_MESSAGE,\n)\nfrom .charger import ChargerApiClient\nfrom .coordinators import (\n SensorDataUpdateCoordinator,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(hass: HomeAssistant, config: Config):\n \"\"\"Set up this integration\"\"\"\n if hass.data.get(DOMAIN) is None:\n hass.data.setdefault(DOMAIN, {})\n _LOGGER.debug(STARTUP_MESSAGE)\n\n charger_config = config[DOMAIN]\n # Configuration\n #host = charger_config.get(CONF_HOST)\n\n chargers = charger_config.get(CONF_CHARGERS)\n sensor_coordinators = []\n\n for charger in chargers:\n host = charger.get(CONF_HOST)\n charger_name = charger.get(CONF_NAME)\n charger_id = charger.get(CONF_ID)\n\n client = ChargerApiClient(host)\n\n _LOGGER.debug(f\"initialising 
sensor coordinator - {charger_id} - {charger_name} - {host}\")\n sensor_coordinator = SensorDataUpdateCoordinator(hass, client, charger_id, charger_name)\n await sensor_coordinator.async_refresh()\n\n if not sensor_coordinator.last_update_success:\n _LOGGER.error(\"Unable to get data from charger\")\n raise PlatformNotReady\n\n sensor_coordinators.append(sensor_coordinator)\n\n hass.data[DOMAIN][\"sensor_coordinators\"] = sensor_coordinators\n\n await discovery.async_load_platform(hass, \"sensor\", DOMAIN, {}, config)\n\n return True\n", "id": "7346391", "language": "Python", "matching_score": 5.414044380187988, "max_stars_count": 1, "path": "custom_components/goecharger/__init__.py" }, { "content": "\"\"\"\nIntegrates for OCTune with Home Assistant\n\nFor more details about this integration, please refer to\nhttps://github.com/pascalberski/ha-nhqm-octune\n\"\"\"\nimport logging\nfrom datetime import timedelta\nfrom homeassistant.core import Config, HomeAssistant\nfrom homeassistant.helpers import discovery\nfrom homeassistant.exceptions import PlatformNotReady\n\nfrom .const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_AUTH, CONF_MINERS, REFRESH_INTERVAL\n\nfrom .const import (\n DOMAIN,\n STARTUP_MESSAGE,\n)\nfrom .api import OCTuneApiClient\nfrom .coordinators import (\n SensorDataUpdateCoordinator,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(hass: HomeAssistant, config: Config):\n \"\"\"Set up this integration\"\"\"\n if hass.data.get(DOMAIN) is None:\n hass.data.setdefault(DOMAIN, {})\n _LOGGER.debug(STARTUP_MESSAGE)\n\n integration_config = config[DOMAIN]\n # Configuration\n #host = charger_config.get(CONF_HOST)\n\n refresh_interval = timedelta(seconds=int(integration_config.get(REFRESH_INTERVAL)))\n miners = integration_config.get(CONF_MINERS)\n sensor_coordinators = []\n\n for miner in miners:\n host = miner.get(CONF_HOST)\n port = miner.get(CONF_PORT)\n auth = miner.get(CONF_AUTH)\n minername = miner.get(CONF_NAME)\n\n client = OCTuneApiClient(host, port, auth)\n\n _LOGGER.debug(\"initialising sensor coordinator %s - %s:%s - %s\", minername, host, port, auth)\n sensor_coordinator = SensorDataUpdateCoordinator(hass, client, host, port, auth, minername, refresh_interval)\n await sensor_coordinator.async_refresh()\n\n if not sensor_coordinator.last_update_success:\n _LOGGER.error(\"Unable to get data from miner\")\n raise PlatformNotReady\n\n sensor_coordinators.append(sensor_coordinator)\n\n hass.data[DOMAIN][\"sensor_coordinators\"] = sensor_coordinators\n\n await discovery.async_load_platform(hass, \"sensor\", DOMAIN, {}, config)\n\n return True\n", "id": "6875370", "language": "Python", "matching_score": 0.7231106162071228, "max_stars_count": 0, "path": "custom_components/octune/__init__.py" }, { "content": "\"\"\"\nOCTune API interface\n\"\"\"\nimport logging\nimport httpx\n\n_LOGGER = logging.getLogger(__name__)\n\nclass OCTuneApiClient:\n \"\"\" OCTune api interface \"\"\"\n def __init__(self, host, port, auth):\n self.host = host\n self.port = port\n self.auth = auth\n\n async def get_devices(self):\n \"\"\" return the combinded json array \"\"\"\n devices = await self.get_devices_json()\n workers = await self.get_workers_json()\n\n i = 0\n for device in devices:\n for worker in workers:\n if (worker.get(\"device_uuid\") == device.get(\"uuid\")):\n devices[i][\"algorithms\"] = worker.get(\"algorithms\")\n workers.remove(worker)\n i += 1\n\n return devices\n\n async def get_device_by_id(self, id):\n \"\"\" return a device by id or uuid \"\"\"\n 
device = (await self.request(\"GET\", \"/api?command={\\\"id\\\":1,\\\"method\\\":\\\"device.get\\\",\\\"params\\\":[\\\"\" + id + \"\\\"]}\")).get(\"device\")\n workers = await self.get_workers_json()\n\n for worker in workers:\n if (worker.get(\"device_uuid\") == device.get(\"uuid\")):\n device[\"algorithms\"] = worker.get(\"algorithms\")\n\n return device\n\n async def get_devices_json(self):\n \"\"\" get devices json array \"\"\"\n return (await self.request(\"GET\", \"/devices_cuda\")).get(\"devices\")\n\n async def get_workers_json(self):\n \"\"\" get workers json array \"\"\"\n return (await self.request(\"GET\", \"/workers\")).get(\"workers\")\n\n async def request(self, method, path):\n \"\"\" Request Helper \"\"\"\n async with httpx.AsyncClient() as client:\n try:\n url = \"http://\" + self.host + \":\" + str(self.port) + path\n _LOGGER.debug(\"http \" + method + \" request: \" + url)\n\n response = None\n if (method == \"GET\"):\n response = await client.get(url)\n\n if response.status_code == 200:\n return response.json()\n else:\n raise Exception(\"error while communication with api\")\n except Exception as exc:\n _LOGGER.error(str(type(exc)))\n", "id": "4742421", "language": "Python", "matching_score": 2.2931315898895264, "max_stars_count": 0, "path": "custom_components/octune/api.py" }, { "content": "\"\"\"\ngo-eCharger API interface\n\nReferences:\n - https://github.com/goecharger/go-eCharger-API-v2/\n - https://github.com/goecharger/go-eCharger-API-v1/\n\"\"\"\nimport httpx\nimport logging\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ChargerApiClient:\n def __init__(self, host):\n self.host = host\n\n async def get_status(self):\n return await self.request(\"GET\", \"/status\")\n\n async def request(self, method, path):\n async with httpx.AsyncClient() as client:\n url = \"http://\" + self.host + path\n _LOGGER.debug(url)\n\n response = await client.request(method, url)\n _LOGGER.debug(f\"response: {response}\")\n\n if response.status_code == 200:\n return response.json()\n else:\n raise Exception(\"error while communication with charger api\")\n", "id": "2382747", "language": "Python", "matching_score": 1.1751869916915894, "max_stars_count": 1, "path": "custom_components/goecharger/charger.py" } ]
3.056762
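The caosbad record above wires a Home Assistant DataUpdateCoordinator to an httpx-backed charger client, but the sensor platform that consumes the coordinator is not part of the record. A minimal sketch of how such a coordinator is usually consumed, assuming Home Assistant's CoordinatorEntity/SensorEntity helpers; the class name and the "amp" status key are hypothetical, not taken from the record:

# Hypothetical consumer of the SensorDataUpdateCoordinator shown above.
# CoordinatorEntity re-renders the entity whenever the coordinator refreshes;
# the status key ("amp") is an illustration only, not from the record.
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.update_coordinator import CoordinatorEntity


class ChargerStatusSensor(CoordinatorEntity, SensorEntity):
    def __init__(self, coordinator, key="amp"):
        super().__init__(coordinator)
        self._key = key
        self._attr_name = f"{coordinator.charger_name} {key}"

    @property
    def native_value(self):
        # coordinator.data holds the JSON dict returned by get_status()
        return (self.coordinator.data or {}).get(self._key)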
Weyaaron
[ { "content": "from typing import Any\n\n\ndef post_validate_result_in_bounds(min: Any, max: Any):\n print(\"One\")\n\n def wrapper_with_func(func):\n print(\"Two\")\n\n def raise_error(*args):\n print(\"Three\")\n result = func(*args)\n if not min < result < max:\n raise AssertionError(\"The Validator failed!\")\n return result\n\n return raise_error\n\n return wrapper_with_func\n\n\ndef decorator_with_arg(*name, **kw):\n range = kw.get(\"is_in_range\", [])\n print(f\"Has been called with{name}\")\n\n def inner_func(func):\n return func\n\n return inner_func\n", "id": "7884479", "language": "Python", "matching_score": 2.934943199157715, "max_stars_count": 0, "path": "src/post_execution/post_validators.py" }, { "content": "from typeguard import typechecked\n\nfrom src.post_execution.post_validators import post_validate_result_in_bounds\n\n\n@post_validate_result_in_bounds(min=0, max=100)\n#@typechecked\ndef add_one_up_until(input: int, max: int) -> int:\n if input < max:\n return input + 1\n return input\n\n\nif __name__ == '__main__':\n for i in range(0, 10):\n print(add_one_up_until(i, 5))\n\n\"\"\"\ndef pretty_sumab(func):\n def inner(a, b):\n print(str(a) + \" + \" + str(b) + \" is \", end=\"\")\n return func(a, b)\n\n return inner\n\n\n@pretty_sumab\ndef sumab(a, b):\n summed = a + b\n print(summed)\n\n\n#if __name__ == \"__main__\":\n# sumab(5, 3)\n\n\n\nfrom src.input_validator import this_is_a_wrapper\n\n\n@this_is_a_wrapper\ndef basic_func(age:int)->int:\n return age +10\n\n\nif __name__ == '__main__':\n basic_func(10)\n \"\"\"\n", "id": "8151469", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "testcode.py" }, { "content": "from typing import List\nimport pandas as pd\n\n\n\n\ndef read_template()->str:\n\n with open('./template.csv', 'r') as file:\n return file.read()\n\n\ndef bind_dates_with_data(date_triples, data_triples) -> dict:\n result = {}\n for tuple_date_el in date_triples:\n min_distance = 10\n data_found = None\n\n for tuple_data_el in data_triples:\n distance = int(abs(tuple_data_el[0] - tuple_date_el[0]))\n if distance < min_distance:\n min_distance = distance\n data_found = tuple_data_el[2]\n result.update({tuple_date_el[2]: data_found})\n\n return result\n\n\ndef load_frame() -> pd.DataFrame():\n\n frame = pd.read_csv(\"./template.csv\")\n\n return frame\n", "id": "7235585", "language": "Python", "matching_score": 1.9951132535934448, "max_stars_count": 0, "path": "src/utils.py" }, { "content": "from datetime import datetime\n\nfrom src.mine import filter_dates, filter_temps, filter_times\nimport pandas as pd\n\nfrom src.classes.pdfpagecontainer import PdfPageContainer\nfrom src.utils import load_frame, bind_dates_with_data\n\n\nclass Zyklus:\n def __init__(self, pdf_page: PdfPageContainer) -> None:\n\n self.dataframe = load_frame()\n self.pdf_page = pdf_page\n self.length = 0\n self.year = 0\n\n\n def extract_temps(self):\n\n date_triples = filter_dates(self.pdf_page.triples)\n temp_triples = filter_temps(self.pdf_page.triples)\n\n bound_temps = bind_dates_with_data(date_triples, temp_triples)\n\n def map_temp(key_el):\n match_str = key_el.strftime(\"%d.%m\") + \".\"\n value= bound_temps[match_str]\n if value is None:\n return 'None'\n return value\n\n def map_false(arg):\n return False\n\n self.dataframe[\"temperature.value\"] = self.dataframe[\"date\"].map(map_temp)\n self.dataframe[\"temperature.exclude\"] = self.dataframe[\"date\"].map(map_false)\n self.dataframe.drop(\n index=self.dataframe[self.dataframe[\"temperature.value\"] == 
\"None\"].index,\n inplace=True,\n )\n\n def extract_dates(self):\n date_triples = filter_dates(self.pdf_page.triples)\n\n year = self.pdf_page.triples[3][2].split(\".\")[4]\n for triple_el in date_triples:\n date_str = triple_el[2] + year\n date_result = datetime.strptime(date_str, \"%d.%m.%Y\")\n new_series = pd.Series({\"date\": date_result})\n self.dataframe = self.dataframe.append(new_series, ignore_index=True)\n\n\n def extract_times(self):\n time_tuples = filter_times(self.pdf_page.triples)\n date_tuples = filter_dates(self.pdf_page.triples)\n\n bound_data = bind_dates_with_data(date_tuples, time_tuples)\n\n def map_dict(key_el):\n\n match_str = key_el.strftime(\"%d.%m\") + \".\"\n try:\n return bound_data[match_str]\n except KeyError:\n return None\n\n self.dataframe[\"temperature.time\"] = self.dataframe[\"date\"].map(map_dict)\n\n def extract_bleeding_values(self) -> None:\n shapes = self.pdf_page.shapes\n triples = self.pdf_page.triples\n date_triples = filter_dates(triples)\n\n result = {}\n for shape_el in shapes:\n x_koordinate = shape_el.path[0][1]\n min_distance = 10\n date_found = None\n for tuple_el in date_triples:\n distance = int(abs(tuple_el[0] - x_koordinate))\n if distance < min_distance:\n min_distance = distance\n date_found = tuple_el\n result.update({date_found[2]: shape_el})\n\n def map_bleeding(date_arg):\n length_type = {13: 2, 7: 3, 14: 1}\n\n match_str = date_arg.strftime(\"%d.%m\") + \".\"\n try:\n shape = result[match_str]\n except KeyError:\n return None\n return length_type[len(shape.path)]\n\n self.dataframe[\"bleeding.value\"] = self.dataframe[\"date\"].map(map_bleeding)\n\n def map_false(arg):\n return False\n\n self.dataframe[\"bleeding.exclude\"] = self.dataframe[\"date\"].map(map_false)\n\n def extract_mukus_values(\n self,\n ):\n triples = self.pdf_page.triples\n date_triples = filter_dates(triples)\n\n allowed_values = [\"S\", \"S+\"]\n str_values_present = [el for el in triples if el[2] in allowed_values]\n\n bound_values = bind_dates_with_data(date_triples, str_values_present)\n\n def map_feeling(arg):\n match_str = arg.strftime(\"%d.%m\") + \".\"\n if match_str in bound_values.keys():\n return 1\n return 0\n\n def map_texture(arg):\n match_str = arg.strftime(\"%d.%m\") + \".\"\n\n try:\n value = bound_values[match_str]\n #todo: Explore the none value\n except KeyError:\n return None\n\n if value == \"S+\":\n return 2\n if value == \"S\":\n return 1\n return 0\n\n def map_false(arg):\n return False\n\n self.dataframe[\"mucus.feeling\"] = self.dataframe[\"date\"].map(map_feeling)\n self.dataframe[\"mucus.texture\"] = self.dataframe[\"date\"].map(map_texture)\n self.dataframe[\"mucus.exclude\"] = self.dataframe[\"date\"].map(map_false)\n self.dataframe[\"mucus.value\"] = self.dataframe[\"mucus.feeling\"] + self.dataframe[\"mucus.texture\"]\n\n\n", "id": "10321506", "language": "Python", "matching_score": 2.835944652557373, "max_stars_count": 0, "path": "src/classes/zyklus.py" }, { "content": "from datetime import date\nfrom pathlib import Path\n\nfrom src.mine import load_pdf\n\nfrom src.classes.zyklus import Zyklus\nfrom src.utils import read_template\n\n\ndef main():\n\n\n result = read_template()\n list_triple_lists = load_pdf(Path(\"./data/data.pdf\"))\n\n for i in range(len(list_triple_lists)):\n new_zyklus = Zyklus(list_triple_lists[i])\n new_zyklus.extract_dates()\n new_zyklus.extract_times()\n new_zyklus.extract_bleeding_values()\n new_zyklus.extract_temps()\n new_zyklus.extract_mukus_values()\n new_zyklus.dataframe.set_index(\"date\", 
inplace=True)\n new_zyklus.dataframe[\"temperature.value\"].dropna(inplace=True)\n new_zyklus.dataframe[\"temperature.time\"].dropna(inplace=True)\n result +=new_zyklus.dataframe.to_csv(header=False)\n\n today = date.today()\n\n outpath = f'result_{today}.csv'\n if Path(outpath).is_file():\n print(\"The target file already exists, please move/remove it first\")\n exit(-1)\n with open(outpath,'w') as file:\n file.write(result)\n\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "5462586", "language": "Python", "matching_score": 1.9789685010910034, "max_stars_count": 0, "path": "main.py" }, { "content": "import re\nfrom typing import List\n\nimport minecart\n\nfrom pathlib import Path\n\nfrom src.classes.pdfpagecontainer import PdfPageContainer\n\n\ndef filter_dates(koordinate_list: List[tuple]) -> List[tuple]:\n\n date_reg = re.compile(\"[0-9]{2}\\.\")\n date_list = [el for el in koordinate_list if re.fullmatch(date_reg, el[2])]\n\n result = []\n\n for i in range(0, len(date_list) - 1, 2):\n final_date = date_list[i][2] + date_list[i + 1][2]\n result.append((date_list[i][0], date_list[i][1], final_date))\n\n return result\n\n\ndef filter_times(triples) -> List:\n past_index = False\n temp_list = []\n\n for tuple_el in triples:\n # rather hacky, might break\n if \",\" in tuple_el[2]:\n past_index = False\n if past_index:\n temp_list.append(tuple_el)\n if tuple_el[2] == \"UHRZEIT\":\n past_index = True\n\n result = []\n for i in range(0, len(temp_list) - 1, 2):\n final_temp = temp_list[i][2] + temp_list[i + 1][2]\n result.append((temp_list[i][0], temp_list[i][1], final_temp))\n\n return result\n\n\ndef filter_temps(triples) -> List:\n\n past_index = False\n temp_list = []\n\n for tuple_el in triples:\n if tuple_el[2] == \"PERIODE\":\n past_index = False\n if past_index:\n temp_list.append(tuple_el)\n if tuple_el[2] == \"BT\":\n past_index = True\n\n result = []\n for i in range(0, len(temp_list) - 1, 2):\n final_temp = temp_list[i][2] + temp_list[i + 1][2]\n final_temp = final_temp.replace(\",\", \".\")\n result.append((temp_list[i][0], temp_list[i][1], final_temp.strip('\"')))\n\n return result\n\n\ndef load_pdf(pdf_path: Path) -> List[PdfPageContainer]:\n\n target_color = (1, 0, 0.498039)\n list_result = []\n with open(pdf_path, \"rb\") as file:\n\n doc = minecart.Document(file)\n\n for page_el in doc.iter_pages():\n new_container = PdfPageContainer()\n for letter_el in page_el.letterings:\n bbox = letter_el.get_bbox()\n new_container.triples.append(\n (int(bbox[0]), int(bbox[1]), str(letter_el))\n )\n\n filled_shapes = [el for el in page_el.shapes if el.fill is not None]\n new_container.shapes = [\n el for el in filled_shapes if el.fill.color.as_rgb() == target_color\n ]\n\n list_result.append(new_container)\n return list_result\n", "id": "3634663", "language": "Python", "matching_score": 3.2611608505249023, "max_stars_count": 0, "path": "src/mine.py" }, { "content": "class PdfPageContainer:\n def __init__(self):\n self.triples = []\n self.shapes = []\n", "id": "12298364", "language": "Python", "matching_score": 0.4574931561946869, "max_stars_count": 0, "path": "src/classes/pdfpagecontainer.py" } ]
1.995113
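The Weyaaron record's src/utils.py pairs each date column with the measurement whose x-coordinate lies closest, within a 10-point tolerance (bind_dates_with_data). A self-contained restatement of that nearest-column matching; the sample triples below are invented for illustration and mimic the (x, y, text) letterings produced by minecart:

# Nearest-x matching as used by bind_dates_with_data in the record.
def bind_by_nearest_x(date_triples, data_triples, tolerance=10):
    bound = {}
    for date_x, _, date_text in date_triples:
        best_value, best_dist = None, tolerance
        for value_x, _, value_text in data_triples:
            dist = abs(value_x - date_x)
            if dist < best_dist:
                best_dist, best_value = dist, value_text
        bound[date_text] = best_value
    return bound


dates = [(100, 700, "01.02."), (120, 700, "02.02.")]
temps = [(102, 650, "36.5"), (119, 650, "36.7"), (300, 650, "37.1")]
print(bind_by_nearest_x(dates, temps))  # {'01.02.': '36.5', '02.02.': '36.7'}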
tomaash
[ { "content": "\ndef appender(dict,arr,val):\n if len(arr) > 1:\n try:\n dict[arr[0]]\n except KeyError:\n dict[arr[0]]={}\n return {arr[0]: appender(dict[arr[0]],arr[1:],val)}\n else:\n dict[arr[0]]=val\n return \n\ndef nested_params(prm):\n prm2={}\n for param in prm:\n parray = param.replace(']',\"\").split('[')\n appender(prm2,parray,prm[param])\n return prm2\n\nprint nested_params(params)\n", "id": "8147986", "language": "Python", "matching_score": 1.1104350090026855, "max_stars_count": 1, "path": "tmp/test.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 GAEO Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"GAEO controller package\n\"\"\"\n\nimport new\nimport os\nimport re\nimport logging\n\nfrom google.appengine.ext.webapp import template\n\nimport gaeo\nimport errors\nimport helper\n\nclass BaseController(object):\n \"\"\"The BaseController is the base class of action controllers.\n Action controller handles the requests from clients.\n \"\"\"\n\n\n def __init__(self, hnd, params = {}):\n self.hnd = hnd\n self.resp = self.response = hnd.response\n self.req = self.request = hnd.request\n self.params = params\n\n rp = hnd.request.params.mixed()\n for k in rp:\n self.params[k] = rp[k]\n\n self._controller = params['controller']\n self._action = params['action']\n self.has_rendered = False\n self.__config = gaeo.Config()\n\n self.__tpldir = os.path.join(\n self.__config.template_dir,\n self._controller\n )\n self._template_values = {}\n\n # implement parameter nesting as in rails\n self.params=self.__nested_params(self.params)\n \n # detect the mobile platform\n self._is_mobile = self.__detect_mobile()\n self._is_iphone = self.__detect_iphone()\n\n # create the session\n try:\n store = self.__config.session_store\n exec('from gaeo.session.%s import %sSession' %\n (store, store.capitalize()))\n\n self.session = eval('%sSession' % store.capitalize())(\n hnd, '%s_session' % self.__config.app_name)\n except:\n raise errors.ControllerInitError('Initialize Session Error!')\n\n # add helpers\n helpers = dir(helper)\n for h in helpers:\n if not re.match('^__.*__$', h):\n self.__dict__[h] = new.instancemethod(eval('helper.%s' % h), self, BaseController)\n\n def before_action(self):\n pass\n\n def after_action(self):\n pass\n\n def render(self, *text, **opt):\n o = self.resp.out\n h = self.resp.headers\n\n if text:\n h['Content-Type'] = 'text/plain'\n for t in text:\n o.write(str(t))\n elif opt:\n if opt.get('text'):\n o.write(str(opt.get('text')))\n elif opt.get('json'):\n h['Content-Type'] = 'application/json; charset=utf-8'\n o.write(opt.get('json'))\n elif opt.get('xml'):\n h['Content-Type'] = 'text/xml; charset=utf-8'\n o.write(opt.get('xml'))\n elif opt.get('template'):\n context = {}\n if isinstance(opt.get('values'), dict):\n context.update(opt.get('values'))\n o.write(template.render(\n os.path.join(self.__tpldir,\n opt.get('template') + '.html'),\n context\n ))\n else:\n raise errors.ControllerRenderTypeError('Render type error')\n self.has_rendered = 
True\n\n def redirect(self, url, perm = True):\n self.has_rendered = True # dirty hack, make gaeo don't find the template\n self.hnd.redirect(url, perm)\n\n def __detect_mobile(self):\n h = self.request.headers\n\n # wap.wml\n ha = h.get('Accept')\n if ha and (ha.find('text/vnd.wap.wml') > -1 or ha.find('application/vnd.wap.xhtml+xml') > -1):\n return True\n \n wap_profile = h.get('X-Wap-Profile')\n profile = h.get(\"Profile\")\n opera_mini = h.get('X-OperaMini-Features')\n ua_pixels = h.get('UA-pixels')\n \n if wap_profile or profile or opera_mini or ua_pixels:\n return True\n \n # FIXME: add common user agents\n common_uas = ['sony', 'noki', 'java', 'midp', 'benq', 'wap-', 'wapi']\n \n ua = h.get('User-Agent')\n if ua and ua[0:4].lower() in common_uas:\n return True\n \n return False\n \n def __detect_iphone(self):\n \"\"\" for detecting iPhone/iPod \"\"\"\n ua = self.request.headers.get('User-Agent')\n if ua:\n ua = ua.lower();\n return ua.find('iphone') > -1 or ua.find('ipod') > -1\n else:\n return False\n \n\n # Helper methods for parameter nesting as in rails\n def __appender(self,dict,arr,val):\n if len(arr) > 1:\n try:\n dict[arr[0]]\n except KeyError:\n dict[arr[0]]={}\n return {arr[0]: self.__appender(dict[arr[0]],arr[1:],val)}\n else:\n dict[arr[0]]=val\n return \n\n def __nested_params(self,prm):\n prm2={}\n for param in prm:\n parray = param.replace(']',\"\").split('[').split('-')\n self.__appender(prm2,parray,prm[param])\n return prm2\n \n", "id": "838310", "language": "Python", "matching_score": 4.338656425476074, "max_stars_count": 1, "path": "gaeo_old/controller/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 <NAME> & <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\" The gaeo controller errors \"\"\"\n\nclass ControllerError(Exception):\n \"\"\" Base error class of controllers' errors \"\"\"\n\nclass ControllerInitError(ControllerError):\n pass\n\nclass ControllerRenderError(ControllerError):\n \"\"\" error occured while render \"\"\"\n\nclass ControllerRenderTypeError(ControllerRenderError):\n \"\"\" Render an invalid type \"\"\"\n", "id": "10785981", "language": "Python", "matching_score": 0.6124660968780518, "max_stars_count": 1, "path": "gaeo_old/controller/errors.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 <NAME> & <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport re\nfrom copy import copy\nimport logging\n\nclass RuleError(Exception):\n \"\"\"Base Error\"\"\"\n\nclass 
RuleNoControllerError(RuleError):\n \"\"\"No controller\"\"\"\n\nclass Rule(object):\n \"\"\" Handles each routing rule. \"\"\"\n def __init__(self, pattern, **param):\n super(Rule, self).__init__()\n\n self.pattern = pattern[:-1] if pattern.endswith('/') else pattern\n self.regex = self.pattern\n self.param = param\n self.matches = re.findall(':([^/]+)', self.pattern)\n\n for i in range(len(self.matches)):\n self.regex = self.regex.replace(':' + self.matches[i], '([^/]+)')\n self.param[self.matches[i]] = i\n self.validate()\n\n def __eq__(self, other):\n return self.regex == other.regex\n\n def __getattr__(self, attr):\n try:\n return getattr(self, 'param')[attr]\n except KeyError:\n raise AttributeError, attr\n\n def __str__(self):\n from operator import itemgetter\n return ', '.join(['%s: %s' % (k, v) for k, v in \\\n sorted(self.param.items(), key = itemgetter(1))])\n\n def match_url(self, url):\n if url.endswith('/'):\n url = url[:-1]\n try:\n mat = re.findall(self.regex, url)[0]\n except IndexError:\n return None\n\n if isinstance(mat, basestring):\n if self.matches:\n self.param[self.matches[0]] = mat\n elif isinstance(mat, tuple):\n for i in range(len(mat)):\n self.param[self.matches[i]] = mat[i]\n\n return self.param\n\n def url_for(self, controller, **param):\n param['controller'] = controller\n url = self.pattern\n for match in self.matches:\n if match not in param:\n return None\n url = url.replace(':' + match, str(param[match]))\n del param[match]\n\n # extra parameters\n ep = '&'.join(['%s=%s' % (k, v) for k, v in param.items() if k not in self.param])\n\n return url + '?' + ep if ep else url\n\n def validate(self):\n if 'controller' not in self.param:\n raise RuleNoControllerError\n\n if 'action' not in self.param:\n self.param['action'] = 'index'\n\n if not self.regex.startswith('^'):\n self.regex = '^' + self.regex\n if not self.regex.endswith('$'):\n self.regex = self.regex + '$'\n\n\nclass Router:\n \"\"\" Handles the url routing... \"\"\"\n\n class __impl:\n def __init__(self):\n self.__routing_root = {\n 'controller': 'welcome',\n 'action': 'index',\n }\n self.__routing_table = []\n # used to store default pattern (but match last)\n self.__routing_table_fallback = [\n Rule('/:controller/:action'),\n Rule('/:controller')\n ]\n\n def connect(self, pattern, **tbl):\n \"\"\" Add routing pattern \"\"\"\n\n rule = Rule(pattern, **tbl)\n if rule not in self.__routing_table:\n self.__routing_table.append(rule)\n\n def disconnect(self, pattern):\n rule = Rule(pattern)\n if rule in self.__routing_table:\n self.__routing_table.remove(rule)\n\n def root(self, **map):\n \"\"\" Set the root (/) routing... 
\"\"\"\n self.__routing_root['controller'] = \\\n map.get('controller', self.__routing_root['controller'])\n self.__routing_root['action'] = \\\n map.get('action', self.__routing_root['action'])\n\n def resolve(self, url):\n \"\"\" Resolve the url to the correct mapping \"\"\"\n\n if url == '/':\n return self.__routing_root\n\n ret = self.__resolve_by_table(url, self.__routing_table)\n if ret is None: # fallback\n ret = self.__resolve_by_table(url, self.__routing_table_fallback)\n return ret\n\n def __resolve_by_table(self, url, rules):\n \"\"\" Resolve url by the given table \"\"\"\n for r in rules:\n ret = r.match_url(url)\n if ret:\n return ret\n return None\n\n def url_for(self, controller, **param):\n for r in self.__routing_table:\n ret = r.url_for(controller, **param)\n if ret:\n return ret\n return None\n\n __instance = None\n\n def __init__(self):\n if Router.__instance is None:\n Router.__instance = Router.__impl()\n self.__dict__['_Router__instance'] = Router.__instance\n\n def __getattr__(self, attr):\n return getattr(self.__instance, attr)\n\n def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)\n\n", "id": "6510313", "language": "Python", "matching_score": 2.6541736125946045, "max_stars_count": 1, "path": "gaeo/dispatch/router.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 <NAME> & <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\" The gaeo library package. 
\"\"\"\n\nimport re\nimport logging\n\nfrom google.appengine.ext import webapp\n\nfrom gaeo.dispatch import dispatcher\n\n\nclass Config:\n \"\"\" The singleton of GAEO's configuration \"\"\"\n\n class __impl:\n def __init__(self):\n self.template_dir = ''\n self.session_store = 'memcache'\n self.app_name = ''\n\n __instance = None\n\n def __init__(self):\n if Config.__instance is None:\n Config.__instance = Config.__impl()\n\n self.__dict__['_Config__instance'] = Config.__instance\n\n def __getattr__(self, attr):\n return getattr(self.__instance, attr)\n\n def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)\n\n\nclass MainHandler(webapp.RequestHandler):\n \"\"\"Handles all requests\n \"\"\"\n def get(self):\n self.__process_request()\n\n def post(self):\n self.__process_request()\n\n def __process_request(self):\n \"\"\"dispatch the request\"\"\"\n dispatcher.dispatch(self)\n\n", "id": "11567875", "language": "Python", "matching_score": 1.749193787574768, "max_stars_count": 1, "path": "gaeo_old/__init__.py" }, { "content": "class Proxy(object):\n pass\n # def __setattr__(self, name, value):\n # self.__dict__[name] = value\n \n \np=Proxy.__dict__\n# print p\n\no=Proxy()\no.__setattr__('kill',3)\n\n\nprint o.kill", "id": "11212509", "language": "Python", "matching_score": 0.4092594087123871, "max_stars_count": 1, "path": "tmp/test2.py" }, { "content": "from google.appengine.ext import db\nfrom gaeo.model import BaseModel\nfrom google.appengine.ext.db.djangoforms import *\n\nclass Proxy(object):\n def __init__(self,user_data):\n for key in user_data:\n self.__setattr__(key,user_data[key])\n\nclass User(BaseModel): \n first_name = db.StringProperty()\n last_name = db.StringProperty()\n password = db.StringProperty()\n email = db.EmailProperty()\n\n def name(self):\n return self.first_name+\" \"+self.last_name\n\n def __unicode__(self):\n return self.first_name+\" \"+self.last_name\n\n\nclass Group(BaseModel):\n name = db.StringProperty()\n description = db.TextProperty()\n\n @property\n def members(self):\n return Contact.gql(\"WHERE groups = :1\", self.key())\n\nROLES = (\n # value, label\n ('a',\"role a\"),\n ('b',\"role b\"),\n ('c',\"role c\")\n)\n\nlistgroups = map(lambda x: (x.key(),x.name),Group().all().fetch(100))\n#print listgroups\n\nclass Contact(BaseModel):\n # User that owns this entry.\n # owner = db.UserProperty()\n owner = db.ReferenceProperty(User, required=False, collection_name='companies')\n\n # Basic info.\n name = db.StringProperty()\n birth_day = db.DateProperty()\n\n # Address info.\n address = db.PostalAddressProperty()\n roles = db.StringListProperty()\n\n # The original organization properties have been replaced by\n # an implicitly created property called 'companies'. 
\n\n # Group affiliation\n groups = db.ListProperty(db.Key)\n\nclass ContactForm(ModelForm):\n roles = forms.CharField(widget=forms.CheckboxSelectMultiple(choices=ROLES))\n groups = forms.CharField(widget=forms.CheckboxSelectMultiple(choices=listgroups))\n class Meta:\n model = Contact\n\nclass GroupForm(ModelForm):\n class Meta:\n model = Group\n\nclass Company(BaseModel):\n name = db.StringProperty()\n description = db.StringProperty()\n company_address = db.PostalAddressProperty()\n\nclass ContactCompany(BaseModel):\n contact = db.ReferenceProperty(Contact,\n required=True,\n collection_name='companies')\n company = db.ReferenceProperty(Company,\n required=True,\n collection_name='contacts')\n title = db.StringProperty()", "id": "6746305", "language": "Python", "matching_score": 2.56160306930542, "max_stars_count": 1, "path": "application/model/core.py" }, { "content": "from gaeo.controller import BaseController\nfrom model.core import *\nimport time\nimport datetime\nimport logging\n\nclass ContactController(BaseController):\n def index(self):\n self.contacts = Contact.all().fetch(100)\n self.message = 'Welcome!'\n listgroups = map(lambda x: (x.key(),x.name),Group().all().fetch(100))\n # print listgroups\n # print Group.all().get().key()\n \n\n def bootstrap(self):\n usr=User.all().get()\n cnt=Contact().all().get()\n# Contact().update_attributes({\"owner\": usr, \"name\": \"test\", \"birth_day\": datetime.date.today(), \"address\": \"ulice 1\"})\n Group().update_attributes({\"name\": \"newgroup\", \"description\": \"tasdfasdgest\"}) \n # Contact().update_attributes(owner=usr, name=\"test\", birth_day=\"123\", address=\"ulice 1\") \n\n # print groups\n #self.redirect('/')\n \n def new(self):\n self.form = ContactForm({\"contact-birth_day\": datetime.date.today()}, prefix=\"contact\")\n\n def edit(self):\n self.contact=Contact.get(self.params['id'])\n self.form = ContactForm(instance=self.contact, prefix=\"contact\")\n \n def delete(self):\n Contact.get(self.params['id']).delete()\n self.redirect('/contact')\n\n def __sanitize_params(self):\n mykey=User.get(self.params['contact']['owner']).key()\n self.params['contact']['owner']=mykey\n \n # logging.info(type(self.params['contact']['birth_day']))\n # logging.info(type(\"\"))\n if type(self.params['contact']['birth_day'])==type(u''):\n date_array=self.params['contact']['birth_day'].split('-')\n self.params['contact']['birth_day']=datetime.date(int(date_array[0]),int(date_array[1]),int(date_array[2]))\n \n if self.params['contact'].has_key('roles'):\n roles = self.params['contact']['roles']\n else:\n roles = []\n \n if self.params['contact'].has_key('groups'):\n groups = self.params['contact']['groups']\n else: \n groups = []\n \n if type(roles)!=type([]):\n roles=[roles] \n if type(groups)!=type([]):\n groups=[groups] \n \n new_group_array = map(lambda x: Group.get(x).key(), groups)\n self.params['contact']['groups']=new_group_array\n self.params['contact']['roles']=roles\n\n \n def __prefixize_params(self):\n for key, value in self.params['contact'].items():\n self.params['contact'][\"contact-\"+key]=value\n \n def create(self):\n try:\n self.__sanitize_params()\n Contact().update_attributes(self.params['contact'])\n self.redirect('/contact')\n except Exception, error_text: \n self.__sanitize_params()\n self.__prefixize_params()\n self.render(template = 'new', values = { \n 'form': ContactForm(self.params['contact'],prefix=\"contact\"),\n 'contact': Proxy(self.params['contact']),\n 'error': error_text,\n 'message': self.params\n })\n \n 
def update(self):\n try:\n self.__sanitize_params()\n Contact.get(self.params['id']).update_attributes(self.params['contact'])\n self.redirect('/contact')\n except Exception, error_text:\n self.__sanitize_params()\n self.__prefixize_params()\n# self.params['contact']['key']=self.params['id']\n self.render(template = 'edit', values = { \n 'form': ContactForm(self.params['contact'],prefix=\"contact\"),\n 'contact': Contact.get(self.params['id']),\n 'error': error_text,\n 'message': self.params })\n\n", "id": "2774232", "language": "Python", "matching_score": 3.7605245113372803, "max_stars_count": 1, "path": "application/controller/contact.py" }, { "content": "from gaeo.controller import BaseController\nfrom model.core import *\n\nclass GroupController(BaseController):\n def index(self):\n self.groups = Group.all().fetch(100)\n self.message = 'Welcome!'\n\n def new(self):\n self.form = GroupForm(prefix=\"group\")\n\n def edit(self):\n self.group=Group.get(self.params['id'])\n self.form = GroupForm(instance=self.group, prefix=\"group\")\n \n def delete(self):\n Group.get(self.params['id']).delete()\n self.redirect('/group')\n \n def create(self):\n try:\n Group().update_attributes(self.params['group'])\n self.redirect('/group') \n except Exception, error_text: \n proxy=Proxy(self.params['group'])\n self.render(template = 'new', values = { \n 'group': proxy,\n 'message': error_text })\n\n def update(self):\n try:\n Group.get(self.params['id']).update_attributes(self.params['group'])\n self.redirect('/group')\n except Exception, error_text:\n self.params['group']['key']=self.params['id']\n proxy=Proxy(self.params['group'])\n self.render(template = 'edit', values = { \n 'group': proxy,\n 'message': error_text })\n", "id": "273190", "language": "Python", "matching_score": 3.863715171813965, "max_stars_count": 1, "path": "application/controller/group.py" }, { "content": "from gaeo.controller import BaseController\nfrom model.core import *\n\nclass UserController(BaseController):\n def index(self):\n self.users = User.all().fetch(100)\n self.message = 'Welcome!'\n\n def new(self):\n pass \n\n def edit(self):\n self.user=User.get(self.params['id'])\n \n def delete(self):\n User.get(self.params['id']).delete()\n self.redirect('/user')\n \n def create(self):\n try:\n User().update_attributes(self.params['user'])\n self.redirect('/user') \n except Exception, error_text: \n proxy_user=Proxy(self.params['user'])\n self.render(template = 'new', values = { \n 'user': proxy_user,\n 'message': error_text })\n\n def update(self):\n try:\n User.get(self.params['id']).update_attributes(self.params['user'])\n self.redirect('/user')\n except Exception, error_text:\n self.params['user']['key']=self.params['id']\n proxy_user=Proxy(self.params['user'])\n self.render(template = 'edit', values = { \n 'user': proxy_user,\n 'message': error_text })\n \n", "id": "2361485", "language": "Python", "matching_score": 2.2656190395355225, "max_stars_count": 1, "path": "application/controller/user.py" }, { "content": "from gaeo.controller import BaseController\n\nclass WelcomeController(BaseController):\n def index(self):\n self.redirect('/user')\n", "id": "753572", "language": "Python", "matching_score": 0.02185100130736828, "max_stars_count": 1, "path": "application/controller/welcome.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 GAEO Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"GAEO model package\n\"\"\"\nimport re\nfrom google.appengine.ext import db\n\n#connection to lib.rest\nimport lib.rest.appengine\n\ndef pluralize(noun):\n if re.search('[sxz]$', noun):\n return re.sub('$', 'es', noun)\n elif re.search('[^aeioudgkprt]h$', noun):\n return re.sub('$', 'es', noun)\n elif re.search('[^aeiou]y$', noun):\n return re.sub('y$', 'ies', noun)\n else:\n return noun + 's'\n\n#connection to lib.rest\nclass BaseModel(lib.rest.appengine.ResourceModel):\n \"\"\"BaseModel is the base class of data model.\"\"\"\n\n @classmethod\n def belongs_to(cls, ref_cls):\n \"\"\" Declare a many-to-one relationship \"\"\"\n if ref_cls is None:\n raise Exception('No referenced class')\n \n ref_name = ref_cls.__name__.lower()\n if ref_name not in cls._properties:\n attr = db.ReferenceProperty(ref_cls, collection_name=pluralize(cls.__name__.lower()))\n cls._properties[ref_name] = attr\n attr.__property_config__(cls, ref_name)\n\n @classmethod\n def has_and_belongs_to_many(cls, ref_cls):\n if ref_cls is None:\n raise Exception('No referenced class')\n \n f_name = pluralize(cls.__name__.lower())\n t_name = pluralize(ref_cls.__name__.lower())\n \n if t_name not in cls._properties:\n attr = db.ListProperty(db.Key)\n cls._properties[t_name] = attr\n attr.__property_config__(cls, t_name)\n if f_name not in ref_cls._properties:\n attr = property(lambda self: cls.gql('WHERE %s = :1' % t_name, self.key()))\n ref_cls._properties[f_name] = attr\n attr.__property_config__(ref_cls, f_name)\n \n @classmethod\n def named_scope(cls, name, order_by=None, **conds):\n if name not in cls._properties:\n cond_str = \"WHERE \"\n for cond in conds.iterkeys():\n if len(cond_str) > 6:\n cond_str += ' AND '\n cond_str += '%s %s' % (cond, conds[cond])\n \n if order_by:\n cond_str += ' ORDER BY %s' % order_by\n \n attr = property(lambda self: cls.gql(cond_str))\n cls._properties[name] = attr\n attr.__property_config__(cls, name)\n\n @classmethod\n def find(cls, attribute, value):\n return cls.gql(\"WHERE %s = :1\" % attribute, value )\n \n def update_attributes(self, kwd_dict = {}, **kwds):\n \"\"\"Update the specified properties\"\"\"\n need_change = False\n \n # if user passed a dict, merge to kwds (Issue #3)\n if kwd_dict:\n kwd_dict.update(kwds)\n kwds = kwd_dict\n \n props = self.properties()\n for prop in props.values():\n if prop.name in kwds:\n if not need_change:\n need_change = True\n prop.__set__(self, kwds[prop.name])\n \n if need_change:\n self.update()\n\n def set_attributes(self, kwd_dict = {}, **kwds):\n \"\"\"set the specified properties, but not update\"\"\"\n \n # Issue #3\n if kwd_dict:\n kwd_dict.update(kwds)\n kwds = kwd_dict\n \n props = self.properties()\n for prop in props.values():\n if prop.name in kwds:\n prop.__set__(self, kwds[prop.name])\n \n def save(self):\n self.put()\n \n def update(self):\n self.put()\n \n", "id": "8734648", "language": "Python", "matching_score": 5.401370048522949, "max_stars_count": 1, "path": "gaeo/model/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 GAEO Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may 
not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"GAEO model package\n\"\"\"\nimport re\n\nfrom google.appengine.ext import db\n\ndef pluralize(noun):\n if re.search('[sxz]$', noun):\n return re.sub('$', 'es', noun)\n elif re.search('[^aeioudgkprt]h$', noun):\n return re.sub('$', 'es', noun)\n elif re.search('[^aeiou]y$', noun):\n return re.sub('y$', 'ies', noun)\n else:\n return noun + 's'\n\nclass BaseModel(db.Model):\n \"\"\"BaseModel is the base class of data model.\"\"\"\n\n @classmethod\n def has_and_belongs_to_many(cls, ref_cls):\n if ref_cls is None:\n raise Exception('No referenced class')\n \n f_name = pluralize(cls.__name__.lower())\n t_name = pluralize(ref_cls.__name__.lower())\n \n if t_name not in cls.__dict__:\n cls.__dict__[t_name] = db.ListProperty(db.Key)\n if f_name not in ref_cls.__dict__:\n ref_cls.__dict__[f_name] = property(lambda self: cls.gql('WHERE %s = :1' % t_name, self.key()))\n \n @classmethod\n def named_scope(cls, name, order_by=None, **conds):\n if name not in cls.__dict__:\n cond_str = \"WHERE \"\n for cond in conds.iterkeys():\n if len(cond_str) > 6:\n cond_str += ' AND '\n cond_str += '%s %s' % (cond, conds[cond])\n \n if order_by:\n cond_str += ' ORDER BY %s' % order_by\n cls.__dict__[name] = property(lambda self: cls.gql(cond_str))\n \n def update_attributes(self, kwd_dict = {}, **kwds):\n \"\"\"Update the specified properties\"\"\"\n need_change = False\n \n # if user passed a dict, merge to kwds (Issue #3)\n if kwd_dict:\n kwd_dict.update(kwds)\n kwds = kwd_dict\n \n props = self.properties()\n for prop in props.values():\n if prop.name in kwds:\n if not need_change:\n need_change = True\n prop.__set__(self, kwds[prop.name])\n \n if need_change:\n self.update()\n\n def set_attributes(self, kwd_dict = {}, **kwds):\n \"\"\"set the specified properties, but not update\"\"\"\n \n # Issue #3\n if kwd_dict:\n kwd_dict.update(kwds)\n kwds = kwd_dict\n \n props = self.properties()\n for prop in props.values():\n if prop.name in kwds:\n prop.__set__(self, kwds[prop.name])\n \n def save(self):\n self.put()\n \n def update(self):\n self.put()\n \n", "id": "9980673", "language": "Python", "matching_score": 1.2883191108703613, "max_stars_count": 1, "path": "gaeo/model/__init__.py" }, { "content": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Release information\"\"\"\n\n\nversion = \"0.1\"\nauthor = \"ericsk\"\nemail = \"<EMAIL>\"\ncopyright = \"Copyright 2008~ ericsk and contributors\"\nlicense = \"Apache License 2.0 <http://www.apache.org/licenses/LICENSE-2.0>\"\nurl = \"http://code.google.com/p/google-app-engine-oil/\"\ndownload_url=\"http://code.google.com/p/google-app-engine-oil/\"\ndescription=\"Web Framework for Google Appengine\"\nlong_description = \"\"\"\n.. contents::\n :depth: 2\n\nAbout\n======\n\n GAEO (Goole App Engine Oil) is a simple framework that normalize the code layout\n and coding style, so we developed GAEO.\n\nLicense\n=======\n\n GAEO is under Apache License 2.0. \n http://www.apache.org/licenses/LICENSE-2.0\n\n\nInstall & Usage\n===============\n\n 1. 
Put the unzipped gaeo package in your favorite directory ($GAEO_HOME), \n and add $GAEO_HOME/bin to your $PATH environment variable.\n\n 2. Create new project by using (you may have to make gaeo.py excutable first)\n\n gaeo.py <project_name>\n\n e.g.,\n\n gaeo.py foo\n\n it will create a foo/ directory and a project with the same name.\n\n 3. For more tutorial, please view our QuickStart guide:\n http://sites.google.com/a/gaeo.org/tutorial/\n\nMore Information\n================\n\n Project home\n http://code.google.com/p/google-app-engine-oil/\n\n Discussion Group\n http://groups.google.com/group/google-app-engine-oil\n\n\"\"\"\n", "id": "4166150", "language": "Python", "matching_score": 2.737950325012207, "max_stars_count": 1, "path": "bin/release.py" }, { "content": "#/usr/bin/env python\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\nfrom pkg_resources import DistributionNotFound\n\nimport sys\nimport os\nimport glob\n\nexecfile(os.path.join('bin', 'release.py'))\n\n# setup params\n# it's possible to remove chardet dependency while porting\nrequired_modules = [\"\"]\nextra_modules = {}\n\nsetup(\n name=\"gaeo\",\n version=version,\n author=author,\n author_email=email,\n download_url=download_url,\n license=license,\n keywords = \"appengine, webframework\",\n description=description,\n long_description=long_description,\n url=url,\n zip_safe=False,\n install_requires = required_modules,\n extras_require = extra_modules,\n include_package_data = True,\n packages=find_packages(exclude=[\"ez_setup\"]),\n entry_points = \"\"\"\n [console_scripts]\n gaeogen = bin.gaeogen:commandline\n \"\"\",\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n ],\n )\n\n", "id": "8964306", "language": "Python", "matching_score": 0.4835399389266968, "max_stars_count": 1, "path": "setup.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 <NAME> & <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport re\nimport logging\n\nimport router\nimport sys \nimport os\nfrom traceback import *\n\nHTTP_ERRORS = {\n '400': 'Bad Request',\n '402': 'Payment Required',\n '403': 'Forbidden',\n '404': 'Not Found',\n '500': 'Internal Server Error'\n}\n\nTXMT_LINKS = False # set true to show textmate links on tracebacks\nDEBUG = True # set true to show traceback on error pages\n\ndef dispatch(hnd):\n \n # generate nice traceback with optional textmate links\n def nice_traceback(traceback):\n tb=\"\"\n for line in traceback.splitlines(1):\n filename = re.findall('File \"(.+)\",', line)\n linenumber = re.findall(', line\\s(\\d+),', line)\n modulename = re.findall(', in ([A-Za-z]+)', line)\n if filename and linenumber and not re.match(\"<(.+)>\",filename[0]):\n fn=filename[0]\n mn=\"in %s\" % modulename[0] if modulename else \"\"\n fnshort=os.path.basename(fn)\n 
ln=linenumber[0]\n if TXMT_LINKS:\n html=\"<a href='txmt://open/?url=file://%s&line=%s'>%s:%s %s</a> %s\" % (fn,ln,fnshort,ln,mn,line)\n else:\n html=\"<b>%s:%s %s</b> %s\" % (fnshort,ln,mn,line)\n tb+=html\n else:\n tb+=line\n return tb\n \n # show error and write to log\n def show_error(code, log_msg = ''):\n hnd.error(code)\n if sys.exc_info()[0]:\n exception_name = sys.exc_info()[0].__name__\n exception_details = str(sys.exc_info()[1])\n exception_traceback = ''.join(format_exception(*sys.exc_info()))\n special_info = str(exception_details) != str(log_msg)\n logging.error(exception_name)\n logging.error(exception_details)\n logging.error(log_msg)\n logging.error(exception_traceback)\n hnd.response.out.write('<h1>%s</h1>' % HTTP_ERRORS[str(code)])\n if DEBUG:\n tb=nice_traceback(exception_traceback)\n if special_info: logging.error(log_msg)\n hnd.response.out.write('<h3>%s: %s</h3>' % (exception_name, exception_details))\n if special_info: hnd.response.out.write('<pre> %s </pre>' % log_msg)\n hnd.response.out.write('<h1> Traceback </h1>')\n hnd.response.out.write('<pre> %s </pre>' % tb)\n else:\n hnd.response.out.write('<h1> %s </h1>' % log_msg)\n\n # resolve the URL\n url = hnd.request.path\n r = router.Router()\n route = r.resolve(url)\n\n if route is None:\n try:\n raise Exception('invalid URL')\n except Exception, e:\n show_error(500, e)\n else:\n # create the appropriate controller\n try:\n exec('from controller import %s' % route['controller']) in globals()\n ctrl = eval('%s.%sController' % (\n route['controller'],\n route['controller'].capitalize()\n ))(hnd, route)\n\n # dispatch\n logging.info('URL \"%s\" is dispatched to: %sController#%s',\n url,\n route['controller'].capitalize(),\n route['action'])\n except ImportError, e:\n show_error(404, \"Controller doesn't exist\")\n except AttributeError, e: # the controller has not been defined.\n show_error(404, \"Controller doesn't exist\")\n except Exception, e:\n show_error(500, e)\n \n else:\n try:\n action = getattr(ctrl, route['action'], None)\n if action is not None:\n ctrl.implicit_action()\n ctrl.before_action()\n action()\n ctrl.after_action()\n\n if not ctrl.has_rendered:\n ctrl.render(template=route['action'], values=ctrl.__dict__)\n else: # invalid action\n logging.error('Invalid action `%s` in `%s`' % (route['action'], route['controller']))\n try:\n raise Exception('invalid action')\n except Exception, e:\n show_error(500, e)\n except Exception, e:\n show_error(500, e)\n", "id": "7264860", "language": "Python", "matching_score": 4.6283087730407715, "max_stars_count": 1, "path": "gaeo/dispatch/dispatcher.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2008 <NAME> & <NAME>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport re\nimport logging\n\nimport router\n\ndef dispatch(hnd):\n # resolve the URL\n url = hnd.request.path\n r = router.Router()\n route = r.resolve(url)\n if route is None:\n raise Exception('invalid URL')\n else:\n # create the appropriate controller\n try:\n exec('from controller 
import %s' % route['controller'])\n ctrl = eval('%s.%sController' % (\n route['controller'],\n route['controller'].capitalize()\n ))(hnd, route)\n\n # dispatch\n logging.info('URL \"%s\" is dispatched to: %sController#%s',\n url,\n route['controller'].capitalize(),\n route['action'])\n except ImportError, e:\n hnd.error(404)\n # FIXME: What msg is suitable for response ?\n logging.error(e)\n hnd.response.out.write('<h1>404 Not Found</h1>')\n except AttributeError, e: # the controller has not been defined.\n hnd.error(404)\n logging.error(e)\n hnd.response.out.write('<h1>404 Not Found</h1>')\n else:\n ctrl.before_action()\n getattr(ctrl, route['action'])()\n ctrl.after_action()\n\n if not ctrl.has_rendered:\n ctrl.render(template=route['action'], values=ctrl.__dict__)\n", "id": "8151463", "language": "Python", "matching_score": 0.8424277901649475, "max_stars_count": 1, "path": "gaeo_old/dispatch/dispatcher.py" }, { "content": "#!/usr/bin/env python\nfrom __future__ import with_statement\n\nimport os\nimport sys\nimport string\n\nGAEOGEN_VERSION = 0.1\n\ndef usage():\n return \"\"\"Usage: gaeogen.py <generation type> [args]\nGAEOGen command line tool, version %s.\n\nAvailable generation types:\n\n * controller - generates a controller class and actions (w/ templates)\n\n usage: gaeogen.py controller <controller_name> [<action1>, <action2>, ..., <actionN>]\n\n e.g.,\n gaeogen.py controller Say\n gaeogen.py controller Product new create edit delete\n\n\n * model - generates a data model class\n\n usage: gaeogen.py model <model_name> [<property_name>:<property_type>, ...]\n\n e.g.,\n gaeogen.py model User\n\n * scaffold -\n\n usage: gaeogen.py scaffold <controller_name> [action, ...] [<property_name>:<property_type>, ...]\n\n*NOTE* that you should use this tool under your project's directory root.\"\"\" % (GAEOGEN_VERSION)\n\ndef create_file(file_name, content):\n if not os.path.exists(os.path.dirname(file_name)):\n os.makedirs(os.path.dirname(file_name), 0755)\n with open(file_name, 'w') as f:\n f.write('\\n'.join(content))\n\nclass GenBase(object):\n def __init__(self, name):\n super(GenBase, self).__init__()\n self.name = name\n self.content = []\n\n def generate_content(self):\n raise NotImplementedError\n\n def save(self, file_name):\n self.generate_content()\n create_file(file_name, self.content)\n\nclass GenController(GenBase):\n def __init__(self, name):\n super(GenController, self).__init__(name)\n self.actions = {}\n\n def add_action(self, name, content=['pass']):\n self.actions[name] = content\n\n def generate_content(self):\n self.content = [\n 'from gaeo.controller import BaseController',\n '',\n 'class %sController(BaseController):' % self.name.capitalize(),\n ]\n\n if not self.actions:\n self.content.append(' pass')\n\n for act in sorted(self.actions.keys()):\n self.content.append('%sdef %s(self):' % (' ' * 4, act))\n self.content += map(lambda f: ' ' * 8 + f, self.actions[act])\n self.content.append('')\n\nclass GenModel(GenBase):\n def __init__(self, name):\n super(GenModel, self).__init__(name)\n self.props = {}\n\n def add_property(self, arg):\n name, sep, prop = arg.partition(':')\n if name and prop:\n self.props[name] = prop\n\n def generate_content(self):\n self.content = [\n 'from google.appengine.ext import db',\n 'from gaeo.model import BaseModel',\n '',\n 'class %s(BaseModel):' % self.name.capitalize(),\n ]\n\n if not self.props:\n self.content.append(' pass')\n\n for name in sorted(self.props.keys()):\n self.content.append(' ' * 4 + '%s = %s' % (name, 
self.props[name]))\n self.content.append('')\n\nclass GenScaffold(object):\n def __init__(self, name, properties):\n self.name = name\n self.properties = properties\n\n def create_action(self, action):\n ''' Create action content for controllers '''\n # TODO: add correct content.\n if action == 'new':\n return ['pass']\n elif action == 'create':\n return ['pass']\n else: # default\n return ['pass']\n\n def create_page(self, action):\n ''' Create HTML page for template '''\n # TODO: add correct content.\n return [\n '<h1>%sController#%s</h1>' % (self.name.capitalize(), action)\n ]\n\ndef gen_controller(argv, template_helper=None):\n cur_dir = os.getcwd()\n\n controller_name = argv[0].lower()\n ctrl = GenController(controller_name)\n\n application_dir = os.path.join(cur_dir, 'application')\n controller_dir = os.path.join(application_dir, 'controller')\n template_dir = os.path.join(application_dir, 'templates', controller_name)\n\n if not os.path.exists(template_dir):\n print 'Creating %s ...' % (template_dir)\n os.makedirs(template_dir, 0755)\n\n for arg in argv[1:]:\n print 'Creating %s/%s.html ...' % (template_dir, arg)\n\n if template_helper:\n ctrl.add_action(arg, template_helper.create_action(arg))\n create_file(os.path.join(template_dir, '%s.html' % arg),\n template_helper.create_page(arg))\n else:\n ctrl.add_action(arg)\n create_file(os.path.join(template_dir, '%s.html' % arg), [\n '<h1>%sController#%s</h1>' % (controller_name.capitalize(), arg)\n ])\n \n print 'Creating %s/%s.py ...' % (controller_dir, controller_name)\n ctrl.save(os.path.join(controller_dir, '%s.py' % controller_name))\n return ctrl\n\ndef gen_model(argv):\n cur_dir = os.getcwd()\n\n model_name = argv[0].lower()\n application_dir = os.path.join(cur_dir, 'application')\n model_dir = os.path.join(application_dir, 'model')\n\n # check if the model directory had been created\n if not os.path.exists(os.path.join(model_dir, '__init__.py')):\n create_file(os.path.join(model_dir, '__init__.py'), [])\n\n mdl = GenModel(model_name)\n for arg in argv[1:]:\n mdl.add_property(arg) \n\n print 'Creating Model %s ...' 
% model_name\n mdl.save(os.path.join(model_dir, '%s.py' % model_name))\n return mdl\n\ndef gen_scaffold(argv):\n name = argv[0].lower()\n\n model_argv = [name]\n ctrlr_argv = [name]\n\n for arg in argv[1:]:\n if ':' in arg:\n model_argv.append(arg)\n else:\n ctrlr_argv.append(arg)\n\n gen_model(model_argv)\n scaffold = GenScaffold(name, model_argv[1:])\n gen_controller(ctrlr_argv, scaffold)\n return scaffold\n\ndef main(argv):\n gen_type = argv[1].lower()\n try:\n func = eval('gen_%s' % (gen_type))\n except NameError:\n print usage()\n return False\n\n if argv[2] is None:\n print \"Usage: %s %s <%s name>\" % (argv[0], gen_type, gen_type)\n return False\n\n try:\n return func(argv[2:])\n except:\n import traceback\n traceback.print_exc()\n return False\n return True\n\ndef commandline():\n if len(sys.argv) < 3 or '--help' in sys.argv or 'help' in sys.argv or not main(sys.argv):\n print usage()\n sys.exit(1)\n\n\nif __name__ == '__main__':\n commandline()", "id": "4718956", "language": "Python", "matching_score": 2.8675613403320312, "max_stars_count": 1, "path": "bin/gaeogen.py" }, { "content": "#!/usr/bin/env python\nfrom __future__ import with_statement\n\nimport os\nimport sys\nfrom getopt import getopt\nfrom shutil import copytree\n\ndef usage(app_name):\n return 'Usage: %s <project name>' % (app_name)\n\ndef create_file(file_name, content):\n if not os.path.exists(os.path.dirname(file_name)):\n os.makedirs(os.path.dirname(file_name), 0755)\n with open(file_name, 'w') as f:\n f.write('\\n'.join(content))\n\ndef create_app_yaml(app_yaml_file, project_name):\n create_file(app_yaml_file, [\n 'application: %s' % (project_name),\n 'version: 1',\n 'api_version: 1',\n 'runtime: python',\n '',\n 'handlers:',\n '- url: /css',\n ' static_dir: assets/css',\n '- url: /js',\n ' static_dir: assets/js',\n '- url: /img',\n ' static_dir: assets/img',\n '- url: /favicon.ico',\n ' static_files: favicon.ico',\n ' upload: favicon.ico',\n '- url: .*',\n ' script: main.py',\n '',\n ])\n\ndef create_main_py(main_py_file):\n create_file(main_py_file, [\n \"import os\",\n \"import sys\",\n \"import wsgiref.handlers\",\n \"\",\n \"from google.appengine.ext import webapp\",\n \"\",\n \"import gaeo\",\n \"from gaeo.dispatch import router\",\n \"\",\n \"def initRoutes():\",\n \" r = router.Router()\",\n \" \",\n \" #TODO: add routes here\",\n \"\",\n \" r.connect('/:controller/:action/:id')\",\n \"\",\n \"def main():\",\n \" # add the project's directory to the import path list.\",\n \" sys.path.append(os.path.dirname(__file__))\",\n \" sys.path.append(os.path.join(os.path.dirname(__file__), 'application'))\",\n \"\",\n \" # get the gaeo's config (singleton)\",\n \" config = gaeo.Config()\",\n \" # setup the templates' location\",\n \" config.template_dir = os.path.join(\",\n \" os.path.dirname(__file__), 'application', 'templates')\",\n \"\",\n \" initRoutes()\",\n \"\",\n \" app = webapp.WSGIApplication([\",\n \" (r'.*', gaeo.MainHandler),\",\n \" ], debug=True)\",\n \" wsgiref.handlers.CGIHandler().run(app)\",\n \"\",\n \"if __name__ == '__main__':\",\n \" main()\",\n \"\",\n ])\n\n\ndef create_controller_py(controller_py):\n create_file(controller_py, [\n 'from gaeo.controller import BaseController',\n '',\n 'class WelcomeController(BaseController):',\n ' def index(self):',\n ' pass',\n '',\n ])\n\ndef create_default_template(index_html_file):\n create_file(index_html_file, [\n '<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\"',\n ' \"http://www.w3.org/TR/html4/strict.dtd\">',\n '<html>',\n ' <head>',\n ' 
<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">',\n ' <title>GAEO Default Template</title>',\n ' </head>',\n ' <body>',\n ' <h1>It works!!</h1>',\n ' </body>',\n '</html>',\n '',\n ])\n\ndef create_eclipse_project(project_home, project_name):\n proj = os.path.join(project_home, '.project')\n pydevproj = os.path.join(project_home, '.pydevproject')\n \n create_file(proj, [\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>',\n '<projectDescription>',\n ' <name>%s</name>' % project_name,\n ' <comment></comment>',\n ' <projects>',\n ' </projects>',\n ' <buildSpec>',\n ' <buildCommand>',\n ' <name>org.python.pydev.PyDevBuilder</name>',\n ' <arguments>',\n ' </arguments>',\n ' </buildCommand>',\n ' </buildSpec>',\n ' <natures>',\n ' <nature>org.python.pydev.pythonNature</nature>',\n ' </natures>',\n '</projectDescription>'\n ])\n \n create_file(pydevproj, [\n '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>',\n '<?eclipse-pydev version=\"1.0\"?>',\n '',\n '<pydev_project>',\n ' <pydev_property name=\"org.python.pydev.PYTHON_PROJECT_VERSION\">python 2.5</pydev_property>',\n ' <pydev_pathproperty name=\"org.python.pydev.PROJECT_SOURCE_PATH\">',\n ' <path>/%s</path>' % project_name,\n ' </pydev_pathproperty>',\n '</pydev_project>'\n ])\n\ndef main(argv):\n ignore_exist_proj = False \n create_eclipse_proj = False\n\n cur_dir = os.getcwd()\n\n optlist, args = getopt(argv, '', ['eclipse'])\n\n for opt, value in optlist:\n if opt == '--eclipse':\n create_eclipse_proj = True\n\n project_name = args[0]\n\n # create project directory\n project_home = os.path.join(cur_dir, project_name)\n if os.path.exists(project_home):\n print '%s exists' % (project_home)\n return\n else:\n os.mkdir(project_home, 0755)\n\n project_name = os.path.basename(project_name).lower()\n\n # create <project_name>/application/__init__.py\n application_dir = os.path.join(project_home, 'application')\n create_file(os.path.join(application_dir, '__init__.py'), [])\n\n # create <project_name>/application/controller/welcome.py\n controller_dir = os.path.join(application_dir, 'controller')\n create_file(os.path.join(controller_dir, '__init__.py'), [])\n # create default controller (welcome.py)\n create_controller_py(os.path.join(controller_dir, 'welcome.py'))\n\n # create default template\n create_default_template(os.path.join(application_dir, 'templates', 'welcome', 'index.html'))\n\n # create blank model module\n model_dir = os.path.join(application_dir, 'model')\n create_file(os.path.join(model_dir, '__init__.py'), [])\n\n # create app.yaml\n create_app_yaml(os.path.join(project_home, 'app.yaml'), project_name)\n\n # create main.py\n create_main_py(os.path.join(project_home, 'main.py'))\n\n # create assets directories\n assets_dir = os.path.join(project_home, 'assets')\n os.mkdir(assets_dir, 0755)\n for d in ['css', 'img', 'js']:\n target_dir = os.path.join(assets_dir, d)\n os.mkdir(target_dir, 0755)\n\n # create an empty favicon.ico\n create_file(os.path.join(project_home, 'favicon.ico'), [])\n\n # copy GAEO directory\n copytree(os.path.join(os.path.dirname(__file__), '..', 'gaeo'), os.path.join(project_home, 'gaeo'))\n\n # create the eclipse project file\n if create_eclipse_proj:\n create_eclipse_project(project_home, project_name)\n\n print 'The \"%s\" project has been created.' 
% project_name\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n main(sys.argv[1:])\n else:\n print usage(sys.argv[0]);\n", "id": "5007506", "language": "Python", "matching_score": 4.622596740722656, "max_stars_count": 1, "path": "bin/gaeo.py" }, { "content": "import os\nimport sys\nimport wsgiref.handlers\n\nfrom google.appengine.ext import webapp\n\nimport gaeo\nfrom gaeo.dispatch import router\n\ndef initRoutes():\n r = router.Router()\n \n #TODO: add routes here\n\n r.connect('/:controller/:action/:id')\n\ndef main():\n # add the project's directory to the import path list.\n sys.path.append(os.path.dirname(__file__))\n sys.path.append(os.path.join(os.path.dirname(__file__), 'application'))\n\n # get the gaeo's config (singleton)\n config = gaeo.Config()\n # setup the templates' location\n config.template_dir = os.path.join(\n os.path.dirname(__file__), 'application', 'templates')\n\n initRoutes()\n\n app = webapp.WSGIApplication([\n (r'.*', gaeo.MainHandler),\n ], debug=True)\n wsgiref.handlers.CGIHandler().run(app)\n\nif __name__ == '__main__':\n main()\n", "id": "2963118", "language": "Python", "matching_score": 2.1006383895874023, "max_stars_count": 1, "path": "main.py" } ]
2.413611
genwch
[ { "content": "from .pdtb import *\nfrom .pdvw import *", "id": "12560472", "language": "Python", "matching_score": 0.7283242344856262, "max_stars_count": 0, "path": "gwpd/__init__.py" }, { "content": "from .pdtbl import *\nfrom .pdvw import *", "id": "6938737", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "pddb/lib/pddb/__init__.py" }, { "content": "from .home import *\nfrom .login import *\nfrom .job import *\nfrom .profile import *\n\n__route__ = [{\"route\": home, \"prefix\": \"/\"}, {\"route\": login, \"prefix\": \"/\"},\n {\"route\": job, \"prefix\": \"/job\"}, {\"route\": profile, \"prefix\": \"/profile\"}]\n", "id": "12228584", "language": "Python", "matching_score": 0.8392705917358398, "max_stars_count": 0, "path": "jmapp/routes/__init__.py" }, { "content": "from .login import *\nfrom .register import *\nfrom .job import *\nfrom .apply import *\nfrom .offer import *\nfrom .profile import *\nfrom .apply_profile import *\n", "id": "12530241", "language": "Python", "matching_score": 0.33192524313926697, "max_stars_count": 0, "path": "model/__init__.py" }, { "content": "from flask import Flask, redirect\nfrom flask_jwt_extended import JWTManager\nfrom . import routes as rt\n\nfrom . import lib as lib\n\napp = Flask(__name__)\nsecret = \"jobmatch-secret\"\napp.config['JWT_SECRET_KEY'] = secret\napp.secret_key = secret\njwt = JWTManager(app)\n\nfor o in rt.__route__:\n app.register_blueprint(o.get(\"route\"), url_prefix=o.get(\"prefix\", \"/\"))\n\n# app.register_blueprint(rt.home, url_prefix=\"/\")\n# app.register_blueprint(rt.login, url_prefix=\"/\")\n# app.register_blueprint(rt.job, url_prefix=\"/job\")\n# app.register_blueprint(rt.profile, url_prefix=\"/profile\")\n\n\n@jwt.expired_token_loader\ndef my_expired_token_callback(jwt_header, jwt_payload):\n return redirect(\"/login?msg=Token Expired\")\n", "id": "1574988", "language": "Python", "matching_score": 2.5589513778686523, "max_stars_count": 0, "path": "jmapp/app.py" }, { "content": "from flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt_extended import JWTManager\nimport gwcomm as comm\nimport resources as res\n\nlg = comm.logger(__name__)\nlg.info(\"init\")\napp = Flask(__name__)\napi = Api(app)\ncomm.add_env([\"SECRET_KEY\", \"SRV_ID\", \"SRV_SCT\", \"API_ROOT\"])\napp.config[\"SECRET_KEY\"] = comm.sysconf.get(\"secret_key\")\njwt = JWTManager(app)\nres.init_api_routes(api)\nres.init_routes(app)\n", "id": "1585862", "language": "Python", "matching_score": 1.5447455644607544, "max_stars_count": 0, "path": "apiapp/app.py" }, { "content": "import gwcomm as comm\n\nlg = comm.logger(__name__)\ncomm.add_env([\"API_HTTP\", \"API_HOST\", \"API_PORT\",\n \"API_DATA\", \"API_USR\", \"API_PWD\"])\n\n\ndef get_token():\n import requests\n conf = comm.sysconf\n url = conf.get(\"api\", {}).get(\"auth\", \"\") if conf.get(\"api\", {}).get(\n \"auth\", \"\") != \"\" else \"{}://{}:{}/auth\".format(conf.get(\"api_http\", \"http\"), conf.get(\"api_host\", \"127.0.0.1\"), conf.get(\"api_port\", \"5000\"))\n body = {\"usr_cde\": conf.get(\"api_usr\", \"\"),\n \"password\": conf.get(\"api_pwd\", \"\")}\n lg.info(f\"init - url: {url}\")\n try:\n res = requests.post(url, json=body)\n except:\n lg.error(f\"Error - connection fail - {url}\")\n comm.sysconf[\"token\"] = None\n return None\n if res.status_code != 200:\n lg.error(f\"Error - {res.msg}\")\n comm.sysconf[\"token\"] = None\n return None\n token = res.json().get(\"access_token\")\n comm.sysconf[\"token\"] = token\n return token\n\n\ndef 
get_header(token=None):\n token = get_token() if token == None else token\n if token == None:\n lg.error(\"Error - no token\")\n return {}\n return {\"Authorization\": f\"Bearer {token}\"}\n", "id": "669966", "language": "Python", "matching_score": 4.614516735076904, "max_stars_count": 0, "path": "gwapi/auth.py" }, { "content": "import gwcomm as comm\n\nlg = comm.logger(__name__)\ncomm.add_env([\"API_HTTP\", \"API_HOST\", \"API_PORT\",\n \"API_DATA\", \"API_USR\", \"API_PWD\"])\n\n\ndef get(url):\n import requests\n from .auth import get_header\n conf = comm.sysconf\n dataurl = conf.get(\"api\", {}).get(\"data\", \"\") if conf.get(\"api\", {}).get(\n \"data\", \"\") != \"\" else \"{}://{}:{}{}\".format(conf.get(\"api_http\", \"http\"), conf.get(\"api_host\", \"127.0.0.1\"), conf.get(\"api_port\", \"5000\"), conf.get(\"api_data\", \"/\"))\n url = \"{}{}\".format(dataurl, url)\n header = get_header(conf.get(\"token\", None))\n lg.info(f\"init - url: {url}\")\n try:\n res = requests.get(url, headers=header)\n except:\n lg.error(f\"Error - connection fail - {url}\")\n comm.sysconf[\"token\"] = None\n return {}\n\n if res.status_code != 200:\n lg.error(f\"Error - {res.json()}\")\n comm.sysconf[\"token\"] = None\n return {}\n return res.json()\n\n\ndef upsert(url, data):\n import requests\n from .auth import get_header\n conf = comm.sysconf\n dataurl = conf.get(\"api\", {}).get(\"data\", \"\") if conf.get(\"api\", {}).get(\n \"data\", \"\") != \"\" else \"{}://{}:{}{}\".format(conf.get(\"api_http\", \"http\"), conf.get(\"api_host\", \"127.0.0.1\"), conf.get(\"api_port\", \"5000\"), conf.get(\"api_data\", \"/\"))\n url = \"{}{}\".format(dataurl, url)\n header = get_header(conf.get(\"token\", None))\n lg.info(f\"init - url: {url}\")\n try:\n res = requests.post(url, json=data, headers=header)\n except:\n lg.error(f\"Error - connection fail - {url}\")\n comm.sysconf[\"token\"] = None\n return False\n if res.status_code != 200:\n lg.error(f\"Error - {res.json()}\")\n comm.sysconf[\"token\"] = None\n return False\n return True\n", "id": "12050365", "language": "Python", "matching_score": 1.3360646963119507, "max_stars_count": 0, "path": "gwapi/data.py" }, { "content": "from .auth import *\nfrom .data import *\n", "id": "2255764", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "gwapi/__init__.py" }, { "content": "from .auth import *\nfrom .apiclient import *", "id": "6607396", "language": "Python", "matching_score": 0.07478898763656616, "max_stars_count": 0, "path": "jmapp/lib/__init__.py" }, { "content": "from .auth import *\nfrom .data import *\nfrom .map import *\nfrom .convc import *\nfrom .scrape import *\nfrom .gimyscrape import *\nfrom .static import *\n\ndef init_routes(app):\n app.register_blueprint(static, url_prefix=\"/\")\n\ndef init_api_routes(api):\n import gwcomm as comm\n apiroot = comm.sysconf.get(\"api_root\", \"/api/v1\")\n apilst = []\n\n # auth\n api.add_resource(authapi, \"/auth\", endpoint=\"auth\")\n api.add_resource(signapi, \"/sign\", endpoint=\"sign\")\n\n # data\n urllst = [f\"{apiroot}/data/<sec>/<type>\", f\"{apiroot}/data/<sec>/<type>/p/<page>\",\n f\"{apiroot}/data/<sec>/<type>/<id>\", f\"{apiroot}/data/<sec>/<type>/<id>/p/<page>\",\n f\"{apiroot}/data/<sec>/<type>/<col>/<id>\", f\"{apiroot}/data/<sec>/<type>/<col>/<id>/p/<page>\"]\n datalst = []\n seclst = [1, 2, 3, 9]\n secpath = [\"public\", \"protected\", \"private\", \"auth\"]\n for i in range(len(urllst)):\n for s in seclst:\n api.add_resource(dataapi, urllst[i],\n 
endpoint=f\"data{s}_p{i}\", resource_class_kwargs={\"data_path\": \"./conf/data\", \"security\": s})\n datalst.append({\"endpoint\": f\"data<sec>_p{i}\", \"url\": urllst[i]})\n apilst += datalst\n\n # conv\n api.add_resource(\n convcapi, f\"{apiroot}/convc/<to>\", endpoint=\"convert_chinese\")\n apilst.append({\"endpoint\": \"convert_chinese\",\n \"url\": f\"{apiroot}/convc/<sc|tc>\"})\n\n # scrape\n urllst = [f\"{apiroot}/scrape\", f\"{apiroot}/scrape/<type>\",\n f\"{apiroot}/scrape/<type>/p/<page>\"]\n scrapelst = []\n for i in range(len(urllst)):\n api.add_resource(\n scrapeapi, urllst[i], resource_class_kwargs={\"scrape_path\": \"./conf/scrape\"}, endpoint=f\"scrape_p{i}\")\n scrapelst.append({\"endpoint\": f\"scrape_p{i}\", \"url\": urllst[i]})\n apilst += scrapelst\n\n # gimy scrape\n urllst = [f\"{apiroot}/gimy/<type>\", f\"{apiroot}/gimy/<type>/p/<page>\",\n f\"{apiroot}/gimy/<type>/<id>\", f\"{apiroot}/gimy/<type>/<id>/p/<page>\",\n f\"{apiroot}/gimy/<type>/scat_id/<scat_id>\", f\"{apiroot}/gimy/<type>/scat_id/<scat_id>/p/<page>\",\n f\"{apiroot}/gimy/<type>/<id>/<st>/<ep>\", f\"{apiroot}/gimy/<type>/<id>/<st>/<ep>/p/<page>\"]\n gimylst = []\n for i in range(len(urllst)):\n api.add_resource(\n gimyscrape, urllst[i], resource_class_kwargs={\"scrape_path\": \"./conf/scrape/gimy.tv\"}, endpoint=f\"gimy_p{i}\")\n gimylst.append({\"endpoint\": f\"gimy_p{i}\", \"url\": urllst[i]})\n apilst += gimylst\n\n # map\n api.add_resource(mapapi, f\"{apiroot}\", resource_class_args=(apilst),\n resource_class_kwargs={\"data_path\": \"./conf/data\", \"scrape_path\": \"./conf/scrape\", \"gimy_path\": \"./conf/scrape/gimy.tv\"}, endpoint=\"map\")\n api.add_resource(mapapi, f\"{apiroot}/data\", resource_class_args=(datalst),\n resource_class_kwargs={\"data_path\": \"./conf/data\"}, endpoint=\"map_data\")\n api.add_resource(mapapi, f\"{apiroot}/gimy\", resource_class_args=(gimylst),\n resource_class_kwargs={\"gimy_path\": \"./conf/scrape/gimy.tv\"}, endpoint=\"map_gimy\")\n for s in range(len(seclst)):\n api.add_resource(mapapi, f\"{apiroot}/data/{seclst[s]}\", resource_class_args=(datalst),\n resource_class_kwargs={\"data_path\": f\"./conf/data/{secpath[s]}\"}, endpoint=f\"map_data_s{seclst[s]}\")\n # for s in range(len(scrapelst)):\n # api.add_resource(mapapi, scrapelst[s], resource_class_args=(scrapelst), endpoint=f\"map_scrape_{s}\")\n", "id": "8476638", "language": "Python", "matching_score": 2.460024356842041, "max_stars_count": 0, "path": "apiapp/resources/__init__.py" }, { "content": "from flask_restful import Resource\nimport gwcomm as comm\n\nlg = comm.logger(__name__)\n\n\nclass mapapi(Resource):\n def __init__(self, *args, **kwargs):\n self.__res = [a for a in args]\n self.__datas = self.__get_types(kwargs.get(\"data_path\", \"\"))\n self.__scrapes = self.__get_types(kwargs.get(\"scrape_path\", \"\"))\n self.__gimy = self.__get_types(kwargs.get(\"gimy_path\", \"\"))\n\n def __get_types(self, path):\n import os\n fs = comm.filesystem()\n return [{\"type\": c.get(\"name\")} for c in fs.ls_dict(os.path.join(path, \"*.json\"))]\n\n def get(self):\n lg.info(\"Get Method\")\n rtn = {\"endpoints\": self.__res}\n if self.__datas != []:\n rtn[\"datas\"] = self.__datas\n if self.__scrapes != []:\n rtn[\"scrapes\"] = self.__scrapes\n if self.__gimy != []:\n rtn[\"gimy\"] = self.__gimy\n return rtn, 200\n", "id": "6327868", "language": "Python", "matching_score": 0.5211916565895081, "max_stars_count": 0, "path": "apiapp/resources/map.py" }, { "content": "def data_paging(data, paging, page):\n rtn = 
data[paging*(page-1):paging*(page)]\n return rtn, int(len(data)/paging)+1\n", "id": "882888", "language": "Python", "matching_score": 0.09356266260147095, "max_stars_count": 0, "path": "apiapp/lib/paging.py" }, { "content": "class filesystem():\n def to_dict(self, fullpath):\n p, f = self.__file_split(fullpath=fullpath)\n n = self.fname(f)\n e = self.fext(f)\n return {\"full\": fullpath, \"path\": p, \"file\": f, \"name\": n, \"ext\": e}\n\n def fname(self, fullpath):\n _, rtn = self.__file_split(fullpath=fullpath)\n return rtn.split(\".\")[0]\n\n def fext(self, fullpath):\n _, rtn = self.__file_split(fullpath=fullpath)\n rtn = rtn.split(\".\")\n return \"\" if len(rtn) == 1 else rtn[-1]\n\n def file(self, fullpath):\n _, rtn = self.__file_split(fullpath=fullpath)\n return rtn\n\n def fpath(self, fullpath):\n rtn, _ = self.__file_split(fullpath=fullpath)\n return rtn\n\n def __file_split(self, fullpath):\n import os\n return os.path.split(fullpath)\n\n def ls(self, folder):\n import glob\n return glob.glob(folder)\n\n def ls_dict(self, folder):\n return [self.to_dict(r) for r in self.ls(folder=folder)]\n", "id": "7451093", "language": "Python", "matching_score": 0.8668606877326965, "max_stars_count": 0, "path": "gwcomm/filesystem.py" }, { "content": "from .logger import *\nfrom .filesystem import *\nfrom .conf import *\n", "id": "3978306", "language": "Python", "matching_score": 0.8009926080703735, "max_stars_count": 0, "path": "gwcomm/__init__.py" }, { "content": "import gwcomm as comm\n\n\nclass scrape():\n\n def __init__(self, name=None, url=None, conf=None, *args, **kwargs):\n self._lg = comm.logger(f\"scrape({name})\")\n self._fullconf = conf\n self._cont = \"\"\n if name == None:\n return\n self._para = kwargs\n self._para[\"page\"] = self._para.get(\"page\", 1)\n self._conf = comm.conv_conf(conf.get(name, {}), self._para)\n self._url = self._conf.get(\"url\", None) if url == None else url\n if self._url != None and self._url != \"\":\n self._lg.info(f\"init - url: {self._url}\")\n self._cont = self.content(url=self._url)\n\n def content(self, url=None):\n \"\"\"Get html content\n\n Keyword Arguments:\n url {string} -- uri (default: {None})\n\n Returns:\n string -- html content\n \"\"\"\n import requests\n url = self._url if url == None else url\n res = requests.get(url)\n if res.status_code == 200:\n from bs4 import BeautifulSoup\n return BeautifulSoup(res.content, features=\"html.parser\")\n return \"\"\n\n def __extract(self, cont, conf):\n \"\"\"Extract elements from content\n\n Arguments:\n cont {string} -- html content\n conf {dict} -- config\n\n Returns:\n list -- list of elements\n \"\"\"\n for c in conf.get(\"steps\", []):\n if cont != None and cont != \"\":\n try:\n cont = cont.find(c.get(\"tag\"), c.get(\"attr\", None))\n except:\n self._lg.debug(\"find_all - \\ncont: {}\\ntag: {}\\nattr: {}\".format(cont, c.get(\"tag\"), c.get(\n \"attr\", None)))\n cont = None\n if cont == None:\n return None\n try:\n cont = cont.find_all(conf.get(\"tag\"), conf.get(\n \"attr\", None))\n except:\n self._lg.debug(\"find_all - \\ncont: {}\\ntag: {}\\nattr: {}\".format(cont, conf.get(\"tag\"), conf.get(\n \"attr\", None)))\n cont = \"\"\n try:\n cont = self.__getpos(\n lst=cont, pos=conf.get(\"pos\", None), rtnlst=True)\n except:\n self._lg.debug(\n \"getpos - \\ncont: {}\\npos: {}\".format(cont, conf.get(\"pos\", None)))\n cont = \"\"\n return cont\n\n def __getpos(self, lst, pos, rtnlst=False):\n \"\"\"Get object of list with pos, return the latest object if pos > len(lst)\n\n 
Arguments:\n lst {list} -- List of object\n pos {int} -- Position\n\n Keyword Arguments:\n rtnlst {bool} -- Force to return value in list (default: {False})\n\n Returns:\n list / string -- Value of list\n \"\"\"\n rtn = lst\n if pos == None:\n if not(rtnlst):\n if len(rtn) == 0:\n rtn = \"\"\n else:\n rtn = rtn[len(rtn)-1]\n elif len(lst) >= 1:\n pos = min(len(lst)-1, pos)\n rtn = rtn[pos]\n if rtnlst:\n rtn = [rtn]\n return rtn\n\n def __process(self, val, conf):\n \"\"\"Process value\n\n Arguments:\n val {string} -- Value\n conf {dict} -- Config\n\n Returns:\n string -- Value\n \"\"\"\n import re\n method = conf.get(\"method\", None)\n if method != None:\n val = val.get(method, \"\")\n elif val != \"\":\n if conf.get(\"isscript\", False):\n val = val.string\n else:\n val = val.text\n # self._lg.debug(f\"val: {val}\")\n for c in conf.get(\"process\", []):\n for k, v in c.items():\n if k == \"replace\":\n val = re.sub(v.get(\"fm\", \"\"), v.get(\"to\", \"\"), val)\n elif k == \"split\":\n sp = val.split(v.get(\"delim\", \" \"))\n # self._lg.debug(f\"val: {val}, sp: {sp}\")\n val = self.__getpos(lst=sp, pos=v.get(\n \"pos\", None), rtnlst=v.get(\"islist\", False))\n elif k == \"isstrip\" and v == True and isinstance(val, str):\n val = val.strip()\n return val\n\n def __validate(self, val, conf):\n \"\"\"Validate value\n\n Arguments:\n val {list} -- Value\n conf {dict} -- Config\n\n Returns:\n list -- list of value\n \"\"\"\n for cfg in [v for v in conf if v.get(\"validate\", []) != []]:\n vcfg = cfg.get(\"validate\", {})\n if vcfg.get(\"isnumeric\", False):\n val = [i for i in val if i.get(\n cfg.get(\"name\"), \"\").isnumeric()]\n elif vcfg.get(\"ignval\", None) != None:\n val = [i for i in val if i.get(\n cfg.get(\"name\"), \"\") != vcfg.get(\"ignval\", None)]\n return val\n\n def items(self, cont=None, conf=None):\n \"\"\"Extract items from html content\n\n Keyword Arguments:\n cont {string} -- html content (default: {None})\n conf {dict} -- Config (default: {None})\n\n Returns:\n list -- list of items\n \"\"\"\n conf = self._conf if conf == None else conf\n cont = self._cont if cont == None else cont\n cfg = conf.get(\"items\", {})\n cont = self.__extract(cont, cfg)\n if cont == \"\" or cont == None:\n return []\n self._items = [self.info(value=i, conf=conf) for i in cont]\n self._items = self.__validate(\n val=self._items, conf=conf.get(\"info\", []))\n return self._items\n\n def info(self, value, conf=None):\n \"\"\"Extract info from items\n\n Arguments:\n value {string} -- Value\n\n Keyword Arguments:\n conf {dict} -- Config (default: {None})\n\n Returns:\n string -- Value\n \"\"\"\n conf = self._conf if conf == None else conf\n cfg = conf.get(\"info\", [])\n rtn = {}\n for c in cfg:\n val = self.__extract(cont=value, conf=c.get(\"get\", {}))\n val = self.__getpos(lst=val, pos=c.get(\"pos\", None))\n rtn[c.get(\"name\")] = self.__process(val=val, conf=c)\n return rtn\n", "id": "1204218", "language": "Python", "matching_score": 1.2662010192871094, "max_stars_count": 0, "path": "gwws/scrape.py" }, { "content": "class obj_dict(dict):\n \"\"\"Adv dict class to enable sub-key\n\n Init Arguments:\n *args {tuple} -- key = value\n or\n **kwargs {dict} -- {key: value}\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.__setitem__(*args, **kwargs)\n\n def __split(self, key: str) -> dict:\n \"\"\"Private function for split key to parent-child\n\n Arguments:\n key {str} -- key of dict\n\n Returns:\n dict -- dict return\n \"\"\"\n o = key.split(\".\")\n rtn = {}\n if len(o) == 1:\n rtn = 
{\"org\": key, \"key\": o[0]}\n else:\n rtn = {\"org\": key, \"key\": \".\".join(o[1:]), \"par\": o[0]}\n return rtn\n\n def __setattr__(self, *args, **kwargs):\n self.__setitem__(*args, **kwargs)\n\n def __getattr__(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n\n def __setitem__(self, *args, **kwargs):\n if args == () and kwargs == {}:\n return\n if args != ():\n try:\n (k, v) = args\n kwargs.update({k: v})\n except:\n kwargs.update(args[0])\n for k, v in kwargs.items():\n o = self.__split(k)\n if o.get(\"par\", None) == None:\n self.update({o.get(\"org\"): v})\n else:\n par = self.get(o.get(\"par\"), None)\n if par == None:\n nobj = obj_dict()\n nobj[o.get(\"key\")] = v\n self.update({o.get(\"par\"): nobj})\n else:\n if isinstance(par, obj_dict):\n self.get(o.get(\"par\"), None).update({o.get(\"key\"): v})\n else:\n continue\n", "id": "9804608", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "obj_dict/app.py" }, { "content": "from .init import *\nfrom .app import *", "id": "11352050", "language": "Python", "matching_score": 0.10657951235771179, "max_stars_count": 0, "path": "apiapp/__init__.py" }, { "content": "_BLACK = 0\n_WHITE = 255\n_GRAY = 128\n\n_S_FONT = 15\n_M_FONT = 25\n_L_FONT = 40\n\n_FONTPATH = \"./static/font/\"\n\n\nclass img_cls():\n def __init__(self, name: str, w_h: tuple, fill: int = _WHITE):\n from PIL import Image, ImageDraw\n self._BACK = fill\n self._img = Image.new(\"1\", (w_h[0], w_h[1]), fill)\n self._draw = ImageDraw.Draw(self._img)\n self._w_h = w_h\n self._name = name\n self._obj = []\n\n def add_text(self, text: str, x_y: tuple = (0, 0), size: int = _M_FONT, fill: int = _BLACK) -> (((int, int), (int, int)), bool):\n exists = []\n (x_y, w_h) = self.__cal_xy(text=text, size=size) if x_y == (0, 0) else x_y\n for k, o in enumerate(self._obj):\n if o.get(\"x_y\") == x_y:\n if o.get(\"text\") == text:\n return ((x_y, (None, None)), True)\n else:\n exists.append(self._obj.pop(k))\n break\n for e in exists:\n self.__draw_text(text=e.get(\"text\"), x_y=e.get(\n \"x_y\"), size=e.get(\"size\"), fill=self._BACK, append_obj=False)\n (x_y, (w, h)) = self.__draw_text(\n text=text, x_y=x_y, size=size, fill=fill)\n return ((x_y, (w, h)), False)\n\n def __get_font(self, size):\n from PIL import ImageFont\n return ImageFont.truetype(f\"{_FONTPATH}unifont-13.0.06.ttf\", size)\n\n def __cal_xy(self, text: str, size: int = _M_FONT):\n font = self.__get_font(size)\n draw = self._draw\n (w, h) = draw.textsize(text, font=font)\n if w % 2 == 1:\n w += 1\n if h % 2 == 1:\n h += 1\n (ow, oh) = self._w_h\n x_y = (int((ow-w)//2), int((oh-h)//2))\n return (x_y, (w, h))\n\n def __tuple_add(self, x_y: tuple, offset: int = 0) -> list:\n rtn = []\n for o in range(offset+1):\n rtn.append((x_y[0]+o, x_y[1]))\n rtn.append((x_y[0], x_y[1]+o))\n return list(set(rtn))\n\n def __draw_text(self, text: str, x_y: tuple = (0, 0), size: int = _M_FONT, fill: int = _BLACK, append_obj=True) -> ((int, int), (int, int)):\n draw = self._draw\n offset = 1\n font = self.__get_font(size)\n (x_y, (w, h)) = self.__cal_xy(text, size)\n for p in self.__tuple_add(x_y, offset):\n draw.text(p, text, font=font, fill=fill)\n if append_obj:\n self._obj.append(\n {\"text\": text, \"x_y\": x_y, \"size\": size, \"fill\": fill})\n return (x_y, (w+offset, h+offset))\n\n def img(self):\n return self._img\n\n def w_h(self):\n return self._w_h\n\n def rotate(img):\n return img.rotate(180, expand=True)\n\n\nclass screen_cls():\n def __init__(self):\n self.new()\n\n def new(self):\n self._obj 
= []\n\n def add(self, *args, **kwargs):\n default = {\"name\": None, \"obj\": None, \"x_y\": (0, 0), \"w_h\": (0, 0)}\n for a in args:\n kwargs.update(a)\n img = img_cls(name=kwargs.get(\"name\"), w_h=kwargs.get(\n \"w_h\", default.get(\"w_h\")), fill=kwargs.get(\"fill\", None))\n kwargs.update({\"obj\": img})\n obj = {k: kwargs.get(k, v) for k, v in default.items()}\n self._obj.append(obj)\n\n def get(self) -> list:\n return self._obj\n\n\ndef set_screen(w_h: tuple, bg_img) -> list:\n (epd_w, epd_h) = w_h\n srns = []\n srn = screen_cls()\n sec_margin = 35\n w = (epd_w-sec_margin)//2\n srn.add(name=\"clk_sec\", x_y=(w, 0), w_h=(sec_margin, epd_h), fill=_WHITE)\n srn.add(name=\"clk_hr\", x_y=(0, 0), w_h=(w, epd_h), fill=_WHITE)\n srn.add(name=\"clk_min\", x_y=(w+sec_margin, 0), w_h=(w, epd_h), fill=_WHITE)\n srns.append({\"name\": \"clock\", \"srn\": srn,\n \"img\": bg_img, \"idel\": 5, \"switch\": 25})\n del srn\n srn = screen_cls()\n srn.add(name=\"cal_date\", x_y=(0, 0), w_h=(epd_w, epd_h//2), fill=_WHITE)\n srn.add(name=\"cal_info\", x_y=(0, epd_h//2),\n w_h=(epd_w, epd_h//2), fill=_BLACK)\n srns.append({\"name\": \"calendar\", \"srn\": srn,\n \"img\": bg_img, \"idel\": 5, \"switch\": 35})\n return srns\n\n\ndef main():\n import lib.epd as epdlib\n from PIL import Image\n import time\n from datetime import datetime\n epd = epdlib.EPD()\n epd_w = epd.height\n epd_h = epd.width\n\n epd.init(epd.FULL_UPDATE)\n epd.Clear(0xFF)\n\n epd.init(epd.FULL_UPDATE)\n bg = img_cls(name=\"bg\", w_h=(epd_w, epd_h))\n bg_img = bg.img()\n epd.displayPartBaseImage(epd.getbuffer(bg_img))\n\n epd.init(epd.PART_UPDATE)\n srns = set_screen((epd_w, epd_h), bg_img)\n start = datetime.now()\n target = 50\n img = bg_img.copy()\n idx = len(srns)-1\n # idx=0\n init = True\n while True:\n now = datetime.now()\n switch = srns[idx].get(\"switch\", 0)\n # if init:\n # second=35\n # else:\n second = int(now.strftime(\"%S\"))\n if (second >= switch and second < switch+srns[idx].get(\"idel\", 0.5)*4) or init:\n # print(f\"swap-second: {second}, idx: {idx}\")\n idx = 0 if idx+1 >= len(srns) else idx+1\n srn = srns[idx].get(\"srn\")\n # img=srns[idx].get(\"img\", bg_img).copy()\n img = bg_img.copy()\n if init:\n idel = 0.1\n else:\n idel = srns[idx].get(\"idel\", 0.5)\n swap = True\n init = False\n refresh = False\n for s in srn.get():\n rtn = True\n o = s.get(\"obj\")\n (ox, oy) = s.get(\"x_y\")\n (ow, oh) = s.get(\"w_h\")\n if s.get(\"name\") == \"clk_hr\":\n (((x, y), (w, h)), rtn) = o.add_text(\n text=now.strftime(\"%H\"), size=110, fill=_BLACK)\n elif s.get(\"name\") == \"clk_min\":\n (((x, y), (w, h)), rtn) = o.add_text(\n text=now.strftime(\"%M\"), size=110, fill=_BLACK)\n elif s.get(\"name\") == \"clk_sec\":\n text = \":\"\n # if second % 2==1:\n # text=\" \"\n (((x, y), (w, h)), rtn) = o.add_text(\n text=text, x_y=(-10, 12), size=110, fill=_BLACK)\n # print(f\"text: {text}, rtn: {rtn}\")\n elif s.get(\"name\") == \"cal_date\":\n (((x, y), (w, h)), rtn) = o.add_text(\n text=now.strftime(\"%d%b%y\"), size=60, fill=_BLACK)\n elif s.get(\"name\") == \"cal_info\":\n (((x, y), (w, h)), rtn) = o.add_text(\n text=now.strftime(\"%A\"), size=60, fill=_WHITE)\n if rtn and not(swap):\n continue\n refresh = True\n oimg = o.img().copy()\n img.paste(oimg, (ox, oy, ox+ow, oy+oh))\n if refresh:\n swap = False\n # print(f\"refresh - {second}\")\n epd.displayPartial(epd.getbuffer(img_cls.rotate(img)))\n time.sleep(idel)\n epd.sleep()\n\n\nif __name__ == \"__main__\":\n import sys\n try:\n main()\n except KeyboardInterrupt:\n 
print(\"Waiting for exit...\")\n import lib.epd as epdlib\n try:\n epd = epdlib.EPD()\n epd.init(epd.FULL_UPDATE)\n epd.Clear(0xFF)\n epd.sleep()\n except:\n print(\"Clear fail\")\n pass\n sys.exit()\n", "id": "4123038", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "rpiwepd/app.py" }, { "content": "from .epd import *\n", "id": "12634181", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "rpiwepd/lib/__init__.py" }, { "content": "from .epdconfig import *\nfrom .epd2in13_V2 import *\n", "id": "3919987", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "rpiwepd/lib/epd/__init__.py" }, { "content": "import setuptools\nimport os\npkgname=os.path.basename(os.getcwd())\nwith open(\".CURVER\", \"r\", encoding=\"utf-8\") as fh:\n curver = fh.readline()\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\nf=open(\"requirements.txt\", \"r\")\ndepends=f.read().split(\"\\n\")\nf.close()\n\nsetuptools.setup(\n name=f\"{pkgname}-genwch\", # Replace with your own username\n version=f\"{curver}\",\n author=\"genwch\",\n author_email=\"\",\n description=\"Using pandas as db in python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=f\"https://github.com/genwch/{pkgname}\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=depends,\n python_requires='>=3.7',\n)\n", "id": "7085532", "language": "Python", "matching_score": 0.6666056513786316, "max_stars_count": 0, "path": "setup.py" }, { "content": "sysconf = {}\n\n\ndef load_conf(file):\n import json\n from os import path\n if not(path.exists(file)):\n print(f\"File not found: {file}\")\n return {}\n with open(file, 'r') as f:\n js = json.load(f)\n return js\n\n\ndef conv_conf(fm, to={}):\n def __replace(src, to):\n import re\n rtn = src\n for k, v in to.items():\n if isinstance(v, dict):\n rtn = __replace(rtn, v)\n elif isinstance(v, int):\n rtn = re.sub(\"{{%s}}\" % (k), str(v), rtn)\n elif isinstance(v, str):\n rtn = re.sub(\"{{%s}}\" % (k), v, rtn)\n return rtn\n\n def __replace_dict(fm, to):\n c = fm\n if isinstance(c, dict):\n for k, v in c.items():\n if isinstance(v, dict):\n v = __replace_dict(v, to)\n elif isinstance(v, list):\n [__replace_dict(x, to) for x in v]\n elif isinstance(v, str):\n v = __replace(v, to)\n c[k] = v\n return c\n fm = __replace_dict(fm, to)\n fm = __replace_dict(fm, fm)\n return fm\n\n\ndef add_env(env=[]):\n import os\n for e in env:\n val = os.getenv(f\"PY_{e}\", None)\n if val != None:\n sysconf[e.lower()] = val\n return sysconf\n\n\nsysconf = load_conf(\"./conf/system.json\")\nsysconf = conv_conf(sysconf)\n", "id": "1838044", "language": "Python", "matching_score": 1.467745065689087, "max_stars_count": 0, "path": "gwcomm/conf.py" }, { "content": "from abc import ABC\nfrom .html2obj import *\nimport re\n\n\nclass extract(ABC):\n def __init__(self):\n pass\n\n def extract(self, conf, obj: list = [], showexec: bool = False) -> list:\n def __extr(conf, obj: list = []):\n rtn = []\n obj = [conf] if obj == [] else obj\n for o in obj:\n c = self.conv_conf(conf, o)\n rtn += self.extract_data(c)\n return rtn\n import timeit\n conf = conf if isinstance(conf, list) else [conf]\n obj = obj if isinstance(obj, list) else [obj] \n # obj = []\n for i, c in enumerate(conf):\n starttime = timeit.default_timer()\n obj = 
__extr(c, obj)\n if showexec:\n print(\n f\"{i}:\\tcnt: {len(obj)}\\ttime - {timeit.default_timer() - starttime}\")\n return obj\n\n def extract_data(self, conf: str):\n obj = html2obj(url=conf.get(\"url\"))\n items = obj.get_xpath(xpath=conf.get(\"items\"))\n data = []\n for i in items:\n dt = {}\n skip = False\n for k, v in conf.get(\"data\", {}).items():\n val = None\n if isinstance(v, dict):\n for tk, tv in v.items():\n if tk == \"_act\":\n continue\n elif tk == \"get\":\n val = i\n elif tk == \"fix\":\n val = v\n tv = tk\n else:\n val = i.get(tk, {})\n if val != None:\n val = val.get(tv, None)\n else:\n val = i.get(v, None)\n if val != None:\n act = conf.get(\"data\", {}).get(k, {}).get(\"_act\", [])\n for a in act:\n for tk, tv in a.items():\n if tk == \"re\":\n fm = tv\n to = \"\"\n if isinstance(tv, dict):\n for fm, to in tv.items():\n break\n # print(val, fm)\n val = re.sub(fm, to, val)\n elif tk == \"split\":\n sp = \" \"\n cnt = 0\n if isinstance(tv, dict):\n for sp, cnt in tv.items():\n break\n # print(val, sp, cnt)\n val = val.split(sp)[cnt]\n elif tk == \"check\":\n for fn, rst in tv.items():\n break\n if fn == \"type\":\n if rst == \"int\":\n try:\n tmp = int(val)\n except:\n skip = True\n elif rst==\"str\":\n if val==None or val == \"\":\n skip=True\n elif fn == \"notin\":\n skip = (val in rst)\n elif tk == \"conv_json\":\n import json\n val = json.loads(val)\n if isinstance(tv, str):\n val = val.get(tv, None)\n elif tk == \"pfx\":\n val = f\"{tv}{val}\"\n if val != None and val != \"\":\n dt.update({k: val})\n if not(skip) and dt != {}:\n data.append(dt)\n return data\n\n def conv_conf(self, conf: dict, org_conf: dict = {}) -> dict:\n def __nestget(conf: dict, key: str, default: str = \"\") -> str:\n rtn = default\n for k, v in conf.items():\n if k == key:\n rtn = conf.get(k, default)\n break\n else:\n if isinstance(v, dict):\n rtn = __nestget(v, key, default)\n break\n return rtn\n\n org_conf = conf if org_conf == {} else org_conf\n rtn = conf.copy()\n for k, v in conf.items():\n if isinstance(v, dict):\n v = self.conv_conf(v, org_conf)\n elif isinstance(v, list):\n v = [self.conv_conf(o, org_conf) for o in v]\n elif isinstance(v, str) or isinstance(v, int):\n if v != None:\n key = str(v).split(\"}}\")[:-1]\n # print(v, key)\n for tk in key:\n ttk = tk.split(\"{{\", 1)\n if len(ttk) == 1:\n continue\n v = re.sub(\"{{%s}}\" % (ttk[1]), str(__nestget(org_conf, ttk[1], \"\")), v)\n rtn.update({k: v})\n return rtn\n", "id": "4488535", "language": "Python", "matching_score": 1.1895227432250977, "max_stars_count": 0, "path": "html2obj/lib/extract.py" }, { "content": "from .formathtml import *\nfrom .hobj import *\nfrom .html2obj import *\nfrom .extract import *\n", "id": "7207676", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "html2obj/lib/__init__.py" }, { "content": "from .scrape import *\nfrom .gimy_scrape import *", "id": "8244677", "language": "Python", "matching_score": 0.8740825057029724, "max_stars_count": 0, "path": "gwws/__init__.py" }, { "content": "from flask import Blueprint\nimport gwcomm as comm\n\nfs = comm.filesystem()\nfname = fs.fname(__file__)\nstatic = Blueprint(fname, __name__)\nlg = comm.logger(__name__)\n\n\n@static.route(\"/link/<media>/<st>/<ep>\")\ndef getlink(*args, **kwargs):\n import gwws as ws\n import resources as res\n from flask import redirect\n media = kwargs.get(\"media\", None)\n st = kwargs.get(\"st\", None)\n ep = kwargs.get(\"ep\", None)\n lg.debug(f\"media: {media} st: {st}, ep: {ep}\")\n gimy = 
res.gimyscrape(scrape_path=\"./conf/scrape/gimy.tv\")\n linkws = ws.gimy_scrape(conf=gimy.conf)\n if st == None or ep == None:\n return \"\", 404\n lst = [{\"st\": st, \"ep\": ep}]\n data = linkws.get(type=\"links\", lst=[\n {\"media_id\": media, \"stream_ep\": lst}])\n if data == []:\n return \"\", 404\n for d in data:\n url = d.get(\"url\")\n break\n return redirect(url)\n\n\n# @static.route(\"/link/<media>/e/<ep>\")\n# @static.route(\"/link/<media>/s/<st>\")\n# @static.route(\"/link/<media>/<st>/<ep>\")\n# def getlink(*args, **kwargs):\n# import gwws as ws\n# import resources as res\n# from flask import redirect, Response\n# import re\n# import requests\n# media = kwargs.get(\"media\", None)\n# st = kwargs.get(\"st\", None)\n# ep = kwargs.get(\"ep\", None)\n# lg.debug(f\"media: {media} st: {st}, ep: {ep}\")\n# gimy = res.gimyscrape(scrape_path=\"./conf/scrape/gimy.tv\")\n# streamws = ws.gimy_scrape(conf=gimy.conf)\n# linkws = ws.gimy_scrape(conf=gimy.conf)\n# if st != None and ep != None:\n# eplst = [{\"st\": st, \"ep\": ep}]\n# else:\n# stream = streamws.get(type=\"streams\", lst=[{\"media_id\": media}])\n# t = []\n# for s in stream:\n# t = s.get(\"stream_ep\", [])\n# break\n# if st == None:\n# st = []\n# for s in t:\n# s.update({\"media_id\": media})\n# st.append(s)\n# eplst = [{\"st\": s.get(\"st\", 1), \"ep\": s.get(\"ep\", 1)}\n# for s in st if s.get(\"ep\", 1) == ep]\n# elif ep == None:\n# ep = []\n# for s in t:\n# s.update({\"media_id\": media})\n# ep.append(s)\n# eplst = [{\"st\": s.get(\"st\", 1), \"ep\": s.get(\"ep\", 1)}\n# for s in ep if s.get(\"st\", 1) == st if int(s.get(\"ep\", 1)) <= 2]\n# data = linkws.get(type=\"links\", lst=[\n# {\"media_id\": media, \"stream_ep\": eplst}])\n# # rtn = [\"#EXTM3U\", ]\n# rtn = [\"#EXTM3U\", \"#EXT-X-START:TIME-OFFSET=0\"]\n# cnt = 0\n# for d in data:\n# url = d.get(\"url\")\n# rurl = \"/\".join(url.split(\"/\")[:-1])\n# lg.info(f\"rurl: {rurl}, url: {url}\")\n# r = requests.get(url)\n# try:\n# lnk = str(r.content)\n# lnk = re.sub(\"^b'\", \"\", lnk)\n# lnk = re.sub(\"'$\", \"\", lnk)\n# lg.debug(f\"lnk: {lnk}\")\n# lnk = lnk.split(\"\\\\n\")\n# except:\n# lnk = []\n# for l in lnk:\n# t = re.findall(\"^#EXTM3U\", l)\n# if t == []:\n# t = re.findall(\"^#EXT\", l)\n# if t == []:\n# rtn.append(f\"#EXT-X-MEDIA-SEQUENCE:{cnt}\")\n# cnt += 1\n# rtn.append(f\"#EXTINF:{cnt},{cnt}\")\n# l = f\"{rurl}/{l}\"\n# # else:\n# # attr = l.split(\":\")[1].split(\",\")\n# # attrs = {a.split(\"=\")[0]: a.split(\"=\")[1] for a in attr}\n# # lg.debug(f\"attrs: {attrs}\")\n# # attrs[\"PROGRAM-ID\"] = cnt\n# # l = \"{}{}\".format(\n# # \"#EXT-X-STREAM-INF:\", \",\".join([f\"{k}={v}\" for k, v in attrs.items()]))\n# rtn.append(l)\n# rtn.append(\"#EXT-X-ENDLIST\")\n# rtn = \"\\n\".join(rtn)\n# lg.debug(f\"rtn: {rtn}\")\n# return Response(rtn, mimetype='application/vnd.apple.mpegurl'), 200\n # return redirect(url)\n", "id": "106148", "language": "Python", "matching_score": 1.9597364664077759, "max_stars_count": 0, "path": "apiapp/resources/static.py" }, { "content": "from .scrape import *\n\n\nclass gimy_scrape(scrape):\n def get(self, type, lst=None, conf=None):\n \"\"\"Get Gimy objects\n\n Arguments:1\n type {string} -- cats / subcats / medias / streams / links\n\n Keyword Arguments:\n lst {list} -- Filter list (default: {None})\n conf {dict} -- Config (default: {None})\n\n Returns:\n list -- List of dict\n \"\"\"\n conf = self._fullconf if conf == None else conf\n # self._lg.debug(f\"conf: {conf}\")\n if type == \"cats\":\n return self.__get_cats(conf=conf)\n 
elif type == \"subcats\":\n try:\n if lst[0].get(\"cat_id\", None) == None:\n lst = None\n except:\n lst = None\n lst = self.get(type=\"cats\", conf=conf) if lst == None else lst\n return self.__get_subcats(lst=lst, conf=conf)\n elif type == \"medias\":\n try:\n if lst[0].get(\"scat_id\", None) == None:\n lst = None\n except:\n lst = None\n lst = self.get(type=\"subcats\", conf=conf) if lst == None else lst\n return self.__get_medias(lst=lst, conf=conf)\n elif type == \"streams\":\n try:\n if lst[0].get(\"scat_id\", None) != None:\n lst = self.get(type=\"medias\", lst=lst, conf=conf)\n else:\n if lst[0].get(\"media_id\", None) == None:\n lst = None\n except:\n lst = None\n lst = self.get(type=\"medias\", conf=conf) if lst == None else lst\n # medias = self.__get_medias(lst=lst, conf=conf)\n return self.__get_streams(lst=lst, conf=conf)\n elif type == \"links\":\n try:\n if lst[0].get(\"media_id\", None) == None:\n lst = None\n elif lst[0].get(\"stream_ep\", None) == None:\n lst = None\n except:\n lst = None\n lst = self.get(type=\"streams\", conf=conf) if lst == None else lst\n return self.__get_links(lst=lst, conf=conf)\n return []\n\n def __get_cats(self, conf=None):\n import copy\n conf = self._fullconf if conf == None else conf\n cfg = copy.deepcopy(conf)\n cat_ws = scrape(name=\"cat\", conf=cfg)\n cats = cat_ws.items()\n del cat_ws\n return cats\n\n def __get_subcats(self, lst, conf=None):\n import copy\n conf = self._fullconf if conf == None else conf\n subcats = []\n for c in lst:\n cfg = copy.deepcopy(conf)\n cat_id = c.get(\"cat_id\", \"\")\n subcat_ws = scrape(name=\"subcat\", conf=cfg, cat_id=cat_id)\n t = []\n for i in subcat_ws.items():\n i[\"cat_id\"] = cat_id\n t.append(i)\n subcats += t\n del subcat_ws\n return subcats\n\n def __get_medias(self, lst, conf=None):\n import copy\n conf = self._fullconf if conf == None else conf\n pages = conf.get(\"media\", {}).get(\"pages\", 1)\n medias = []\n for c in lst:\n scat_id = c.get(\"scat_id\", \"\")\n for page in range(pages):\n cfg = copy.deepcopy(conf)\n media_ws = scrape(\n name=\"media\", conf=cfg, scat_id=scat_id, page=page+1)\n medias += media_ws.items()\n del media_ws\n return medias\n\n def split_data(self, lst, cols, keys, grp):\n \"\"\"Split streams data into st / ep\n\n Arguments:\n lst {list} -- Source\n cols {list} -- Column list\n keys {list} -- Key list\n grp {string} -- Group\n\n Returns:\n list -- List of value\n \"\"\"\n grp_dt = lst.get(grp, [])\n if grp_dt == []:\n grp_dt = [{k: lst.get(k) for k in cols}]\n else:\n t = []\n for i in grp_dt:\n for k in keys:\n i[k] = lst.get(k)\n t.append(i)\n grp_dt = t\n return grp_dt\n\n def __get_streams(self, lst, conf=None):\n import copy\n conf = self._fullconf if conf == None else conf\n streams = []\n for c in lst:\n cfg = copy.deepcopy(conf)\n media_id = c.get(\"media_id\", \"\")\n stream_ws = scrape(\n name=\"stream\", conf=cfg, media_id=media_id)\n stream = {\"media_id\": media_id}\n for c in conf.get(\"stream\", {}).get(\"confs\", []):\n cfg = conf.get(c, {})\n t = stream_ws.items(conf=cfg)\n if len(t) == 1:\n for i in t:\n for k, v in i.items():\n stream[k] = v\n elif len(t) == 0:\n stream = {}\n break\n else:\n stream[c] = t\n if stream != {}:\n streams.append(stream)\n del stream_ws\n return streams\n\n def __get_links(self, lst, conf=None):\n import copy\n conf = self._fullconf if conf == None else conf\n links = []\n for c in lst:\n media_id = c.get(\"media_id\")\n for e in c.get(\"stream_ep\", []):\n st = e.get(\"st\")\n ep = e.get(\"ep\")\n cfg = 
copy.deepcopy(conf)\n link_ws = scrape(\n name=\"link\", conf=cfg, media_id=media_id, st=st, ep=ep)\n link = {\"media_id\": media_id, \"st\": st, \"ep\": ep}\n for t in link_ws.items():\n if len(t) == 1:\n for k, v in t.items():\n link[k] = v\n links.append(link)\n break\n return links\n", "id": "3081783", "language": "Python", "matching_score": 2.641549587249756, "max_stars_count": 0, "path": "gwws/gimy_scrape.py" }, { "content": "from flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\nimport gwcomm as comm\nimport gwws as ws\nfrom lib.token import *\nfrom lib.paging import *\nfrom .scrape import *\n\nlg = comm.logger(__name__)\n\n\nclass gimyscrape(scrapeapi):\n def parameters(self, para):\n type = para.get(\"type\", None)\n id = para.get(\"id\", None)\n scat_id = para.get(\"scat_id\", None)\n st = para.get(\"st\", None)\n ep = para.get(\"ep\", None)\n if (id != None or scat_id != None) and type != None:\n if type == \"cat\":\n lst = {}\n elif type == \"subcat\":\n lst = {\"cat_id\": id}\n elif type == \"media\":\n lst = {\"scat_id\": id}\n elif type == \"stream\":\n if scat_id != None:\n lst = {\"scat_id\": scat_id}\n else:\n lst = {\"media_id\": id}\n elif type == \"link\":\n lst = {\"media_id\": id}\n if st != None and ep != None:\n stream_ep = {}\n stream_ep[\"st\"] = st\n stream_ep[\"ep\"] = ep\n lst[\"stream_ep\"] = [stream_ep]\n else:\n lst = {}\n type = f\"{type}s\" if type != None else None\n page = para.get(\"page\", 1)\n return type, [lst], page\n\n @jwt_required\n def get(self, *args, **kwargs):\n lg.info(\"Get Method\")\n from flask import request\n from flask_restplus import abort\n self.__owner, self.__acl = get_identity(request=request)\n js = request.get_json()\n type, lst, page = self.parameters(kwargs)\n conf = js if js != None else self.conf\n if conf == None or conf == {}:\n abort(404, msg=\"Config not defined\")\n if type == None:\n abort(404, msg=\"type not defined\")\n check_acl(self.__acl, self.reqacl)\n # lg.debug(f\"conf: {conf}\")\n dataws = ws.gimy_scrape(conf=conf)\n u = conf.get(\"stream\", {}).get(\"url\", \"\")\n lg.debug(f\"type: {type}, lst: {lst}, url: {u}\")\n data = dataws.get(type=type, lst=lst)\n paging = self.conf.get(\"paging\", 20)\n pages = 1\n if page != \"all\":\n try:\n page = int(page)\n except:\n page = 1\n data, pages = data_paging(data, paging, page)\n return {\"data\": data, \"pages\": pages}, 200\n # return {\"data\": data}, 200\n", "id": "12482354", "language": "Python", "matching_score": 4.2008209228515625, "max_stars_count": 0, "path": "apiapp/resources/gimyscrape.py" }, { "content": "from flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\nimport gwcomm as comm\nimport gwws as ws\nfrom lib.token import *\nfrom lib.paging import *\n\nlg = comm.logger(__name__)\n\n\nclass scrapeapi(Resource):\n def __init__(self, *args, **kwargs):\n self.reqacl = 0\n self.__acl = 0\n self.__owner = None\n path = kwargs.get(\"scrape_path\", \"\")\n self.__read_cfg(path)\n self.conf = comm.sysconf.get(\"scrape\", {})\n\n def __read_cfg(self, path):\n import os\n fs = comm.filesystem()\n cfg = {}\n for c in fs.ls_dict(os.path.join(path, \"*.json\")):\n cfg[c.get(\"name\")] = comm.load_conf(c.get(\"full\"))\n comm.sysconf[\"scrape\"] = cfg\n\n def parameters(self, para):\n type = para.get(\"type\", None)\n page = para.get(\"page\", 1)\n return type, page\n\n @jwt_required\n def get(self, *args, **kwargs):\n lg.info(\"Get Method\")\n from flask import request\n from flask_restplus import abort\n 
self.__owner, self.__acl = get_identity(request=request)\n js = request.get_json()\n type, page = self.parameters(kwargs)\n conf = js if js != None else self.conf\n if conf == None or conf == {}:\n abort(404, msg=\"Config not defined\")\n if type == None:\n conf = {\"data\": conf}\n type = \"data\"\n check_acl(self.__acl, self.reqacl)\n dataws = ws.scrape(name=type, conf=conf)\n data = dataws.items()\n if conf.get(type, {}).get(\"info\", {}) == {}:\n return {\"content\": str(dataws.content())}, 200\n else:\n paging = self.conf.get(\"paging\", 100)\n pages = 1\n if page != \"all\":\n try:\n page = int(page)\n except:\n page = 1\n data, pages = data_paging(data, paging, page)\n return {\"data\": data, \"pages\": pages}, 200\n", "id": "1894790", "language": "Python", "matching_score": 4.1776628494262695, "max_stars_count": 0, "path": "apiapp/resources/scrape.py" }, { "content": "from flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\nimport gwcomm as comm\nfrom lib.token import *\nfrom lib.paging import *\n\nlg = comm.logger(__name__)\n\n\nclass dataapi(Resource):\n def __init__(self, *args, **kwargs):\n self.__reqacl = 0\n self.__acl = 0\n self.__owner = None\n self.__confpath = kwargs.get(\"conf_path\", \"./conf/data\")\n self.__conf = {}\n\n def __parameters(self, para):\n from flask_restplus import abort\n import gwpd as pdfx\n model = para.get(\"type\", None)\n sec = para.get(\"sec\", 0)\n col = para.get(\"col\", None)\n id = para.get(\"id\", None)\n page = para.get(\"page\", 1)\n self.__conf = comm.load_conf(\n \"{}/{}.json\".format(self.__confpath, model))\n if isinstance(sec, str):\n sec = int(sec)\n if model == None:\n abort(400, msg=f\"Undefined <type>- {model}\")\n lg.debug(f\"set pd\")\n try:\n dt = pdfx.pdvw(model=model, path=self.__confpath,\n security=sec, owner=self.__owner)\n except Exception as e:\n lg.warning(f\"{model} - Not view - {e}\")\n try:\n lg.debug(f\"{model} - start pdtb\")\n dt = pdfx.pdtb(model=model, path=self.__confpath,\n security=sec, owner=self.__owner)\n lg.debug(f\"{model} - end pddt\")\n except Exception as e:\n lg.warning(f\"{model} - Not table - {e}\")\n abort(\n 400, msg=f\"{model} - Invalid <sec>/<model> - {sec}/{model}\")\n\n if col == None:\n data = dt.get() if id == None else dt.get(key=id)\n else:\n data = dt.get(filter={col: id})\n try:\n acl = int(dt.acl) if isinstance(dt.acl, str) else dt.acl\n except:\n acl = 0\n return data, dt, acl, page\n\n @jwt_required\n def get(self, *args, **kwargs):\n lg.info(\"Get Method\")\n from flask import request\n from flask_restplus import abort\n self.__owner, self.__acl = get_identity(request=request)\n data, obj, self.__reqacl, page = self.__parameters(kwargs)\n cols = {c.get(\"name\"): c.get(\"model\", \"str\") for c in obj._cols()}\n check_acl(self.__acl, self.__reqacl)\n if data == []:\n abort(404, msg=\"Not found\", data=[], cols=cols, pages=1)\n paging = self.__conf.get(\"paging\", 100)\n pages = 1\n if page != \"all\":\n try:\n page = int(page)\n except:\n page = 1\n data, pages = data_paging(data, paging, page)\n return {\"data\": data, \"cols\": cols, \"pages\": pages}, 200\n\n @jwt_required\n def post(self, *args, **kwargs):\n lg.info(\"Post Method\")\n from flask import request\n from flask_restplus import abort\n self.__owner, self.__acl = get_identity(request=request)\n data, obj, self.__reqacl, _ = self.__parameters(kwargs)\n check_acl(self.__acl, self.__reqacl)\n\n # upsert\n from flask import request\n try:\n body = request.get_json()\n except:\n abort(400, 
msg=\"Invalid JSON\")\n datas = body.get(\"datas\", [])\n if datas == []:\n datas = [body]\n for d in datas:\n rtn, df = obj.upsert(d)\n if not(rtn):\n abort(400, msg=\"Upsert fail\")\n rtn, _ = obj.save()\n if not(rtn):\n abort(400, msg=\"Save fail\")\n lg.debug(df)\n return {\"data\": obj.get()}, 200\n", "id": "12390405", "language": "Python", "matching_score": 4.811119079589844, "max_stars_count": 0, "path": "apiapp/resources/data.py" }, { "content": "from flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\nimport gwcomm as comm\nfrom lib.token import *\nfrom pyhanlp import *\n\nlg = comm.logger(__name__)\n\n\nclass convcapi(Resource):\n def __init__(self, *args, **kwargs):\n self.__reqacl = 0\n self.__acl = 0\n self.__owner = None\n\n def __parameters(self, para):\n to = para.get(\"to\", None)\n return to\n\n @jwt_required\n def get(self, *args, **kwargs):\n lg.info(\"Get Method\")\n from flask import request\n from flask_restplus import abort\n self.__owner, self.__acl = get_identity(request=request)\n js = request.get_json()\n data = js.get(\"data\", \"\")\n to = self.__parameters(kwargs)\n check_acl(self.__acl, self.__reqacl)\n if data == \"\":\n abort(404, msg=\"Not found\")\n if to == \"tc\":\n data = HanLP.convertToTraditionalChinese(data)\n elif to == \"sc\":\n data = HanLP.convertToSimplifiedChinese(data)\n return {\"data\": data}, 200\n", "id": "12532778", "language": "Python", "matching_score": 2.6345744132995605, "max_stars_count": 0, "path": "apiapp/resources/convc.py" }, { "content": "def decode_token(token):\n \"\"\"Decode token to identity\n\n Arguments:\n token {string} -- Bearer token\n\n Returns:\n dict -- Identity\n string -- token\n \"\"\"\n import jwt\n import re\n import gwcomm as comm\n token = re.sub(\"Bearer \", \"\", token)\n identity = jwt.decode(token, comm.sysconf.get(\n \"secret_key\")).get(\"identity\", {})\n return identity, token\n\n\ndef get_identity(request):\n from flask_restplus import abort\n token = request.headers.get(\"Authorization\")\n identity, _ = decode_token(token=token)\n owner = identity.get(\"usr_cde\", None)\n acl = identity.get(\"acl\", 0)\n if isinstance(acl, str):\n acl = int(acl)\n return owner, acl\n\n\ndef check_acl(acl, reqacl):\n from flask_restplus import abort\n if acl < reqacl:\n abort(401, msg=f\"ACL not enough - required {reqacl}\")\n return False\n return True\n", "id": "7516199", "language": "Python", "matching_score": 2.0642201900482178, "max_stars_count": 0, "path": "apiapp/lib/token.py" }, { "content": "def jwt_required(func):\n def wrapper(*args, **kwargs):\n from flask import redirect, session, request\n from flask_jwt_extended import decode_token\n from datetime import datetime\n # attempt to grab the jwt from request\n try:\n token = session.get(\"access_token\", None)\n except:\n token = None\n jwt_data = decode_token(token) if token != None else None\n # if the grab worked and the identity key is in the dict then proceed\n if jwt_data and jwt_data.get(\"exp\", 0) >= int(datetime.now().strftime('%s')):\n return func(*args, **kwargs)\n else:\n return redirect(f'/login?msg=Require Login&rurl={request.path}', code=302)\n return wrapper\n\n\ndef get_user():\n from flask import session\n from flask_jwt_extended import decode_token\n try:\n token = session.get(\"access_token\", None)\n except:\n token = None\n jwt_data = decode_token(token) if token != None else None\n if jwt_data == None:\n return (None, None, None)\n return tuple(jwt_data.get(\"sub\").split(\";\"))\n", "id": "6587940", 
"language": "Python", "matching_score": 1.9304324388504028, "max_stars_count": 0, "path": "jmapp/lib/auth.py" }, { "content": "import os\nfrom flask import Blueprint\nfrom model import login_mod, register_mod\n\nfname = os.path.basename(__file__).split(\".\")[0]\nlogin = Blueprint(fname, __name__)\nlogin_m = login_mod()\n# login_m.add([{\"usr_cde\": \"test\", \"password\": \"<PASSWORD>\"}])\n\nreg_m = register_mod()\n\n# def api_filter(data: list, filt: dict):\n# import copy\n# tmp = copy.deepcopy(data)\n# for k, v in filt.items():\n# tmp = [t for t in tmp if t.get(k) == v]\n# return tmp\n\n\n# def api_data(*args, **kwargs):\n# for a in args:\n# kwargs.update(a)\n# break\n# data = [{\"uid\": \"111\", \"usr_cde\": \"test\", \"password\": \"<PASSWORD>\"}]\n# rtn = api_filter(data, kwargs)\n# return rtn\n\n\n@login.route(\"/login\", methods=[\"GET\"])\ndef get():\n from flask import render_template, request, session\n session.pop('usr_cde', None)\n session.pop('access_token', None)\n paralst = (\"msg\", \"rurl\")\n para = {p: request.args.get(p, None) for p in paralst}\n form = login_m.form()\n print(form.cols)\n return render_template(\"login.html.j2\", obj=[{\"type\": \"form\", \"obj\": form}], msg=para.get(\"msg\"), rurl=para.get(\"rurl\", \"/\"))\n\n\n@login.route(\"/login\", methods=[\"POST\"])\ndef post():\n from flask import redirect, request, session\n from flask_jwt_extended import create_access_token\n paralst = (\"usr_cde\", \"password\")\n para = {p: request.form.get(p, None) for p in paralst}\n rtn, _ = login_m.find(para, user={\"username\": para.get(\"usr_cde\", None), \"password\": para.get(\"password\", None)})\n if len(rtn) > 0:\n dt = rtn[0]\n getparalst = (\"msg\", \"rurl\")\n getpara = {p: request.args.get(p, None) for p in getparalst}\n rurl = getpara.get(\"rurl\") if getpara.get(\"rurl\") != None else \"/\"\n uid = dt.get(\"uid\")\n usr_cde = para.get(\"usr_cde\")\n pwd = para.get(\"password\")\n access_token = create_access_token(identity=f\"{uid};{usr_cde};{pwd}\")\n session[\"usr_cde\"] = para.get(\"usr_cde\")\n session[\"access_token\"] = access_token\n return redirect(rurl)\n session.pop('usr_cde', None)\n session.pop('access_token', None)\n return redirect(\"/login?msg={}\".format(\"Invalid username or password\"))\n\n\n@login.route(\"/logout\", methods=[\"GET\"])\ndef logout_get():\n from flask import redirect, session\n session.pop('usr_cde', None)\n # session.pop('pwd', None)\n session.pop('access_token', None)\n return redirect(\"/\")\n\n\n@login.route(\"/reg\", methods=[\"GET\"])\ndef reg_get():\n from flask import render_template, request\n paralst = (\"msg\", \"\")\n para = {p: request.args.get(p, None) for p in paralst}\n form = reg_m.form()\n return render_template(\"register.html.j2\", obj=[{\"type\": \"form\", \"obj\": form}], msg=para.get(\"msg\"))\n\n\n@login.route(\"/reg\", methods=[\"POST\"])\ndef reg_post():\n from flask import redirect, request, session\n from flask_jwt_extended import create_access_token\n paralst = (\"usr_cde\", \"password\")\n para = {p: request.form.get(p, None) for p in paralst}\n rtn, _ = login_m.find(para)\n if len(rtn) == 0:\n print(\"login add\")\n rtn = login_m.add(para)\n if rtn != []:\n return redirect(\"/login\")\n # if rtn != []:\n # return redirect(\"/login\")\n return redirect(\"/reg?msg={}\".format(\"Invalid input\"))\n", "id": "12111615", "language": "Python", "matching_score": 2.4416301250457764, "max_stars_count": 0, "path": "jmapp/routes/login.py" }, { "content": "import requests\nfrom flask import 
abort\n\nconf = {\"host\": \"https://jmapi.geo.freeddns.org\",\n \"account\": {\"username\": \"system\", \"password\": \"<PASSWORD>\"}}\n\n\ndef get_token(username: str = None, password: str = None) -> str:\n from .auth import get_user\n _, usr, pwd = get_user()\n username = usr if username == None else username\n password = <PASSWORD> if password == None else password\n username = conf.get(\"account\").get(\n \"username\") if username == None else username\n password = conf.get(\"account\").get(\n \"password\") if password == None else password\n headers = {\"accept\": \"application/json\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"}\n data = {\"username\": username, \"password\": password}\n url = \"{}/token\".format(conf.get(\"host\"))\n rtn = requests.post(url,\n headers=headers, data=data)\n rtn = rtn.json()\n token = rtn.get(\"access_token\", None)\n return token\n\n\ndef get_data(*args, **kwargs) -> list:\n for a in args:\n kwargs.update(a)\n usr = kwargs.get(\"username\", None)\n pwd = kwargs.get(\"password\", None)\n type = kwargs.get(\"type\", None)\n code = kwargs.get(\"code\", None)\n token = get_token(username=usr, password=<PASSWORD>)\n if token == None:\n abort(401, description=\"Authentication failed\")\n headers = {\"accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\"}\n url = \"{}/{}\".format(conf.get(\"host\"), type)\n if code != None:\n url = f\"{url}/{code}\"\n rtn = requests.get(url,\n headers=headers)\n if rtn.status_code != 200:\n abort(403)\n rtn = rtn.json()\n return rtn.get(\"items\", [])\n\n\ndef post_data(*args, **kwargs) -> list:\n rtn, data = post_data_with_rtn(*args, **kwargs)\n return rtn\n\n\ndef post_data_with_rtn(*args, **kwargs) -> list:\n for a in args:\n kwargs.update(a)\n type = kwargs.get(\"type\", None)\n code = kwargs.get(\"code\", None)\n data = kwargs.get(\"data\", {})\n token = get_token()\n if token == None:\n return False\n # abort(401, description=\"Authentication failed\")\n headers = {\"accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\"}\n url = \"{}/{}\".format(conf.get(\"host\"), type)\n if code != None:\n url = f\"{url}/{code}\"\n rtn = requests.post(url,\n headers=headers, json=data)\n if rtn.status_code != 200:\n return False, None\n return True, rtn.json()\n", "id": "7295586", "language": "Python", "matching_score": 1.3035924434661865, "max_stars_count": 0, "path": "jmapp/lib/apiclient.py" }, { "content": "def http_exception(status_code=200, loc=[], msg=\"\", type=\"\"):\n from fastapi import status, HTTPException\n if status_code == 401:\n return HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=msg,\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n elif status_code == 422:\n return HTTPException(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=[{\"loc\": loc, \"msg\": msg, \"type\": type}],\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n return HTTPException(status_code=status_code, detail=msg)\n", "id": "1056018", "language": "Python", "matching_score": 0.017587702721357346, "max_stars_count": 0, "path": "jmapi/lib/auth.py" }, { "content": "from fastapi import APIRouter, Depends\nimport models as mod\n\n\napiroute = APIRouter()\ntags = [\"jobcat\"]\ndependencies = []\n\nrouter = {\"route\": apiroute, \"tags\": tags, \"dependencies\": dependencies}\n\n\ndef restore_model(name):\n import pickle\n import os\n path = os.path.join(os.getcwd(), \"jobcat\")\n model, data, target, score = pickle.load(\n open(os.path.join(path, 
\"{}.pkl\".format(name)), 'rb'))\n return model, data, target, score\n\n\n@apiroute.post(\"/jobcat\", response_model=mod.JobCat)\nasync def get_job_cat(job_desc: mod.JobDesc):\n pred = \"\"\n model, data, target, score = restore_model(\"model2\")\n jobdesc=job_desc.job_desc\n # job_desc = dt.get(\"job_desc\", \"\")\n # print(data, target, score, job_desc)\n for p in model.predict([jobdesc]):\n pred = p\n break\n # print(model, data, target, score)\n return {\"job_cat\": pred}\n", "id": "9530714", "language": "Python", "matching_score": 2.1270532608032227, "max_stars_count": 0, "path": "jmapi/routes/jobcat.py" }, { "content": "from typing import Optional\nfrom pydantic import BaseModel\n\n\nclass JobCat(BaseModel):\n job_cat: str\n\nclass JobDesc(BaseModel):\n job_desc: str\n", "id": "3364373", "language": "Python", "matching_score": 0.28382939100265503, "max_stars_count": 0, "path": "models/jobcat.py" }, { "content": "from . import auth\nfrom . import jobcat\nfrom . import data\nimport models as mod\n\ndebug = False\n\n__routes__ = [auth.router, jobcat.router]\n\nfor k, v in mod.__model__.items():\n __routes__.append(data.data_rt(mod_name=k, debug=debug).router)\n", "id": "7406811", "language": "Python", "matching_score": 0.8971680402755737, "max_stars_count": 0, "path": "jmapi/routes/__init__.py" }, { "content": "from .users import *\nfrom .profiles import *\nfrom .token import *\nfrom .jobs import *\nfrom .applies import *\nfrom .offers import *\nfrom .comments import *\nfrom .jobcat import *\n\n__model__ = {\"users\": users_mod, \"profiles\": profiles_mod, \"jobs\": jobs_mod,\n \"applies\": applies_mod, \"offers\": offers_mod, \"comments\": comments_mod}\n", "id": "9237200", "language": "Python", "matching_score": 1.0003176927566528, "max_stars_count": 0, "path": "models/__init__.py" }, { "content": "from abc import ABC\nfrom fastapi_pagination import Page, paginate, add_pagination\nfrom typing import Optional\nfrom fastapi import APIRouter, Depends\nfrom .auth import oauth2_scheme, token2user\nimport models as mod\nfrom jmapi import lib as lib\n\n\nclass data_rt(ABC):\n def __init__(self, mod_name, debug=False):\n self.__mod_name = mod_name\n self.__data_mod = mod.__model__.get(mod_name)\n self.debug = debug\n self.data_mod = self.__data_mod(debug=self.debug)\n self.model = self.data_mod._model\n self.router = self.set_route()\n\n def get(self, token: str = Depends(oauth2_scheme), code: Optional[str] = None):\n owner = token2user(token)\n self.data_mod.set_owner(owner)\n filt = {}\n if code != None:\n keycols = self.data_mod.cols(attr=\"key\")\n filt = {c: code for c in keycols}\n df = self.data_mod.select(filt)\n if df.empty:\n return paginate([])\n rtn = self.data_mod.to_dict(df)\n return paginate(rtn)\n\n def post(self, token: str = Depends(oauth2_scheme), code: Optional[str] = None, data: dict = {}):\n owner = token2user(token)\n self.data_mod.set_owner(owner)\n updcols = self.data_mod.cols(attr=\"updcol\")\n keycols = self.data_mod.cols(attr=\"key\")\n if code != None:\n data.update({c: code for c in keycols})\n upd = {k: v for k, v in data.items() if k in updcols}\n key = {k: v for k, v in data.items() if k in keycols and v != None}\n upd.update(key)\n rtn, dt = self.data_mod.upsert(upd)\n if not(rtn):\n raise lib.http_exception(\n status_code=422, loc=[], msg=\"Invalid key value\")\n self.data_mod.save()\n return paginate(dt)\n\n def set_route(self):\n tags = [self.__mod_name]\n # dependencies = [Depends(oauth2_scheme)]\n dependencies = None\n path = 
\"/{}\".format(self.__mod_name)\n pathwithpara = \"%s/{code}\" % (path)\n apiroute = APIRouter(tags=tags, dependencies=dependencies)\n apiroute.add_api_route(\n path=path, methods=[\"get\"], name=f\"Get {self.__mod_name}\",\n endpoint=self.get, response_model=Page[self.model])\n apiroute.add_api_route(\n path=pathwithpara, methods=[\"get\"], name=f\"Get {self.__mod_name}\",\n endpoint=self.get, response_model=Page[self.model])\n apiroute.add_api_route(\n path=path, methods=[\"post\"], name=f\"Post {self.__mod_name}\",\n endpoint=self.post, response_model=Page[self.model])\n apiroute.add_api_route(\n path=pathwithpara, methods=[\"post\"], name=f\"Post {self.__mod_name}\",\n endpoint=self.post, response_model=Page[self.model])\n return {\"route\": apiroute}\n", "id": "2318184", "language": "Python", "matching_score": 5.695098876953125, "max_stars_count": 0, "path": "jmapi/routes/data.py" }, { "content": "from fastapi import APIRouter, Depends\nfrom fastapi_pagination import Page, paginate, add_pagination\n\nfrom .auth import oauth2_scheme\nfrom jmapi import lib as lib\n\nimport models as mod\njobs = mod.jobs_mod(debug=True)\n\napiroute = APIRouter()\ntags = [\"jobs\"]\ndependencies=[Depends(oauth2_scheme)]\n\nrouter={\"route\": apiroute, \"tags\": tags, \"dependencies\": dependencies}\n\n@apiroute.get(\"/jobs\", response_model=Page[jobs._model])\ndef get_job():\n# def get_job(token: str = Depends(oauth2_scheme)):\n rtn = jobs.to_dict()\n return paginate(rtn)\n\n@apiroute.post(\"/jobs\", response_model=Page[jobs._model])\ndef post_job(job: jobs._model):\n updcols = jobs.cols(attr=\"updcol\")\n keycols = jobs.cols(attr=\"key\")\n upd = {j[0]: j[1] for j in job if j[0] in updcols}\n key = {j[0]: j[1] for j in job if j[0] in keycols and j[1] != None}\n upd.update(key)\n rtn, dt = jobs.upsert(upd)\n if not(rtn):\n raise lib.http_exception(\n status_code=422, loc=[upd], msg=\"Invalid key value\")\n return paginate(dt)\n", "id": "7448676", "language": "Python", "matching_score": 3.765021324157715, "max_stars_count": 0, "path": "jmapi/routes/jobs.py" }, { "content": "from fastapi import FastAPI, Depends\nfrom fastapi_pagination import add_pagination\nfrom . 
import routes as rt\napp = FastAPI()\n\nfor r in rt.__routes__:\n app.include_router(r.get(\"route\"), tags=r.get(\"tags\"), dependencies=r.get(\"dependencies\"))\n\napp=add_pagination(app)\n\n\n# @app.get(\"/\")\n# # def home(token: str = Depends(oauth2_scheme)):\n# def home():\n# jobs.insert({\"job_desc\": \"xxx\"})\n# jobs.update({\"job_cde\": \"JD0000000002\", \"job_desc\": \"yyy\"})\n# # jobs.upsert({\"job_desc\": \"xxx\"})\n# rtn = jobs.to_dict()\n# return {\"data\": rtn}\n\n\n# # @app.get(\"/jobs\", response_model=Page[jobs._model])\n# # def get_job():\n# # rtn = jobs.to_dict()\n# # return paginate(rtn)\n\n\n# @app.post(\"/jobs\", response_model=Page[jobs._model])\n# def post_job(job: jobs._model):\n# updcols = jobs.cols(attr=\"updcol\")\n# keycols = jobs.cols(attr=\"key\")\n# upd = {j[0]: j[1] for j in job if j[0] in updcols}\n# key = {j[0]: j[1] for j in job if j[0] in keycols and j[1] != None}\n# upd.update(key)\n# # if key != {}:\n# # filtcond=key.copy()\n# # print(filtcond)\n# # filt, idx = jobs.filter(filtcond)\n# # upd.update(key)\n# # if idx == []:\n# # raise lib.http_exception(\n# # status_code=422, loc=[upd], msg=\"Invalid key value1\")\n# rtn, dt = jobs.upsert(upd)\n# if not(rtn):\n# print(\"2\", upd)\n# raise lib.http_exception(\n# status_code=422, loc=[upd], msg=\"Invalid key value\")\n# return paginate(dt)\n\n\n# # @app.get(\"/items/\")\n# # async def read_items(token: str = Depends(oauth2_scheme)):\n# # return {\"token\": token}\n\n\n# @app.post(\"/token\", response_model=mod.Token)\n# async def get_token(form_data: OAuth2PasswordRequestForm = Depends()):\n# user = lib.get_user(users, {\"usr_cde\": form_data.username,\n# \"password\": form_data.password})\n# if user is None:\n# raise lib.http_exception(\n# status_code=401, msg=\"Incorrect username or password\")\n# access_token = lib.create_access_token(\n# data={\"sub\": user.get(\"usr_cde\")}, expires_delta=lib.get_token_exp()\n# )\n# return {\"access_token\": access_token, \"token_type\": \"bearer\"}\n\n\n# @app.post(\"/me\")\n# async def get_current_user(token: str = Depends(oauth2_scheme)):\n# from jose import jwt, JWTError\n# credentials_exception = lib.http_exception(\n# status_code=401, msg=\"Could not validate credentials\")\n# try:\n# payload = jwt.decode(token, lib.SECRET_KEY, algorithms=[lib.ALGORITHM])\n# username: str = payload.get(\"sub\")\n# if username is None:\n# raise credentials_exception\n# token_data = mod.TokenData(username=username)\n# except JWTError:\n# raise credentials_exception\n# user = lib.get_user(users, {\"usr_cde\": token_data.username})\n# if user is None:\n# raise credentials_exception\n# return {\"uid\": user.get(\"uid\"), \"usr_cde\": user.get(\"usr_cde\")}\n\n# add_pagination(app)\n", "id": "5135816", "language": "Python", "matching_score": 6.757087230682373, "max_stars_count": 0, "path": "jmapi/app.py" }, { "content": "from fastapi import APIRouter, Depends\nfrom fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm\n\nfrom jmapi import lib as lib\n\nimport models as mod\nusers = mod.users_mod(debug=False)\n# if users.count() == 0:\n# rtn = users.upsert({\"usr_cde\": \"test\", \"password\": \"<PASSWORD>\"})\n# if rtn:\n# users.save()\nusers.upsert({\"usr_cde\": \"system\", \"password\": \"<PASSWORD>\"})\n\napiroute = APIRouter()\ntags = [\"auth\"]\ndependencies = []\n\nrouter = {\"route\": apiroute, \"tags\": tags, \"dependencies\": dependencies}\n\nSECRET_KEY = \"JMAPI_SECRET\"\nALGORITHM = \"HS256\"\nACCESS_TOKEN_EXPIRE_MINUTES = 30\noauth2_scheme = 
OAuth2PasswordBearer(tokenUrl=\"token\")\n\n\ndef create_access_token(data: dict, expires_delta=None):\n from datetime import datetime, timedelta\n from jose import jwt\n to_encode = data.copy()\n if expires_delta:\n expire = datetime.utcnow() + expires_delta\n else:\n expire = datetime.utcnow() + timedelta(minutes=15)\n to_encode.update({\"exp\": expire})\n encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)\n return encoded_jwt\n\n\ndef get_user(users, filt: dict):\n users.load()\n users.upsert({\"usr_cde\": \"system\", \"password\": \"<PASSWORD>\"})\n df, idx = users.filter(filt)\n if df.empty:\n return None\n for d in users.to_dict(df):\n return d\n return None\n\n\ndef get_token_exp():\n from datetime import timedelta\n return timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n\n\n@apiroute.post(\"/token\", response_model=mod.Token)\nasync def get_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = get_user(users, {\"usr_cde\": form_data.username,\n \"password\": form_data.password})\n if user is None:\n raise lib.http_exception(\n status_code=401, msg=\"Incorrect username or password\")\n access_token = create_access_token(\n data={\"sub\": user.get(\"usr_cde\")}, expires_delta=get_token_exp()\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}\n\n\ndef token2user(token: str):\n from jose import jwt, JWTError\n try:\n payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n username: str = payload.get(\"sub\")\n except:\n return None\n return username\n\n\n@apiroute.post(\"/me\")\nasync def get_current_user(token: str = Depends(oauth2_scheme)):\n from jose import jwt, JWTError\n credentials_exception = lib.http_exception(\n status_code=401, msg=\"Could not validate credentials\")\n try:\n payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n username: str = payload.get(\"sub\")\n if username is None:\n raise credentials_exception\n token_data = mod.TokenData(username=username)\n except JWTError:\n raise credentials_exception\n user = get_user(users, {\"usr_cde\": token_data.username})\n if user is None:\n raise credentials_exception\n return {\"uid\": user.get(\"uid\"), \"usr_cde\": user.get(\"usr_cde\")}\n", "id": "228605", "language": "Python", "matching_score": 2.37044095993042, "max_stars_count": 0, "path": "jmapi/routes/auth.py" }, { "content": "from flask_restful import Resource\nfrom flask_jwt_extended import jwt_required\nimport gwcomm as comm\n\nlg = comm.logger(__name__)\n\n\nclass authapi(Resource):\n def post(self):\n \"\"\"Post Method - Login\n\n Request JSON:\n usr_cde {string} -- User Code\n password {string} -- Password\n\n Returns:\n dict -- {\"access_token\": {{Bearer token}}}\n int -- 200 - success, 401 - fail\n \"\"\"\n lg.info(\"Post Method\")\n from flask import request\n from flask_jwt_extended import create_access_token\n from flask_restplus import abort\n from lib.modlogin import modlogin\n import datetime\n body = request.get_json()\n if body == None:\n abort(401, msg=\"Invalid user or password\")\n owner = body.get(\"usr_cde\", None)\n owner = comm.sysconf.get(\n \"srv_id\", \"SYSTEM\") if owner == None else owner\n login = modlogin(owner=owner)\n chk, df = login.check(usr_cde=body.get(\n \"usr_cde\", None), password=body.get(\"password\", None))\n del login\n if not(chk):\n abort(401, msg=\"Invalid user or password\")\n exp = datetime.timedelta(hours=12)\n id = {k: v for k, v in df[0].items() if k in [\"usr_cde\", \"acl\"]}\n token = create_access_token(\n identity=id, 
expires_delta=exp)\n return {\"access_token\": token}, 200\n\n\nclass signapi(Resource):\n \"\"\"API Resource - Registration\n\n Methods:\n POST -- Register\n \"\"\"\n\n @jwt_required\n def post(self):\n \"\"\"Post Method - Register\n\n Returns:\n dict -- Message\n int -- 200 - success, 401 - fail\n \"\"\"\n from flask_restplus import abort\n from lib.token import decode_token\n from flask import request\n from lib.modlogin import modlogin\n token = request.headers.get(\"Authorization\")\n identity, token = decode_token(token)\n acl = -1\n if identity.get(\"acl\", 0) != acl:\n abort(401, msg=f\"ACL not enough - required {acl}\")\n body = request.get_json()\n owner = body.get(\"usr_cde\", None)\n owner = comm.sysconf.get(\n \"SRV_ID\", \"SYSTEM\") if owner == None else owner\n login = modlogin(owner=owner)\n rtn = login.reg(body)\n del login\n if not(rtn):\n abort(401, msg=\"Registration fail\")\n return {\"msg\": f\"Registered. Welcome {owner}\"}, 200\n", "id": "12270503", "language": "Python", "matching_score": 3.4263434410095215, "max_stars_count": 0, "path": "apiapp/resources/auth.py" }, { "content": "from flask_restful import Resource\nimport gwcomm as comm\n\nlg = comm.logger(__name__)\n\n\nclass modlogin():\n \"\"\"Login class\n\n Functions:\n reg -- Registration\n check -- Check login\n \"\"\"\n\n def __init__(self, owner=None):\n import gwpd as pdfx\n\n owner = comm.sysconf.get(\n \"srv_id\", \"SYSTEM\") if owner == None else owner\n self.__usr_pwd = pdfx.pdvw(\"usr_pwd\", path=\"./conf/data/auth\", owner=owner)\n if len(self.__usr_pwd.get()) == 0:\n lg.warning(\"No account\")\n pwd = comm.sysconf.get(\"srv_sct\", \"<PASSWORD>\")\n if owner != \"\" and pwd != \"\":\n lg.warning(\"Init super user for registration\")\n rtn = self.reg(\n {\"usr_cde\": owner, \"usr_name\": \"Super Account\", \"password\": <PASSWORD>, \"acl\": -1})\n if not(rtn):\n lg.error(\"Save fail - Super account\")\n\n def reg(self, data):\n \"\"\"Registration function\n\n Arguments:\n data {dict} -- {\"usr_cde\", \"usr_name\", \"password\"}\n\n Returns:\n bool -- True - success, False - fail\n \"\"\"\n from datetime import datetime\n obj = self.__usr_pwd\n rtn, _ = obj.insert(data)\n if not(rtn):\n lg.error(\"insert fail\")\n return rtn\n rtn, _ = obj.save()\n return rtn\n\n def check(self, usr_cde, password):\n \"\"\"Check login\n\n Arguments:\n usr_cde {string} -- User Code\n password {string} -- Password\n\n Returns:\n bool -- True - success, False - fail\n list -- DataFrame\n \"\"\"\n rtn = self.__usr_pwd.get(\n filter={\"usr_cde\": usr_cde, \"password\": password})\n return False if rtn == [] else True, rtn\n", "id": "8282776", "language": "Python", "matching_score": 1.8198158740997314, "max_stars_count": 0, "path": "apiapp/lib/modlogin.py" }, { "content": "from .pdtb import *\n\n\nclass pdvw(pdtb):\n \"\"\"Pandas view\n\n Arguments:\n model {string} -- Model\n\n Keyword Arguments:\n path {string} -- Model config path (default: {None})\n owner {string} -- Record owner (default: {None})\n security {int} -- Security type (default: {0})\n 0 -- None\n 1 -- Public\n 2 -- Protected\n 3 -- Private\n 9 -- Auth\n \"\"\"\n\n def __join_df(self, filter={}, dfs=[], applyacl=True):\n \"\"\"Join multi df\n\n Returns:\n pd.DataFrame -- dataframe\n \"\"\"\n import pandas as pd\n if self._src == []:\n return None\n df = None\n if filter != {}:\n dfs = []\n if len(dfs) != len(self._src):\n dfs = []\n srclst = self._src\n for i in range(len(self._src)):\n s = self._src[i]\n o = s.get(\"obj\")\n name = s.get(\"name\")\n j = 
[c.get(\"name\") for c in s.get(\"join\")]\n if dfs != []:\n tdf = dfs[i]\n else:\n tdf = o._get_df()\n self._lg.debug(f\"filter: {filter}\")\n tdf = o.filter(df=tdf, filter=filter)\n\n if applyacl:\n tdf = o._apply_acl(df=tdf)\n\n if isinstance(df, pd.DataFrame):\n tdf = tdf[[c.get(\"name\") for c in o._cols()]]\n df = pd.merge(df, tdf, on=j, how=\"left\")\n else:\n df = tdf\n return df\n\n def save(self):\n rtn = True\n for s in self._src:\n o = s.get(\"obj\")\n r, _ = o.save()\n rtn = rtn and r\n if not(rtn):\n return rtn\n return True, self._get_df()\n\n def _load(self):\n \"\"\"Load - redirect to join dataframe\n\n Returns:\n pd.DataFrame -- dataframe\n \"\"\"\n rtn = self.__join_df()\n return rtn\n\n def get(self, df=None, filter={}, key=None, applyacl=None, allcols=False):\n \"\"\"Get data in list of dict\n\n Keyword Arguments:\n filter {dict} -- filter (default: {{}})\n\n Returns:\n list -- list of data in {dict}\n \"\"\"\n import pandas as pd\n applyacl = self._req_security(act=0, applyacl=applyacl)\n if key != None and filter == {}:\n filter = {c: key for c in self._get_cols(\"iskey\")}\n # self._lg.debug(f\"filter: {filter}\")\n if isinstance(df, pd.DataFrame):\n df = df\n else:\n df = self.__join_df(filter=filter, applyacl=applyacl)\n df = self.filter(df=df, filter=filter)\n if not(allcols):\n df = df[[c.get(\"name\") for c in self._cols()]]\n return df.to_dict(\"records\")\n\n def upsert(self, data):\n df = self.__join_df()\n if self._isexists(data=data, df=df, applyacl=False):\n rtn, df = self.update(data)\n else:\n rtn, df = self.insert(data)\n return rtn, df\n\n def update(self, data):\n rtn = True\n xdata = {}\n for s in self._src:\n o = s.get(\"obj\")\n name = s.get(\"name\")\n for c in s.get(\"join\", []):\n v_dt = data.get(c.get(\"name\"), None)\n v = xdata.get(c.get(\"name\"), None)\n if v_dt == None:\n if v != None:\n data[c.get(\"name\")] = v\n tmp = o.get_first(\n filter=data, applyacl=False, allcols=True)\n self._lg.debug(f\"update - {name} - {tmp} = {data}\")\n rtn = rtn and o.update(data=data)\n xdata = self._concat_dict(xdata, o.get_first(\n filter=data, applyacl=False, allcols=True))\n if not(rtn):\n return rtn, None\n self._df = self.__join_df()\n return True, self._df\n\n def insert(self, data):\n rtn = True\n xdata = {}\n for s in self._src:\n o = s.get(\"obj\")\n name = s.get(\"name\")\n for c in s.get(\"join\", []):\n if data.get(c.get(\"name\"), None) == None:\n v = xdata.get(c.get(\"name\"), None)\n if v != None:\n data[c.get(\"name\")] = v\n self._lg.debug(f\"insert - {name} - {xdata} = {data}\")\n rtn = rtn and o.insert(data=data)\n xdata = self._concat_dict(xdata, o.get_first(\n filter=data, applyacl=False, allcols=True))\n if not(rtn):\n return rtn, None\n self._df = self.__join_df()\n return True, self._df\n\n def __init__(self, model, path=None, owner=None, security=0):\n try:\n super().__init__(model=model, path=path, owner=owner, security=security)\n except:\n pass\n self._lg = comm.logger(f\"pdvw({model})\")\n self._lg.info(\"init\")\n self._src = []\n for s in self._conf.get(\"source\", []):\n spath = path if s.get(\"security\", None) == None else None\n mod = s.get(\"conf\")\n sec = s.get(\"security\")\n obj = pdtb(model=mod, path=spath, owner=owner,\n security=sec)\n if obj.acl != None and self.acl == None:\n self.acl = obj.acl\n elif obj.acl != None and self.acl != None:\n self.acl = max(obj.acl, self.acl)\n else:\n self.acl = 0\n self._src.append({\"obj\": obj, \"name\": s.get(\n \"name\"), \"join\": s.get(\"join\", [])})\n if self._src == 
[]:\n raise\n try:\n df = self._load()\n except Exception as e:\n self._lg.error(f\"{e}\")\n df = None\n pass\n import pandas as pd\n if not(isinstance(df, pd.DataFrame)):\n raise\n self._df = df\n", "id": "3406942", "language": "Python", "matching_score": 5.496730327606201, "max_stars_count": 0, "path": "gwpd/pdvw.py" }, { "content": "import gwcomm as comm\n\n\nclass pdtb():\n \"\"\"Pandas table\n\n Arguments:\n model {string} -- Model\n\n Keyword Arguments:\n path {string} -- Model config path (default: {None})\n owner {string} -- Record owner (default: {None})\n security {int} -- Security type (default: {0})\n 0 -- None\n 1 -- Public\n 2 -- Protected\n 3 -- Private\n 9 -- Auth\n\n Functions:\n save -- Save dataframe\n\n Method:\n secpath -- Security path\n \"\"\"\n secpath = {0: \"\", 1: \"public\", 2: \"protected\", 3: \"private\", 9: \"auth\"}\n _df = None\n\n def _join_path(self, *args):\n \"\"\"Join path\n\n Arguments:\n args {tuple} -- Path\n\n Returns:\n string -- concate path\n \"\"\"\n import os\n path = args[0]\n for i in range(len(args)-1):\n if args[i+1] != \"\":\n path = os.path.join(path, args[i+1])\n return path\n\n def _load_conf(self, file):\n \"\"\"Load config file\n\n Arguments:\n file {string} -- config file\n\n Returns:\n dict -- config content\n \"\"\"\n import json\n with open(file, 'r') as f:\n conf = json.load(f)\n return conf\n\n def _get_cols(self, tag=None):\n \"\"\"Get columns\n\n Keyword Arguments:\n tag {string} -- Column flag (default: {None})\n\n Returns:\n list -- column list\n \"\"\"\n cols = self._conf.get(\"cols\", []) if self._conf.get(\n \"sink\", {}) == {} else self._conf.get(\"sink\", {}).get(\"cols\", [])\n if tag == None:\n rtn = [c.get(\"name\") for c in cols]\n else:\n rtn = [c.get(\"name\")\n for c in cols if c.get(tag, False)]\n return rtn\n\n def save(self):\n \"\"\"Save dataframe\n\n Returns:\n bool -- True - success, False - fail\n pd.DataFrame -- Full dataframe\n \"\"\"\n import pandas as pd\n path = self._conf.get(\"path\")\n if isinstance(self._df, pd.DataFrame):\n df = self._df\n else:\n cols = self._get_cols()\n cols += [c.get(\"name\") for c in [m for m in self._meta]]\n df = pd.DataFrame(columns=cols)\n df.to_parquet(path=path, compression='gzip')\n self._lg.info(\"df saved\")\n return True, df\n\n def _load(self):\n \"\"\"Load dataframe\n\n Returns:\n pd.DataFrame -- Full dataframe\n \"\"\"\n import pandas as pd\n import os\n path = self._conf.get(\"path\")\n if os.path.isfile(path):\n df = pd.read_parquet(path=path)\n else:\n _, df = self.save()\n self._lg.info(f\"df loaded - {self._name}\")\n return df\n\n def __remove_col(self, data, tag=\"iskey\"):\n \"\"\"Remove columns\n\n Arguments:\n data {dict} -- data\n\n Keyword Arguments:\n tag {string} -- tag (default: {\"iskey\"})\n\n Returns:\n dict -- data\n \"\"\"\n cols = self._get_cols(tag)\n rtn = {k: v for k, v in data.items() if k not in cols}\n return rtn\n\n def __split_data_key(self, data):\n \"\"\"Split data into data and key for update\n\n Arguments:\n data {dict} -- data\n\n Returns:\n dict -- data\n dict -- key\n \"\"\"\n key = {k: v for k, v in data.items() if k in self._get_cols(tag=\"iskey\")}\n data = self.__remove_col(data, tag=\"iskey\")\n data = self.__remove_col(data, tag=\"ignupd\")\n return data, key\n\n def _cols(self):\n return self._conf.get(\"sink\", self._conf).get(\"cols\", [])\n\n def __add_meta(self, data, isupd=False):\n \"\"\"Add metadata to data\n\n Arguments:\n data {dict} -- data\n\n Keyword Arguments:\n isupd {bool} -- is update? 
(default: {False})\n\n Returns:\n dict -- data\n \"\"\"\n conf = [{k: v for k, v in m.items()}\n for m in self._meta\n if m.get(\"ignupd\", False) == False or isupd == False]\n if conf == []:\n return data\n for k, v in self.__default_col(data=data, conf=conf, gen=True).items():\n data[k] = v\n return data\n\n def __default_col(self, data, conf=None, gen=False):\n \"\"\"Set default value to columns\n\n Arguments:\n data {dict} -- data\n\n Keyword Arguments:\n conf {string} -- column config (default: {None})\n gen {bool} -- apply auto gen (default: {False})\n\n Returns:\n dict -- data\n \"\"\"\n def genrun(conf):\n pfx = conf.get(\"pfx\", \"\")\n ln = conf.get(\"len\", 1)\n try:\n tdt = self.get(applyacl=False)\n cur = len(tdt)+1\n except:\n cur = 1\n run = str(cur).zfill(ln)\n return f\"{pfx}{run}\"\n\n def genuuid():\n import uuid\n return uuid.uuid4()\n\n from datetime import datetime\n conf = self._conf.get(\"cols\", []) if conf == None else conf\n now = datetime.now()\n rtn = {}\n for c in conf:\n v = data.get(c.get(\"name\"), None)\n if v == None:\n if c.get(\"genuuid\", False):\n v = genuuid()\n elif c.get(\"genrun\", False):\n v = genrun(c.get(\"genrun_dtl\", {}))\n elif c.get(\"gendtm\", False):\n v = now\n elif c.get(\"isowner\", False):\n v = self._owner\n else:\n v = c.get(\"default\", 0 if c.get(\n \"type\", \"str\") == \"int\" else \"\")\n if c.get(\"type\", \"str\") == \"int\":\n v = int(v)\n elif c.get(\"type\", \"str\") == \"uuid\":\n v = str(v)\n elif c.get(\"type\", \"str\") == \"str\":\n v = str(v)\n rtn[c.get(\"name\")] = v\n return rtn\n\n def __valid(self, data, chktag=[\"required\"]):\n \"\"\"Validate columns value\n\n Arguments:\n data {dict} -- data\n\n Keyword Arguments:\n chktag {list} -- check tag (default: {[\"required\"]})\n \"\"\"\n def __chkcol(cols, tag):\n for c in self._get_cols(tag=tag):\n if c not in cols:\n self._lg.warning(f\"Missing {tag} - {c}\")\n return False\n return True\n cols = [k for k, v in data.items()]\n rtn = True\n for c in chktag:\n rtn = rtn and __chkcol(cols, c)\n return rtn\n\n def _concat_dict(self, fdt, ldt):\n \"\"\"Concat dict\n\n Arguments:\n fdt {dict} -- 1st dict\n ldt {dict} -- 2nd dict\n\n Returns:\n dict -- data\n \"\"\"\n for k, v in ldt.items():\n if k not in [k for k, v in fdt.items()]:\n fdt[k] = v\n return fdt\n\n def _get_df(self, df=None):\n \"\"\"get default df\n\n Keyword Arguments:\n df {pd.DataFrame} -- dataframe (default: {None})\n applyacl {bool} -- apply acl (default: {True})\n\n Returns:\n pd.DataFrame -- dataframe\n \"\"\"\n import pandas as pd\n if not(isinstance(df, pd.DataFrame)):\n df = self._df\n return df\n\n def _apply_acl(self, df=None):\n df = self._get_df(df)\n filter = {\"creby\": self._owner}\n return self.filter(df=df, filter=filter)\n\n def filter(self, df, filter):\n \"\"\"Filter dataframe\n\n Keyword Arguments:\n filter {dict} -- filter (default: {{}})\n df {pd.DataFrame} -- dataframe (default: {None})\n\n Returns:\n pd.DataFrame -- dataframe\n \"\"\"\n df = self._get_df(df=df)\n try:\n for k, v in filter.items():\n df = df[df[k] == v]\n except:\n pass\n return df\n\n def _isexists(self, data, df=None, applyacl=None):\n \"\"\"check data is exists\n\n Arguments:\n data {dict} -- data\n\n Keyword Arguments:\n df {pd.DataFrame} -- dataframe (default: {None})\n\n Returns:\n bool -- is exists\n \"\"\"\n applyacl = self._req_security(act=0, applyacl=applyacl)\n df = self._get_df(df=df)\n if applyacl:\n df = self._apply_acl(df=df)\n key = {c: data.get(c, None) for c in 
self._get_cols(tag=\"iskey\")}\n df = self.filter(filter=key, df=df)\n if df.empty:\n return False\n return True\n\n def _req_security(self, act, applyacl=None):\n \"\"\"Require security\n\n Arguments:\n act {int} -- 0 - read, 1 - update\n\n Returns:\n bool -- True - require, False - ignore\n \"\"\"\n if applyacl != None:\n return applyacl\n sec = self._security\n if sec == 2:\n if act == 1:\n return True\n elif sec == 3:\n return True\n return False\n\n def insert(self, data):\n \"\"\"Insert records\n\n Arguments:\n data {dict} -- data\n\n Returns:\n bool -- True - success, False - fail\n pd.DataFrame -- data\n \"\"\"\n df = self._get_df()\n data = self.__default_col(data=data, gen=True)\n data = self.__add_meta(data=data, isupd=False)\n rtn = self.__valid(data, chktag=[\"required\"])\n if not(rtn):\n return False, None\n if self._isexists(data=data, df=df, applyacl=False):\n self._lg.error(\"record exists\")\n return False, None\n self._lg.debug(f\"insert - {self._name} - {data}\")\n df.loc[-1] = data\n df.index = df.index + 1\n df = df.sort_index()\n self._df = df\n return True, df\n\n def update(self, data):\n \"\"\"Update\n\n Arguments:\n data {dict} -- data\n\n Returns:\n bool -- True - success, False - fail\n \"\"\"\n import pandas as pd\n df = self._get_df()\n data = self.__default_col(data=data, gen=False)\n data = self.__add_meta(data=data, isupd=True)\n rtn = self.__valid(data, chktag=[\"required\"])\n if not(rtn):\n return False, None\n if not(self._isexists(data=data, df=df, applyacl=False)):\n self._lg.error(\"record not exists\")\n return False, None\n if self._req_security(act=1, applyacl=None):\n df = self._apply_acl(df=df)\n if not(self._isexists(data=data, df=df)):\n self._lg.error(\"apply sec - record not exists\")\n return False, None\n data, key = self.__split_data_key(data)\n df_filt = self.filter(filter=key, df=df)\n idx = df_filt.index.tolist()\n self._lg.debug(data)\n df.update(pd.DataFrame(data, index=idx))\n return True, df\n\n def upsert(self, data):\n df = self._get_df()\n if self._isexists(data=data, df=df):\n rtn, df = self.update(data)\n else:\n rtn, df = self.insert(data)\n return rtn, df\n\n def get(self, df=None, filter={}, key=None, applyacl=None, allcols=False):\n \"\"\"Get data in list of dict\n\n Keyword Arguments:\n filter {dict} -- filter (default: {{}})\n\n Returns:\n list -- list of data in {dict}\n \"\"\"\n applyacl = self._req_security(act=0, applyacl=applyacl)\n df = self._get_df(df=df)\n if key != None and filter == {}:\n filter = {c: key for c in self._get_cols(\"iskey\")}\n if filter != {}:\n df = self.filter(df=df, filter=filter)\n if applyacl:\n df = self._apply_acl(df=df)\n if not(allcols):\n df = df[[c.get(\"name\") for c in self._cols()]]\n return df.to_dict(\"records\")\n\n def get_first(self, filter={}, key=None, applyacl=None, allcols=False):\n \"\"\"Get 1st data in dict\n\n Keyword Arguments:\n filter {dict} -- filter (default: {{}})\n\n Returns:\n dict -- data in {dict}\n \"\"\"\n df = self.get(filter=filter, key=key,\n applyacl=applyacl, allcols=allcols)\n if df == []:\n return {}\n return df[0]\n\n def __init__(self, model, path=None, owner=None, security=0):\n self._lg = comm.logger(f\"pdtb({model})\")\n path = \"./conf/data\" if path == None else path\n conffile = self._join_path(path, self.secpath[security], model+\".json\")\n self._conf = self._load_conf(file=conffile)\n self._name = self._conf.get(\"name\")\n self._lg.info(f\"init - {self._name}\")\n self._security = security\n self._owner = owner\n self.acl = 
self._conf.get(\"acl\", 0)\n self._meta = [{\"name\": \"creby\", \"type\": \"str\",\n \"isowner\": True, \"ignupd\": True},\n {\"name\": \"credtm\", \"type\": \"datetime\",\n \"gendtm\": True, \"ignupd\": True},\n {\"name\": \"updby\", \"type\": \"str\",\n \"isowner\": True},\n {\"name\": \"upddtm\", \"type\": \"datetime\",\n \"gendtm\": True}]\n if self._owner == None:\n self.acl = 0\n self._meta = []\n df = self._load()\n import pandas as pd\n if not(isinstance(df, pd.DataFrame)):\n raise\n self._df = df\n", "id": "8160908", "language": "Python", "matching_score": 2.2733840942382812, "max_stars_count": 0, "path": "gwpd/pdtb.py" }, { "content": "from abc import ABC, abstractmethod\n\nfrom jmapp import lib\n\n\nclass form_mod(ABC):\n data = []\n\n def __init__(self, id, action, method, cols, btns):\n self.id = f\"form-{id}\"\n self.action = action\n self.method = method\n self.cols = cols\n self.btns = btns\n\n def __str__(self):\n return str(self.data)\n\n\nclass table_mod(ABC):\n data = []\n\n def __init__(self, id, cols):\n self.id = f\"tbl-{id}\"\n self.cols = cols\n\n def __str__(self):\n return str(self.data)\n\n\nclass model_template(ABC):\n\n def __init__(self, data: list = None):\n self.data = []\n self.form_cols_attr = [\"type\", \"name\", \"label\",\n \"val\", \"placeholder\", \"readonly\", \"opt\", \"maxval\", \"onchange\"]\n self.table_cols_attr = [\"type\", \"name\",\n \"label\", \"val\", \"url\", \"act\", \"isown\", \"disable\", \"width\", \"maxval\", \"visable\"]\n\n self.set_id()\n self.set_action()\n self.set_method()\n self.set_owner_col()\n self.set_type()\n self.set_cols()\n self.set_btns()\n self.check_owner = [c.get(\"name\")\n for c in self.cols if c.get(\"check_owner\", False)]\n self.not_owner = [c.get(\"name\")\n for c in self.cols if c.get(\"not_owner\", False)]\n if isinstance(data, type(None)):\n data = []\n self.add(data)\n self.table_temp = table_mod(id=self.id, cols=self.cols)\n self.init_form()\n self.init_table()\n\n def init_form(self):\n self.form_temp = None\n self.form_temp = form_mod(\n id=self.id, action=self.action, method=self.method, cols=self.cols, btns=self.btns)\n\n def init_table(self):\n self.table_temp = None\n self.table_temp = table_mod(id=self.id, cols=self.cols)\n\n def set_id(self):\n self.id = self.__class__.__name__\n pass\n\n def set_type(self):\n self.type = self.__class__.__name__\n pass\n\n def set_action(self):\n self.action = \"#\"\n pass\n\n def set_owner_col(self):\n self.owner_col = \"upd_by\"\n pass\n\n def set_method(self):\n self.method = \"POST\"\n pass\n\n def set_cols(self):\n self.cols = []\n pass\n\n def get_opts(self):\n import copy\n rtn = []\n cols = copy.deepcopy(self.cols)\n for c in cols:\n if c.get(\"type\") in [\"moption\"]:\n opt = c.get(\"opt\", [])\n for d in self.data:\n for k, v in d.items():\n if k == c.get(\"name\"):\n topts = v.split(\";\")\n opt += topts\n opt = list(set(opt))\n rtn.append({\"name\": c.get(\"name\"), \"opts\": opt})\n return rtn\n\n def set_btns(self):\n self.btns = [{\"type\": \"submit\", \"name\": \"submit\", \"label\": \"Submit\", \"readonly\": False},\n {\"type\": \"reset\", \"name\": \"reset\",\n \"label\": \"Reset\", \"readonly\": False},\n {\"type\": \"button\", \"name\": \"back\", \"label\": \"Back\", \"act\": \"window.history.back();\", \"always\": True}]\n pass\n\n def __str__(self):\n return str(self.data)\n\n def table(self, filt: dict = {}, owner: str = None, join: list = [], title: str = None):\n def nest_repl(data, val):\n import re\n for k, v in data.items():\n if 
isinstance(val, int):\n val = str(val)\n if isinstance(v, int):\n v = str(v)\n # print(k, v, val, type(v), type(val))\n if isinstance(v, str) and isinstance(val, str):\n val = val.replace(\"{{%s}}\" % (k), v)\n if isinstance(val, str):\n val = re.sub(\"{{(.*)}}\", \"\", val)\n return val\n\n def join_dt(cols, data, join):\n if join == ():\n return (cols, data)\n key = [c.get(\"name\") for c in cols if c.get(\"key\", False)]\n tmp = []\n for d in data:\n for k in key:\n for j in join[1]:\n jcols = [{\"type\": \"hidden\", \"name\": k}\n for k, v in j.items()]\n if d.get(k) == j.get(k):\n d.update(j)\n break\n tmp.append(d)\n return (join[0], tmp)\n\n import copy\n self.init_table()\n data, _ = self.find(filt=filt)\n dcols = self.cols\n\n jcols = []\n for j in join:\n tcols, data = join_dt(dcols, data, j)\n jcols += tcols\n cols = dcols + jcols\n ndata = []\n jcolval = {j.get(\"name\"): j.get(\"val\") for j in jcols if j.get(\n \"val\", None) not in (\"\", None)}\n cnt = 0\n for d in data:\n ntbl_dt = []\n for cseq in cols:\n if cseq.get(\"tbl_hide\", False):\n continue\n ndt = copy.deepcopy(cseq)\n v = d.get(cseq.get(\"name\"), cseq.get(\"val\", None))\n if v != None:\n ndt.update({\"val\": v})\n # print(cseq.get(\"name\"), v)\n if cseq.get(\"name\") in self.check_owner:\n isown = d.get(self.owner_col, None) == owner\n ndt.update({\"isown\": isown})\n if cseq.get(\"name\") in self.not_owner:\n notown = d.get(self.owner_col, None) != owner\n ndt.update({\"notown\": notown})\n if cseq.get(\"visable\", None) != None:\n nd = {\"owner\": owner}\n nd.update(d)\n nd.update(jcolval)\n nv = nest_repl(nd, cseq.get(\"visable\"))\n # print(cseq.get(\"name\"), nv)\n nv = eval(nv)\n ndt.update({\"visable\": nv})\n if cseq.get(\"disable\", None) != None:\n nd = {\"owner\": owner}\n nd.update(d)\n nv = nest_repl(nd, cseq.get(\"disable\"))\n nv = eval(nv)\n ndt.update({\"disable\": nv})\n if cseq.get(\"name\") in [d.get(\"name\") for d in dcols]:\n ndt = {nk: nest_repl(d, nv) for nk, nv in ndt.items()}\n if ndt.get(\"name\") not in [d.get(\"name\") for d in ntbl_dt]:\n ntbl_dt.append(ndt)\n ndata.append(ntbl_dt)\n cnt += 1\n self.table_temp.data = ndata\n cols = []\n for c in dcols:\n if c.get(\"tbl_hide\", False):\n continue\n cols.append({k: v for k, v in c.items()\n if k in self.table_cols_attr})\n self.table_temp.cols = cols\n if title != None:\n self.table_temp.title = title\n return self.table_temp\n\n def form(self, filt: dict = {}):\n self.init_form()\n dt, _ = self.find(filt=filt)\n data = {}\n for d in dt:\n data = d\n break\n if filt == {}:\n data = {}\n if data == {} and filt != {}:\n data.update(filt)\n cols = []\n tcol = self.cols.copy()\n for c in tcol:\n c.update({\"val\": \"\"})\n if data != {}:\n c.update({\"val\": v for k, v in data.items()\n if k == c.get(\"name\")})\n if c.get(\"uuid\", False):\n c.update({\"readonly\": True})\n elif c.get(\"genrun\", None) != None:\n c.update({\"readonly\": True})\n opt = c.get(\"opt\", [])\n if opt != []:\n c.update({\"opt\": list(set(opt))})\n cols.append({k: v for k, v in c.items()\n if k in self.form_cols_attr})\n self.form_temp.cols = cols\n return self.form_temp\n\n def add(self, record):\n if not(isinstance(record, list)):\n record = [record]\n for r in record:\n rtn = lib.post_data(type=self.type, data=r)\n if not(rtn):\n return []\n return record\n\n def refresh(self, username: str = None, password: str = None):\n if username != None and password != None:\n self.data = lib.get_data(\n type=self.type, username=username, password=password)\n 
else:\n self.data = lib.get_data(type=self.type)\n\n def set_md5(self, val):\n import hashlib\n return hashlib.md5(val.encode()).hexdigest()\n\n def find(self, filt, user: dict = {}):\n import copy\n if dict == {}:\n self.refresh()\n else:\n self.refresh(username=user.get(\"username\"),\n password=user.get(\"password\"))\n if not(isinstance(filt, list)):\n filt = [filt]\n rtn_data = []\n for f in filt:\n data = copy.deepcopy(self.data)\n for k, v in f.items():\n col = [c for c in self.cols if c.get(\"name\") == k]\n if col != []:\n if col[0].get(\"md5\", False):\n v = self.set_md5(v)\n data = [d for d in data if d.get(k, None) == v]\n rtn_data += [d for d in data if d not in rtn_data]\n idx = [self.data.index(d) for d in rtn_data]\n return data, idx\n\n def rm(self, record):\n if not(isinstance(record, list)):\n record = [record]\n for r in record:\n _, idx = self.find(r)\n data = [d for i, d in enumerate(self.data) if i not in idx]\n self.data = data\n return self.data\n\n def cnt(self):\n return len(self.data)\n\n def autogen(self, data):\n if not(isinstance(data, list)):\n data = [data]\n rtn = []\n for d in data:\n for c in self.cols:\n if d.get(c.get(\"name\"), None) == None or d.get(c.get(\"name\"), \"\") == \"\":\n if c.get(\"uuid\", False):\n d[c.get(\"name\")] = self.uuid()\n elif c.get(\"genrun\", None) != None:\n d[c.get(\"name\")] = self.genrun(mask=c.get(\"genrun\"))\n rtn.append(d)\n return rtn\n\n def uuid(self):\n from uuid import uuid4\n return str(uuid4())\n\n def genrun(self, mask):\n try:\n cnt = self.cnt()+1\n nmask = mask.split(\"!!\")\n genpat = nmask[1].split(\"-\")\n if genpat[0] == \"cnt\":\n runnum = str(cnt).zfill(int(genpat[1]))\n rtn = \"{}{}{}\".format(nmask[0], runnum, nmask[2])\n else:\n rtn = mask\n except:\n rtn = mask\n return rtn\n", "id": "9108189", "language": "Python", "matching_score": 2.728602170944214, "max_stars_count": 0, "path": "model/_template.py" }, { "content": "from ._template import *\n\n# tbl = {\"cols\": [{\"name\": \"txt\", \"control\": \"input\", \"sort\": \"true\", \"label\": \"text\"}],\n# \"data\": [[{\"type\": \"link\", \"url\": \"./\", \"val\": \"r1\"}],\n# [{\"type\": \"button\", \"act\": \"location.href='{}';\".format(\n# \"./\"), \"val\": \"r3\"}],\n# [{\"type\": \"checkbox\", \"val\": True}],\n# [{\"val\": \"r2\"}]],\n# \"id\": \"tbl_a\"}\n\n\nclass job_mod(model_template):\n\n def set_type(self):\n self.type = \"jobs\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"hidden\", \"name\": \"jid\", \"label\": \"ID\", \"val\": \"\", \"uuid\": True, \"width\": \"0%\"},\n {\"type\": \"link\", \"name\": \"job_cde\", \"label\": \"Code\", \"val\": \"\", \"width\": \"5%\",\n \"url\": \"/job/view?jid={{jid}}\", \"genrun\": \"JD!!cnt-10!!\", \"key\": True},\n {\"type\": \"text\", \"name\": \"company\", \"width\": \"5%\",\n \"label\": \"Company\", \"val\": \"\", \"tbl_hide\": True},\n {\"type\": \"text\", \"name\": \"title\", \"width\": \"50\",\n \"label\": \"Title\", \"val\": \"\"},\n {\"type\": \"textarea\", \"name\": \"scope\", \"width\": \"5%\",\n \"label\": \"Scope\", \"val\": \"\", \"tbl_hide\": True},\n {\"type\": \"textarea\", \"name\": \"requirement\", \"width\": \"5%\",\n \"label\": \"Requirement\", \"val\": \"\", \"tbl_hide\": True, \"onchange\": \"get_job_cat(this, 'form-{}-{}');\".format(self.id, \"job_cat\")},\n {\"type\": \"text\", \"name\": \"experience\", \"width\": \"5%\",\n \"label\": \"Experience (Year)\", \"val\": \"\"},\n {\"type\": \"text\", \"name\": \"amount\", \"width\": \"5%\",\n \"label\": \"Amount\", 
\"val\": \"\"},\n {\"type\": \"text\", \"name\": \"period\", \"width\": \"5%\",\n \"label\": \"Period\", \"val\": \"\"},\n {\"type\": \"moption\", \"name\": \"job_cat\", \"label\": \"Cat\", \"width\": \"5%\",\n \"val\": \"\", \"opt\": [\"SW\", \"HW\"]},\n {\"type\": \"button\", \"name\": \"edit\", \"label\": \"Edit\", \"val\": \"\", \"disable\": \"'{{appcnt}}'!='0'\",\n \"visable\": \"'{{cre_by}}'=='{{owner}}'\", \"width\": \"5%\", \"url\": \"/job/view?edit=1&jid={{jid}}\"},\n {\"type\": \"button\", \"name\": \"apply\", \"label\": \"Apply\", \"val\": \"\", \"disable\": \"'{{aid}}'!=''\",\n \"visable\": \"'{{cre_by}}'!='{{owner}}' and '{{owner}}'!=''\", \"width\": \"5%\", \"url\": \"/job/apply?job_cde={{job_cde}}\"},\n {\"type\": \"hidden\", \"name\": \"cre_by\",\n \"label\": \"cre_by\", \"val\": \"\", \"width\": \"0%\"}]\n return\n", "id": "11544465", "language": "Python", "matching_score": 3.7717437744140625, "max_stars_count": 0, "path": "model/job.py" }, { "content": "from ._template import *\n\n\nclass apply_mod(model_template):\n\n def set_type(self):\n self.type = \"applies\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"hidden\", \"name\": \"aid\", \"label\": \"ID\", \"val\": \"\", \"uuid\": True, \"key\": True},\n {\"type\": \"hidden\", \"name\": \"job_cde\",\n \"label\": \"Job\", \"val\": \"\"},\n {\"type\": \"link\", \"name\": \"cre_by\",\n \"label\": \"Applier\", \"val\": \"\", \"url\": \"/profile?usr_cde={{cre_by}}\"}]\n for i in range(5):\n self.cols.append({\"type\": \"button\", \"name\": f\"rate{i+1}\", \"label\": f\"{i+1}\", \"val\": \"\", \"disable\": \"'{{rate}}'!=''\",\n \"visable\": \"'{{cre_by}}'!='{{owner}}' and '{{oid}}'!=''\", \"width\": \"5\", \"url\": \"./offer?job_cde={{job_cde}}&aid={{aid}}&rate=%s\" % (f\"{i+1}\")})\n self.cols.append({\"type\": \"button\", \"name\": \"offer\", \"label\": \"Offer\", \"val\": \"\", \"disable\": \"'{{oid}}'!=''\",\n \"visable\": \"'{{cre_by}}'!='{{owner}}' and '{{offcnt}}'!='1'\", \"width\": \"80\", \"url\": \"./offer?job_cde={{job_cde}}&aid={{aid}}\"})\n return\n", "id": "8560203", "language": "Python", "matching_score": 4.399857521057129, "max_stars_count": 0, "path": "model/apply.py" }, { "content": "from ._template import *\n\n\nclass offer_mod(model_template):\n\n def set_type(self):\n self.type = \"offers\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"hidden\", \"name\": \"oid\", \"label\": \"ID\", \"val\": \"\", \"uuid\": True},\n {\"type\": \"hidden\", \"name\": \"job_cde\",\n \"label\": \"Job\", \"val\": \"\"},\n {\"type\": \"hidden\", \"name\": \"aid\",\n \"label\": \"Apply\", \"val\": \"\"},\n {\"type\": \"star\", \"name\": \"rate\",\n \"label\": \"Rate\", \"val\": \"\", \"maxval\": \"5\"},\n {\"type\": \"hidden\", \"name\": \"cre_by\",\n \"label\": \"\", \"val\": \"\", \"url\": \"/profile?usr_cde={{cre_by}}\"}\n # ,\n # {\"type\": \"button\", \"name\": \"offer\", \"label\": \"\", \"val\": \"\", \"disable\": \"{{oid}}\",\n # \"not_owner\": True, \"width\": \"80\", \"url\": \"./offer?job_cde={{job_cde}}\"}\n ]\n return\n", "id": "911540", "language": "Python", "matching_score": 2.737135171890259, "max_stars_count": 0, "path": "model/offer.py" }, { "content": "import os\nfrom flask import Blueprint\nfrom jmapp.lib.auth import jwt_required\n\nfrom model import apply_mod, offer_mod, profile_mod, apply_pro_mod\n\nfname = os.path.basename(__file__).split(\".\")[0]\nprofile = Blueprint(fname, __name__)\n\nprofile_m = profile_mod()\napply_m = apply_mod()\noffer_m = offer_mod()\napply_pro_m = 
apply_pro_mod()\n\n\n@profile.route(\"/\", methods=[\"GET\"])\n@jwt_required\ndef profile_get():\n from flask import render_template, session, request\n from flask import render_template, request\n paralst = (\"msg\", \"usr_cde\")\n para = {p: request.args.get(p, None) for p in paralst}\n filt = {\"usr_cde\": para.get(\"usr_cde\", None)} if para.get(\n \"usr_cde\", None) != None else {}\n owner = session[\"usr_cde\"]\n usr_cde = para.get(\"usr_cde\", None)\n form = profile_m.form(filt={\"usr_cde\": usr_cde})\n form.readonly = owner != usr_cde\n app_tbl = apply_m.table(filt={\"cre_by\": usr_cde}, owner=owner)\n dt = []\n rate = []\n rate_temp = []\n tmp_off_dt=[]\n for r in app_tbl.data:\n for a in r:\n if a.get(\"name\") == \"aid\":\n aid = a.get(\"val\")\n off_form = offer_m.form(filt={\"aid\": aid})\n rate_temp = [\n c for c in off_form.cols if c.get(\"name\") == \"rate\"]\n tmp_off_r={c.get(\"name\"): c.get(\"val\") for c in off_form.cols if c.get(\"name\") in (\"job_cde\", \"rate\")}\n if tmp_off_r != {}:\n tmp_off_dt.append(tmp_off_r)\n for t in [c.get(\"val\") for c in off_form.cols if c.get(\"name\") == \"rate\" if c.get(\"val\", None) not in (\"\", None)]:\n rate.append(t)\n break\n if rate != []:\n trate = 0\n for r in rate:\n trate += int(r)\n for t in rate_temp:\n t.update({\"val\": trate/len(rate)})\n form.cols.append(t)\n break\n obj = [{\"type\": \"form\", \"obj\": form}]\n\n cols = [\"job_cde\", \"rate\"]\n off_col = [c for c in offer_m.cols if c.get(\"name\") in cols]\n off_dt = [{k: v for k, v in d.items() if k in cols} for d in tmp_off_dt]\n\n join=[(off_col, off_dt)]\n app_p_tbl=apply_pro_m.table(filt={\"cre_by\": usr_cde}, owner=owner, join=join)\n obj.append({\"type\": \"tbl\", \"obj\": app_p_tbl})\n\n return render_template(\"profile.html.j2\", obj=obj, msg=para.get(\"msg\", None))\n\n\n@profile.route(\"/\", methods=[\"POST\"])\ndef profile_post():\n from flask import redirect, request, session\n owner = session[\"usr_cde\"]\n form = profile_m.form(filt={\"usr_cde\": owner})\n cols = [c.get(\"name\") for c in form.cols]\n para = {k: v for k, v in request.form.items() if k in cols}\n rtn = profile_m.add(para)\n if rtn != []:\n return redirect(\"/\")\n return redirect(\"?msg={}\".format(\"Invalid input\"))\n", "id": "3925384", "language": "Python", "matching_score": 4.897747039794922, "max_stars_count": 0, "path": "jmapp/routes/profile.py" }, { "content": "import os\nfrom flask import Blueprint\nfrom jmapp.lib.auth import jwt_required\n\nfrom model import job_mod, apply_mod, offer_mod\n\nfname = os.path.basename(__file__).split(\".\")[0]\njob = Blueprint(fname, __name__)\n\njob_m = job_mod()\napply_m = apply_mod()\noffer_m = offer_mod()\n\n\n@job.route(\"/\", methods=[\"GET\"])\n@jwt_required\ndef job_get():\n from flask import render_template, session\n owner = session.get(\"usr_cde\", None)\n app_tbl = apply_m.table(owner=owner)\n app_dt, _ = apply_m.find({\"cre_by\": owner})\n cols = [\"aid\", \"job_cde\"]\n app_col = [c for c in apply_m.cols if c.get(\"name\") in cols]\n app_dt = [{k: v for k, v in d.items() if k in cols} for d in app_dt]\n app_col.append({\"type\": \"text\", \"name\": \"appcnt\", \"val\": \"\"})\n join = [(app_col, app_dt)]\n\n tbl = job_m.table(owner=owner, join=join, title=\"Job list\")\n opts = job_m.get_opts()\n return render_template(\"job.html.j2\", obj=[{\"type\": \"tbl\", \"obj\": tbl, \"opts\": opts}], newbtn=True)\n\n\n@job.route(\"/view\", methods=[\"GET\"])\ndef job_view_get():\n from flask import render_template, request, session\n 
owner = session.get(\"usr_cde\", None)\n paralst = (\"msg\", \"jid\", \"edit\", \"job_cde\")\n para = {p: request.args.get(p, None) for p in paralst}\n if para.get(\"jid\", None)==None and para.get(\"job_cde\", None)!=None:\n filt = {\"job_cde\": para.get(\"job_cde\", None)}\n print(filt)\n form = job_m.form(filt=filt)\n print(form)\n else:\n filt = {\"jid\": para.get(\"jid\", None)} if para.get(\n \"jid\", None) != None else {}\n form = job_m.form(filt=filt)\n readonly = True if para.get(\"edit\", \"0\") != \"1\" else False\n form.readonly = readonly\n obj = [{\"type\": \"form\", \"obj\": form}]\n jcde = None\n for j in [c.get(\"val\") for c in form.cols if c.get(\"name\") == \"job_cde\"]:\n jcde = j\n break\n creby = None\n for j in [c.get(\"val\") for c in form.cols if c.get(\"name\") == \"cre_by\"]:\n creby = j\n break\n if jcde != None:\n off_dt, _ = offer_m.find({\"job_cde\": jcde})\n if creby == owner:\n # if para.get(\"edit\", \"0\") == \"1\":\n cols = [\"oid\", \"aid\", \"rate\"]\n off_col = [c for c in offer_m.cols if c.get(\"name\") in cols]\n # print(off_dt)\n tdt = []\n for d in off_dt:\n dt = {k: v for k, v in d.items() if k in cols}\n dt.update({\"offcnt\": str(len(off_dt))})\n tdt.append(dt)\n off_dt = tdt\n print(off_dt)\n # off_dt = [{k: v for k, v in d.items() if k in cols}\n # for d in off_dt]\n off_col.append(\n {\"type\": \"text\", \"name\": \"offcnt\", \"val\": str(len(off_dt))})\n join = [(off_col, off_dt)]\n app_tbl = apply_m.table(\n filt={\"job_cde\": jcde}, join=join, owner=owner)\n if app_tbl.data != []:\n obj.append({\"type\": \"tbl\", \"obj\": app_tbl})\n else:\n off_form = offer_m.form(filt={\"job_cde\": jcde})\n off_form.readonly = True\n if off_form.cols != []:\n aid = [c.get(\"val\")\n for c in off_form.cols if c.get(\"name\") == \"aid\"]\n app_tbl = apply_m.table(filt={\"aid\": aid[0], \"cre_by\": owner})\n if app_tbl.data != []:\n obj.append({\"type\": \"form\", \"obj\": off_form})\n\n return render_template(\"job.html.j2\", obj=obj, msg=para.get(\"msg\", None))\n\n\n@job.route(\"/view\", methods=[\"POST\"])\ndef job_view_post():\n from flask import redirect, request, session\n owner = session[\"usr_cde\"]\n args = request.args.get(\"jid\", None)\n urlpara = \"&jid={}\".format(args) if args != None else \"\"\n filt = {\"jid\": args} if args != None else {}\n\n form = job_m.form()\n para = {k: v for k, v in request.form.items(\n ) if k in [c.get(\"name\") for c in form.cols]}\n if filt != {}:\n para.update(filt)\n rtn = job_m.add(para)\n if rtn != []:\n return redirect(\"/job\")\n\n return redirect(\"?msg={}{}\".format(\"Invalid input\", urlpara))\n\n\n@job.route(\"/apply\", methods=[\"GET\"])\ndef job_apply_get():\n from flask import redirect, request, session\n args = request.args.get(\"job_cde\", None)\n urlpara = \"&job_cde={}\".format(args) if args != None else \"\"\n filt = {\"job_cde\": args} if args != None else {}\n\n form = apply_m.form()\n para = {k: v for k, v in request.form.items(\n ) if k in [c.get(\"name\") for c in form.cols]}\n if filt != {}:\n para.update(filt)\n rtn = apply_m.add(para)\n if rtn != []:\n return redirect(\"/job\")\n return redirect(\"/job?msg={}\".format(\"Apply fail\"))\n\n\n\ndef byte2dict(data)->dict:\n import ast\n byte_str = data\n dict_str = byte_str.decode(\"UTF-8\")\n mydata = ast.literal_eval(dict_str)\n return mydata\n\n@job.route(\"/jobcat\", methods=[\"POST\"])\ndef job_cat_post():\n from flask import request\n import jmapp.lib as lib\n paralst = [\"job_desc\"]\n rtn=byte2dict(request.data)\n para = {p: 
rtn.get(p, None) for p in paralst}\n rtn, data = lib.post_data_with_rtn(type=\"jobcat\", data=para)\n if rtn:\n return data\n return {}\n\n\n@job.route(\"/offer\", methods=[\"GET\"])\ndef job_offer_get():\n from flask import redirect, request, session\n paralst = (\"job_cde\", \"aid\", \"rate\")\n para = {p: request.args.get(p, None) for p in paralst}\n\n # filt = {\"job_cde\": para.get(\"job_cde\"), \"aid\": para.get(\"aid\")}\n\n # form = apply_m.form()\n # para = {k: v for k, v in request.form.items(\n # ) if k in [c.get(\"name\") for c in form.cols]}\n # if filt != {}:\n # para.update(filt)\n rtn = offer_m.add(para)\n return redirect(\"/job\")\n", "id": "438776", "language": "Python", "matching_score": 4.457500457763672, "max_stars_count": 0, "path": "jmapp/routes/job.py" }, { "content": "import os\nfrom flask import Blueprint\nfrom jmapp.lib.auth import jwt_required\n\nfrom model import job_mod, apply_mod\n\nfname = os.path.basename(__file__).split(\".\")[0]\nhome = Blueprint(fname, __name__)\n\n# job_m = job_mod()\n# apply_m = apply_mod()\n\n\n@home.route(\"/\", methods=[\"GET\"])\ndef home_get():\n from flask import render_template\n return render_template(\"home.html.j2\")\n# def home_get():\n# from flask import render_template, session\n# owner = session.get(\"usr_cde\", None)\n# obj=[]\n# if owner!=None:\n# app_tbl = apply_m.table(filt={\"cre_by\": owner}, owner=owner, title=\"Apply list\")\n# app_dt, _ = apply_m.find({\"cre_by\": owner})\n# cols = [\"aid\", \"job_cde\"]\n# app_col = [c for c in apply_m.cols if c.get(\"name\") in cols]\n# app_dt = [{k: v for k, v in d.items() if k in cols} for d in app_dt]\n# join = [(app_col, app_dt)]\n\n# tbl = job_m.table(owner=owner, join=join, title=\"Job list\")\n# opts = job_m.get_opts()\n# obj.append({\"type\": \"tbl\", \"obj\": tbl, \"opts\": opts})\n# obj.append({\"type\": \"tbl\", \"obj\": app_tbl})\n# else:\n# tbl = job_m.table(title=\"Job list\")\n# opts = job_m.get_opts()\n# obj.append({\"type\": \"tbl\", \"obj\": tbl, \"opts\": opts})\n\n# return render_template(\"index.html.j2\", obj=obj)\n# # return render_template(\"index.html.j2\")\n", "id": "8965345", "language": "Python", "matching_score": 1.3667749166488647, "max_stars_count": 0, "path": "jmapp/routes/home.py" }, { "content": "from abc import ABC, abstractmethod\nfrom .pdtbl import *\nimport pandas as pd\n\n\nclass pdvw(ABC):\n def __init__(self, owner: str = \"system\", debug: bool = False):\n self.owner = owner\n self.debug = debug\n obj = self.init_obj()\n self._tbl = self.__set_tbl(obj)\n self._cols = obj.get(\"cols\", [])\n self.load()\n pass\n\n def __set_tbl(self, obj):\n owner = self.owner\n debug = self.debug\n rtn = []\n for t in obj.get(\"source\", []):\n tbl = t.get(\"tbl\")\n tbl.set_owner(owner)\n tbl.set_debug(debug)\n name = t.get(\"name\", type(tbl).__name__)\n join = t.get(\"join\", [])\n rtn.append({\"name\": name, \"tbl\": tbl, \"join\": join})\n return rtn\n\n @abstractmethod\n def init_obj(self) -> dict:\n pass\n\n def __conv_args(self, *args, **kwargs) -> list:\n rtn = []\n for a in args:\n rtn = [a] if isinstance(a, dict) else a\n break\n if rtn == []:\n rtn = [kwargs]\n if kwargs != {}:\n nrtn = []\n for r in rtn:\n r.update(kwargs)\n nrtn.append(r)\n rtn = nrtn\n return rtn\n\n def filter(self, *args, **kwargs):\n data = self.__conv_args(*args, **kwargs)\n if data == []:\n df = self._df()\n else:\n df = self._df(filt=data[0])\n return df, [0]\n\n def upsert(self, *args, **kwargs):\n import copy\n data = self.__conv_args(*args, **kwargs)\n tbl_bk = 
copy.deepcopy(self._tbl)\n for d in data:\n try:\n rtn = self.update(d)\n except:\n rtn = False\n pass\n if not(rtn):\n rtn = self.insert(d)\n if not(rtn):\n if self.debug:\n print(f\"upsert fail - {d}\")\n self._tbl = tbl_bk\n return False\n return True\n\n def update(self, *args, **kwargs):\n import copy\n data = self.__conv_args(*args, **kwargs)\n tbl_bk = copy.deepcopy(self._tbl)\n for d in data:\n tbl_dt = {}\n for t in self._tbl:\n jcols = t.get(\"join\", [])\n ndt = copy.deepcopy(d)\n tbl = t.get(\"tbl\")\n tname = t.get(\"name\")\n for j in jcols:\n val = tbl_dt.get(j, None)\n if val != None:\n ndt.update({j: val})\n rtn = tbl.update(ndt)\n filt_col = {c: d.get(c) for c in tbl.cols(attr=\"key\")}\n filt, _ = tbl.filter(filt_col)\n for f in tbl.to_dict(filt):\n tbl_dt = f\n break\n if not(rtn):\n if self.debug:\n print(f\"upd {tname} fail - {d}\")\n self._tbl = tbl_bk\n return False\n return True\n\n def insert(self, *args, **kwargs):\n import copy\n data = self.__conv_args(*args, **kwargs)\n tbl_bk = copy.deepcopy(self._tbl)\n for d in data:\n for t in self._tbl:\n tbl = t.get(\"tbl\")\n tname = t.get(\"name\")\n rtn = tbl.insert(d)\n if not(rtn):\n if self.debug:\n print(f\"ins {tname} fail - {d}\")\n self._tbl = tbl_bk\n return False\n return True\n\n def _df(self, filt: dict = {}) -> pd.DataFrame:\n fcols = [k for k, v in filt.items()]\n df = pd.DataFrame()\n for t in self._tbl:\n cols = [c.get(\"col\")\n for c in self._cols if c.get(\"tbl\") == t.get(\"name\")]\n jcols = t.get(\"join\", [])\n cols += jcols\n nfilt = [c for c in cols if c in fcols]\n if filt != {}:\n tfilt = {k: v for k, v in filt.items() if k in nfilt}\n ndf, _ = t.get(\"tbl\").filter(tfilt)\n ndf = ndf[cols]\n else:\n ndf = t.get(\"tbl\")._df[cols]\n if df.empty:\n df = ndf\n else:\n df = pd.merge(df, ndf, on=jcols, how=\"left\")\n return df\n\n def __repr__(self) -> dict:\n return self._df().to_dict('records')\n\n def __str__(self) -> str:\n return self._df().to_string()\n\n def save(self) -> bool:\n for s in self._tbl:\n tbl = s.get(\"tbl\")\n tname = s.get(\"name\")\n if not(tbl.save()):\n if self.debug:\n print(f\"save {tname} fail\")\n return False\n return True\n\n def load(self) -> bool:\n for s in self._tbl:\n tbl = s.get(\"tbl\")\n tname = s.get(\"name\")\n if not(tbl.load()):\n if self.debug:\n print(f\"load {tname} fail\")\n return False\n return True\n", "id": "3743249", "language": "Python", "matching_score": 3.1023612022399902, "max_stars_count": 0, "path": "pddb/lib/pddb/pdvw.py" }, { "content": "from abc import ABC, abstractmethod\nimport pandas as pd\nfrom .columns import Column\nfrom enum import Enum\n\n\nclass ACL(Enum):\n PUBLIC = 0\n SHARED = 1\n PRIVATE = 2\n\n\nclass pdtbl(ABC):\n def __init__(self, owner: str = \"system\", debug: bool = False):\n self.set_debug(debug)\n (obj, path, acl) = self.init_obj()\n obj = self.__set_sys_obj(obj)\n self._obj = self.__obj2col(obj)\n self._model = self.__obj2mod(self._obj)\n self._df = self.init_df(cols=[o.name for o in self._obj])\n self.__upddtm()\n self.set_owner(owner)\n self._path = path\n self._acl = acl\n self.load()\n pass\n\n def __obj2mod(self, obj: dict):\n from pydantic import create_model\n from typing import Optional\n fields = {}\n for o in obj:\n name = o.get(\"name\")\n type = o.get(\"type\", str)\n if o.get(\"optional\", False):\n type = Optional[type]\n default = None\n if self.debug:\n print(name, \"optional\", type)\n else:\n default = ...\n fields[name] = (type, default)\n model = create_model(self.__class__.__name__, 
**fields)\n return model\n # pass\n\n def set_owner(self, owner: str):\n self.owner = owner\n\n def set_debug(self, debug: bool):\n self.debug = debug\n\n def __set_sys_obj(self, obj: dict) -> dict:\n from datetime import datetime\n obj.update({\"cre_dtm\": {\"type\": datetime, \"curdtm\": True, \"ignupd\": True},\n \"cre_by\": {\"type\": str, \"owner\": True, \"ignupd\": True},\n \"upd_dtm\": {\"type\": datetime, \"curdtm\": True},\n \"upd_by\": {\"type\": str, \"owner\": True}})\n return obj\n\n def cols(self, attr: str = None) -> list:\n if attr == None:\n return [o.name for o in self._obj]\n return [o.name for o in self._obj if o.get(attr, False)]\n\n def __upddtm(self) -> bool:\n from datetime import datetime\n self.datetime = datetime.now()\n return True\n\n def __obj2col(self, obj: dict) -> list:\n cnt = 0\n rtn = []\n for k, v in obj.items():\n if isinstance(v, type):\n v = {\"type\": v}\n v.update({\"name\": k, \"iloc\": cnt})\n rtn.append(self.__set_col(v))\n cnt += 1\n return rtn\n\n def __set_col(self, *args, **kwargs) -> Column:\n return Column(*args, **kwargs)\n\n @abstractmethod\n def init_obj(self) -> dict:\n pass\n\n def init_df(self, cols: list = []) -> pd.DataFrame:\n cols = self.cols() if cols == [] else cols\n return pd.DataFrame(columns=cols)\n\n def __conv_args(self, *args, **kwargs) -> dict:\n rtn = []\n for a in args:\n rtn = [a] if isinstance(a, dict) else a\n break\n if rtn == []:\n rtn = [kwargs]\n if kwargs != {}:\n nrtn = []\n for r in rtn:\n r.update(kwargs)\n nrtn.append(r)\n rtn = nrtn\n return rtn\n\n def check_require(self, data: dict) -> bool:\n for r in self.cols(attr=\"require\"):\n if data.get(r, None) == None:\n return False\n return True\n\n def __set_uuid(self, data: dict) -> dict:\n import uuid\n for g in self.cols(attr=\"uuid\"):\n if data.get(g, None) == None or data.get(g, None) == \"\":\n data.update({g: str(uuid.uuid4())})\n return data\n\n def __set_md5(self, data: dict) -> dict:\n import hashlib\n for g in self.cols(attr=\"md5\"):\n val = data.get(g, None)\n if val != None:\n data.update({g: hashlib.md5(val.encode()).hexdigest()})\n return data\n\n def __set_genrun(self, data: dict) -> dict:\n for g in self.cols(attr=\"genrun\"):\n if data.get(g, None) == None or data.get(g, None) == \"\":\n obj = [o for o in self._obj if o.get(\"name\") == g][0]\n nmask = obj.get(\"genrun_pattern\").split(\"!!\")\n genpat = nmask[1].split(\"-\")\n if genpat[0] == \"cnt\":\n cnt = len(self._df)+1\n runnum = str(cnt).zfill(int(genpat[1]))\n val = \"{}{}{}\".format(nmask[0], runnum, nmask[2])\n else:\n val = None\n data.update({g: val})\n return data\n\n def __set_now(self, data: dict) -> dict:\n for g in self.cols(attr=\"curdtm\"):\n val = data.get(g, None)\n if val == None:\n data.update({g: self.datetime})\n return data\n\n def __set_owner(self, data: dict) -> dict:\n for g in self.cols(attr=\"owner\"):\n val = data.get(g, None)\n if val == None:\n data.update({g: self.owner})\n return data\n\n def count(self) -> int:\n return len(self._df)\n\n def __prepare_data(self, *args, **kwargs) -> list:\n data = self.__conv_args(*args, **kwargs)\n rtn = []\n for d in data:\n if d.get(\"__gen_uuid\", True):\n d = self.__set_uuid(data=d)\n if d.get(\"__mask_md5\", True):\n d = self.__set_md5(data=d)\n if d.get(\"__genrun\", True):\n d = self.__set_genrun(data=d)\n if d.get(\"__cur_dtm\", True):\n d = self.__set_now(data=d)\n d = self.__set_owner(data=d)\n for o in self._obj:\n d[o.get(\"name\")] = d.get(o.get(\"name\"), o.get(\"default\", None))\n newd = {k: v for k, 
v in d.items() if k in self.cols()}\n rtn.append(newd)\n return rtn\n\n def filter(self, filtcond: dict, filtowner=False) -> (list, int):\n df = self._df\n filt = filtcond.copy()\n if filtowner:\n filt.update({\"upd_by\": self.owner})\n filtcol = [k for k, v in filt.items()]\n for d in self.__prepare_data(filt):\n data = d\n break\n filt = {k: v for k, v in data.items() if k in filtcol}\n if self.debug:\n print(f\"filter - {self.to_dict(df)}, {filt}\")\n for k, v in filt.items():\n df = df[df[k] == v]\n if df.empty:\n return (pd.DataFrame(), [])\n return (df, df.index.to_list())\n\n def select(self, filtcond: dict) -> (list):\n filtowner = self.check_acl(issel=True)\n df, _ = self.filter(filtcond=filtcond, filtowner=filtowner)\n return df\n\n def upsert(self, *args, **kwargs) -> (bool, list):\n if kwargs.get(\"__upddtm\", True):\n self.__upddtm()\n kwargs.update({\"__upddtm\": False})\n data = self.__prepare_data(*args, **kwargs)\n df = self._df\n for d in data:\n key = {k: v for k, v in d.items() if k in self.cols(attr=\"key\")}\n filt, idx = self.filter(key)\n if idx == []:\n if self.debug:\n print(f\"upsert(ins) - {d}\")\n df = df.append(d, ignore_index=True)\n else:\n if self.debug:\n print(f\"upsert(upd) - {d}\")\n filtowner = self.check_acl(isupd=True)\n filt, idx = self.filter(key, filtowner=filtowner)\n if idx == []:\n return False, None\n else:\n for k, v in d.items():\n if k not in self.cols(attr=\"key\") and k not in self.cols(attr=\"uuid\") and k not in self.cols(attr=\"ignupd\"):\n upd = pd.Series({i: v for i in idx})\n df[k].update(upd)\n self._df = df\n return True, data\n\n def check_acl(self, isupd=False, issel=False) -> bool:\n filtowner = True if self.owner != \"system\" else False\n if self._acl == ACL.PUBLIC:\n filtowner = False\n if not(isupd):\n if self._acl == ACL.SHARED:\n filtowner = False\n if not(issel):\n if self._acl == ACL.PRIVATE:\n filtowner = False\n return filtowner\n\n def update(self, *args, **kwargs) -> (bool, list):\n if kwargs.get(\"__upddtm\", True):\n self.__upddtm()\n data = self.__prepare_data(*args, **kwargs)\n df = self._df\n for d in data:\n key = {<KEY>, v in d.items() if k in self.cols(attr=\"key\")}\n filtowner = self.check_acl(isupd=True)\n filt, idx = self.filter(key, filtowner=filtowner)\n if idx != []:\n if self.debug:\n print(f\"update - {d}\")\n for k, v in d.items():\n if k not in self.cols(attr=\"key\") and k not in self.cols(attr=\"uuid\") and k not in self.cols(attr=\"ignupd\"):\n upd = pd.Series({i: v for i in idx})\n df[k].update(upd)\n else:\n return False, None\n self._df = df\n return True, data\n\n def insert(self, *args, **kwargs) -> (bool, list):\n if kwargs.get(\"__upddtm\", True):\n self.__upddtm()\n data = self.__prepare_data(*args, **kwargs)\n df = self._df\n for d in data:\n key = {k: v for k, v in d.items() if k in self.cols(attr=\"key\")}\n filt, idx = self.filter(key)\n if idx == []:\n if self.debug:\n print(f\"insert - {d}\")\n df = df.append(d, ignore_index=True)\n else:\n return False, None\n self._df = df\n return True, data\n\n def __repr__(self) -> dict:\n return self._df.to_dict(\"records\")\n\n def to_dict(self, df: pd.DataFrame = pd.DataFrame()) -> dict:\n df = self._df if df.empty else df\n return df.to_dict(\"records\")\n\n def __str__(self) -> str:\n return self._df.to_string()\n\n def save(self, path: str = None) -> bool:\n path = self._path if path == None else path\n try:\n self._df.to_parquet(path=path, compression='gzip')\n except:\n return False\n return True\n\n def load(self, path: str = None) 
-> bool:\n path = self._path if path == None else path\n try:\n self._df = pd.read_parquet(path=path)\n except:\n return False\n return True\n", "id": "6746698", "language": "Python", "matching_score": 3.5909221172332764, "max_stars_count": 0, "path": "pddb/lib/pddb/pdtbl.py" }, { "content": "from abc import ABC\n\n\nclass Column(ABC):\n def __init__(self, *args, **kwargs):\n self.set_col(*args, **kwargs)\n\n def set_col(self, *args, **kwargs):\n for a in args:\n kwargs.update(a)\n break\n self.idx = kwargs.get(\"iloc\", 0)\n self.name = kwargs.get(\"name\", f\"col{self.idx}\")\n self.type = kwargs.get(\"type\", str)\n self.key = kwargs.get(\"key\", False)\n self.uuid = kwargs.get(\"uuid\", False)\n self.md5 = kwargs.get(\"md5\", False)\n self.genrun_pattern = kwargs.get(\"genrun\", None)\n self.genrun = self.genrun_pattern != None\n self.owner = kwargs.get(\"owner\", False)\n self.curdtm = kwargs.get(\"curdtm\", False)\n self.igncre = kwargs.get(\"igncre\", False)\n self.ignupd = kwargs.get(\"ignupd\", False)\n self.optional = not(kwargs.get(\"require\", False))\n self.require = kwargs.get(\"require\", False or self.key)\n self.updcol = not(\n self.curdtm or self.uuid or self.genrun or self.owner)\n self.default = kwargs.get(\"default\", None)\n\n def get(self, attr, default=None):\n default = self.default if default == None else default\n attrs = self.__dict__\n return attrs.get(attr, default)\n\n def __repr__(self):\n d = [f\"{k}: {v}\" for k, v in self.__dict__.items()]\n # rtn=\"{%s}\" % (\", \".join(d))\n return \"{%s}\" % (\", \".join(d))\n", "id": "10663158", "language": "Python", "matching_score": 0.6247450709342957, "max_stars_count": 0, "path": "pddb/lib/pddb/columns.py" }, { "content": "from abc import ABC\n\nclass hobj(ABC):\n cols: tuple = (\"_attr\", \"_text\", \"_i\")\n\n def __init__(self, rec: dict):\n for k, v in rec.items():\n if k == \"_tag\":\n self.__setattr__(\"_name\", v)\n elif k in self.cols:\n self.__setattr__(k, v)\n else:\n continue\n pass\n\n def get(self, name: str, default=None):\n return self.getattr(name=name, default=default)\n\n def getattr(self, name: str, default=None):\n try:\n rtn = self.__getattribute__(name)\n except:\n rtn = default\n return rtn\n\n def append(self, name, val, forcelist: bool = True):\n if val == None:\n return\n vals = self.getattr(name, [])\n if not(isinstance(vals, list)):\n vals = [vals]\n # if name !=\"_elem\":\n # val.__setattr__(\"_i\",len(vals)+1)\n vals.append(val)\n if forcelist:\n vals = list(set(vals))\n if vals != []:\n if not(forcelist):\n if len(vals) == 1:\n vals = vals[0]\n self.__setattr__(name, vals)\n\n def concat(self, name, val):\n vals = self.getattr(name, \"\")\n if isinstance(val, list):\n vals += \" \".join(val)\n else:\n vals += \" \"+val\n if vals != \"\":\n self.__setattr__(name, vals.strip())\n\n def addchild(self, child):\n ctag = child._name\n self.append(name=\"_elem\", val=ctag)\n self.append(name=ctag, val=child, forcelist=False)\n if ctag==\"script\":\n return\n try:\n self.concat(name=\"_text\", val=child._text)\n except:\n pass\n\n def __repr__(self):\n return str(self.__dict__)\n\n", "id": "187441", "language": "Python", "matching_score": 1.8726935386657715, "max_stars_count": 0, "path": "html2obj/lib/hobj.py" }, { "content": "from abc import ABC\n\nclass formathtml(ABC):\n unique_tag: tuple = (\"html\", \"head\", \"body\")\n selfend_tag: tuple = (\"br\", \"img\", \"meta\", \"link\", \"input\")\n valid_tag: tuple = (\"path\", \"a\", \"abbr\", \"acronym\", \"address\", \"applet\", \"area\", 
\"article\", \"aside\", \"audio\", \"b\", \"base\", \"basefont\", \"bdi\", \"bdo\", \"big\", \"blockquote\", \"body\", \"br\", \"button\", \"canvas\", \"caption\", \"center\", \"cite\", \"code\", \"col\", \"colgroup\", \"data\", \"datalist\", \"dd\", \"del\", \"details\", \"dfn\", \"dialog\", \"dir\", \"div\", \"dl\", \"dt\", \"em\", \"embed\", \"fieldset\", \"figcaption\", \"figure\", \"font\", \"footer\", \"form\", \"frame\", \"frameset\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\", \"head\", \"header\", \"hr\", \"html\", \"i\", \"iframe\", \"img\", \"input\", \"ins\",\n \"kbd\", \"label\", \"legend\", \"li\", \"link\", \"main\", \"map\", \"mark\", \"meta\", \"meter\", \"nav\", \"noframes\", \"noscript\", \"object\", \"ol\", \"optgroup\", \"option\", \"output\", \"p\", \"param\", \"picture\", \"pre\", \"progress\", \"q\", \"rp\", \"rt\", \"ruby\", \"s\", \"samp\", \"script\", \"section\", \"select\", \"small\", \"source\", \"span\", \"strike\", \"strong\", \"style\", \"sub\", \"summary\", \"sup\", \"svg\", \"table\", \"tbody\", \"td\", \"template\", \"textarea\", \"tfoot\", \"th\", \"thead\", \"time\", \"title\", \"tr\", \"track\", \"tt\", \"u\", \"ul\", \"var\", \"video\", \"wbr\")\n cols: tuple = (\"_tag\", \"_attr\", \"_text\", \"_child\")\n rmhtml: tuple = (\"&emsp;\", \"&nbsp;\")\n\n def __init__(self, url: str = None):\n if url != None:\n html = self.get_content(url=url)\n if html != None:\n self.set(html=html)\n pass\n\n def get_content(self, url: str) -> str:\n import requests\n req = requests.get(url)\n if req.status_code == 200:\n return req.text\n return None\n\n def set(self, html: str) -> list:\n def __getattr(attr: str) -> dict:\n import re\n rtn = {}\n vals = []\n joinb4 = False\n for a in attr.split(\" \"):\n if joinb4:\n vals.append(a)\n if len(a)>1 and a.strip()[-1] == \"\\\"\":\n val = \" \".join(vals)\n val = re.sub(\"^\\\"\", \"\", val)\n val = re.sub(\"\\\"$\", \"\", val)\n rtn.update({key: val})\n vals = []\n joinb4 = False\n else:\n keys = a.split(\"=\", 1)\n key = keys[0]\n if len(keys) == 1:\n rtn.update({key: True})\n else:\n val = keys[1].strip()\n if len(val)>1 and val[-1] == \"\\\"\" and val not in (\" \", \"\\\"\"):\n val = re.sub(\"^\\\"\", \"\", val)\n val = re.sub(\"\\\"$\", \"\", val)\n rtn.update({key: val})\n else:\n vals.append(val)\n joinb4 = True\n return rtn\n\n def __2list(html: str) -> list:\n import re\n rtn = []\n for o in html.split(\"<\"):\n cont = o.split(\">\", 1)\n tags = cont[0].split(\" \", 1)\n tag = tags[0].strip()\n obj = {}\n if tag == \"\":\n continue\n if tag[0] == \"!\":\n continue\n end = (tag[0] == \"/\")\n tag = re.sub(\"^/\", \"\", tag)\n if tag not in self.valid_tag:\n rtn[len(\n rtn)-1].update({\"_text\": \"{}{}{}\".format(rtn[len(rtn)-1].get(\"_text\", \"\"), \"<\", o)})\n continue\n selfend = (tag in self.selfend_tag)\n # if tag==\"link\":\n # print(selfend)\n obj.update({\"_tag\": tag})\n if len(tags) > 1:\n attr = tags[1].strip()\n if attr != \"\":\n selfend = ((attr[-1] == \"/\")\n or (tag in self.selfend_tag))\n attr = re.sub(\"/$\", \"\", attr).strip()\n if attr != \"\":\n obj.update({\"_attr\": __getattr(attr)})\n if len(cont) > 1:\n text = cont[1].strip()\n for l in self.rmhtml:\n text = re.sub(l, \"\", text)\n if text != \"\":\n obj.update({\"_text\": text})\n obj.update({\"_end\": end})\n obj.update({\"_idx\": len(rtn)})\n rtn.append(obj)\n if selfend:\n tmp = obj.copy()\n tmp.update({\"_end\": True})\n tmp.update({\"_idx\": len(rtn)})\n rtn.append({k: v for k, v in tmp.items()\n if k in (\"_tag\", \"_end\", 
\"_idx\")})\n return rtn\n\n def __set_end(obj: list) -> list:\n rmlst = []\n for t in self.unique_tag:\n st = [r for r in obj if r.get(\"_tag\") == t]\n end = [r for r in obj if r.get(\"_tag\") == t and r.get(\"_end\")]\n if end == []:\n obj.append({\"_tag\": t, \"_end\": True})\n rtn = obj.copy()\n for t in self.unique_tag:\n lst = [r for r in rtn if r.get(\"_tag\") == t]\n if len(lst) > 2:\n lst = lst[1:]\n lst = lst[:-1]\n rmlst += [r.get(\"_idx\") for r in lst]\n rtn = []\n for r in obj:\n if r.get(\"_idx\") not in rmlst:\n r.update({\"_idx\": len(rtn)})\n rtn.append(r)\n for i, o in enumerate(rtn):\n if o.get(\"_end\"):\n continue\n tag = o.get(\"_tag\")\n idx = o.get(\"_idx\")\n cnt = 1\n for r in rtn:\n if r.get(\"_tag\") == tag and r.get(\"_idx\") > idx:\n if r.get(\"_end\"):\n cnt -= 1\n else:\n cnt += 1\n if cnt == 0:\n rtn[i].update({\"_eidx\": r.get(\"_idx\")})\n text = r.get(\"_text\", \"\").strip()\n if text != \"\":\n rtn[i].update({\"_text\": text})\n break\n return rtn\n\n def __set_level(obj: list) -> list:\n level = 0\n rtn = obj.copy()\n for i, o in enumerate(rtn):\n rtn[i].update({\"_level\": level})\n if i != 0:\n if o.get(\"_end\"):\n level -= 1\n else:\n level += 1\n rtn[i].update({\"_level\": level})\n return rtn\n\n rtn = __2list(html=html)\n rtn = __set_end(obj=rtn)\n\n rtn = __set_level(obj=rtn)\n self.obj = rtn\n return rtn\n pass\n\n", "id": "10263890", "language": "Python", "matching_score": 2.248831272125244, "max_stars_count": 0, "path": "html2obj/lib/formathtml.py" }, { "content": "from abc import ABC\nfrom .formathtml import *\nfrom .hobj import *\n\nclass html2obj(ABC):\n fullxpath: list = []\n _obj: list = []\n\n def __init__(self, url: str = None):\n self.set_url(url=url)\n pass\n\n def set_url(self, url: str):\n if url != None:\n fh = formathtml(url)\n obj = self._obj = fh.obj\n self.build_fullxpath(obj=obj)\n self.set(obj=obj)\n return self._obj\n\n def getattr(self, name: str, default=None):\n try:\n rtn = self.__getattribute__(name)\n except:\n rtn = default\n return rtn\n\n def set(self, obj: list = [], level: int = 0, idx: tuple = ()) -> list:\n\n def __getobj(obj: list, level: int, idx: tuple) -> list:\n if idx == ():\n fidx = 0\n eidx = len(obj)-1\n else:\n fidx = idx[0]\n eidx = idx[1]\n return [o for o in obj if o.get(\"_level\") == level and not(o.get(\"_end\")) and o.get(\"_idx\") >= fidx and o.get(\"_idx\") <= eidx]\n obj = self._obj if obj == [] else obj\n rtn = []\n for r in __getobj(obj=obj, level=level, idx=idx):\n o = hobj(r)\n fidx = r.get(\"_idx\")\n eidx = r.get(\"_eidx\", None)\n if eidx != None:\n for c in self.set(obj=obj, level=level+1, idx=(fidx, eidx)):\n o.addchild(c)\n rtn.append(o)\n for r in rtn:\n self.__setattr__(r._name, r)\n break\n return rtn\n\n def build_fullxpath(self, obj: list = []) -> list:\n obj = self._obj if obj == [] else obj\n cols = (\"_tag\", \"_attr\", \"_xpath\")\n rtn = []\n for o in obj:\n if o.get(\"_attr\", {}) == {}:\n continue\n idx = o.get(\"_idx\")\n level = o.get(\"_level\")\n xpath = [\"{}[*]\".format(t.get(\"_tag\")) for t in obj if t.get(\"_idx\")\n < idx and t.get(\"_eidx\", -1) > idx and t.get(\"_level\") < level]\n if xpath == []:\n continue\n o.update({\"_xpath\": \"/\".join(xpath)})\n rtn.append({k: v for k, v in o.items() if k in cols})\n self.fullxpath = rtn\n return rtn\n\n def get_xpath(self, xpath: str, obj=None)-> list:\n def __splitxp(xpath: str):\n import re\n txp=xpath.split(\"[\", 1)\n xp=txp[0]\n idx=\"*\"\n opt=[]\n attr=\"\"\n attrval=\"\"\n if len(txp)>1:\n 
opt=txp[1].split(\"]\")[0].split(\"=\", 1)\n if len(opt)==1:\n idx=opt[0]\n else:\n attr=opt[0]\n attr=re.sub(\"^@\", \"\", attr)\n attrval=opt[1]\n for l in (\"^\\\"\", \"\\\"$\", \"^'\", \"'$\"):\n attrval=re.sub(l, \"\", attrval).strip()\n return xp, idx, attr, attrval\n\n def __get_obj(xp: str, obj: list, isfull: bool):\n rtn=[]\n xp, idx, attr, attrval = __splitxp(xp)\n if xp==\"\":\n return obj, isfull\n if isfull:\n if xp not in (\"\", \"*\"):\n for o in obj:\n t=o.getattr(xp, [])\n if isinstance(t, list):\n rtn+=t\n else:\n rtn.append(t)\n if attr!=\"\":\n obj=rtn.copy()\n rtn=[]\n for o in obj:\n for k, v in o.getattr(\"_attr\", {}).items():\n if k==attr and (attrval in v.split(\" \") or attrval==v):\n rtn.append(o)\n else:\n if idx!=\"*\":\n idx=int(idx)-1\n # print(\"get idx\", xp, idx, len(rtn))\n try:\n rtn=[rtn[idx]]\n except:\n print(\"! not found\")\n else:\n for o in self.fullxpath:\n if xp!=\"*\":\n if xp!=o.get(\"_tag\", \"\"):\n continue\n for k, v in o.get(\"_attr\", {}).items():\n if k==attr and attrval in v.split(\" \"):\n xpath=\"/{}/{}[@{}=\\\"{}\\\"]\".format(o.get(\"_xpath\", \"\"), xp, k, v)\n rtn+=self.get_xpath(xpath)\n # print(xp, idx, attr, attrval, len(rtn))\n return rtn, True\n xpath=xpath.split(\"/\")[1:]\n rtn=[self] if obj==None else obj.copy()\n isfull=True\n for i, xp in enumerate(xpath):\n if i==0 and xp==\"\":\n isfull=False\n rtn, isfull=__get_obj(xp, rtn, isfull)\n # print(i, xp, len(rtn))\n if rtn==[]:\n break\n return rtn\n\n def oget_xpath(self, xpath: str, obj=None) -> list:\n def is_integer(n):\n try:\n float(n)\n except ValueError:\n return False\n else:\n return float(n).is_integer()\n\n def __getxpath(xpath: str):\n import re\n xpath = xpath.split(\"[\", 1)\n idx = 0\n attr = {}\n if len(xpath) == 1:\n xpath = xpath[0]\n return xpath, idx, attr\n opt = re.sub(\"]$\", \"\", xpath[1]).strip()\n xpath = xpath[0]\n if xpath == \"\":\n return None, idx, attr\n if is_integer(opt):\n idx = int(opt)\n elif opt == \"*\":\n idx = -1\n else:\n attrs = opt.split(\"=\", 1)\n key=attrs[0]\n key = re.sub(\"^@\", \"\", key)\n tattr = attrs[1]\n for l in (\"^\\\"\", \"\\\"$\", \"^'\", \"'$\"):\n tattr = re.sub(l, \"\", tattr)\n if len(attrs) > 1:\n attr.update({key: tattr})\n return xpath, idx, attr\n\n def __getobj(obj, xp: str, attr: dict, fullpath: bool):\n nxp = \"\"\n rtn = None\n if attr!={}:\n for p in self.fullxpath:\n nopt = \"\"\n if xp != \"*\":\n if xp != p.get(\"_tag\"):\n continue\n attrs = p.get(\"_attr\", {})\n for k, v in attr.items():\n aval=attrs.get(k, None)\n if aval != None and v in aval.split(\" \"):\n nxp = \"/{}/{}[@{}]\".format(p.get(\"_xpath\"),\n p.get(\"_tag\"), f\"{k}='{v}'\")\n if not(fullpath):\n rtn = self.get_xpath(xpath=nxp)\n else:\n rtn = obj.getattr(p.get(\"_tag\"), None)\n # rtn = [o.getattr(p.get(\"_tag\"), None) for o in obj]\n elif xp == \"\":\n pass\n else:\n rtn = [o.getattr(xp, None) for o in obj]\n return rtn\n\n rtn = []\n xpath = xpath.split(\"/\")[1:]\n isfullpath = True\n\n obj = self if obj == None else obj\n for i, xp in enumerate(xpath):\n xp, idx, attr = __getxpath(xp)\n if i == 0 and xp == \"\":\n isfullpath = False\n if xp == \"\":\n continue\n obj = __getobj(obj, xp, attr, fullpath=isfullpath)\n if obj == None or obj == []:\n break\n if not(isinstance(obj, list)):\n obj=[obj]\n if i == len(xpath)-1:\n # print([o.get(\"_name\") for o in obj])\n rtn = [o for o in obj]\n else:\n if idx == 0:\n obj = obj\n elif idx>0:\n obj = obj[idx-1]\n else:\n txp = \"/\".join(xpath[i:])\n rtn=[]\n for o in obj:\n 
tmp=self.get_xpath(xpath=txp, obj=o)\n rtn+=tmp\n break\n # return rtn\n # return [self.get_xpath(xpath=txp, obj=o)[0] for o in obj]\n return rtn\n\n def __str__(self):\n return str(self.__dict__)\n\n", "id": "7469526", "language": "Python", "matching_score": 1.311305284500122, "max_stars_count": 0, "path": "html2obj/lib/html2obj.py" }, { "content": "loggers = {}\n\n\ndef logger(func):\n import logging\n global loggers\n if loggers.get(func):\n return loggers.get(func)\n logger = logging.getLogger(func)\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s\\t%(name)s.%(funcName)s()\\t%(levelname)s\\t%(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n loggers[func] = logger\n return logger\n", "id": "6181379", "language": "Python", "matching_score": 0.2719036340713501, "max_stars_count": 0, "path": "gwcomm/logger.py" }, { "content": "from ._template import *\n\n\nclass register_mod(model_template):\n def set_type(self):\n self.type = \"users\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"text\", \"name\": \"usr_cde\", \"label\": \"Login\", \"val\": \"\", \"placeholder\": \"Enter Login Name\"},\n {\"type\": \"password\", \"name\": \"password\", \"label\": \"Password\",\n \"val\": \"\", \"placeholder\": \"Enter Password\"},\n {\"type\": \"password\", \"name\": \"c_password\", \"label\": \"Confirm Password\", \"val\": \"\", \"placeholder\": \"Confirm Password\"}]\n return\n\n def set_btns(self):\n self.btns = [{\"type\": \"submit\", \"name\": \"submit\", \"label\": \"Register\"},\n {\"type\": \"reset\", \"name\": \"reset\", \"label\": \"Reset\"}]\n return\n", "id": "8869231", "language": "Python", "matching_score": 4.36666202545166, "max_stars_count": 0, "path": "model/register.py" }, { "content": "from ._template import *\n\n\nclass login_mod(model_template):\n def set_type(self):\n self.type = \"users\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"hidden\", \"name\": \"uid\", \"label\": \"Login\", \"val\": \"\", \"uuid\": True},\n {\"type\": \"text\", \"name\": \"usr_cde\", \"label\": \"Login\",\n \"val\": \"\", \"placeholder\": \"Enter Login Name\"},\n {\"type\": \"password\", \"name\": \"password\", \"label\": \"Password\", \"val\": \"\", \"placeholder\": \"Enter Password\", \"md5\": True}]\n return\n\n def set_btns(self):\n self.btns = [{\"type\": \"submit\", \"name\": \"submit\", \"label\": \"Login\"},\n {\"type\": \"reset\", \"name\": \"reset\", \"label\": \"Reset\"}]\n return\n", "id": "12008535", "language": "Python", "matching_score": 2.329439163208008, "max_stars_count": 0, "path": "model/login.py" }, { "content": "from ._template import *\n\n\nclass profile_mod(model_template):\n\n def set_type(self):\n self.type = \"profiles\"\n\n def set_cols(self):\n self.cols = [{\"type\": \"hidden\", \"name\": \"pid\", \"label\": \"ID\", \"val\": \"\", \"uuid\": True},\n {\"type\": \"hidden\", \"name\": \"usr_cde\", \"label\": \"Code\", \"val\": \"\",\n \"key\": True},\n {\"type\": \"text\", \"name\": \"usr_name\",\n \"label\": \"Name\", \"val\": \"\", \"placeholder\": \"Name\"},\n {\"type\": \"email\", \"name\": \"email\",\n \"label\": \"Email\", \"val\": \"\", \"placeholder\": \"Email\"},\n {\"type\": \"textarea\", \"name\": \"usr_desc\",\n \"label\": \"Description\", \"val\": \"\", \"placeholder\": \"Description\"},\n {\"type\": \"text\", \"name\": \"education\",\n \"label\": \"Education Level\", \"val\": \"\", \"placeholder\": \"Education Level\"},\n {\"type\": \"text\", 
\"name\": \"qualification\",\n \"label\": \"Qualification\", \"val\": \"\", \"placeholder\": \"Qualification\"},\n {\"type\": \"text\", \"name\": \"work_exp\",\n \"label\": \"Working Experience (Year)\", \"val\": \"\", \"placeholder\": \"Working Experience\"},\n {\"type\": \"text\", \"name\": \"programing\",\n \"label\": \"Programing\", \"val\": \"\", \"placeholder\": \"Programing\"},\n {\"type\": \"moption\", \"name\": \"job_cat\",\n \"label\": \"Job Cat\", \"val\": \"\", \"opt\": [\"SW\", \"HW\"]},\n {\"type\": \"hidden\", \"name\": \"cre_by\",\n \"label\": \"cre_by\", \"val\": \"\"}]\n return\n", "id": "2243095", "language": "Python", "matching_score": 3.332287549972534, "max_stars_count": 0, "path": "model/profile.py" }, { "content": "import pddb\n\n\nclass profiles_mod(pddb.pdtbl):\n def init_obj(self) -> dict:\n acl = pddb.ACL.SHARED\n path = \"./data/profiles.parquet\"\n obj = {\"pid\": {\"type\": str, \"uuid\": True, \"ignupd\": True},\n \"usr_cde\": {\"type\": str, \"key\": True},\n \"usr_name\": {\"type\": str},\n \"email\": {\"type\": str},\n \"usr_desc\": {\"type\": str},\n \"education\": {\"type\": str},\n \"qualification\": {\"type\": str},\n \"work_exp\": {\"type\": str},\n \"programing\": {\"type\": str},\n \"job_cat\": {\"type\": str},\n \"status\": {\"type\": bool, \"default\": True}}\n return (obj, path, acl)\n", "id": "7692601", "language": "Python", "matching_score": 2.899304151535034, "max_stars_count": 0, "path": "models/profiles.py" }, { "content": "import pddb\n\n\nclass jobs_mod(pddb.pdtbl):\n def init_obj(self) -> dict:\n acl = pddb.ACL.SHARED\n # acl = pddb.ACL.PRIVATE\n path = \"./data/jobs.parquet\"\n obj = {\"jid\": {\"type\": str, \"uuid\": True, \"ignupd\": True},\n \"job_cde\": {\"type\": str, \"key\": True, \"genrun\": \"JD!!cnt-10!!\"},\n \"company\": {\"type\": str},\n \"title\": {\"type\": str},\n \"scope\": {\"type\": str},\n \"requirement\": {\"type\": str},\n \"experience\": {\"type\": str},\n \"amount\": {\"type\": str},\n \"period\": {\"type\": str},\n \"job_cat\": {\"type\": str},\n \"status\": {\"type\": bool, \"default\": True}}\n return (obj, path, acl)\n", "id": "3978500", "language": "Python", "matching_score": 2.751969337463379, "max_stars_count": 0, "path": "models/jobs.py" }, { "content": "import pddb\n\n\nclass users_mod(pddb.pdtbl):\n def init_obj(self) -> dict:\n acl = pddb.ACL.SHARED\n path = \"./data/users.parquet\"\n obj = {\"uid\": {\"type\": str, \"uuid\": True, \"ignupd\": True},\n \"usr_cde\": {\"type\": str, \"key\": True},\n \"password\": {\"type\": str, \"require\": True, \"md5\": True},\n \"status\": {\"type\": bool, \"default\": True}}\n return (obj, path, acl)\n", "id": "6352514", "language": "Python", "matching_score": 2.7650394439697266, "max_stars_count": 0, "path": "models/users.py" }, { "content": "import pddb\n\n\nclass offers_mod(pddb.pdtbl):\n def init_obj(self) -> dict:\n acl = pddb.ACL.SHARED\n # acl = pddb.ACL.PRIVATE\n path = \"./data/offers.parquet\"\n obj = {\"oid\": {\"type\": str, \"uuid\": True, \"ignupd\": True},\n \"job_cde\": {\"type\": str, \"require\": True, \"key\": True},\n \"aid\": {\"type\": str, \"require\": True},\n \"rate\": {\"type\": str},\n \"status\": {\"type\": bool, \"default\": True}}\n return (obj, path, acl)\n", "id": "12250450", "language": "Python", "matching_score": 0.5373077392578125, "max_stars_count": 0, "path": "models/offers.py" }, { "content": "import sys\n\n# sys.path.append(\"/home/coder/code/py/pddb\")\n# sys.path.append(\"/home/coder/code/py/jmapi\")\nfrom .app import 
*\n", "id": "1032427", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "jmapi/__init__.py" }, { "content": "from .pddb import *\n", "id": "150618", "language": "Python", "matching_score": 0.5373077392578125, "max_stars_count": 0, "path": "pddb/lib/__init__.py" } ]
1.846255
MrLonelyZC88
[ { "content": "# -*- coding:utf-8 -*-\n\"\"\"\nXtdFile类单元测试\n\"\"\"\n\nimport unittest\n\nfrom vaspy.matstudio import ArcFile, XtdFile\nfrom tests import path\n\n\nclass XtdTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = True\n\n def test_construction_query(self):\n \" Test XtdFile object construction and query functions. \"\n filename = path + \"/00-04.xtd\"\n arcname = path + \"/00-04.arc\"\n xtd = XtdFile(filename, arcname)\n\n self.assertTrue(isinstance(xtd.arcfile, ArcFile))\n\n # Check if no arcfile.\n xtd = XtdFile(filename)\n self.assertTrue(xtd.arcfile is None)\n\n def test_coords_iterator(self):\n \" Make sure we can get correct direct coordinates. \"\n filename = path + \"/00-04.xtd\"\n arcname = path + \"/00-04.arc\"\n xtd = XtdFile(filename, arcname)\n\n ref_coords = [[0.05100029522154211, 0.39850231693493543, 0.11064568869218162],\n [0.05100029522154211, 0.39850231693493543, 0.33250176930623526],\n [0.05100029522154211, 0.14850086526108972, 0.0],\n [0.05100029522154211, 0.14850086526108972, 0.22168693759127747],\n [0.05100029522154211, 0.14850086526108972, 0.4409576855929274],\n [0.30100174689538783, 0.14850086526108972, 0.11064568869218162],\n [0.30100174689538783, 0.14850086526108972, 0.33250176930623526],\n [0.30100174689538783, 0.39850231693493543, 0.0],\n [0.30100174689538783, 0.39850231693493543, 0.22168693759127747],\n [0.30100174689538783, 0.39850231693493543, 0.4409576855929274],\n [0.5510031985692336, 0.39850231693493543, 0.11064568869218162],\n [0.5510031985692336, 0.39850231693493543, 0.33250176930623526],\n [0.5510031985692336, 0.14850086526108972, 0.0],\n [0.5510031985692336, 0.14850086526108972, 0.22168693759127747],\n [0.5510031985692336, 0.14850086526108972, 0.4409576855929274],\n [0.801004650369027, 0.14850086526108972, 0.11064568869218162],\n [0.801004650369027, 0.14850086526108972, 0.33250176930623526],\n [0.801004650369027, 0.39850231693493543, 0.0],\n [0.801004650369027, 0.39850231693493543, 0.22168693759127747],\n [0.801004650369027, 0.39850231693493543, 0.4409576855929274],\n [0.05100029522154211, 0.8985052204085746, 0.11064568869218162],\n [0.05100029522154211, 0.8985052204085746, 0.33250176930623526],\n [0.05100029522154211, 0.6485037686087811, 0.0],\n [0.05100029522154211, 0.6485037686087811, 0.22168693759127747],\n [0.05100029522154211, 0.6485037686087811, 0.4409576855929274],\n [0.30100174689538783, 0.6485037686087811, 0.11064568869218162],\n [0.30100174689538783, 0.6485037686087811, 0.33250176930623526],\n [0.30100174689538783, 0.8985052204085746, 0.0],\n [0.30100174689538783, 0.8985052204085746, 0.22168693759127747],\n [0.30100174689538783, 0.8985052204085746, 0.4409576855929274],\n [0.5510031985692336, 0.8985052204085746, 0.11064568869218162],\n [0.5510031985692336, 0.8985052204085746, 0.33250176930623526],\n [0.5510031985692336, 0.6485037686087811, 0.0],\n [0.5510031985692336, 0.6485037686087811, 0.22168693759127747],\n [0.5510031985692336, 0.6485037686087811, 0.4409576855929274],\n [0.801004650369027, 0.6485037686087811, 0.11064568869218162],\n [0.801004650369027, 0.6485037686087811, 0.33250176930623526],\n [0.801004650369027, 0.8985052204085746, 0.0],\n [0.801004650369027, 0.8985052204085746, 0.22168693759127747],\n [0.801004650369027, 0.8985052204085746, 0.4409576855929274],\n [0.43073120809088394, 0.526894851885438, 0.5129361059766553]]\n for ret_coords in xtd.coords_iterator:\n ret_coords = ret_coords.tolist()\n break\n\n self.assertListEqual(ref_coords, ret_coords)\n\n", "id": "10717824", "language": "Python", 
"matching_score": 4.105085849761963, "max_stars_count": 1, "path": "tests/xtd_test.py" }, { "content": "# -*- coding:utf-8 -*-\n\"\"\"\nArcFile类单元测试.\n\"\"\"\n\nimport inspect\nimport os\nimport unittest\n\nfrom vaspy.matstudio import ArcFile\n\nfrom tests import path\n\n\nclass ArcTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = True\n\n def test_construction_query(self):\n \" Test ArcFile construction and query function. \"\n filename = path + \"/00-04.arc\" \n arc = ArcFile(filename)\n\n # Check query functions.\n self.assertEqual(arc.filename, filename)\n\n ref_lengths = [7.9398, 7.9398, 17.9398]\n self.assertListEqual(arc.lengths, ref_lengths)\n\n ref_angles = [90.0, 90.0, 90.0]\n self.assertListEqual(arc.angles, ref_angles)\n\n def test_coords_iterator(self):\n \" Make sure we can get coordinates correctly. \"\n filename = path + \"/00-04.arc\" \n arc = ArcFile(filename)\n\n for ret_coords in arc.coords_iterator:\n ret_coords = ret_coords.tolist()\n break\n ref_coords = [[0.404932144, 3.164028696, 1.984961526],\n [0.404932144, 3.164028696, 5.965015241],\n [0.404932144, 1.17906717, 0.0],\n [0.404932144, 1.17906717, 3.977019323],\n [0.404932144, 1.17906717, 7.910692688],\n [2.38989367, 1.17906717, 1.984961526],\n [2.38989367, 1.17906717, 5.965015241],\n [2.38989367, 3.164028696, 0.0],\n [2.38989367, 3.164028696, 3.977019323],\n [2.38989367, 3.164028696, 7.910692688],\n [4.374855196, 3.164028696, 1.984961526],\n [4.374855196, 3.164028696, 5.965015241],\n [4.374855196, 1.17906717, 0.0],\n [4.374855196, 1.17906717, 3.977019323],\n [4.374855196, 1.17906717, 7.910692688],\n [6.359816723, 1.17906717, 1.984961526],\n [6.359816723, 1.17906717, 5.965015241],\n [6.359816723, 3.164028696, 0.0],\n [6.359816723, 3.164028696, 3.977019323],\n [6.359816723, 3.164028696, 7.910692688],\n [0.404932144, 7.133951749, 1.984961526],\n [0.404932144, 7.133951749, 5.965015241],\n [0.404932144, 5.148990222, 0.0],\n [0.404932144, 5.148990222, 3.977019323],\n [0.404932144, 5.148990222, 7.910692688],\n [2.38989367, 5.148990222, 1.984961526],\n [2.38989367, 5.148990222, 5.965015241],\n [2.38989367, 7.133951749, 0.0],\n [2.38989367, 7.133951749, 3.977019323],\n [2.38989367, 7.133951749, 7.910692688],\n [4.374855196, 7.133951749, 1.984961526],\n [4.374855196, 7.133951749, 5.965015241],\n [4.374855196, 5.148990222, 0.0],\n [4.374855196, 5.148990222, 3.977019323],\n [4.374855196, 5.148990222, 7.910692688],\n [6.359816723, 5.148990222, 1.984961526],\n [6.359816723, 5.148990222, 5.965015241],\n [6.359816723, 7.133951749, 0.0],\n [6.359816723, 7.133951749, 3.977019323],\n [6.359816723, 7.133951749, 7.910692688],\n [3.419919646, 4.183439745, 9.201971154]]\n\n self.assertListEqual(ret_coords, ref_coords)\n\n def test_elements(self):\n \" Test query function elements(). 
\"\n filename = path + \"/00-04.arc\" \n arc = ArcFile(filename)\n\n ref_elements = ['Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt',\n 'Pt', 'Pt', 'Pt', 'Pt', 'Pt', 'O']\n\n ret_elements = arc.elements\n\n self.assertListEqual(ref_elements, ret_elements)\n\nif \"__main__\" == __name__:\n suite = unittest.TestLoader().loadTestsFromTestCase(ArcTest)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n", "id": "4103320", "language": "Python", "matching_score": 0.910462498664856, "max_stars_count": 1, "path": "tests/arc_test.py" }, { "content": "# -*- coding:utf-8 -*-\n'''\nInCar单元测试.\n'''\n\nimport inspect\nimport os\nimport unittest\n\nfrom vaspy.incar import InCar\nfrom tests import path\n\n\nclass InCarTest(unittest.TestCase):\n\n def setUp(self):\n # Create an InCar object.\n self.maxDiff = True\n\n def test_rdata(self):\n \" Test data line in INCAR can be read correctly. \"\n filename = path + \"/INCAR\"\n incar = InCar(filename)\n\n # Test integer parameter.\n ref_line = \"ISTART = 0 # 0 = new job, 1 = restart\"\n pnames, datas = incar.rdata(ref_line)\n self.assertListEqual(pnames, [\"ISTART\"])\n self.assertListEqual(datas, [\"0\"])\n\n # Test string parameter.\n ref_line = \"PREC = Normal # [Low/Medium/High/Accurate/Normal]\"\n pnames, datas = incar.rdata(ref_line)\n self.assertListEqual(pnames, [\"PREC\"])\n self.assertListEqual(datas, [\"Normal\"])\n \n # Test comment line.\n ref_line = \"! Electronic Structure\"\n result = incar.rdata(ref_line)\n self.assertIsNone(result)\n\n # Test multi-parameter line.\n ref_line = \"LHFCALC = .TRUE. ; HFSCREEN = 0.2 # HSE\"\n pnames, datas = incar.rdata(ref_line)\n self.assertListEqual(pnames, [\"LHFCALC\", \"HFSCREEN\"])\n self.assertListEqual(datas, [\".TRUE.\", \"0.2\"])\n\n def test_load(self):\n \" Test all data in INCAR can be loaded. \"\n filename = path + \"/INCAR\"\n incar = InCar(filename)\n\n ref_pnames = ['SYSTEM', 'ISTART', 'ISPIN', 'PREC', 'ENCUT',\n 'NELM', 'NELMIN', 'ISMEAR', 'SIGMA', 'LREAL',\n 'EDIFFG', 'ALGO', 'ISIF', 'NSW', 'IBRION', 'POTIM',\n 'ISYM', 'NWRITE', 'LCHARG', 'LWAVE', 'NCORE']\n\n ref_datas = ['per', '0', '2', 'Normal', '450', '400', '3',\n '1', '0.1', 'A', '-0.05', 'Fast', '2', '900',\n '1', '0.2', '0', '1', '.False.', '.False.', '4']\n\n for pname, data in zip(ref_pnames, ref_datas):\n self.assertEqual(getattr(incar, pname), data)\n\n def test_parameter_set(self):\n \" Test existed parameter can be set correctly. \"\n filename = path + \"/INCAR\"\n incar = InCar(filename)\n\n self.assertTrue(incar.ISIF, \"2\")\n incar.set(\"ISIF\", 3)\n self.assertTrue(incar.ISIF, \"3\")\n\n def test_parameter_add(self):\n \" Test new parameter can be added correctly. \"\n filename = path + \"/INCAR\"\n incar = InCar(filename)\n\n self.assertFalse(hasattr(incar, \"TEST_zjshao\"))\n incar.add(\"TEST_zjshao\", \"True\")\n self.assertTrue(incar.TEST_zjshao, \"True\")\n\n def test_parameter_del(self):\n \" Make sure we can remove parameters correctly. 
\"\n filename = path + \"/INCAR\"\n incar = InCar(filename)\n\n # Check before deletion.\n self.assertTrue(hasattr(incar, \"ISIF\"))\n self.assertTrue(\"ISIF\" in incar.pnames)\n\n pname, value = incar.pop(\"ISIF\")\n\n # Check after deletion.\n self.assertEqual(pname, \"ISIF\")\n self.assertEqual(value, \"2\")\n self.assertFalse(hasattr(incar, \"ISIF\"))\n self.assertFalse(\"ISIF\" in incar.pnames)\n\n def test_compare(self):\n \" Make sure we can compare two InCar objects correctly. \"\n # Two equal INCAR.\n filename1 = path + \"/INCAR\"\n filename2 = path + \"/INCAR2\"\n incar1 = InCar(filename1)\n incar2 = InCar(filename1)\n a_dict, b_dict = incar1.compare(incar2)\n\n self.assertDictEqual(a_dict, {})\n self.assertDictEqual(b_dict, {})\n\n # Different INCAR.\n incar1 = InCar(filename1)\n incar2 = InCar(filename2)\n a_dict, b_dict = incar1.compare(incar2)\n\n self.assertDictEqual(a_dict, {'ISMEAR': '1', 'LREAL': 'A'})\n self.assertDictEqual(b_dict, {'ISMEAR': '2', 'LREAL': ''})\n\n def test_eq(self):\n \" Test __eq__() function.\"\n # Two equal INCAR.\n filename1 = path + \"/INCAR\"\n filename2 = path + \"/INCAR2\"\n incar1 = InCar(filename1)\n incar2 = InCar(filename1)\n self.assertTrue(incar1 == incar2)\n\n # Different INCAR.\n incar1 = InCar(filename1)\n incar2 = InCar(filename2)\n self.assertFalse(incar1 == incar2)\n\n def test_ne(self):\n \" Test __ne__() function.\"\n # Two equal INCAR.\n filename1 = path + \"/INCAR\"\n filename2 = path + \"/INCAR2\"\n incar1 = InCar(filename1)\n incar2 = InCar(filename1)\n self.assertFalse(incar1 != incar2)\n\n # Different INCAR.\n incar1 = InCar(filename1)\n incar2 = InCar(filename2)\n self.assertTrue(incar1 != incar2)\n\n def test_tofile(self):\n \" Test INCAR content can be write to file. \"\n # NEED IMPLEMENTATIN\n pass\n\nif \"__main__\" == __name__: \n suite = unittest.TestLoader().loadTestsFromTestCase(InCarTest)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n", "id": "7766401", "language": "Python", "matching_score": 0.8406742811203003, "max_stars_count": 1, "path": "tests/incar_test.py" }, { "content": "# -*- coding:utf-8 -*-\n'''\nXdatCar单元测试\n'''\n\nimport unittest\n\nfrom vaspy.iter import XdatCar\nfrom tests import path\n\n\nclass XdatCarTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = True\n self.filename = path + '/XDATCAR'\n\n def test_construction(self):\n xdatcar = XdatCar(self.filename)\n\n def test_iterable(self):\n \" Make sure the xdatcar object is iterable.\"\n xdatcar = XdatCar(self.filename)\n generator = iter(xdatcar)\n item = next(generator)\n\n self.assertEqual(item.step, 1)\n\n ref_coord = [[ 0.48879659, 0.44702103, 0.44019084],\n [ 0.26154368, 0.57210582, 0.59668515],\n [ 0.24597513, 0.43606684, 0.52376893],\n [ 0.24574759, 0.56815233, 0.44240966],\n [ 0.50014572, 0.45350806, 0.59636402],\n [ 0.49367292, 0.60948493, 0.503559 ],\n [ 0.79700021, 0.50682229, 0.51311363],\n [ 0.28253148, 0.5235407 , 0.5177858 ],\n [ 0.49369403, 0.5136658 , 0.51609812],\n [ 0.68726824, 0.50910242, 0.52968761]]\n self.assertListEqual(item.coordinates.tolist(), ref_coord)\n\n # Test interfaces in AtomCo class.\n\n def test_cart2dir(self):\n \" Make sure we can convert direct coordinates to cartesian coordinates.\"\n xdatcar = XdatCar(self.filename)\n cart_coord = [1.35366921, 0.95761009, 8.09795]\n dir_coord = xdatcar.cart2dir(xdatcar.bases, cart_coord)\n self.assertListEqual(dir_coord.tolist(), [0.135366921,\n 0.09576100900000001,\n 0.8097950000000002])\n\n # Test 2x3 array.\n cart_coord = [[1.35366921, 0.95761009, 
8.09795],\n [1.35366921, 0.95761009, 8.09795]]\n dir_coord = xdatcar.cart2dir(xdatcar.bases, cart_coord)\n ref_coord = [[0.135366921, 0.09576100900000001, 0.8097950000000002],\n [0.135366921, 0.09576100900000001, 0.8097950000000002]]\n self.assertListEqual(dir_coord.tolist(), ref_coord)\n\n def test_dir2cart(self):\n \" Make sure we can convert cartesian to direct\"\n xdatcar = XdatCar(self.filename)\n dir_coord = [0.5, 0.5, 0.5]\n cart_coord = xdatcar.dir2cart(xdatcar.bases, dir_coord).tolist()\n self.assertListEqual(cart_coord, [5.0, 5.0, 5.0])\n\n dir_coord = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]\n cart_coord = xdatcar.dir2cart(xdatcar.bases, dir_coord).tolist()\n self.assertListEqual(cart_coord, [[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]])\n\nif \"__main__\" == __name__: \n suite = unittest.TestLoader().loadTestsFromTestCase(XdatCarTest)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n", "id": "12647429", "language": "Python", "matching_score": 3.4212992191314697, "max_stars_count": 1, "path": "tests/xdatcar_test.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\n\nfrom arc_test import ArcTest\nfrom incar_test import InCarTest\nfrom oszicar_test import OsziCarTest\nfrom outcar_test import OutCarTest\nfrom xsd_test import XsdTest\nfrom xtd_test import XtdTest\nfrom poscar_test import PosCarTest\nfrom xyzfile_test import XyzFileTest\nfrom cif_test import CifFileTest\nfrom ani_test import AniFileTest\nfrom xdatcar_test import XdatCarTest\n\ndef suite():\n suite = unittest.TestSuite([\n unittest.TestLoader().loadTestsFromTestCase(ArcTest),\n unittest.TestLoader().loadTestsFromTestCase(InCarTest),\n unittest.TestLoader().loadTestsFromTestCase(OsziCarTest),\n unittest.TestLoader().loadTestsFromTestCase(OutCarTest),\n unittest.TestLoader().loadTestsFromTestCase(XsdTest),\n unittest.TestLoader().loadTestsFromTestCase(XtdTest),\n unittest.TestLoader().loadTestsFromTestCase(PosCarTest),\n unittest.TestLoader().loadTestsFromTestCase(XyzFileTest),\n unittest.TestLoader().loadTestsFromTestCase(CifFileTest),\n unittest.TestLoader().loadTestsFromTestCase(AniFileTest),\n unittest.TestLoader().loadTestsFromTestCase(XdatCarTest),\n ])\n\n return suite\n\nif \"__main__\" == __name__:\n result = unittest.TextTestRunner(verbosity=2).run(suite())\n\n if result.errors or result.failures:\n raise ValueError(\"Get errors and failures.\")\n\n", "id": "9977564", "language": "Python", "matching_score": 2.2500038146972656, "max_stars_count": 1, "path": "tests/vaspy_test.py" }, { "content": "# -*- coding:utf-8 -*-\n'''\nAniFile单元测试\n'''\n\nimport unittest\n\nfrom vaspy.iter import AniFile\nfrom vaspy.atomco import XyzFile\nfrom tests import path\n\n\nclass AniFileTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = True\n self.filename = path + '/OUT.ANI'\n\n def test_construction(self):\n ani = AniFile(self.filename)\n\n def test_iterable(self):\n \" Make sure the ani object is iterable.\"\n ani = AniFile(self.filename)\n generator = iter(ani)\n xyz = next(generator)\n\n self.assertTrue(isinstance(xyz, XyzFile))\n self.assertListEqual(xyz.atom_types, [\"Pt\", \"C\", \"O\"])\n self.assertListEqual(xyz.atom_numbers, [40, 1, 1])\n\nif \"__main__\" == __name__: \n suite = unittest.TestLoader().loadTestsFromTestCase(AniFileTest)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n", "id": "7486765", "language": "Python", "matching_score": 3.5649497509002686, "max_stars_count": 1, "path": "tests/ani_test.py" }, { "content": "# -*- coding:utf-8 -*-\n'''\nCifFile单元测试\n'''\n\nimport 
unittest\n\nfrom vaspy.atomco import CifFile\nfrom tests import path\n\n\nclass CifFileTest(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = True\n\n def test_construction(self):\n filename = path + '/ceo2-111.cif'\n cif = CifFile(filename)\n\nif \"__main__\" == __name__: \n suite = unittest.TestLoader().loadTestsFromTestCase(CifFileTest)\n unittest.TextTestRunner(verbosity=2).run(suite) \n\n", "id": "190989", "language": "Python", "matching_score": 0.8631108403205872, "max_stars_count": 1, "path": "tests/cif_test.py" }, { "content": "import logging\nimport sys\nimport os\n\nif sys.version > \"3\":\n PY2 = False\nelse:\n PY2 = True\n\n\n__version__ = '0.8.10'\n__all__ = ['atomco', 'electro', 'iter', 'matstudio', 'plotter', 'incar']\n\n\n# Initialize logger.\nlogger = logging.getLogger(\"vaspy\")\nlogger.setLevel(logging.INFO)\nconsole_hdlr = logging.StreamHandler()\nconsole_hdlr.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(name)s %(levelname)-8s %(message)s\")\nconsole_hdlr.setFormatter(formatter)\nlogger.addHandler(console_hdlr)\n\n\nclass VasPy(object):\n def __init__(self, filename):\n \"\"\"\n Base class to be inherited by all classes in VASPy.\n \"\"\"\n # Check filename validity.\n if not os.path.exists(filename):\n raise ValueError(\"{} not exist.\".format(filename))\n\n self.filename = filename\n\n\nclass LazyProperty(object):\n \"\"\"\n Descriptor for lazy property.\n \"\"\"\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, owner):\n val = self.func(instance)\n setattr(instance, self.func.__name__, val)\n return val\n\n", "id": "4760352", "language": "Python", "matching_score": 0.1542820781469345, "max_stars_count": 1, "path": "vaspy/__init__.py" } ]
1.580233
oheneralov
[ { "content": "#!/usr/bin/python3\n\nimport time\nimport os\nfrom flask import Flask, request, redirect, jsonify\nimport urllib\nfrom werkzeug.utils import secure_filename\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom imagenet_utils import decode_predictions\nfrom keras.preprocessing import image\nfrom resnet50architecture import ResNet50\n\n\napp = Flask(__name__)\n\nUPLOAD_FOLDER = './uploads'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\napp.secret_key = \"secret key\"\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\ncustom_resnet_model = ResNet50(include_top=True, weightsPath ='custom_resnet_weights.h5')\n\n\n\n\ndef allowed_file(filename):\n\treturn '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/file-upload', methods=['POST'])\ndef upload_file():\n\t# check if the post request has the file part\n\tif 'file' not in request.files:\n\t\tresp = jsonify({'message' : 'No file part in the request'})\n\t\tresp.status_code = 400\n\t\treturn resp\n\tfile = request.files['file']\n\tif file.filename == '':\n\t\tresp = jsonify({'message' : 'No file selected for uploading'})\n\t\tresp.status_code = 400\n\t\treturn resp\n\tif file and allowed_file(file.filename):\n\t\tfilename = secure_filename(file.filename)\n\t\tfilePath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n\t\tfile.save(filePath)\n\t\timg = cv2.imread(filePath)\n\t\timg = cv2.resize(img,(224,224))\n\t\timg = np.reshape(img,[1,224,224,3])\n\t\tpreds = custom_resnet_model.predict(img)\n\t\tresult = decode_predictions(preds, top = 1)\n\t\tobject_name = result[0][0][1]\n\t\tresp = jsonify({'message' : 'File successfully uploaded', 'object': {'object_name': object_name}})\n\t\tresp.status_code = 201\n\t\treturn resp\n\telse:\n\t\tresp = jsonify({'message' : 'Allowed file types are png, jpg, jpeg, gif'})\n\t\tresp.status_code = 400\n\t\treturn resp\n\n\n\n\n@app.route('/')\ndef hello():\n return 'Hello!'\n\n\napp.run(debug = False, threaded = False, port=5000, host='0.0.0.0')\n", "id": "8306051", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "cnn/app.py" } ]
0
Eurkon
[ { "content": "# -*- coding: utf-8 -*-\r\n# @Author : Eurkon\r\n# @Date : 2021/6/5 10:19\r\n\r\nimport requests\r\n\r\n\r\ndef baidu_tongji(params):\r\n \"\"\"重定向请求百度统计,解决跨域问题\r\n\r\n Args:\r\n params (dict): {site_id: 网站id, access_token: token, ...}\r\n\r\n Returns:\r\n json: 百度统计返回的网页统计数据\r\n \"\"\"\r\n url = 'https://openapi.baidu.com/rest/2.0/tongji/report/getData?'\r\n req = requests.post(url=url, data=params)\r\n data = req.json()\r\n\r\n return data", "id": "7192018", "language": "Python", "matching_score": 1.4301995038986206, "max_stars_count": 5, "path": "api/baidu/api/tongji.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/9 17:13\r\n\r\nfrom api.baidu.api.tongji import baidu_tongji\r\nfrom api.baidu.api.translate import baidu_translate\r\n", "id": "5135351", "language": "Python", "matching_score": 0.5731872320175171, "max_stars_count": 5, "path": "api/baidu/api/api.py" }, { "content": "# -*- coding: utf-8 -*-\n# @Author : Eurkon\n# @Date : 2021/11/9 9:44\n\nimport requests\nimport re\nimport execjs\n\nlang_dict = {'中文': 'zh', '日语': 'jp', '日语假名': 'jpka', '泰语': 'th', '法语': 'fra', '英语': 'en', '西班牙语': 'spa',\n '韩语': 'kor',\n '土耳其语': 'tr', '越南语': 'vie', '马来语': 'ms', '德语': 'de', '俄语': 'ru', '伊朗语': 'ir', '阿拉伯语': 'ara',\n '爱沙尼亚语': 'est',\n '白俄罗斯语': 'be', '保加利亚语': 'bul', '印地语': 'hi', '冰岛语': 'is', '波兰语': 'pl', '波斯语': 'fa', '丹麦语': 'dan',\n '菲律宾语': 'tl',\n '芬兰语': 'fin', '荷兰语': 'nl', '加泰罗尼亚语': 'ca', '捷克语': 'cs', '克罗地亚语': 'hr', '拉脱维亚语': 'lv', '立陶宛语': 'lt',\n '罗马尼亚语': 'rom',\n '南非语': 'af', '挪威语': 'no', '巴西语': 'pt_BR', '葡萄牙语': 'pt', '瑞典语': 'swe', '塞尔维亚语': 'sr', '世界语': 'eo',\n '斯洛伐克语': 'sk',\n '斯洛文尼亚语': 'slo', '斯瓦希里语': 'sw', '乌克兰语': 'uk', '希伯来语': 'iw', '希腊语': 'el', '匈牙利语': 'hu', '亚美尼亚语': 'hy',\n '意大利语': 'it',\n '印尼语': 'id', '阿尔巴尼亚语': 'sq', '阿姆哈拉语': 'am', '阿萨姆语': 'as', '阿塞拜疆语': 'az', '巴斯克语': 'eu', '孟加拉语': 'bn',\n '波斯尼亚语': 'bs',\n '加利西亚语': 'gl', '格鲁吉亚语': 'ka', '古吉拉特语': 'gu', '豪萨语': 'ha', '伊博语': 'ig', '因纽特语': 'iu', '爱尔兰语': 'ga',\n '祖鲁语': 'zu',\n '卡纳达语': 'kn', '哈萨克语': 'kk', '吉尔吉斯语': 'ky', '卢森堡语': 'lb', '马其顿语': 'mk', '马耳他语': 'mt', '毛利语': 'mi',\n '马拉提语': 'mr',\n '尼泊尔语': 'ne', '奥利亚语': 'or', '旁遮普语': 'pa', '凯楚亚语': 'qu', '塞茨瓦纳语': 'tn', '僧加罗语': 'si', '泰米尔语': 'ta',\n '塔塔尔语': 'tt',\n '泰卢固语': 'te', '乌尔都语': 'ur', '乌兹别克语': 'uz', '威尔士语': 'cy', '约鲁巴语': 'yo', '粤语': 'yue', '文言文': 'wyw',\n '中文繁体': 'cht'}\n\nurl = 'https://fanyi.baidu.com/v2transapi'\nheaders = {\n 'Referer': 'https://fanyi.baidu.com/?aldtype=16047',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'\n}\n\n\ndef baidu_translate(params):\n \"\"\"百度翻译\n\n Args:\n params (dict): {from: 源语言, to: 翻译语言, content: 翻译内容}\n\n Returns:\n json: {result: 翻译后的内容}\n \"\"\"\n # 多次请求保证获取 token,并用此 token 进行翻译\n session = requests.Session()\n session.headers = headers\n\n session.get(url='https://fanyi.baidu.com/', headers=headers)\n html = session.get(url='https://fanyi.baidu.com/', headers=headers).text\n token = re.findall(r\"token: '(.*?)'\", html)[0]\n gtk = re.findall(r\"window.gtk = '(.*?)';\", html)[0]\n\n with open('../js/translate.js', 'r', encoding='UTF-8') as file:\n js_text = file.read()\n # 编译加载js字符串\n js = execjs.compile(js_text)\n sign = js.call(\"e\", params['content'], gtk)\n\n params = {\n 'from': lang_dict[params['from']],\n 'to': lang_dict[params['to']],\n 'query': params['content'],\n 'simple_means_flag': '3',\n 'sign': sign,\n 'token': token,\n 'domain': 'common'\n }\n\n response = session.get(url=url, headers=headers, 
params=params)\n message = response.json()\n return {'result': message['trans_result']['data'][0]['dst']}\n\n\nif __name__ == '__main__':\n print(baidu_translate({'from': '英语', 'to': '中文', 'content': 'Hello World'}))\n", "id": "2813565", "language": "Python", "matching_score": 7.801943778991699, "max_stars_count": 5, "path": "api/baidu/api/translate.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/10 9:56\r\n\r\nimport execjs\r\nimport requests\r\n\r\nlang_dict = {'中文': 'zh-CN', '阿尔巴尼亚语': 'sq', '阿拉伯语': 'ar',\r\n '阿姆哈拉语': 'am', '阿塞拜疆语': 'az', '爱尔兰语': 'ga',\r\n '爱沙尼亚语': 'et', '巴斯克语': 'eu', '白俄罗斯语': 'be',\r\n '保加利亚语': 'bg', '冰岛语': 'is', '波兰语': 'pl', '波斯尼亚语': 'bs',\r\n '波斯语': 'fa', '布尔语': 'af', '丹麦语': 'da', '德语': 'de', '俄语': 'ru', '法语': 'fr',\r\n '菲律宾语': 'tl', '芬兰语': 'fi', '弗里西语': 'fy', '高棉语': 'km', '格鲁吉亚语': 'ka',\r\n '古吉拉特语': 'gu', '哈萨克语': 'kk', '海地克里奥尔语': 'ht', '韩语': 'ko',\r\n '豪萨语': 'ha', '荷兰语': 'nl', '吉尔吉斯语': 'ky', '加利西亚语': 'gl', '加泰罗尼亚语': 'ca',\r\n '捷克语': 'cs', '卡纳达语': 'kn', '科西嘉语': 'co', '克罗地亚语': 'hr',\r\n '库尔德语': 'ku', '拉丁语': 'la', '拉脱维亚语': 'lv', '老挝语': 'lo', '立陶宛语': 'lt',\r\n '卢森堡语': 'lb', '罗马尼亚语': 'ro', '马尔加什语': 'mg', '马耳他语': 'mt',\r\n '马拉地语': 'mr', '马拉雅拉姆语': 'mf', '马来语': 'ms', '马其顿语': 'mk',\r\n '毛利语': 'mi', '蒙古语': 'mn', '孟加拉语': 'bn', '缅甸语': 'my', '苗语': 'hmn', '南非克萨语': 'xh', '南非祖鲁语': 'zu',\r\n '尼泊尔语': 'ne', '挪威语': 'no', '旁遮普语': 'pa', '葡萄牙语': 'pt', '普什图语': 'ps',\r\n '齐切瓦语': 'ny', '日语': 'ja', '瑞典语': 'sv', '萨摩亚语': 'sm', '塞尔维亚语': 'sr',\r\n '赛所托语': 'st', '僧伽罗语': 'si', '世界语': 'eo', '斯洛伐克语': 'sk', '斯洛文尼亚语': 'sl',\r\n '斯瓦希里语': 'sw', '苏格兰盖尔语': 'gd', '宿务语': 'ceb', '索马里语': 'so', '塔吉克语': 'tg', '泰卢固语': 'te',\r\n '泰米尔语': 'ta', '泰语': 'th', '土耳其语': 'tr', '威尔士语': 'cy', '乌尔都语': 'ur',\r\n '乌克兰语': 'uk', '乌兹别克语': 'uz', '西班牙语': 'es', '希伯来语': 'rw', '希腊语': 'el',\r\n '夏威夷语': 'haw', '信德语': 'sd', '匈牙利语': 'hu', '修纳语': 'sn',\r\n '亚美尼亚语': 'hy', '伊博语': 'ig', '意大利语': 'it', '意第绪语': 'yi', '印地语': 'hi',\r\n '印尼巽他': 'su', '印尼语': 'id', '印尼爪哇语': 'jw', '英语': 'en', '约鲁巴语': 'yo', '越南语': 'vi', '中文繁体': 'zh-TW'}\r\n\r\n\r\ndef google_translate(params):\r\n \"\"\"谷歌翻译\r\n\r\n Args:\r\n params (dict): {from: 源语言, to: 翻译语言, content: 翻译内容}\r\n\r\n Returns:\r\n json: {result: 翻译后的内容}\r\n \"\"\"\r\n\r\n with open('../js/translate.js', 'r', encoding='UTF-8') as file:\r\n js_text = file.read()\r\n # 编译加载js字符串\r\n js = execjs.compile(js_text)\r\n tk = js.call(\"TL\", str(params['content']))\r\n\r\n if len(params['content']) > 4891:\r\n return {'error': '内容过长'}\r\n else:\r\n url = \"http://translate.google.cn/translate_a/single?client=t\" \\\r\n \"&sl={}&tl={}&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca\" \\\r\n \"&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&clearbtn=1&otf=1&pc=1\" \\\r\n \"&srcrom=0&ssel=0&tsel=0&kc=2&tk={}&q={}\".format(lang_dict[params['from']], lang_dict[params['to']], tk,\r\n params['content'])\r\n headers = {\r\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\r\n 'referer': 'https://translate.google.cn/',\r\n 'authority': 'translate.google.cn'\r\n }\r\n response = requests.get(url=url, headers=headers)\r\n\r\n end = response.text.find(\"\\\",\")\r\n if end > 4:\r\n message = response.text[4:end]\r\n else:\r\n return {'error': '翻译失败'}\r\n return {'result': message}\r\n\r\n\r\nif __name__ == '__main__':\r\n print(google_translate({'from': '英语', 'to': '中文', 'content': 'Hello World'}))\r\n", "id": "5370708", "language": "Python", "matching_score": 1.5304179191589355, "max_stars_count": 5, "path": 
"api/google/api/translate.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/10 10:41\r\n\r\nfrom api.google.api.translate import google_translate", "id": "8843122", "language": "Python", "matching_score": 0.3158349096775055, "max_stars_count": 5, "path": "api/google/api/api.py" }, { "content": "# -*- coding: utf-8 -*-\n# @Author : Eurkon\n# @Date : 2021/11/10 16:32\n\nimport requests\nfrom hashlib import md5\nimport time\nimport random\n\n# 请求地址\nurl = \"http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule\"\n\nappVersion = \"5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36\"\n\nheaders = {\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Content-Length\": \"244\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Cookie\": \"OUTFOX_SEARCH_USER_ID=-1506602845@10.169.0.82; JSESSIONID=aaaUggpd8kfhja1AIJYpx; OUTFOX_SEARCH_USER_ID_NCOO=108436537.92676207; ___rl__test__cookies=1597502296408\",\n \"Host\": \"fanyi.youdao.com\",\n \"Origin\": \"http://fanyi.youdao.com\",\n \"Referer\": \"http://fanyi.youdao.com/\",\n \"user-agent\": appVersion,\n \"X-Requested-With\": \"XMLHttpRequest\",\n}\n\n\ndef youdao_translate(content):\n \"\"\"有道翻译\n\n Args:\n params (dict): {content: 翻译内容}\n\n Returns:\n json: {result: 翻译后的内容}\n \"\"\"\n word = content['content']\n bv = md5(appVersion.encode()).hexdigest()\n lts = str(int(time.time() * 1000))\n salt = lts + str(random.randint(0, 9))\n sign = md5((\"fanyideskweb\" + word + salt + \"]BjuETDhU)zqSxf-=B#7m\").encode()).hexdigest()\n params = {\n \"i\": word,\n \"from\": \"AUTO\",\n \"to\": \"AUTO\",\n \"smartresult\": \"dict\",\n \"client\": \"fanyideskweb\",\n \"salt\": salt,\n \"sign\": sign,\n \"lts\": lts,\n \"bv\": bv,\n \"doctype\": \"json\",\n \"version\": \"2.1\",\n \"keyfrom\": \"fanyi.web\",\n \"action\": \"FY_BY_REALTlME\"\n }\n\n response = requests.post(url=url, headers=headers, data=params)\n result = response.json()\n return {'result': result[\"translateResult\"][0][0]['tgt']}\n\n\nif __name__ == \"__main__\":\n print(youdao_translate({'content': '你好'}))\n", "id": "10159690", "language": "Python", "matching_score": 2.021482467651367, "max_stars_count": 5, "path": "api/youdao/api/translate.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/3 16:55\r\n\r\nimport json\r\nfrom urllib import parse\r\nfrom urllib.parse import urlparse\r\nfrom http.server import BaseHTTPRequestHandler\r\n\r\nfrom api.baidu.api.api import *\r\nfrom api.google.api.api import *\r\nfrom api.tools.api.api import *\r\nfrom api.weibo.api.api import *\r\nfrom api.youdao.api.api import *\r\n\r\n\r\nclass handler(BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n try:\r\n params = dict(parse.parse_qsl(urlparse(self.path).query))\r\n if 'api' in params:\r\n api = str(params['api'])\r\n del params['api']\r\n data = eval(\"{0}\".format(api))(params)\r\n else:\r\n data = {'error': '请输入API'}\r\n except Exception as e:\r\n data = {'error': str(e)}\r\n\r\n self.send_response(200)\r\n self.send_header('Access-Control-Allow-Origin', '*')\r\n self.send_header('Cache-Control', 'no-cache')\r\n if isinstance(data, bytes):\r\n res = data\r\n elif isinstance(data, dict) or isinstance(data, list):\r\n res = json.dumps(data).encode('utf-8')\r\n else:\r\n res = 
str(data).encode('urf-8')\r\n\r\n self.send_header('Content-type', 'text/plain')\r\n self.end_headers()\r\n self.wfile.write(res)\r\n return\r\n", "id": "2281284", "language": "Python", "matching_score": 1.164612054824829, "max_stars_count": 5, "path": "api/index.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/15 11:31\r\n\r\nfrom api.index import handler\r\nimport http\r\n\r\nif __name__ == '__main__':\r\n\r\n try:\r\n server = http.server.HTTPServer(('localhost', 8888), handler)\r\n print('Started http server')\r\n server.serve_forever()\r\n except KeyboardInterrupt:\r\n print('^C received, shutting down server')\r\n server.socket.close()", "id": "9435643", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "init.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/15 9:44\r\n\r\nimport io\r\nimport os\r\n\r\n\r\ndef tools_qrcode(params):\r\n \"\"\"生成二维码\r\n\r\n Args:\r\n params (dict): {content: 内容}\r\n\r\n Returns:\r\n bytes: 字节流\r\n \"\"\"\r\n import qrcode\r\n\r\n img = qrcode.make(str(params['content']))\r\n # 创建一个字节流管道\r\n img_bytes = io.BytesIO()\r\n # 将图片数据存入字节流管道, format可以按照具体文件的格式填写\r\n img.save(img_bytes, format=\"PNG\")\r\n # 从字节流管道中获取二进制\r\n image_bytes = img_bytes.getvalue()\r\n return image_bytes\r\n\r\n\r\ndef tools_qrcode_color(params):\r\n \"\"\"生成二维码\r\n\r\n Args:\r\n params (dict): {words: 内容(不能是中文), picture: 背景, colorized: 是否为彩色}\r\n\r\n Returns:\r\n str: 图片地址\r\n \"\"\"\r\n from MyQR import myqr\r\n\r\n words = params['words']\r\n name = 'qrcode.png'\r\n path = os.path.dirname(os.path.dirname(__file__)) + '/img/'\r\n picture = None\r\n colorized = False\r\n\r\n if 'picture' in params:\r\n picture = str(params['picture'])\r\n if picture[-4:] == '.gif':\r\n name = 'qrcode.gif'\r\n\r\n if 'colorized' in params and str(params['colorized']).lower() == 'true':\r\n colorized = True\r\n\r\n myqr.run(\r\n words=words,\r\n picture=picture,\r\n colorized=colorized,\r\n save_name=name,\r\n save_dir=path\r\n )\r\n\r\n return path + name\r\n\r\n\r\nif __name__ == '__main__':\r\n # print(qrcode({'words': 'https://blog.eurkon.com/',\r\n # 'picture': '../img/background.gif',\r\n # 'colorized': True}))\r\n print(tools_qrcode({'content': 'https://blog.eurkon.com/'}))\r\n\r\n\r\n", "id": "5810010", "language": "Python", "matching_score": 1.4978911876678467, "max_stars_count": 5, "path": "api/tools/api/qrcode.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/15 9:44\r\n\r\nfrom api.tools.api.qrcode import tools_qrcode", "id": "2175240", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "api/tools/api/api.py" }, { "content": "# -*- coding: utf-8 -*-\r\n# @Author : Eurkon\r\n# @Date : 2021/6/5 10:16\r\n\r\nimport json\r\nimport requests\r\n\r\n\r\ndef weibo_top(params):\r\n \"\"\"微博热搜\r\n\r\n Args:\r\n params (dict): {}\r\n\r\n Returns:\r\n json: {title: 标题, url: 地址, num: 热度数值, hot: 热搜等级}\r\n \"\"\"\r\n\r\n data = []\r\n response = requests.get(\"https://weibo.com/ajax/side/hotSearch\")\r\n data_json = response.json()['data']['realtime']\r\n jyzy = {\r\n '电影': '影',\r\n '剧集': '剧',\r\n '综艺': '综',\r\n '音乐': '音'\r\n }\r\n\r\n for data_item in data_json:\r\n hot = ''\r\n # 如果是广告,则不添加\r\n if 'is_ad' in data_item:\r\n continue\r\n if 'flag_desc' in data_item:\r\n hot = jyzy.get(data_item['flag_desc'])\r\n if 'is_boom' in data_item:\r\n hot = '爆'\r\n if 'is_hot' in data_item:\r\n hot = '热'\r\n if 'is_fei' in data_item:\r\n hot = '沸'\r\n if 
'is_new' in data_item:\r\n hot = '新'\r\n\r\n dic = {\r\n 'title': data_item['note'],\r\n 'url': 'https://s.weibo.com/weibo?q=%23' + data_item['word'] + '%23',\r\n 'num': data_item['num'],\r\n 'hot': hot\r\n }\r\n data.append(dic)\r\n\r\n return data\r\n\r\n\r\nif __name__ == '__main__':\r\n print(weibo_top({}))\r\n", "id": "8771798", "language": "Python", "matching_score": 1.6096291542053223, "max_stars_count": 5, "path": "api/weibo/api/top.py" }, { "content": "# -*- coding: utf-8 -*- \r\n# @Author : Eurkon\r\n# @Date : 2021/6/9 17:13\r\n\r\nfrom api.weibo.api.top import weibo_top", "id": "19682", "language": "Python", "matching_score": 1.132767677307129, "max_stars_count": 5, "path": "api/weibo/api/api.py" } ]
1.297406
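The youdao_translate entry in the row above builds its request signature from an md5 of the browser version string, a millisecond timestamp, a one-digit random salt, and a fixed key. Below is a minimal standalone sketch of just that signing step; the constants are copied from the file above, while the helper name youdao_sign is introduced here for illustration and is not part of the repo.

import random
import time
from hashlib import md5

# Browser version string and signing key copied verbatim from the youdao_translate entry above.
APP_VERSION = "5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36"
SIGN_KEY = "]BjuETDhU)zqSxf-=B#7m"

def youdao_sign(word):
    """Return the bv/lts/salt/sign form fields used by the fanyi.youdao.com request."""
    bv = md5(APP_VERSION.encode()).hexdigest()    # hash of the version string
    lts = str(int(time.time() * 1000))            # millisecond timestamp
    salt = lts + str(random.randint(0, 9))        # timestamp plus one random digit
    sign = md5(("fanyideskweb" + word + salt + SIGN_KEY).encode()).hexdigest()
    return {"bv": bv, "lts": lts, "salt": salt, "sign": sign}

if __name__ == "__main__":
    # Usage: produces the four signed fields that the POST body in youdao_translate expects.
    print(youdao_sign("Hello World"))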
alfayedtch
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 25 20:22:42 2020\r\n\r\n@author: <NAME>\r\n\"\"\"\r\nimport tkinter\r\nimport sys,pygame, pygame.mixer\r\nfrom tkinter import filedialog\r\nfrom pygame.locals import *\r\nfrom Detection import detectionim \r\n\r\n\r\npygame.init()\r\n\r\ntaille = width, height = 620,480\r\n\r\n\r\n\r\nscreen = pygame.display.set_mode(taille)\r\n\"\"\"les boutons\"\"\"\r\nbtnimage = pygame.image.load('btnimage.png')\r\nbtnimage = pygame.transform.scale(btnimage, (200,53))\r\nbtnvideo = pygame.image.load('btnvideo.png')\r\nbtnvideo = pygame.transform.scale(btnvideo, (200,53))\r\n\r\n\"\"\"les panneaux\"\"\"\r\nstop = pygame.image.load('stop.png')\r\nstop = pygame.transform.scale(stop, (100,100))\r\nAB25 = pygame.image.load('AB25.png')\r\nAB25 = pygame.transform.scale(AB25, (100,100))\r\ninterdit = pygame.image.load('interdit.png')\r\ninterdit = pygame.transform.scale(interdit, (100,100))\r\nceder = pygame.image.load('ceder.png')\r\nceder = pygame.transform.scale(ceder, (100,100))\r\nson = pygame.mixer.Sound('occ.wav')\r\nson.play()\r\n\r\nscreen.blit(btnimage,(200,200))\r\nscreen.blit(btnvideo,(200,300))\r\nscreen.blit(stop,(0,0))\r\nscreen.blit(AB25,(520,380))\r\nscreen.blit(interdit,(0,380))\r\nscreen.blit(ceder,(520,0))\r\n\r\npygame.display.flip()\r\n\r\n\r\nwhile 1:\r\n mx,my = pygame.mouse.get_pos()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif (event.type == MOUSEBUTTONDOWN and mx>=200 and mx<=400 and my >= 200 and my <=253):\r\n root = tkinter.Tk()\r\n root.wm_withdraw()\r\n try:\r\n filename = filedialog.askopenfilename(filetypes =((\"Image\", \"*.png .jpg\"),(\"All Files\",\"*.*\")))\r\n except:\r\n print(\"choisir un bon fichier\")\r\n if str(filename) !=\"\":\r\n print(\"le fichier est :\",filename)\r\n \r\n elif (event.type == MOUSEBUTTONDOWN and mx>=200 and mx<=400 and my >= 300 and my <=353):\r\n print(\"Charger une video\")\r\n \r\n", "id": "5551192", "language": "Python", "matching_score": 4.410179138183594, "max_stars_count": 0, "path": "upload_file_with_tkinter_and_pygame.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 25 20:22:42 2020\r\n\r\n@author: <NAME>\r\n\"\"\"\r\nimport tkinter\r\nimport sys,pygame, pygame.mixer\r\nfrom tkinter import filedialog\r\nfrom pygame.locals import *\r\nfrom yolov3_image import detectionim\r\nfrom yolov3_video import yolov3_video\r\nfrom yolov3_camera import yolov3_webcam\r\nfrom recon import web\r\n\r\npygame.init()\r\n\r\ntaille = width, height = 620,480\r\n\r\n\r\n\r\nscreen = pygame.display.set_mode(taille)\r\n\"\"\"les boutons\"\"\"\r\nbtnimage = pygame.image.load('btnimage.png')\r\nbtnimage = pygame.transform.scale(btnimage, (200,53))\r\nbtnvideo = pygame.image.load('btnvideo.png')\r\nbtnvideo = pygame.transform.scale(btnvideo, (200,53))\r\nbtnwebcam = pygame.image.load('btnWeb.png')\r\nbtnwebcam = pygame.transform.scale(btnwebcam, (200,53))\r\n\r\n\r\n\r\nscreen.blit(btnimage,(200,200))\r\nscreen.blit(btnvideo,(200,300))\r\nscreen.blit(btnwebcam,(200,400))\r\n\r\n\r\npygame.display.flip()\r\n\r\nnew = web()\r\nwhile 1:\r\n mx,my = pygame.mouse.get_pos()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n try:\r\n pygame.quit()\r\n sys.exit(0)\r\n except Exception as e:\r\n raise e\r\n elif (event.type == MOUSEBUTTONDOWN and mx>=200 and mx<=400 and my >= 200 and my <=253):\r\n root = tkinter.Tk()\r\n root.wm_withdraw()\r\n try:\r\n filename = filedialog.askopenfilename(filetypes 
=((\"Image\", \"*.png .jpg\"),(\"All Files\",\"*.*\")))\r\n except:\r\n print(\"choisir un bon fichier\")\r\n if str(filename) !=\"\":\r\n detectionim(str(filename))\r\n \r\n elif (event.type == MOUSEBUTTONDOWN and mx>=200 and mx<=400 and my >= 300 and my <=353):\r\n root = tkinter.Tk()\r\n root.wm_withdraw()\r\n try:\r\n filename = filedialog.askopenfilename(filetypes =((\"Video\", \"*.mp4\"),(\"All Files\",\"*.*\")))\r\n except:\r\n print(\"choisir un bon fichier\")\r\n if str(filename) !=\"\":\r\n yolov3_video(str(filename))\r\n elif (event.type == MOUSEBUTTONDOWN and mx>=200 and mx<=400 and my >= 400 and my <=453):\r\n try:\r\n new.start()\r\n except Exception as E:\r\n raise E\r\n print(\"slt\")\r\n \r\n", "id": "10944233", "language": "Python", "matching_score": 2.0763349533081055, "max_stars_count": 0, "path": "code/fenetre.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 4 18:28:28 2021\r\n\r\n@author: <NAME>\r\n\"\"\"\r\nfrom yolov3_camera import yolov3_webcam\r\n\r\nclass web:\r\n def __init__(self):\r\n self.moi = True\r\n \r\n def start(self):\r\n self.moi = True\r\n while self.moi:\r\n yolov3_webcam(self)\r\n \r\n def stop(self):\r\n self.moi = False\r\n ", "id": "8076114", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "code/recon.py" }, { "content": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\n\n# Create your views here.\n\ndef index(request):\n data = {'nom':'TCHAGNAO','prenom':'al-fayed', 'logiciel':['word','excel','powerPoint'],'age':19 }\n return HttpResponse(loader.get_template('index.html').render(data))", "id": "2825175", "language": "Python", "matching_score": 0.14799150824546814, "max_stars_count": 0, "path": "vue/views.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 10 00:47:36 2020\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef detectionim(im):\r\n path_to_weights = '../utils/weights/yolov3_ts_train_11000.weights'\r\n path_to_cfg = '../utils/cfg/yolov3_ts_test.cfg'\r\n \r\n \r\n net = cv2.dnn.readNet(path_to_weights,path_to_cfg)\r\n classes=[]\r\n \r\n with open('../utils/names/ts_data.names','r') as f:\r\n classes = f.read().splitlines()\r\n \r\n \r\n img = cv2.imread(im)\r\n height,width,_ = img.shape\r\n \r\n blob = cv2.dnn.blobFromImage(img,1/255,(416,416),(0,0,0),swapRB=True,crop=False)\r\n net.setInput(blob)\r\n output_layers_names = net.getUnconnectedOutLayersNames()\r\n layersOutputs = net.forward(output_layers_names)\r\n \r\n \r\n boxes=[]\r\n confidences = []\r\n class_ids = []\r\n \r\n for output in layersOutputs:\r\n for detection in output:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0.5:\r\n center_x = int(detection[0]*width)\r\n center_y = int(detection[1]*height)\r\n w = int(detection[2]*width)\r\n h = int(detection[3]*height)\r\n \r\n x = int(center_x - w/2)\r\n y = int(center_y - h/2)\r\n \r\n boxes.append([x, y, w, h])\r\n confidences.append((float(confidence)))\r\n class_ids.append(class_id)\r\n \r\n print(len(boxes))\r\n indexes = cv2.dnn.NMSBoxes(boxes,confidences,0.5,0.4)\r\n print(indexes.flatten())\r\n print(len(indexes.flatten()) )\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n colors =np.random.uniform(0, 255, size=(len(boxes),3))\r\n for i in indexes.flatten():\r\n x, y, w, h = boxes[i]\r\n label = str(classes[class_ids[i]])\r\n confidence = str(round(confidences[i],2))\r\n color = 
colors[i]\r\n cv2.rectangle(img,(x,y), (x+w, y+h), color, 2)\r\n cv2.putText(img, label +\" \"+ confidence, (x,y+20), font, 2,(255,255,255),2)\r\n \r\n \r\n \r\n cv2.imshow('image',img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()", "id": "12463446", "language": "Python", "matching_score": 2.1264402866363525, "max_stars_count": 0, "path": "code/yolov3_image.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 4 13:40:24 2021\r\n\r\n@author: <NAME>\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n# Create a VideoCapture object and read from input file\r\n# If the input is the camera, pass 0 instead of the video file name\r\ndef lecture(path):\r\n # path=path[:-4]+'-result'+path[-4:]\r\n print(str(path))\r\n cap = cv2.VideoCapture(str(path))\r\n \r\n # Check if camera opened successfully\r\n if (cap.isOpened()== False): \r\n print(\"Error opening video stream or file\")\r\n \r\n # Read until video is completed\r\n while(cap.isOpened()):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n if ret == True:\r\n \r\n # Display the resulting frame\r\n cv2.imshow('Frame',frame)\r\n \r\n # Press Q on keyboard to exit\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break\r\n \r\n # Break the loop\r\n else: \r\n break\r\n \r\n # When everything done, release the video capture object\r\n cap.release()\r\n \r\n # Closes all the frames\r\n cv2.destroyAllWindows()\r\n", "id": "10582178", "language": "Python", "matching_score": 1.4493225812911987, "max_stars_count": 0, "path": "code/lecture.py" } ]
1.762829
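The yolov3_image.py entry in the row above filters network outputs by confidence and then applies non-maximum suppression before drawing boxes. The full detectionim() pipeline needs the weights and cfg files referenced there; the sketch below isolates only the NMS step with synthetic placeholder boxes, using the same thresholds as the original.

import cv2
import numpy as np

# Synthetic (x, y, w, h) boxes and confidences; placeholders, not real network output.
boxes = [[50, 50, 100, 80], [52, 48, 98, 82], [300, 200, 60, 60]]
confidences = [0.90, 0.60, 0.75]

# Same thresholds as detectionim() above: score threshold 0.5, NMS overlap threshold 0.4.
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for i in np.array(indexes).flatten():
    x, y, w, h = boxes[i]
    # The two overlapping boxes collapse to the higher-confidence one; the distant box is kept.
    print("kept box", i, "at", (x, y, w, h), "confidence", confidences[i])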
pondruska
[ { "content": "from typing import Tuple\n\nimport numpy as np\nfrom shapely.geometry import LineString, Polygon\n\n\ndef _get_boundingbox(centroid: np.ndarray, yaw: float, extent: np.ndarray) -> Polygon:\n x, y = centroid[0], centroid[1]\n sin, cos = np.sin(yaw), np.cos(yaw)\n width, length = extent[0] / 2, extent[1] / 2\n\n x1, y1 = (x + width * cos - length * sin, y + width * sin + length * cos)\n x2, y2 = (x + width * cos + length * sin, y + width * sin - length * cos)\n x3, y3 = (x - width * cos + length * sin, y - width * sin - length * cos)\n x4, y4 = (x - width * cos - length * sin, y - width * sin + length * cos)\n return Polygon([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])\n\n\ndef _get_sides(bbox: Polygon) -> Tuple[LineString, LineString, LineString, LineString]:\n (x1, y1), (x2, y2), (x3, y3), (x4, y4) = bbox.exterior.coords[:-1]\n return (\n LineString([(x1, y1), (x2, y2)]),\n LineString([(x3, y3), (x4, y4)]),\n LineString([(x1, y1), (x4, y4)]),\n LineString([(x2, y2), (x3, y3)]),\n )\n\n\ndef within_range(ego_centroid: np.ndarray, ego_extent: np.ndarray, agents: np.ndarray) -> np.ndarray:\n agent_centroids = agents[\"centroid\"]\n agent_extents = agents[\"extent\"]\n distance = np.linalg.norm(ego_centroid - agent_centroids, axis=-1)\n max_range = 0.5 * (np.linalg.norm(ego_extent[:2]) + np.linalg.norm(agent_extents[:, 2], axis=-1))\n return agents[distance < max_range]\n\n\ndef detect_collision(\n pred_centroid: np.ndarray, pred_yaw: float, pred_extent: np.ndarray, target_agents: np.ndarray\n) -> Tuple[str, str]:\n \"\"\"\n Computes whether a collision occurred between ego and any another agent.\n Also computes the type of collision: rear, front, or side.\n For this, we compute the intersection of ego's four sides with a target\n agent and measure the length of this intersection. A collision\n is classified into a class, if the corresponding length is maximal,\n i.e. 
a front collision exhibits the longest intersection with\n egos front edge.\n \"\"\"\n ego_bbox = _get_boundingbox(centroid=pred_centroid, yaw=pred_yaw, extent=pred_extent)\n for agent in within_range(pred_centroid, pred_extent, target_agents):\n agent_bbox = _get_boundingbox(agent[\"centroid\"], agent[\"yaw\"], agent[\"extent\"])\n\n if ego_bbox.intersects(agent_bbox):\n front_side, rear_side, left_side, right_side = _get_sides(ego_bbox)\n\n intersection_length_per_side = np.asarray(\n [\n agent_bbox.intersection(front_side).length,\n agent_bbox.intersection(rear_side).length,\n agent_bbox.intersection(left_side).length,\n agent_bbox.intersection(right_side).length,\n ]\n )\n collision_type = [\"front\", \"rear\", \"side\", \"side\"][np.argmax(intersection_length_per_side)]\n return collision_type, agent[\"track_id\"]\n return \"\", \"\"\n", "id": "1314525", "language": "Python", "matching_score": 0.8107584714889526, "max_stars_count": 0, "path": "l5kit/l5kit/planning/utils.py" }, { "content": "import warnings\nfrom typing import Dict, List\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.models.resnet import resnet18, resnet50\n\n\nclass PlanningModel(nn.Module):\n def __init__(\n self,\n model_arch: str,\n num_input_channels: int,\n num_targets: int,\n weights_scaling: List[float],\n criterion: nn.Module,\n pretrained: bool = True,\n ) -> None:\n super().__init__()\n self.model_arch = model_arch\n self.num_input_channels = num_input_channels\n self.num_targets = num_targets\n self.register_buffer(\"weights_scaling\", torch.tensor(weights_scaling))\n self.pretrained = pretrained\n self.criterion = criterion\n\n if pretrained and self.num_input_channels != 3:\n warnings.warn(\"There is no pre-trained model with num_in_channels != 3, first layer will be reset\")\n\n if model_arch == \"resnet18\":\n self.model = resnet18(pretrained=pretrained)\n self.model.fc = nn.Linear(in_features=512, out_features=num_targets)\n elif model_arch == \"resnet50\":\n self.model = resnet50(pretrained=pretrained)\n self.model.fc = nn.Linear(in_features=2048, out_features=num_targets)\n else:\n raise NotImplementedError(f\"Model arch {model_arch} unknown\")\n\n if self.num_input_channels != 3:\n self.model.conv1 = nn.Conv2d(\n self.num_input_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False\n )\n\n def forward(self, data_batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n # [batch_size, channels, height, width]\n image_batch = data_batch[\"image\"]\n # [batch_size, num_steps * 2]\n outputs = self.model(image_batch)\n batch_size = len(data_batch[\"image\"])\n\n if self.training:\n if self.criterion is None:\n raise NotImplementedError(\"Loss function is undefined.\")\n\n # [batch_size, num_steps * 2]\n targets = (torch.cat((data_batch[\"target_positions\"], data_batch[\"target_yaws\"]), dim=2)).view(\n batch_size, -1\n )\n # [batch_size, num_steps]\n target_weights = (data_batch[\"target_availabilities\"].unsqueeze(-1) * self.weights_scaling).view(\n batch_size, -1\n )\n loss = torch.mean(self.criterion(outputs, targets) * target_weights)\n train_dict = {\"loss\": loss}\n return train_dict\n else:\n predicted = outputs.view(batch_size, -1, 3)\n # [batch_size, num_steps, 2->(XY)]\n pred_positions = predicted[:, :, :2]\n # [batch_size, num_steps, 1->(yaw)]\n pred_yaws = predicted[:, :, 2:3]\n eval_dict = {\"positions\": pred_positions, \"yaws\": pred_yaws}\n return eval_dict\n", "id": "7018841", "language": "Python", "matching_score": 0.4841277003288269, 
"max_stars_count": 0, "path": "l5kit/l5kit/planning/model.py" }, { "content": "from typing import Sequence, Union, cast\n\nimport numpy as np\nimport pymap3d as pm\nimport transforms3d\n\n\ndef compute_agent_pose(agent_centroid_m: np.ndarray, agent_yaw_rad: float) -> np.ndarray:\n \"\"\"Return the agent pose as a 3x3 matrix. This corresponds to world_from_agent matrix.\n\n Args:\n agent_centroid_m (np.ndarry): 2D coordinates of the agent\n agent_yaw_rad (float): yaw of the agent\n\n Returns:\n (np.ndarray): 3x3 world_from_agent matrix\n \"\"\"\n # Compute agent pose from its position and heading\n return np.array(\n [\n [np.cos(agent_yaw_rad), -np.sin(agent_yaw_rad), agent_centroid_m[0]],\n [np.sin(agent_yaw_rad), np.cos(agent_yaw_rad), agent_centroid_m[1]],\n [0, 0, 1],\n ]\n )\n\n\ndef rotation33_as_yaw(rotation: np.ndarray) -> float:\n \"\"\"Compute the yaw component of given 3x3 rotation matrix.\n\n Args:\n rotation (np.ndarray): 3x3 rotation matrix (np.float64 dtype recommended)\n\n Returns:\n float: yaw rotation in radians\n \"\"\"\n return cast(float, transforms3d.euler.mat2euler(rotation)[2])\n\n\ndef yaw_as_rotation33(yaw: float) -> np.ndarray:\n \"\"\"Create a 3x3 rotation matrix from given yaw.\n The rotation is counter-clockwise and it is equivalent to:\n [cos(yaw), -sin(yaw), 0.0],\n [sin(yaw), cos(yaw), 0.0],\n [0.0, 0.0, 1.0],\n\n Args:\n yaw (float): yaw rotation in radians\n\n Returns:\n np.ndarray: 3x3 rotation matrix\n \"\"\"\n return transforms3d.euler.euler2mat(0, 0, yaw)\n\n\ndef vertical_flip(tm: np.ndarray, y_dim_size: int) -> np.ndarray:\n \"\"\"Return a new matrix that also performs a flip on the y axis.\n\n Args:\n tm: the original 3x3 matrix\n y_dim_size: this should match the resolution on y. It makes all coordinates positive\n\n Returns: a new 3x3 matrix.\n\n \"\"\"\n flip_y = np.eye(3)\n flip_y[1, 1] = -1\n tm = np.matmul(flip_y, tm)\n tm[1, 2] += y_dim_size\n return tm\n\n\ndef transform_points(points: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform a set of 2D/3D points using the given transformation matrix.\n Assumes row major ordering of the input points. 
The transform function has 3 modes:\n - points (N, F), transf_matrix (F+1, F+1)\n all points are transformed using the matrix and the output points have shape (N, F).\n - points (B, N, F), transf_matrix (F+1, F+1)\n all sequences of points are transformed using the same matrix and the output points have shape (B, N, F).\n transf_matrix is broadcasted.\n - points (B, N, F), transf_matrix (B, F+1, F+1)\n each sequence of points is transformed using its own matrix and the output points have shape (B, N, F).\n\n Note this function assumes points.shape[-1] == matrix.shape[-1] - 1, which means that last\n rows in the matrices do not influence the final results.\n For 2D points only the first 2x3 parts of the matrices will be used.\n\n Args:\n points (np.ndarray): Input points of shape (N, F) or (B, N, F)\n with F = 2 or 3 depending on input points are 2D or 3D points.\n transf_matrix (np.ndarray): Transformation matrix of shape (F+1, F+1) or (B, F+1, F+1) with F = 2 or 3.\n\n Returns:\n np.ndarray: Transformed points of shape (N, F) or (B, N, F) depending on the dimensions of the input points.\n \"\"\"\n points_log = f\" received points with shape {points.shape} \"\n matrix_log = f\" received matrices with shape {transf_matrix.shape} \"\n\n assert points.ndim in [2, 3], f\"points should have ndim in [2,3],{points_log}\"\n assert transf_matrix.ndim in [2, 3], f\"matrix should have ndim in [2,3],{matrix_log}\"\n assert points.ndim >= transf_matrix.ndim, f\"points ndim should be >= than matrix,{points_log},{matrix_log}\"\n\n points_feat = points.shape[-1]\n assert points_feat in [2, 3], f\"last points dimension must be 2 or 3,{points_log}\"\n assert transf_matrix.shape[-1] == transf_matrix.shape[-2], f\"matrix should be a square matrix,{matrix_log}\"\n\n matrix_feat = transf_matrix.shape[-1]\n assert matrix_feat in [3, 4], f\"last matrix dimension must be 3 or 4,{matrix_log}\"\n assert points_feat == matrix_feat - 1, f\"points last dim should be one less than matrix,{points_log},{matrix_log}\"\n\n def _transform(points: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n num_dims = transf_matrix.shape[-1] - 1\n transf_matrix = np.transpose(transf_matrix, (0, 2, 1))\n return points @ transf_matrix[:, :num_dims, :num_dims] + transf_matrix[:, -1:, :num_dims]\n\n if points.ndim == transf_matrix.ndim == 2:\n points = np.expand_dims(points, 0)\n transf_matrix = np.expand_dims(transf_matrix, 0)\n return _transform(points, transf_matrix)[0]\n\n elif points.ndim == transf_matrix.ndim == 3:\n return _transform(points, transf_matrix)\n\n elif points.ndim == 3 and transf_matrix.ndim == 2:\n transf_matrix = np.expand_dims(transf_matrix, 0)\n return _transform(points, transf_matrix)\n else:\n raise NotImplementedError(f\"unsupported case!{points_log},{matrix_log}\")\n\n\ndef transform_point(point: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:\n \"\"\"Transform a single vector using transformation matrix.\n This function call transform_points internally\n Args:\n point (np.ndarray): vector of shape (N)\n transf_matrix (np.ndarray): transformation matrix of shape (N+1, N+1)\n\n Returns:\n np.ndarray: vector of same shape as input point\n \"\"\"\n point = np.expand_dims(point, 0)\n return transform_points(point, transf_matrix)[0]\n\n\ndef ecef_to_geodetic(point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:\n \"\"\"Convert given ECEF coordinate into latitude, longitude, altitude.\n\n Args:\n point (Union[np.ndarray, Sequence[float]]): ECEF coordinate vector\n\n Returns:\n np.ndarray: latitude, 
altitude, longitude\n \"\"\"\n return np.array(pm.ecef2geodetic(point[0], point[1], point[2]))\n\n\ndef geodetic_to_ecef(lla_point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:\n \"\"\"Convert given latitude, longitude, and optionally altitude into ECEF\n coordinates. If no altitude is given, altitude 0 is assumed.\n\n Args:\n lla_point (Union[np.ndarray, Sequence[float]]): Latitude, Longitude and optionally Altitude\n\n Returns:\n np.ndarray: 3D ECEF coordinate\n \"\"\"\n if len(lla_point) == 2:\n return np.array(pm.geodetic2ecef(lla_point[0], lla_point[1], 0), dtype=np.float64)\n else:\n return np.array(pm.geodetic2ecef(lla_point[0], lla_point[1], lla_point[2]), dtype=np.float64)\n", "id": "11173976", "language": "Python", "matching_score": 4.255991458892822, "max_stars_count": 0, "path": "l5kit/l5kit/geometry/transform.py" }, { "content": "from .angle import angle_between_vectors, angular_distance, compute_yaw_around_north_from_direction\nfrom .image import crop_rectangle_from_image\nfrom .transform import (\n compute_agent_pose,\n ecef_to_geodetic,\n geodetic_to_ecef,\n rotation33_as_yaw,\n transform_point,\n transform_points,\n vertical_flip,\n yaw_as_rotation33,\n)\nfrom .voxel import normalize_intensity, points_within_bounds, voxel_coords_to_intensity_grid\n\n__all__ = [\n \"angle_between_vectors\",\n \"compute_yaw_around_north_from_direction\",\n \"crop_rectangle_from_image\",\n \"rotation33_as_yaw\",\n \"yaw_as_rotation33\",\n \"vertical_flip\",\n \"transform_points\",\n \"transform_point\",\n \"ecef_to_geodetic\",\n \"geodetic_to_ecef\",\n \"points_within_bounds\",\n \"voxel_coords_to_intensity_grid\",\n \"normalize_intensity\",\n \"angular_distance\",\n \"compute_agent_pose\",\n]\n", "id": "7793367", "language": "Python", "matching_score": 1.0902409553527832, "max_stars_count": 0, "path": "l5kit/l5kit/geometry/__init__.py" }, { "content": "from collections import defaultdict\nfrom typing import List, Optional\n\nimport cv2\nimport numpy as np\n\nfrom ..data.filter import filter_tl_faces_by_status\nfrom ..data.map_api import MapAPI\nfrom ..geometry import rotation33_as_yaw, transform_point, transform_points\nfrom .rasterizer import Rasterizer\nfrom .render_context import RenderContext\n\n# sub-pixel drawing precision constants\nCV2_SHIFT = 8 # how many bits to shift in drawing\nCV2_SHIFT_VALUE = 2 ** CV2_SHIFT\n\n\ndef elements_within_bounds(center: np.ndarray, bounds: np.ndarray, half_extent: float) -> np.ndarray:\n \"\"\"\n Get indices of elements for which the bounding box described by bounds intersects the one defined around\n center (square with side 2*half_side)\n\n Args:\n center (float): XY of the center\n bounds (np.ndarray): array of shape Nx2x2 [[x_min,y_min],[x_max, y_max]]\n half_extent (float): half the side of the bounding box centered around center\n\n Returns:\n np.ndarray: indices of elements inside radius from center\n \"\"\"\n x_center, y_center = center\n\n x_min_in = x_center > bounds[:, 0, 0] - half_extent\n y_min_in = y_center > bounds[:, 0, 1] - half_extent\n x_max_in = x_center < bounds[:, 1, 0] + half_extent\n y_max_in = y_center < bounds[:, 1, 1] + half_extent\n return np.nonzero(x_min_in & y_min_in & x_max_in & y_max_in)[0]\n\n\ndef cv2_subpixel(coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Cast coordinates to numpy.int but keep fractional part by previously multiplying by 2**CV2_SHIFT\n cv2 calls will use shift to restore original values with higher precision\n\n Args:\n coords (np.ndarray): XY coords as float\n\n Returns:\n np.ndarray: XY 
coords as int for cv2 shift draw\n \"\"\"\n coords = coords * CV2_SHIFT_VALUE\n coords = coords.astype(np.int)\n return coords\n\n\nclass SemanticRasterizer(Rasterizer):\n \"\"\"\n Rasteriser for the vectorised semantic map (generally loaded from json files).\n \"\"\"\n\n def __init__(\n self, render_context: RenderContext, semantic_map_path: str, world_to_ecef: np.ndarray,\n ):\n self.render_context = render_context\n self.raster_size = render_context.raster_size_px\n self.pixel_size = render_context.pixel_size_m\n self.ego_center = render_context.center_in_raster_ratio\n\n self.world_to_ecef = world_to_ecef\n\n self.proto_API = MapAPI(semantic_map_path, world_to_ecef)\n\n self.bounds_info = self.get_bounds()\n\n # TODO is this the right place for this function?\n def get_bounds(self) -> dict:\n \"\"\"\n For each elements of interest returns bounds [[min_x, min_y],[max_x, max_y]] and proto ids\n Coords are computed by the MapAPI and, as such, are in the world ref system.\n\n Returns:\n dict: keys are classes of elements, values are dict with `bounds` and `ids` keys\n \"\"\"\n lanes_ids = []\n crosswalks_ids = []\n\n lanes_bounds = np.empty((0, 2, 2), dtype=np.float) # [(X_MIN, Y_MIN), (X_MAX, Y_MAX)]\n crosswalks_bounds = np.empty((0, 2, 2), dtype=np.float) # [(X_MIN, Y_MIN), (X_MAX, Y_MAX)]\n\n for element in self.proto_API:\n element_id = MapAPI.id_as_str(element.id)\n\n if self.proto_API.is_lane(element):\n lane = self.proto_API.get_lane_coords(element_id)\n x_min = min(np.min(lane[\"xyz_left\"][:, 0]), np.min(lane[\"xyz_right\"][:, 0]))\n y_min = min(np.min(lane[\"xyz_left\"][:, 1]), np.min(lane[\"xyz_right\"][:, 1]))\n x_max = max(np.max(lane[\"xyz_left\"][:, 0]), np.max(lane[\"xyz_right\"][:, 0]))\n y_max = max(np.max(lane[\"xyz_left\"][:, 1]), np.max(lane[\"xyz_right\"][:, 1]))\n\n lanes_bounds = np.append(lanes_bounds, np.asarray([[[x_min, y_min], [x_max, y_max]]]), axis=0)\n lanes_ids.append(element_id)\n\n if self.proto_API.is_crosswalk(element):\n crosswalk = self.proto_API.get_crosswalk_coords(element_id)\n x_min = np.min(crosswalk[\"xyz\"][:, 0])\n y_min = np.min(crosswalk[\"xyz\"][:, 1])\n x_max = np.max(crosswalk[\"xyz\"][:, 0])\n y_max = np.max(crosswalk[\"xyz\"][:, 1])\n\n crosswalks_bounds = np.append(\n crosswalks_bounds, np.asarray([[[x_min, y_min], [x_max, y_max]]]), axis=0,\n )\n crosswalks_ids.append(element_id)\n\n return {\n \"lanes\": {\"bounds\": lanes_bounds, \"ids\": lanes_ids},\n \"crosswalks\": {\"bounds\": crosswalks_bounds, \"ids\": crosswalks_ids},\n }\n\n def rasterize(\n self,\n history_frames: np.ndarray,\n history_agents: List[np.ndarray],\n history_tl_faces: List[np.ndarray],\n agent: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n if agent is None:\n ego_translation_m = history_frames[0][\"ego_translation\"]\n ego_yaw_rad = rotation33_as_yaw(history_frames[0][\"ego_rotation\"])\n else:\n ego_translation_m = np.append(agent[\"centroid\"], history_frames[0][\"ego_translation\"][-1])\n ego_yaw_rad = agent[\"yaw\"]\n\n raster_from_world = self.render_context.raster_from_world(ego_translation_m, ego_yaw_rad)\n world_from_raster = np.linalg.inv(raster_from_world)\n\n # get XY of center pixel in world coordinates\n center_in_raster_px = np.asarray(self.raster_size) * (0.5, 0.5)\n center_in_world_m = transform_point(center_in_raster_px, world_from_raster)\n\n sem_im = self.render_semantic_map(center_in_world_m, raster_from_world, history_tl_faces[0])\n return sem_im.astype(np.float32) / 255\n\n def render_semantic_map(\n self, center_in_world: np.ndarray, 
raster_from_world: np.ndarray, tl_faces: np.ndarray\n ) -> np.ndarray:\n \"\"\"Renders the semantic map at given x,y coordinates.\n\n Args:\n center_in_world (np.ndarray): XY of the image center in world ref system\n raster_from_world (np.ndarray):\n Returns:\n np.ndarray: RGB raster\n\n \"\"\"\n\n img = 255 * np.ones(shape=(self.raster_size[1], self.raster_size[0], 3), dtype=np.uint8)\n\n # filter using half a radius from the center\n raster_radius = float(np.linalg.norm(self.raster_size * self.pixel_size)) / 2\n\n # get active traffic light faces\n active_tl_ids = set(filter_tl_faces_by_status(tl_faces, \"ACTIVE\")[\"face_id\"].tolist())\n\n # plot lanes\n lanes_lines = defaultdict(list)\n\n for idx in elements_within_bounds(center_in_world, self.bounds_info[\"lanes\"][\"bounds\"], raster_radius):\n lane = self.proto_API[self.bounds_info[\"lanes\"][\"ids\"][idx]].element.lane\n\n # get image coords\n lane_coords = self.proto_API.get_lane_coords(self.bounds_info[\"lanes\"][\"ids\"][idx])\n xy_left = cv2_subpixel(transform_points(lane_coords[\"xyz_left\"][:, :2], raster_from_world))\n xy_right = cv2_subpixel(transform_points(lane_coords[\"xyz_right\"][:, :2], raster_from_world))\n lanes_area = np.vstack((xy_left, np.flip(xy_right, 0))) # start->end left then end->start right\n\n # Note(lberg): this called on all polygons skips some of them, don't know why\n cv2.fillPoly(img, [lanes_area], (17, 17, 31), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n\n lane_type = \"default\" # no traffic light face is controlling this lane\n lane_tl_ids = set([MapAPI.id_as_str(la_tc) for la_tc in lane.traffic_controls])\n for tl_id in lane_tl_ids.intersection(active_tl_ids):\n if self.proto_API.is_traffic_face_colour(tl_id, \"red\"):\n lane_type = \"red\"\n elif self.proto_API.is_traffic_face_colour(tl_id, \"green\"):\n lane_type = \"green\"\n elif self.proto_API.is_traffic_face_colour(tl_id, \"yellow\"):\n lane_type = \"yellow\"\n\n lanes_lines[lane_type].extend([xy_left, xy_right])\n\n cv2.polylines(img, lanes_lines[\"default\"], False, (255, 217, 82), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n cv2.polylines(img, lanes_lines[\"green\"], False, (0, 255, 0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n cv2.polylines(img, lanes_lines[\"yellow\"], False, (255, 255, 0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n cv2.polylines(img, lanes_lines[\"red\"], False, (255, 0, 0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n\n # plot crosswalks\n crosswalks = []\n for idx in elements_within_bounds(center_in_world, self.bounds_info[\"crosswalks\"][\"bounds\"], raster_radius):\n crosswalk = self.proto_API.get_crosswalk_coords(self.bounds_info[\"crosswalks\"][\"ids\"][idx])\n\n xy_cross = cv2_subpixel(transform_points(crosswalk[\"xyz\"][:, :2], raster_from_world))\n crosswalks.append(xy_cross)\n\n cv2.polylines(img, crosswalks, True, (255, 117, 69), lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n\n return img\n\n def to_rgb(self, in_im: np.ndarray, **kwargs: dict) -> np.ndarray:\n return (in_im * 255).astype(np.uint8)\n\n def num_channels(self) -> int:\n return 3\n", "id": "5350292", "language": "Python", "matching_score": 5.742848873138428, "max_stars_count": 0, "path": "l5kit/l5kit/rasterization/semantic_rasterizer.py" }, { "content": "from typing import List, Optional, Tuple, Union\n\nimport cv2\nimport numpy as np\n\nfrom l5kit.data.zarr_dataset import AGENT_DTYPE\n\nfrom ..data.filter import filter_agents_by_labels, filter_agents_by_track_id\nfrom ..geometry import rotation33_as_yaw, transform_points\nfrom .rasterizer import 
EGO_EXTENT_HEIGHT, EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, Rasterizer\nfrom .render_context import RenderContext\nfrom .semantic_rasterizer import CV2_SHIFT, cv2_subpixel\n\n\ndef get_ego_as_agent(frame: np.ndarray) -> np.ndarray: # TODO this can be useful to have around\n \"\"\"\n Get a valid agent with information from the frame AV. Ford Fusion extent is used\n\n Args:\n frame (np.ndarray): the frame we're interested in\n\n Returns: an agent np.ndarray of the AV\n\n \"\"\"\n ego_agent = np.zeros(1, dtype=AGENT_DTYPE)\n ego_agent[0][\"centroid\"] = frame[\"ego_translation\"][:2]\n ego_agent[0][\"yaw\"] = rotation33_as_yaw(frame[\"ego_rotation\"])\n ego_agent[0][\"extent\"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))\n return ego_agent\n\n\ndef draw_boxes(\n raster_size: Tuple[int, int],\n raster_from_world: np.ndarray,\n agents: np.ndarray,\n color: Union[int, Tuple[int, int, int]],\n) -> np.ndarray:\n \"\"\"\n Draw multiple boxes in one sweep over the image.\n Boxes corners are extracted from agents, and the coordinates are projected in the image plane.\n Finally, cv2 draws the boxes.\n\n Args:\n raster_size (Tuple[int, int]): Desired output image size\n world_to_image_space (np.ndarray): 3x3 matrix to convert from world to image coordinated\n agents (np.ndarray): array of agents to be drawn\n color (Union[int, Tuple[int, int, int]]): single int or RGB color\n\n Returns:\n np.ndarray: the image with agents rendered. RGB if color RGB, otherwise GRAY\n \"\"\"\n if isinstance(color, int):\n im = np.zeros((raster_size[1], raster_size[0]), dtype=np.uint8)\n else:\n im = np.zeros((raster_size[1], raster_size[0], 3), dtype=np.uint8)\n\n corners_base_coords = (np.asarray([[-1, -1], [-1, 1], [1, 1], [1, -1]]) * 0.5)[None, :, :]\n\n # compute the corner in world-space (start in origin, rotate and then translate)\n corners_m = corners_base_coords * agents[\"extent\"][:, None, :2] # corners in zero\n s = np.sin(agents[\"yaw\"])\n c = np.cos(agents[\"yaw\"])\n # note this is clockwise because it's right-multiplied and not left-multiplied later,\n # and therefore we're still rotating counterclockwise.\n rotation_m = np.moveaxis(np.array(((c, s), (-s, c))), 2, 0)\n box_world_coords = corners_m @ rotation_m + agents[\"centroid\"][:, None, :2]\n\n box_raster_coords = transform_points(box_world_coords.reshape((-1, 2)), raster_from_world)\n\n # fillPoly wants polys in a sequence with points inside as (x,y)\n box_raster_coords = cv2_subpixel(box_raster_coords.reshape((-1, 4, 2)))\n cv2.fillPoly(im, box_raster_coords, color=color, lineType=cv2.LINE_AA, shift=CV2_SHIFT)\n return im\n\n\nclass BoxRasterizer(Rasterizer):\n def __init__(\n self, render_context: RenderContext, filter_agents_threshold: float, history_num_frames: int,\n ):\n \"\"\"\n\n Args:\n render_context (RenderContext): Render context\n filter_agents_threshold (float): Value between 0 and 1 used to filter uncertain agent detections\n history_num_frames (int): Number of frames to rasterise in the past\n \"\"\"\n super(BoxRasterizer, self).__init__()\n self.render_context = render_context\n self.raster_size = render_context.raster_size_px\n self.filter_agents_threshold = filter_agents_threshold\n self.history_num_frames = history_num_frames\n\n def rasterize(\n self,\n history_frames: np.ndarray,\n history_agents: List[np.ndarray],\n history_tl_faces: List[np.ndarray],\n agent: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n # all frames are drawn relative to this one\"\n frame = history_frames[0]\n if agent is None:\n 
ego_translation_m = history_frames[0][\"ego_translation\"]\n ego_yaw_rad = rotation33_as_yaw(frame[\"ego_rotation\"])\n else:\n ego_translation_m = np.append(agent[\"centroid\"], history_frames[0][\"ego_translation\"][-1])\n ego_yaw_rad = agent[\"yaw\"]\n\n raster_from_world = self.render_context.raster_from_world(ego_translation_m, ego_yaw_rad)\n\n # this ensures we always end up with fixed size arrays, +1 is because current time is also in the history\n out_shape = (self.raster_size[1], self.raster_size[0], self.history_num_frames + 1)\n agents_images = np.zeros(out_shape, dtype=np.uint8)\n ego_images = np.zeros(out_shape, dtype=np.uint8)\n\n for i, (frame, agents) in enumerate(zip(history_frames, history_agents)):\n agents = filter_agents_by_labels(agents, self.filter_agents_threshold)\n # note the cast is for legacy support of dataset before April 2020\n av_agent = get_ego_as_agent(frame).astype(agents.dtype)\n\n if agent is None:\n agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)\n ego_image = draw_boxes(self.raster_size, raster_from_world, av_agent, 255)\n else:\n agent_ego = filter_agents_by_track_id(agents, agent[\"track_id\"])\n if len(agent_ego) == 0: # agent not in this history frame\n agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)\n ego_image = np.zeros_like(agents_image)\n else: # add av to agents and remove the agent from agents\n agents = agents[agents != agent_ego[0]]\n agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)\n ego_image = draw_boxes(self.raster_size, raster_from_world, agent_ego, 255)\n\n agents_images[..., i] = agents_image\n ego_images[..., i] = ego_image\n\n # combine such that the image consists of [agent_t, agent_t-1, agent_t-2, ego_t, ego_t-1, ego_t-2]\n out_im = np.concatenate((agents_images, ego_images), -1)\n\n return out_im.astype(np.float32) / 255\n\n def to_rgb(self, in_im: np.ndarray, **kwargs: dict) -> np.ndarray:\n \"\"\"\n get an rgb image where agents further in the past have faded colors\n\n Args:\n in_im: the output of the rasterize function\n kwargs: this can be used for additional customization (such as colors)\n\n Returns: an RGB image with agents and ego coloured with fading colors\n \"\"\"\n hist_frames = in_im.shape[-1] // 2\n in_im = np.transpose(in_im, (2, 0, 1))\n\n # this is similar to the draw history code\n out_im_agent = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)\n agent_chs = in_im[:hist_frames][::-1] # reverse to start from the furthest one\n agent_color = (0, 0, 1) if \"agent_color\" not in kwargs else kwargs[\"agent_color\"]\n for ch in agent_chs:\n out_im_agent *= 0.85 # magic fading constant for the past\n out_im_agent[ch > 0] = agent_color\n\n out_im_ego = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)\n ego_chs = in_im[hist_frames:][::-1]\n ego_color = (0, 1, 0) if \"ego_color\" not in kwargs else kwargs[\"ego_color\"]\n for ch in ego_chs:\n out_im_ego *= 0.85\n out_im_ego[ch > 0] = ego_color\n\n out_im = (np.clip(out_im_agent + out_im_ego, 0, 1) * 255).astype(np.uint8)\n return out_im\n\n def num_channels(self) -> int:\n return (self.history_num_frames + 1) * 2\n", "id": "1223800", "language": "Python", "matching_score": 5.139461517333984, "max_stars_count": 0, "path": "l5kit/l5kit/rasterization/box_rasterizer.py" }, { "content": "from typing import List, Optional\n\nimport cv2\nimport numpy as np\n\nfrom .box_rasterizer import 
BoxRasterizer\nfrom .rasterizer import Rasterizer\nfrom .render_context import RenderContext\nfrom .satellite_rasterizer import SatelliteRasterizer\n\n\nclass SatBoxRasterizer(Rasterizer):\n \"\"\"Combine a Satellite and a Box Rasterizers into a single class\n \"\"\"\n\n def __init__(\n self,\n render_context: RenderContext,\n filter_agents_threshold: float,\n history_num_frames: int,\n map_im: np.ndarray,\n world_to_aerial: np.ndarray,\n interpolation: int = cv2.INTER_LINEAR,\n ):\n super(SatBoxRasterizer, self).__init__()\n self.render_context = render_context\n self.raster_size = render_context.raster_size_px\n self.pixel_size = render_context.pixel_size_m\n self.ego_center = render_context.center_in_raster_ratio\n self.filter_agents_threshold = filter_agents_threshold\n self.history_num_frames = history_num_frames\n\n self.map_im = map_im\n self.world_to_aerial = world_to_aerial\n self.interpolation = interpolation\n\n self.box_rast = BoxRasterizer(render_context, filter_agents_threshold, history_num_frames)\n self.sat_rast = SatelliteRasterizer(render_context, map_im, world_to_aerial, interpolation)\n\n def rasterize(\n self,\n history_frames: np.ndarray,\n history_agents: List[np.ndarray],\n history_tl_faces: List[np.ndarray],\n agent: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n im_out_box = self.box_rast.rasterize(history_frames, history_agents, history_tl_faces, agent)\n im_out_sat = self.sat_rast.rasterize(history_frames, history_agents, history_tl_faces, agent)\n return np.concatenate([im_out_box, im_out_sat], -1)\n\n def to_rgb(self, in_im: np.ndarray, **kwargs: dict) -> np.ndarray:\n im_out_box = self.box_rast.to_rgb(in_im[..., :-3], **kwargs)\n im_out_sat = self.sat_rast.to_rgb(in_im[..., -3:], **kwargs)\n\n # merge the two together using box as mask\n mask = np.any(im_out_box > 0, axis=-1)\n im_out_sat[mask] = im_out_box[mask]\n return im_out_sat\n\n def num_channels(self) -> int:\n return self.box_rast.num_channels() + self.sat_rast.num_channels()\n", "id": "1677643", "language": "Python", "matching_score": 2.4597201347351074, "max_stars_count": 0, "path": "l5kit/l5kit/rasterization/sat_box_rasterizer.py" }, { "content": "import numpy as np\nimport pytest\n\nfrom l5kit.data import TL_FACE_DTYPE, ChunkedDataset, LocalDataManager, filter_agents_by_frames\nfrom l5kit.rasterization import Rasterizer, build_rasterizer\nfrom l5kit.sampling import get_history_slice\n\n\ndef check_rasterizer(cfg: dict, rasterizer: Rasterizer, zarr_dataset: ChunkedDataset) -> None:\n frames = zarr_dataset.frames[:] # Load all frames into memory\n for current_frame in [0, 50, len(frames) - 1]:\n history_num_frames = cfg[\"model_params\"][\"history_num_frames\"]\n s = get_history_slice(current_frame, history_num_frames, 1, include_current_state=True)\n frames_to_rasterize = frames[s]\n agents = filter_agents_by_frames(frames_to_rasterize, zarr_dataset.agents)\n tl_faces = [np.empty(0, dtype=TL_FACE_DTYPE) for _ in agents] # TODO TR_FACES\n im = rasterizer.rasterize(frames_to_rasterize, agents, tl_faces)\n assert len(im.shape) == 3\n assert im.shape[-1] == rasterizer.num_channels()\n assert im.shape[:2] == tuple(cfg[\"raster_params\"][\"raster_size\"])\n assert im.max() <= 1\n assert im.min() >= 0\n assert im.dtype == np.float32\n\n rgb_im = rasterizer.to_rgb(im)\n assert im.shape[:2] == rgb_im.shape[:2]\n assert rgb_im.shape[2] == 3 # RGB has three channels\n assert rgb_im.dtype == np.uint8\n\n\n@pytest.mark.parametrize(\"map_type\", [\"py_semantic\", \"py_satellite\", \"box_debug\"])\ndef 
test_rasterizer_created_from_config(\n map_type: str, zarr_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict\n) -> None:\n cfg[\"raster_params\"][\"map_type\"] = map_type\n rasterizer = build_rasterizer(cfg, dmg)\n check_rasterizer(cfg, rasterizer, zarr_dataset)\n", "id": "531662", "language": "Python", "matching_score": 0.25292661786079407, "max_stars_count": 0, "path": "l5kit/l5kit/tests/rasterization/rasterizer_e2e_test.py" }, { "content": "import numpy as np\nimport pytest\nimport torch\n\nfrom l5kit.dataset.utils import convert_str_to_fixed_length_tensor, kMaxStrLength\n\n\ndef test_convert_str() -> None:\n # assert the type of the return\n assert convert_str_to_fixed_length_tensor(\"test\").dtype == torch.uint8\n\n # test with a string with the same value\n rep_count = 10\n fixed_str = \"a\" * rep_count\n str_cast = convert_str_to_fixed_length_tensor(fixed_str).numpy()\n assert len(np.unique(str_cast[:rep_count])) == 1\n assert np.allclose(str_cast[rep_count:], 0)\n\n # test with a str with different values\n fixed_str = \"ab\"\n str_cast = convert_str_to_fixed_length_tensor(fixed_str).numpy()\n assert len(np.unique(str_cast)) == 3\n\n # test with a str longer than th\n with pytest.raises(AssertionError):\n convert_str_to_fixed_length_tensor(\"a\" * (kMaxStrLength + 1))\n", "id": "11851946", "language": "Python", "matching_score": 1.1637018918991089, "max_stars_count": 0, "path": "l5kit/l5kit/tests/dataset/utils_test.py" }, { "content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nfrom l5kit import __version__\n\nsetup(\n name=\"l5kit\",\n version=__version__,\n description=\"Lyft Autonomous Vehicle Research library\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n url=\"https://github.com/lyft/l5kit\",\n license=\"apache2\",\n install_requires=[\n \"imageio\",\n \"matplotlib\",\n \"numpy\",\n \"opencv-contrib-python-headless\",\n \"protobuf>=3.12.2\",\n \"pymap3d\",\n \"scipy\",\n \"setuptools\",\n \"torch>=1.5.0,<2.0.0\",\n \"torchvision>=0.6.0,<1.0.0\",\n \"tqdm\",\n \"transforms3d\",\n \"zarr\",\n \"pyyaml\",\n \"notebook\",\n \"ptable\",\n \"ipywidgets\",\n \"shapely\",\n ],\n extras_require={\n \"dev\": [\"pytest\", \"mypy\", \"setuptools\", \"twine\", \"wheel\", \"pytest-cov\", \"flake8\",\n \"black==19.10b0\", \"isort\", \"Sphinx\", \"sphinx-rtd-theme\", \"recommonmark\",\n \"pre-commit\"]\n },\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "id": "130519", "language": "Python", "matching_score": 0.1411445140838623, "max_stars_count": 0, "path": "l5kit/setup.py" } ]
1.126971
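The l5kit geometry entry in the row above works with 3x3 world_from_agent pose matrices and a row-major transform_points helper (points @ R.T + t). The sketch below re-implements that 2D convention with plain numpy and made-up centroid/yaw values, rather than importing l5kit, to show how a point in the agent frame maps into world coordinates.

import numpy as np

def world_from_agent(centroid_xy, yaw):
    """3x3 pose matrix with the same layout as compute_agent_pose() in the entry above."""
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, centroid_xy[0]],
                     [s,  c, centroid_xy[1]],
                     [0.0, 0.0, 1.0]])

def transform_points_2d(points, matrix):
    """Row-major 2D transform matching the transform_points helper: points @ R.T + t."""
    return points @ matrix[:2, :2].T + matrix[:2, 2]

# Agent at (10, 5) heading along +Y; two points given in the agent frame.
pose = world_from_agent((10.0, 5.0), np.pi / 2)
pts_in_agent_frame = np.array([[1.0, 0.0], [0.0, 2.0]])
print(transform_points_2d(pts_in_agent_frame, pose))   # approximately [[10., 6.], [8., 5.]]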
tkykm
[ { "content": "import json\nimport os\nimport urllib.request\n\ndef format_message(data):\n payload = {\n 'username': 'Trusted Advisor',\n 'icon_emoji': ':police_car:',\n 'text': '{} {}'.format(get_mention(data['detail']['status']), data['detail-type']),\n 'attachments': [\n {\n 'fallback': 'Detailed information',\n 'color': get_color(data['detail']['status']),\n 'title': data['detail']['check-name'],\n 'text': \"```\\n{0}\\n```\".format(data['detail']['check-item-detail']),\n 'fields': [\n {\n 'title': 'Account ID',\n 'value': data['account'],\n 'short': True\n },\n {\n 'title': 'Status',\n 'value': data['detail']['status'],\n 'short': True\n },\n {\n 'title': 'ARN',\n 'value': data['detail']['resource_id'],\n 'short': True\n },\n {\n 'title': 'Region',\n 'value': data['detail']['check-item-detail']['Region'] if 'Region' in data['detail']['check-item-detail'] else \"\",\n 'short': True\n }\n ]\n }\n ]\n }\n return payload\n\ndef get_color(status):\n color = '#666666' \n if status == 'ERROR':\n color = 'danger'\n elif status == 'WARN':\n color = 'warning'\n return color\n\ndef get_mention(status):\n if status == 'ERROR':\n return '<!channel>'\n else:\n return ''\n\ndef notify_slack(url, payload):\n data = json.dumps(payload).encode('utf-8')\n method = 'POST'\n headers = {'Content-Type': 'application/json'}\n\n request = urllib.request.Request(url, data = data, method = method, headers = headers)\n with urllib.request.urlopen(request) as response:\n return response.read().decode('utf-8')\n\ndef lambda_handler(event, context):\n webhook_urls = os.environ['WEBHOOK_URLS']\n payload = format_message(event)\n if event['detail'].get('check-name') == \"Amazon EBS Snapshots\":\n return None\n responses = []\n for webhook_url in webhook_urls.split(','):\n responses.append(notify_slack(webhook_url, payload))\n return responses\n", "id": "1396111", "language": "Python", "matching_score": 3.583616256713867, "max_stars_count": 7, "path": "trustedadvisor/trustedadvisor.py" }, { "content": "import json\nimport os\nimport urllib.request\n\ndef format_message(data):\n severity_level = get_severity_level(data['detail']['severity'])\n payload = {\n 'username': 'GuardDuty Finding',\n 'icon_emoji': ':guardduty:',\n 'text': '{} GuardDuty Finding in {}'.format(severity_level['mention'], data['detail']['region']),\n 'attachments': [\n {\n 'fallback': 'Detailed information on GuardDuty Finding.',\n 'color': severity_level['color'],\n 'title': data['detail']['title'],\n 'text': data['detail']['description'],\n 'fields': [\n {\n 'title': 'Account ID',\n 'value': data['detail']['accountId'],\n 'short': True\n },\n {\n 'title': 'Severity',\n 'value': severity_level['label'],\n 'short': True\n },\n {\n 'title': 'Type',\n 'value': data['detail']['type'],\n 'short': False\n }\n ]\n }\n ]\n }\n return payload\n\ndef get_severity_level(severity):\n # ref: http://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity\n if severity == 0.0:\n level = {'label': 'Information', 'color': 'good', 'mention': ''}\n elif 0.1 <= severity <= 3.9:\n level = {'label': 'Low', 'color': 'warning', 'mention': ''}\n elif 4.0 <= severity <= 6.9:\n level = {'label': 'Medium', 'color': 'warning', 'mention': '<!here>'}\n elif 7.0 <= severity <= 8.9:\n level = {'label': 'High', 'color': 'danger', 'mention': '<!channel>'}\n elif 9.0 <= severity <= 10.0:\n level = {'label': 'Critical', 'color': 'danger', 'mention': '<!channel>'}\n else:\n level = {'label': 'Unknow', 'color': '#666666', 'mention': ''}\n return 
level\n\ndef notify_slack(url, payload):\n data = json.dumps(payload).encode('utf-8')\n method = 'POST'\n headers = {'Content-Type': 'application/json'}\n\n request = urllib.request.Request(url, data = data, method = method, headers = headers)\n with urllib.request.urlopen(request) as response:\n return response.read().decode('utf-8')\n\ndef lambda_handler(event, context):\n webhook_urls = os.environ['WEBHOOK_URLS']\n payload = format_message(event)\n responses = []\n for webhook_url in webhook_urls.split(','):\n responses.append( notify_slack(webhook_url, payload) )\n return responses\n", "id": "2624481", "language": "Python", "matching_score": 4.645555019378662, "max_stars_count": 7, "path": "guardduty/guardduty.py" }, { "content": "import json\nimport os\nimport urllib.request\n\ndef format_message(data):\n accountid = data['accountId']\n data['invokingEvent'] = json.loads(data[\"invokingEvent\"])\n if not 'configurationItemDiff' in data['invokingEvent']:\n print(data)\n return None\n diff = data['invokingEvent'].get('configurationItemDiff')\n if not diff:\n return None\n configurationitem = data['invokingEvent'].get('configurationItem')\n if not configurationitem:\n print(data)\n return None\n payload = {\n 'username': 'AWS Config',\n 'icon_emoji': ':camera:',\n 'text': '',\n 'attachments': [\n {\n 'fallback': 'Summary',\n 'color': '#439FE0',\n 'title': data['invokingEvent'].get('messageType'),\n 'fields': [\n {\n 'title': 'Account ID',\n 'value': accountid,\n 'short': True\n },\n {\n 'title': 'ARN',\n 'value': configurationitem.get('ARN'),\n 'short': True\n },\n {\n 'title': 'changeType',\n 'value': diff.get('changeType'),\n 'short': True\n },\n {\n 'title': 'Region',\n 'value': configurationitem.get('awsRegion'),\n 'short': True\n }\n ]\n },\n {\n \"title\": 'configuration',\n \"fallback\": \"Detail\",\n \"color\": \"#439FE0\",\n \"text\": \"```\\n{0}\\n```\".format(data['invokingEvent'])\n }\n ]\n }\n return payload\n\n\ndef notify_slack(url, payload):\n data = json.dumps(payload).encode('utf-8')\n method = 'POST'\n headers = {'Content-Type': 'application/json'}\n\n request = urllib.request.Request(url, data = data, method = method, headers = headers)\n with urllib.request.urlopen(request) as response:\n return response.read().decode('utf-8')\n\ndef lambda_handler(event, context):\n webhook_urls = os.environ['WEBHOOK_URLS']\n payload = format_message(event)\n if not payload:\n return None\n responses = []\n for webhook_url in webhook_urls.split(','):\n responses.append( notify_slack(webhook_url, payload) )\n return responses\n\n", "id": "9965909", "language": "Python", "matching_score": 4.388490676879883, "max_stars_count": 7, "path": "awsconfig/reflectorinvokingevent.py" }, { "content": "import json\nimport os\nimport urllib.request\n\ndef format_message(data):\n payload = {\n 'username': 'Budget',\n 'icon_emoji': ':moneybag:',\n 'text': '<!channel> Budget exceed threshold alert',\n 'attachments': [\n {\n 'fallback': 'Detailed information',\n 'color': \"danger\",\n 'title': data['Sns']['Subject'],\n 'text': data['Sns']['Message'],\n }\n ]\n }\n return payload\n\ndef notify_slack(url, payload):\n data = json.dumps(payload).encode('utf-8')\n method = 'POST'\n headers = {'Content-Type': 'application/json'}\n\n request = urllib.request.Request(url, data = data, method = method, headers = headers)\n with urllib.request.urlopen(request) as response:\n return response.read().decode('utf-8')\n\ndef lambda_handler(event, context):\n webhook_urls = os.environ['WEBHOOK_URLS']\n payload = 
format_message(event['Records'][0])\n responses = []\n for webhook_url in webhook_urls.split(','):\n responses.append(notify_slack(webhook_url, payload))\n return responses\n", "id": "1055768", "language": "Python", "matching_score": 0.8761179447174072, "max_stars_count": 7, "path": "budget/budget.py" }, { "content": "'''\nThe MIT License (MIT)\n\nCopyright (c) 2015 base2Services\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport logging\nfrom urllib.request import urlopen, Request, HTTPError, URLError\nimport json\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass CustomResourceResponse:\n def __init__(self, request_payload, context):\n self.payload = request_payload\n self.response = {\n \"StackId\": request_payload[\"StackId\"],\n \"RequestId\": request_payload[\"RequestId\"],\n \"LogicalResourceId\": request_payload[\"LogicalResourceId\"],\n \"Status\": 'SUCCESS',\n }\n self.context = context\n \n def respond_error(self, message):\n self.response['Status'] = 'FAILED'\n self.response['Reason'] = message\n self.respond()\n \n def respond(self):\n event = self.payload\n response = self.response\n ####\n #### copied from https://github.com/ryansb/cfn-wrapper-python/blob/master/cfn_resource.py\n ####\n \n if event.get(\"PhysicalResourceId\", False):\n response[\"PhysicalResourceId\"] = event[\"LogicalResourceId\"]\n response['PhysicalResourceId'] = self.context.log_stream_name \n\n\n logger.debug(\"Received %s request with event: %s\" % (event['RequestType'], json.dumps(event)))\n \n serialized = json.dumps(response)\n logger.info(f\"Responding to {event['RequestType']} request with: {serialized}\")\n \n req_data = serialized.encode('utf-8')\n \n req = Request(\n event['ResponseURL'],\n data=req_data,\n headers={'Content-Length': len(req_data),'Content-Type': ''}\n )\n req.get_method = lambda: 'PUT'\n \n try:\n urlopen(req)\n logger.debug(\"Request to CFN API succeeded, nothing to do here\")\n except HTTPError as e:\n logger.error(\"Callback to CFN API failed with status %d\" % e.code)\n logger.error(\"Response: %s\" % e.reason)\n except URLError as e:\n logger.error(\"Failed to reach the server - %s\" % e.reason)\n", "id": "4792936", "language": "Python", "matching_score": 2.411221742630005, "max_stars_count": 7, "path": "inspector/cr_response.py" }, { "content": "import json\nimport boto3\nimport cr_response\n\ndef lambda_handler(event, context):\n try:\n params = dict([(k, v) for k, v in event['ResourceProperties'].items() if k != 'ServiceToken'])\n client = 
boto3.client('inspector')\n if event['RequestType'] == 'Create':\n response_data = client.subscribe_to_event(**params)\n if event['RequestType'] == 'Delete':\n response_data = client.unsubscribe_from_event(**params)\n if event['RequestType'] == 'Update':\n old_params = dict([(k, v) for k, v in event['OldResourceProperties'].items() if k != 'ServiceToken'])\n client.unsubscribe_from_event(**old_params)\n response_data = client.subscribe_to_event(**params)\n print(response_data)\n lambda_response = cr_response.CustomResourceResponse(event, context)\n lambda_response.respond()\n except Exception as e:\n cr_response.CustomResourceResponse(event, context).respond_error(str(e))\n raise e\n", "id": "11181106", "language": "Python", "matching_score": 2.1745288372039795, "max_stars_count": 7, "path": "inspector/setsnstopic.py" } ]
2.997419
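The caosbad entry above collects several AWS-to-Slack notification Lambdas that all follow the same pattern: build a Slack payload with format_message, then POST it to every URL listed in the WEBHOOK_URLS environment variable. As a minimal local smoke test of the GuardDuty formatter, the sketch below builds the payload without any network call; it assumes guardduty.py from that entry is importable, and every value in the sample event is invented for illustration.

import json

import guardduty  # the guardduty/guardduty.py module from the entry above

# Hypothetical EventBridge-style GuardDuty finding; all field values are illustrative.
sample_event = {
    'detail': {
        'severity': 5.0,
        'region': 'us-east-1',
        'title': 'Example finding title',
        'description': 'Example finding description',
        'accountId': '123456789012',
        'type': 'Recon:EC2/PortProbeUnprotectedPort',
    },
}

# format_message only reads event fields, so this exercises the formatting logic
# without touching WEBHOOK_URLS or Slack.
payload = guardduty.format_message(sample_event)
print(json.dumps(payload, indent=2))

The same pattern applies to the Trusted Advisor, AWS Config, and Budget handlers in that entry, swapping in the event shape each format_message expects.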
postmasters
[ { "content": "# Copyright 2017 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport base64\nimport sys\n\nimport pyu2f.convenience.authenticator\nimport pyu2f.errors\nimport pyu2f.model\nimport six\n\nfrom google_reauth import _helpers, errors\n\n\nREAUTH_ORIGIN = 'https://accounts.google.com'\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ReauthChallenge(object):\n \"\"\"Base class for reauth challenges.\"\"\"\n\n @property\n @abc.abstractmethod\n def name(self):\n \"\"\"Returns the name of the challenge.\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def is_locally_eligible(self):\n \"\"\"Returns true if a challenge is supported locally on this machine.\"\"\"\n pass\n\n @abc.abstractmethod\n def obtain_challenge_input(self, metadata):\n \"\"\"Performs logic required to obtain credentials and returns it.\n\n Args:\n metadata: challenge metadata returned in the 'challenges' field in\n the initial reauth request. Includes the 'challengeType' field\n and other challenge-specific fields.\n\n Returns:\n response that will be send to the reauth service as the content of\n the 'proposalResponse' field in the request body. Usually a dict\n with the keys specific to the challenge. For example,\n {'credential': password} for password challenge.\n \"\"\"\n pass\n\n\nclass PasswordChallenge(ReauthChallenge):\n \"\"\"Challenge that asks for user's password.\"\"\"\n\n @property\n def name(self):\n return 'PASSWORD'\n\n @property\n def is_locally_eligible(self):\n return True\n\n def obtain_challenge_input(self, unused_metadata):\n passwd = _helpers.get_user_password('Please enter your password:')\n if not passwd:\n passwd = ' ' # avoid the server crashing in case of no password :D\n return {'credential': passwd}\n\n\nclass SecurityKeyChallenge(ReauthChallenge):\n \"\"\"Challenge that asks for user's security key touch.\"\"\"\n\n @property\n def name(self):\n return 'SECURITY_KEY'\n\n @property\n def is_locally_eligible(self):\n return True\n\n def obtain_challenge_input(self, metadata):\n sk = metadata['securityKey']\n challenges = sk['challenges']\n app_id = sk['applicationId']\n\n challenge_data = []\n for c in challenges:\n kh = c['keyHandle'].encode('ascii')\n key = pyu2f.model.RegisteredKey(\n bytearray(base64.urlsafe_b64decode(kh)))\n challenge = c['challenge'].encode('ascii')\n challenge = base64.urlsafe_b64decode(challenge)\n challenge_data.append({'key': key, 'challenge': challenge})\n\n try:\n api = pyu2f.convenience.authenticator.CreateCompositeAuthenticator(\n REAUTH_ORIGIN)\n response = api.Authenticate(app_id, challenge_data,\n print_callback=sys.stderr.write)\n return {'securityKey': response}\n except pyu2f.errors.U2FError as e:\n if e.code == pyu2f.errors.U2FError.DEVICE_INELIGIBLE:\n sys.stderr.write('Ineligible security key.\\n')\n elif e.code == pyu2f.errors.U2FError.TIMEOUT:\n sys.stderr.write(\n 'Timed out while waiting for security key touch.\\n')\n else:\n raise e\n except pyu2f.errors.NoDeviceFoundError:\n 
sys.stderr.write('No security key found.\\n')\n return None\n\n\nclass SamlChallenge(ReauthChallenge):\n \"\"\"Challenge that asks the users to browse to their ID Providers.\"\"\"\n\n @property\n def name(self):\n return 'SAML'\n\n @property\n def is_locally_eligible(self):\n return True\n\n def obtain_challenge_input(self, metadata):\n # Magic Arch has not fully supported returning a proper dedirect URL\n # for programmatic SAML users today. So we error our here and request\n # users to complete a web login.\n raise errors.ReauthSamlLoginRequiredError()\n\n\nAVAILABLE_CHALLENGES = {\n challenge.name: challenge\n for challenge in [\n SecurityKeyChallenge(),\n PasswordChallenge(),\n SamlChallenge()\n ]\n}\n", "id": "3805783", "language": "Python", "matching_score": 0, "max_stars_count": 8, "path": "google_reauth/challenges.py" }, { "content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the reauth module.\"\"\"\n\nimport base64\nimport json\nimport os\nimport unittest\n\nimport mock\n\nfrom google_reauth import challenges, errors\n\nimport pyu2f\n\n\nclass _U2FInterfaceMock(object):\n def Authenticate(self, unused_app_id, challenge, unused_registered_keys):\n raise self.error\n\n\n_u2f_interface_mock = _U2FInterfaceMock()\n\n\nclass ChallengesTest(unittest.TestCase):\n \"\"\"This class contains tests for reauth challanges. 
\"\"\"\n\n @mock.patch('pyu2f.u2f.GetLocalU2FInterface', return_value = _u2f_interface_mock)\n def testSecurityKeyError(self, u2f_mock):\n metadata = {\n 'status': 'READY',\n 'challengeId': 2,\n 'challengeType': 'SECURITY_KEY',\n 'securityKey': {\n 'applicationId': 'security_key_application_id',\n 'challenges': [{\n 'keyHandle': 'some_key',\n 'challenge': base64.urlsafe_b64encode(\n 'some_challenge'.encode('ascii')).decode('ascii'),\n }]\n }}\n\n challenge = challenges.SecurityKeyChallenge()\n\n _u2f_interface_mock.error = pyu2f.errors.U2FError(\n pyu2f.errors.U2FError.DEVICE_INELIGIBLE)\n self.assertEqual(None, challenge.obtain_challenge_input(metadata))\n\n _u2f_interface_mock.error = pyu2f.errors.U2FError(\n pyu2f.errors.U2FError.TIMEOUT)\n self.assertEqual(None, challenge.obtain_challenge_input(metadata))\n\n _u2f_interface_mock.error = pyu2f.errors.NoDeviceFoundError()\n self.assertEqual(None, challenge.obtain_challenge_input(metadata))\n\n _u2f_interface_mock.error = pyu2f.errors.U2FError(\n pyu2f.errors.U2FError.BAD_REQUEST)\n with self.assertRaises(pyu2f.errors.U2FError):\n challenge.obtain_challenge_input(metadata)\n\n _u2f_interface_mock.error = pyu2f.errors.UnsupportedVersionException()\n with self.assertRaises(pyu2f.errors.UnsupportedVersionException):\n challenge.obtain_challenge_input(metadata)\n\n @mock.patch('getpass.getpass', return_value = None)\n def testNoPassword(self, getpass_mock):\n self.assertEqual(challenges.PasswordChallenge().obtain_challenge_input({}),\n {'credential': ' '})\n\n def testSaml(self):\n metadata = {\n 'status': 'READY',\n 'challengeId': 1,\n 'challengeType': 'SAML',\n 'securityKey': {}\n }\n challenge = challenges.SamlChallenge()\n self.assertEqual(True, challenge.is_locally_eligible)\n with self.assertRaises(errors.ReauthSamlLoginRequiredError):\n challenge.obtain_challenge_input(metadata)\n", "id": "12289282", "language": "Python", "matching_score": 0, "max_stars_count": 8, "path": "tests/test_challenges.py" } ]
0
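The postmasters entry above defines the reauth challenge classes and registers them in AVAILABLE_CHALLENGES, keyed by challenge name. Below is a rough dispatch sketch under the assumption that the package is importable as google_reauth (as its own tests do); the metadata argument mirrors the dict shape used in those tests, run_challenge is a hypothetical helper, and invoking the PASSWORD branch will prompt interactively for a password.

from google_reauth import challenges

def run_challenge(challenge_type, metadata):
    # Look up the registered implementation by name, e.g. 'PASSWORD' or 'SECURITY_KEY'.
    challenge = challenges.AVAILABLE_CHALLENGES.get(challenge_type)
    if challenge is None or not challenge.is_locally_eligible:
        return None
    # PASSWORD returns {'credential': ...}; SECURITY_KEY returns {'securityKey': ...};
    # SAML raises ReauthSamlLoginRequiredError instead of returning.
    return challenge.obtain_challenge_input(metadata)

print(run_challenge('PASSWORD', {'challengeType': 'PASSWORD'}))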
Macc92
[ { "content": "#!/usr/bin/env python3\nimport os\nimport random\nimport time\nfrom collections import defaultdict\nfrom functools import wraps\n\nimport cereal.messaging as messaging\nfrom cereal import car\nfrom common.basedir import BASEDIR\nfrom common.hardware import ANDROID\nfrom common.params import Params\nfrom common.spinner import Spinner\nfrom panda import Panda\nfrom selfdrive.boardd.boardd import can_list_to_can_capnp\nfrom selfdrive.car import make_can_msg\nfrom selfdrive.test.helpers import with_processes\n\n\ndef reset_panda(fn):\n @wraps(fn)\n def wrapper():\n p = Panda()\n for i in [0, 1, 2, 0xFFFF]:\n p.can_clear(i)\n p.reset()\n p.close()\n fn()\n return wrapper\n\nos.environ['STARTED'] = '1'\nos.environ['BOARDD_LOOPBACK'] = '1'\nos.environ['BASEDIR'] = BASEDIR\n\n@reset_panda\n@with_processes(['boardd'])\ndef test_boardd_loopback():\n # wait for boardd to init\n spinner = Spinner(noop=(not ANDROID))\n time.sleep(2)\n\n # boardd blocks on CarVin and CarParams\n cp = car.CarParams.new_message()\n cp.safetyModel = car.CarParams.SafetyModel.allOutput\n Params().put(\"CarVin\", b\"0\"*17)\n Params().put(\"CarParams\", cp.to_bytes())\n\n sendcan = messaging.pub_sock('sendcan')\n can = messaging.sub_sock('can', conflate=False, timeout=100)\n\n time.sleep(1)\n\n n = 1000\n for i in range(n):\n spinner.update(f\"boardd loopback {i}/{n}\")\n\n sent_msgs = defaultdict(set)\n for _ in range(random.randrange(10)):\n to_send = []\n for __ in range(random.randrange(100)):\n bus = random.randrange(3)\n addr = random.randrange(1, 1<<29)\n dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))])\n sent_msgs[bus].add((addr, dat))\n to_send.append(make_can_msg(addr, dat, bus))\n sendcan.send(can_list_to_can_capnp(to_send, msgtype='sendcan'))\n\n max_recv = 10\n while max_recv > 0 and any(len(sent_msgs[bus]) for bus in range(3)):\n recvd = messaging.drain_sock(can, wait_for_one=True)\n for msg in recvd:\n for m in msg.can:\n if m.src >= 128:\n k = (m.address, m.dat)\n assert k in sent_msgs[m.src-128]\n sent_msgs[m.src-128].discard(k)\n max_recv -= 1\n\n # if a set isn't empty, messages got dropped\n for bus in range(3):\n assert not len(sent_msgs[bus]), f\"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages\"\n\n spinner.close()\n\n\nif __name__ == \"__main__\":\n test_boardd_loopback()\n", "id": "774331", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "selfdrive/boardd/tests/test_boardd_loopback.py" } ]
0
GeorgiSharkov
[ { "content": "#!/usr/bin/env python\n\nimport sys\nimport rospy\nimport numpy as np\nfrom math import cos, sin\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import Range\n\nnp.set_printoptions(precision=2)\n\norbit = 0\nlaser_sensors = {'w': 0, 'nw': 0, 'n': 0, 'ne': 0, 'e': 0}\n\nlinear_vel = 0.1\nangular_vel = 0.4\nwall_distance = 0.4\nwall_distance_forward = 0.30\nwall_distance_side = 0.35\n\ninf = float('inf')\n\nleft = -1\ngoing_left = -2\nright = 1\ngoing_right = 2\n\n\ndef calculate_lasers_range(data):\n '''Dynamic range intervals'''\n global laser_sensors\n half_pi = np.pi / 2\n initial_angle = 0\n final_angle = 0\n if data.angle_min < -half_pi:\n default_min_angle = half_pi / data.angle_increment\n robot_initial_angle = -data.angle_min / data.angle_increment\n initial_angle = robot_initial_angle - default_min_angle\n if data.angle_max > np.pi / 2:\n default_max_angle = half_pi / data.angle_increment\n robot_final_angle = data.angle_max / data.angle_increment\n final_angle = robot_final_angle - default_max_angle\n\n laser_interval = (len(data.ranges) - initial_angle - final_angle) / 5\n half_laser_interval = laser_interval / 2\n\n interval = [None] * 5\n interval[0] = np.mean(data.ranges[int(initial_angle):int(laser_interval)])\n for i in range(1, 5):\n dirty_values = data.ranges[int(\n initial_angle + i * laser_interval - half_laser_interval\n ):int(initial_angle + i * laser_interval + half_laser_interval) + 1]\n interval[i] = np.mean(np.nan_to_num(dirty_values))\n\n laser_sensors['e'] = interval[0]\n laser_sensors['ne'] = interval[1]\n laser_sensors['n'] = interval[2]\n laser_sensors['nw'] = interval[3]\n laser_sensors['w'] = interval[4]\n\n\ndef log_info():\n '''Initial orbit state'''\n global orbit, laser_sensors\n orbit_values = {-2: 'Going Left', -1: 'Left', 0: 'Undefined', 1: 'Right', 2: 'Going Right'}\n rospy.loginfo(\"Orbit: %s, W : %s, NW: %s, N : %s, NE: %s, E : %s\", orbit_values[orbit],\n laser_sensors['w'], laser_sensors['nw'], laser_sensors['n'],\n laser_sensors['ne'], laser_sensors['e'])\n\n\ndef create_velocity_message(turn_left, turn_right, forward):\n angular = 0\n linear = 0\n if (turn_left):\n angular += angular_vel\n if (turn_right):\n angular -= angular_vel\n if (forward):\n linear = linear_vel\n vel_msg = Twist()\n vel_msg.linear.x = linear\n vel_msg.angular.z = angular\n return vel_msg\n\n\ndef publish_velocity_message(vel_msg):\n vel_pub = rospy.Publisher(\n '/robot' + sys.argv[1] + '/cmd_vel', Twist, queue_size=10)\n vel_pub.publish(vel_msg)\n\n\ndef laser_callback(data):\n global orbit, laser_sensors\n\n calculate_lasers_range(data)\n\n log_info()\n\n linear = 0\n angular = 0\n forward = False\n turn_left = False\n turn_right = False\n\n if (orbit == 0):\n if (laser_sensors['w'] < wall_distance_side):\n orbit = left\n elif (laser_sensors['e'] < wall_distance_side):\n orbit = right\n elif (laser_sensors['nw'] < wall_distance):\n orbit = going_left\n turn_right = True\n elif (laser_sensors['ne'] < wall_distance):\n orbit = going_right\n turn_left = True\n elif (laser_sensors['n'] < wall_distance_forward):\n orbit = going_left\n turn_right = True\n else:\n forward = True\n elif (orbit == going_left or orbit == going_right):\n if (laser_sensors['w'] < wall_distance_side):\n orbit = left\n elif (laser_sensors['e'] < wall_distance_side):\n orbit = right\n elif (orbit == going_left):\n turn_right = True\n elif (orbit == going_right):\n turn_left = True\n elif (orbit == left):\n if (laser_sensors['n'] > 
wall_distance_forward\n and (laser_sensors['w'] > wall_distance_side\n or laser_sensors['e'] > wall_distance_side)):\n forward = True\n if (laser_sensors['w'] <= wall_distance_side\n and laser_sensors['e'] <= wall_distance_side):\n turn_right = True\n elif (laser_sensors['nw'] <= wall_distance\n or laser_sensors['ne'] <= wall_distance):\n turn_right = True\n else:\n if (laser_sensors['ne'] < wall_distance\n or laser_sensors['nw'] < wall_distance\n or laser_sensors['n'] < wall_distance_forward):\n turn_right = True\n else:\n turn_left = True \n elif (orbit == right):\n if (laser_sensors['n'] > wall_distance_forward\n and (laser_sensors['w'] > wall_distance_side\n or laser_sensors['e'] > wall_distance_side)):\n forward = True\n if (laser_sensors['w'] <= wall_distance_side\n and laser_sensors['e'] <= wall_distance_side):\n turn_left = True\n elif (laser_sensors['nw'] <= wall_distance\n or laser_sensors['ne'] <= wall_distance):\n turn_left = True\n else:\n if (laser_sensors['ne'] < wall_distance\n or laser_sensors['nw'] < wall_distance\n or laser_sensors['n'] < wall_distance_forward):\n turn_left = True\n else:\n turn_right = True\n\n vel_msg = create_velocity_message(turn_left, turn_right, forward)\n \n publish_velocity_message(vel_msg)\n\n\ndef sonar_callback(data):\n pass\n\n\ndef listeners():\n rospy.Subscriber('/robot' + sys.argv[1] + '/laser_0', LaserScan,\n laser_callback)\n rospy.Subscriber('/robot' + sys.argv[1] + '/sonar_0', Range,\n sonar_callback)\n rospy.spin()\n\n\nif __name__ == '__main__':\n rospy.init_node('avoid_wall_' + sys.argv[1], anonymous=True)\n listeners()\n\n", "id": "8447098", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "src/avoid_wall.py" } ]
0
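The GeorgiSharkov entry above is a ROS wall-following node whose calculate_lasers_range splits the forward half of a laser scan into five equal sectors (e, ne, n, nw, w) and averages each one before the orbit logic runs. The snippet below restates that sector-averaging idea without any ROS dependency so it can be tried standalone; sector_means is a hypothetical helper, not part of the original node, and the synthetic scan values are made up.

import numpy as np

def sector_means(ranges, n_sectors=5):
    # Illustrative stand-in for calculate_lasers_range: split the scan into equal
    # angular sectors and average each, mapping NaN/inf readings to 0 first.
    clean = np.nan_to_num(np.asarray(ranges, dtype=float), posinf=0.0, neginf=0.0)
    return [float(np.mean(chunk)) for chunk in np.array_split(clean, n_sectors)]

# Synthetic 180-degree scan: a close obstacle on the right, open space elsewhere.
scan = [0.3] * 36 + [2.0] * 144
print(dict(zip(['e', 'ne', 'n', 'nw', 'w'], sector_means(scan))))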
tlancian
[ { "content": "import utils as ut\n\n#TODO: 4.2 - Make a nice visualization of the network\n#TODO: 4.3 - Make a nice visualization of the network\n\n\n### 4.1\n\n# Compute communities of EO network by Louvain Algo\n#eo_communities = ut.get_communities(\"eo_pdc_50\")\n\n# Compute communities of EC network by Louvain Algo\n#ec_communities = ut.get_communities(\"ec_pdc_50\")\n\n### 4.2\n\nut.draw_communities(\"eo_pdc_20\", \"eo_pdc_20_louvain\", \"louvain\")\nut.draw_communities(\"ec_pdc_20\", \"ec_pdc_20_louvain\", \"louvain\")\n\n### 4.3\n\n\n\nut.draw_communities(\"eo_pdc_20\", \"eo_pdc_20_infomap\", \"infomap\")\nut.draw_communities(\"ec_pdc_20\", \"ec_pdc_20_infomap\", \"infomap\")\n\n\n", "id": "9812961", "language": "Python", "matching_score": 0.9398790001869202, "max_stars_count": 0, "path": "HW2/part_4/main.py" }, { "content": "import utils as ut\n\nimport operator\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n######## 2.1\n\n### PDC\n\n# Read data\nadj_eo_pdc = np.load('../part_1/results/npy/eo_pdc_20.npy')\nG_eo_pdc =nx.from_numpy_matrix(adj_eo_pdc, create_using=nx.DiGraph())\nG_eo_pdc = nx.relabel_nodes(G_eo_pdc, dict(enumerate(ut.get_labels_nodes(adj_eo_pdc.shape[0]))))\n\nadj_ec_pdc = np.load('../part_1/results/npy/ec_pdc_20.npy')\nG_ec_pdc =nx.from_numpy_matrix(adj_ec_pdc, create_using=nx.DiGraph())\nG_ec_pdc = nx.relabel_nodes(G_ec_pdc, dict(enumerate(ut.get_labels_nodes(adj_ec_pdc.shape[0]))))\n\n## GLOBAL INDICES \n\n# Clustering Coefficient\n\nCC_eo_pdc = nx.average_clustering(G_eo_pdc)\nCC_ec_pdc = nx.average_clustering(G_ec_pdc)\n\n# Average shortest path length\n\navg_path_eo_pdc = nx.average_shortest_path_length(G_eo_pdc)\navg_path_ec_pdc = nx.average_shortest_path_length(G_ec_pdc)\n\n\n## LOCAL INDICES \n\n# degree \ndegree_10_eo_pdc = sorted(list(G_eo_pdc.degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(degree_10_eo_pdc, 'degree_eo_pdc')\ndegree_10_ec_pdc = sorted(list(G_ec_pdc.degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(degree_10_ec_pdc, 'degree_ec_pdc')\n\n# in_degree\nin_degree_10_eo_pdc = sorted(list(G_eo_pdc.in_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(in_degree_10_eo_pdc, 'in_degree_eo_pdc')\nin_degree_10_ec_pdc =sorted(list(G_ec_pdc.in_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(in_degree_10_ec_pdc, 'in_degree_ec_pdc')\n\n# out_degree\nout_degree_10_eo_pdc = sorted(list(G_eo_pdc.out_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(out_degree_10_eo_pdc, 'out_degree_eo_pdc')\nout_degree_10_ec_pdc = sorted(list(G_ec_pdc.out_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(out_degree_10_ec_pdc, 'out_degree_ec_pdc')\n\n\n\n######## 2.2\n\n######## 2.3\n\n### DTF\n\n# Read data\nadj_eo_dtf = np.load('../part_1/results/npy/eo_dtf_20.npy')\nG_eo_dtf =nx.from_numpy_matrix(adj_eo_dtf, create_using=nx.DiGraph())\nG_eo_dtf = nx.relabel_nodes(G_eo_dtf, dict(enumerate(ut.get_labels_nodes(adj_eo_dtf.shape[0]))))\n\n\nadj_ec_dtf = np.load('../part_1/results/npy/ec_dtf_20.npy')\nG_ec_dtf =nx.from_numpy_matrix(adj_ec_dtf, create_using=nx.DiGraph())\nG_ec_dtf = nx.relabel_nodes(G_ec_dtf, dict(enumerate(ut.get_labels_nodes(adj_ec_dtf.shape[0]))))\n\n## GLOBAL INDICES \n\n# Clustering Coefficient\n\nCC_eo_dtf = nx.average_clustering(G_eo_dtf)\nCC_ec_dtf = nx.average_clustering(G_ec_dtf)\n\n# Average shortest path length\n\navg_path_eo_dtf = 
nx.average_shortest_path_length(G_eo_dtf)\navg_path_ec_dtf = nx.average_shortest_path_length(G_ec_dtf)\n\n# save\ncc = pd.DataFrame([[CC_eo_pdc, CC_ec_pdc], [CC_eo_dtf, CC_ec_dtf]], columns=['pdc', 'dtf'])\navg_path = pd.DataFrame([[avg_path_eo_pdc, avg_path_ec_pdc], [avg_path_eo_dtf, avg_path_ec_dtf]], columns=['pdc', 'dtf'])\n\ncc.to_csv('results/clustering_coefficient.csv')\navg_path.to_csv('results/average_shortest_path.csv')\n\n\n######## 2.4\n\nlst_eo = ['eo_pdc_05', 'eo_pdc_10', 'eo_pdc_20', 'eo_pdc_30', 'eo_pdc_50']\nlst_ec = ['ec_pdc_05', 'ec_pdc_10', 'ec_pdc_20', 'ec_pdc_30', 'ec_pdc_50']\n\nglobal_cc_eo = ut.global_indeces(lst_eo)[0]\navg_path_eo = ut.global_indeces(lst_eo)[1]\n\nglobal_cc_ec = ut.global_indeces(lst_ec)[0]\navg_path_ec = ut.global_indeces(lst_ec)[1]\n\n# save\nut.global_plot(global_cc_eo, global_cc_ec, avg_path_eo,avg_path_ec, 'global_indices_plot')\n\n\n\n######## 2.5\n\nut.topology(G_eo_pdc, 'eo_pdc', adj_eo_pdc)\nut.topology(G_ec_pdc, 'ec_pdc', adj_ec_pdc)\n\n\n######## 2.6\n\nadj_ec2 = np.load('../part_1/results/npy/alt_ec_pdc_20.npy')\nG_ec2 = nx.from_numpy_matrix(adj_ec2, create_using=nx.DiGraph())\nadj_eo2 = np.load('../part_1/results/npy/alt_eo_pdc_20.npy')\nG_eo2 = nx.from_numpy_matrix(adj_eo2, create_using=nx.DiGraph())\n\n\n## GLOBAL INDICES \n\nCC_eo2 = nx.average_clustering(G_eo2)\nCC_ec2 = nx.average_clustering(G_ec2)\n\navg_path_eo2 = nx.average_shortest_path_length(G_eo2)\navg_path_ec2 = nx.average_shortest_path_length(G_ec2)\n\n\ndf = pd.DataFrame([[CC_eo_pdc, CC_eo2, CC_ec_pdc, CC_ec2],\n [avg_path_eo_pdc, avg_path_eo2, avg_path_ec_pdc, avg_path_ec2]], \n columns=['eo_pdc','alt_eo','ec_pdc','alt_ec'],\n index = ['clustering_coefficient', 'avg_shortest_path_length'])\ndf.to_csv('results/graph_idx_comparison.csv')\n\n## LOCAL INDICES \n\n# degree \ndegree_10_eo2 = sorted(list(G_eo2.degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(degree_10_eo2, 'degree_alt_eo')\ndegree_10_ec2 = sorted(list(G_ec2.degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(degree_10_ec2, 'degree_alt_ec')\n\n# in_degree\nin_degree_10_eo2 = sorted(list(G_eo2.in_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(in_degree_10_eo2, 'in_degree_alt_eo')\nin_degree_10_ec2 =sorted(list(G_ec2.in_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(in_degree_10_ec2, 'in_degree_alt_ec')\n\n# out_degree\nout_degree_10_eo2 = sorted(list(G_eo2.out_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(out_degree_10_eo2, 'out_degree_alt_eo')\nout_degree_10_ec2 = sorted(list(G_ec2.out_degree), reverse=True, key=operator.itemgetter(1))[:10]\nut.save_highest_10(out_degree_10_ec2, 'out_degree_alt_ec')\n\n\n\n######## 2.7\n\n\n", "id": "8739214", "language": "Python", "matching_score": 1.8238863945007324, "max_stars_count": 0, "path": "HW2/part_2/main.py" }, { "content": "import utils as ut\n\n#TODO: ALL - Create a better visualization of the adjacency matrix\n\n# Read the files\neo = ut.read_file(\"../data/S072R01.edf\")\nec = ut.read_file(\"../data/S072R02.edf\")\n\n\n######## 1.1\n\nfs = 160 # Frequency of sampling, given by data\nresolution = 100 # Resolution of model (s.t. 
each bin has 1Hz of width)\nfreq = 10 # Frequency of interest\ndensity = 0.2 # Density of the graph desired\n\n\n###PDC\n\n# Fitting PDC models\neo_pdc = ut.fit_model(eo, fs, resolution, \"pdc\", freq)\nec_pdc = ut.fit_model(ec, fs, resolution, \"pdc\", freq)\n\n# Adjacency Matrices for 20% density networks\nut.adjacency_matrix(eo_pdc, ut.find_threshold(eo_pdc,density), \"eo_pdc_20\")\nut.adjacency_matrix(ec_pdc, ut.find_threshold(ec_pdc,density), \"ec_pdc_20\")\n\n\n\n######## 1.2\n\n\n# Fitting DTF models\neo_dtf = ut.fit_model(eo, fs, resolution, \"dtf\", freq)\nec_dtf = ut.fit_model(ec, fs, resolution, \"dtf\", freq)\n\n# Adjacency Matrices for 20% density networks\nut.adjacency_matrix(eo_dtf, ut.find_threshold(eo_dtf,density), \"eo_dtf_20\")\nut.adjacency_matrix(ec_dtf, ut.find_threshold(ec_dtf,density), \"ec_dtf_20\")\n\n\n\n\n######## 1.3\n\ndensities = [0.01,0.05,0.1,0.3,0.5] #Different thresholds\n\n#Names for files\neo_pdc_names = [\"eo_pdc_01\", \"eo_pdc_05\", \"eo_pdc_10\", \"eo_pdc_30\", \"eo_pdc_50\"]\nec_pdc_names = [\"ec_pdc_01\", \"ec_pdc_05\", \"ec_pdc_10\", \"ec_pdc_30\", \"ec_pdc_50\"]\neo_dtf_names = [\"eo_dtf_01\", \"eo_dtf_05\", \"eo_dtf_10\", \"eo_dtf_30\", \"eo_dtf_50\"]\nec_dtf_names = [\"ec_dtf_01\", \"ec_dtf_05\", \"ec_dtf_10\", \"ec_dtf_30\", \"ec_dtf_50\"]\n\n\n### PDC Networks\n\neo_pdc_networks = list(map(lambda x: ut.adjacency_matrix(eo_pdc, ut.find_threshold(eo_pdc,x[0]), x[1]), zip(densities,eo_pdc_names)))\nec_pdc_networks = list(map(lambda x: ut.adjacency_matrix(ec_pdc, ut.find_threshold(ec_pdc,x[0]), x[1]), zip(densities,ec_pdc_names)))\n\n\n### DTF Networks\n\neo_dtf_networks = list(map(lambda x: ut.adjacency_matrix(eo_dtf, ut.find_threshold(eo_dtf,x[0]), x[1]), zip(densities,eo_dtf_names)))\nec_dtf_networks = list(map(lambda x: ut.adjacency_matrix(ec_dtf, ut.find_threshold(ec_dtf,x[0]), x[1]), zip(densities,ec_dtf_names)))\n\n\n######## 1.4\n\n#Subset of channels\nchannels = [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\", \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\", \"O1\", \"O2\"]\n\n# Reading the subset of channels from files\nsmall_eo = ut.read_file(\"../data/S072R01.edf\", channels = channels)\nsmall_ec = ut.read_file(\"../data/S072R02.edf\", channels = channels)\n\n# Adjacency Matrices with Bootstrap validation\nut.adjacency_matrix(ut.fit_model(small_eo, fs, resolution, \"pdc\", freq = freq, boot = True), 0.05, \"small_eo_pdc\")\nut.adjacency_matrix(ut.fit_model(small_ec, fs, resolution, \"pdc\", freq = freq, boot = True), 0.05, \"small_ec_pdc\")\n\n\n######## 1.5\n\n# Save a png of the network for each network\nfor network in [elem[:-4] for elem in ut.get_networks()]:\n ut.viz_graph(network)\n\n\n######## 1.6\n\n# Choosing an alternative frequency\nalternative_frequency = 50\n\n###PDC\n\n# Fitting models\nalt_eo_pdc = ut.fit_model(eo, fs, resolution, \"pdc\", alternative_frequency)\nalt_ec_pdc = ut.fit_model(ec, fs, resolution, \"pdc\", alternative_frequency)\n\n# Adjacency Matrices\nut.adjacency_matrix(alt_eo_pdc, ut.find_threshold(alt_eo_pdc,density), \"alt_eo_pdc_20\")\nut.adjacency_matrix(alt_ec_pdc, ut.find_threshold(alt_ec_pdc,density), \"alt_ec_pdc_20\")\n\n\n\n\n\n", "id": "6188427", "language": "Python", "matching_score": 3.217301607131958, "max_stars_count": 0, "path": "HW2/part_1/main.py" }, { "content": "import numpy as np\nimport os\nimport pyedflib\nimport connectivipy\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\ndef read_file(file, channels = None):\n '''\n Read an EDF 
file, with the given channels. If channels it's not provided,\n it reads every channel in the file.\n '''\n \n f = pyedflib.EdfReader(file)\n \n if channels:\n n = len(channels)\n \n signal_labels = [name.replace(\".\",\"\") for name in f.getSignalLabels()]\n \n signals = np.zeros((n, f.getNSamples()[0]))\n \n for idx,chan in enumerate(channels):\n \n signals[idx, :] = f.readSignal(signal_labels.index(chan))\n \n f._close()\n \n else:\n\n n = f.signals_in_file\n \n signals = np.zeros((n, f.getNSamples()[0]))\n \n for chan in np.arange(n):\n signals[chan, :] = f.readSignal(chan)\n \n f._close()\n\n del f\n \n return signals\n\ndef fit_model(data, fs, resolution, method, freq = None, boot = False):\n '''\n Fit an MVAR model, and compute connecitvity estimation via PDC or DTF.\n '''\n \n if boot:\n \n data = connectivipy.Data(data = data, fs = 160, chan_names = get_labels_nodes(19))\n data.fit_mvar(method = \"yw\")\n data.conn(\"pdc\")\n res = data.significance(Nrep = 200, alpha = 0.05, verbose = False)\n np.fill_diagonal(res,0)\n return res\n \n \n model = connectivipy.Mvar().fit(data, method = \"yw\")\n \n \n if method == \"dtf\":\n if freq:\n res = connectivipy.conn.dtf_fun(model[0],model[1],fs = fs, resolution = resolution)[freq,:,:]\n np.fill_diagonal(res,0)\n return res\n else:\n return connectivipy.conn.dtf_fun(model[0],model[1],fs = fs, resolution = resolution)\n elif method == \"pdc\":\n if freq:\n res = connectivipy.conn.pdc_fun(model[0],model[1],fs = fs, resolution = resolution)[freq,:,:]\n np.fill_diagonal(res,0)\n return res\n else:\n return connectivipy.conn.pdc_fun(model[0],model[1],fs = fs, resolution = resolution)\n else:\n return \"Wrong method. Use \\\"pdc\\\" or \\\"dtf\\\"\"\n \n \ndef graph_density(edges,n):\n return (2*edges)/(n*(n-1))\n\n\ndef find_threshold(network, density):\n \n n = network.shape[0]\n \n num_edges = int((density*n*(n-1))/2)\n \n threshold = -np.sort(-network, axis = None)[num_edges+1]\n \n return threshold\n\ndef adjacency_matrix(network, threshold, file=None):\n adj = (network > threshold).astype(int)\n if file:\n #Save the npy\n np.save(\"results/npy/\"+file+\".npy\",adj)\n \n #Save the png\n plt.figure(figsize = (20,20))\n \n a = dict(enumerate(get_labels_nodes(adj.shape[0]))).values() \n plt.imshow(adj, cmap='Blues', interpolation='none')\n plt.xticks(range(65), a)\n plt.yticks(range(65), a)\n \n #plt.show()\n plt.savefig(\"results/png/adj_matrices/\"+file+\".png\")\n plt.close()\n \n return\n return adj\n \n\ndef get_networks():\n return os.listdir(\"results/npy\")\n\ndef get_coordinates():\n \n with open(\"../data/channel_locations.txt\") as f:\n \n coord = {}\n \n channels = [row.split(\" \") for row in f.readlines()[1:]]\n\n for elem in channels:\n coord[elem[1]] = (float(elem[2]), float(elem[3]))\n \n return coord\n \n \ndef get_labels_nodes(number_of_nodes = 64):\n if number_of_nodes == 64:\n return ['Fc5', 'Fc3', 'Fc1', 'Fcz', 'Fc2', 'Fc4', 'Fc6', 'C5', 'C3', 'C1',\n 'Cz', 'C2', 'C4', 'C6', 'Cp5', 'Cp3', 'Cp1', 'Cpz', 'Cp2', 'Cp4',\n 'Cp6', 'Fp1', 'Fpz', 'Fp2', 'Af7', 'Af3', 'Afz', 'Af4', 'Af8', 'F7',\n 'F5', 'F3', 'F1', 'Fz', 'F2', 'F4', 'F6', 'F8', 'Ft7', 'Ft8', 'T7',\n 'T8', 'T9', 'T10', 'Tp7', 'Tp8', 'P7', 'P5', 'P3', 'P1', 'Pz', 'P2',\n 'P4', 'P6', 'P8', 'Po7', 'Po3', 'Poz', 'Po4', 'Po8', 'O1', 'Oz', 'O2', 'Iz']\n else:\n return [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\",\n \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\", \"O1\", \"O2\"]\n \n\ndef viz_graph(file):\n \n adj = 
np.load(\"../part_1/results/npy/\"+file+\".npy\")\n G = nx.from_numpy_matrix(adj, create_using = nx.DiGraph())\n G = nx.relabel_nodes(G, dict(enumerate(get_labels_nodes(adj.shape[0]))))\n\n # color\n for node in G.nodes():\n if node in get_labels_nodes(number_of_nodes = 19):\n color = 'red'\n else:\n color = '#b9c1d1'\n G.node[node]['color'] = color\n\n\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, node_shape= 'o', with_labels = True, pos = get_coordinates(), node_size = 2000, \n node_color=list(nx.get_node_attributes(G,'color').values()), font_size=18) \n #plt.title(file, fontsize=25) \n\n #plt.show()\n plt.savefig(\"results/png/networks/\"+file+\".png\", bbox_inches='tight')\n plt.close()\n return", "id": "1455821", "language": "Python", "matching_score": 4.831243515014648, "max_stars_count": 0, "path": "HW2/part_1/utils.py" }, { "content": "import networkx as nx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom collections import defaultdict\n\nimport igraph\nimport louvain\n\ndef get_labels_nodes(number_of_nodes = 64):\n if number_of_nodes == 64:\n return ['Fc5', 'Fc3', 'Fc1', 'Fcz', 'Fc2', 'Fc4', 'Fc6', 'C5', 'C3', 'C1',\n 'Cz', 'C2', 'C4', 'C6', 'Cp5', 'Cp3', 'Cp1', 'Cpz', 'Cp2', 'Cp4',\n 'Cp6', 'Fp1', 'Fpz', 'Fp2', 'Af7', 'Af3', 'Afz', 'Af4', 'Af8', 'F7',\n 'F5', 'F3', 'F1', 'Fz', 'F2', 'F4', 'F6', 'F8', 'Ft7', 'Ft8', 'T7',\n 'T8', 'T9', 'T10', 'Tp7', 'Tp8', 'P7', 'P5', 'P3', 'P1', 'Pz', 'P2',\n 'P4', 'P6', 'P8', 'Po7', 'Po3', 'Poz', 'Po4', 'Po8', 'O1', 'Oz', 'O2', 'Iz']\n else:\n return [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\",\n \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\", \"O1\", \"O2\"]\n\ndef read_graph(file, package = \"nx\"):\n adj = np.load(\"../part_1/results/npy/\"+file+\".npy\")\n \n if package == \"ig\":\n G = igraph.Graph.Adjacency((adj > 0).tolist())\n else:\n G = nx.from_numpy_matrix(adj, create_using = nx.DiGraph())\n G = nx.relabel_nodes(G, dict(enumerate(get_labels_nodes(adj.shape[0]))))\n return G\n\n\ndef get_communities(file, method = \"louvain\"):\n\n if method == \"louvain\":\n partition_com = dict(zip(get_labels_nodes(),louvain.find_partition(read_graph(file, \"ig\"),louvain.ModularityVertexPartition).membership))\n else:\n partition_com = dict(zip(get_labels_nodes(),read_graph(file, \"ig\").community_infomap().membership))\n \n res = defaultdict(list)\n \n for key, value in partition_com.items():\n res[value].append(key)\n \n communities = res.items()\n \n pd.DataFrame({\"community\": [elem[0] for elem in communities], \"members\": [elem[1] for elem in communities]}).to_excel(\"results/\"+file+\"_\"+method+\".xlsx\")\n \n return partition_com\n\ndef get_coordinates():\n \n with open(\"../data/channel_locations.txt\") as f:\n \n coord = {}\n \n channels = [row.split(\" \") for row in f.readlines()[1:]]\n\n for elem in channels:\n coord[elem[1]] = (float(elem[2]), float(elem[3]))\n \n return coord\n \ndef draw_communities(file, file_name, method):\n \n ##################### MODIFY HERE FOR THE VISUALIZATION\n \n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(read_graph(file), pos = get_coordinates(), node_color = list(get_communities(file, method = method).values()),\n with_labels = True, node_size = 1000, cmap = \"RdYlGn\", fontsize=18)\n plt.title(file, fontsize=25) \n \n #####################\n \n plt.savefig(\"results/\"+file_name+\".png\")\n plt.close()", "id": "5032617", "language": "Python", "matching_score": 3.526378870010376, "max_stars_count": 0, 
"path": "HW2/part_4/utils.py" }, { "content": "import numpy as np\nimport networkx as nx\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef create_network_file(file):\n \n adj = np.load(\"../part_1/results/npy/\"+file+\".npy\")\n \n \n with open(\"mfinder1.2/\"+file+\".txt\", \"w\") as f:\n for row in range(adj.shape[0]):\n for col in range(adj.shape[1]):\n if adj[row,col] == 1:\n f.write(str(row+1)+\" \"+str(col+1) + \" 1\\n\")\n f.close()\n\n\ndef parse_mfinder_output(file):\n \n with open(\"mfinder1.2/\"+file+\".txt\") as f:\n \n lines = f.readlines()\n \n res = []\n \n for idx,line in enumerate(lines):\n if \"Full list of subgraphs\" in line:\n nrows = int(''.join(i for i in lines[idx+2] if i.isdigit())[1:])\n \n res.append(\"Motif ID\\tN_Real\\tN_Rand\\tZ_Score\\tP-value\\tCREAL\\tUniqueness\\n\")\n \n for i in range(nrows):\n res.append(lines[idx+6+(2*i)])\n \n f.close()\n \n with open(\"res.tsv\",\"w\") as f:\n f.writelines(res)\n f.close()\n \n data = pd.read_csv(\"res.tsv\", sep = \"\\t\")\n \n data.to_excel(\"results/\"+file+\".xlsx\")\n \n os.remove(\"res.tsv\")\n \n return\n\n \n \n\ndef subgraph_by_motif(file, motif):\n \n with open(\"mfinder1.2/\"+file+\".txt\") as f:\n \n members = {}\n \n lines = f.readlines()\n \n for idx,line in enumerate(lines):\n if \"subgraph id = \" in line:\n motif_id = int(''.join(i for i in line if i.isdigit()))\n num_members = int(''.join(i for i in lines[idx+1] if i.isdigit()))\n triplets = [[int(elem) for elem in row.split(\"\\t\")[:-1]] for row in lines[idx+5:idx+5+num_members]]\n \n members[motif_id] = triplets\n \n graph = nx.DiGraph()\n \n for triplet in members[motif]:\n graph.add_edge(triplet[0]-1, triplet[2]-1)\n graph.add_edge(triplet[1]-1, triplet[2]-1)\n \n graph = nx.relabel_nodes(graph, dict(enumerate(get_labels_nodes())))\n \n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(graph, node_shape= 'o', with_labels = True, pos = get_coordinates(), node_size = 2000, font_size=18)\n #plt.title(file[:9], fontsize=25) \n \n\n plt.savefig(\"results/\"+file[:9]+\".png\")\n plt.close()\n \ndef get_labels_nodes(number_of_nodes = 64):\n if number_of_nodes == 64:\n return ['Fc5', 'Fc3', 'Fc1', 'Fcz', 'Fc2', 'Fc4', 'Fc6', 'C5', 'C3', 'C1',\n 'Cz', 'C2', 'C4', 'C6', 'Cp5', 'Cp3', 'Cp1', 'Cpz', 'Cp2', 'Cp4',\n 'Cp6', 'Fp1', 'Fpz', 'Fp2', 'Af7', 'Af3', 'Afz', 'Af4', 'Af8', 'F7',\n 'F5', 'F3', 'F1', 'Fz', 'F2', 'F4', 'F6', 'F8', 'Ft7', 'Ft8', 'T7',\n 'T8', 'T9', 'T10', 'Tp7', 'Tp8', 'P7', 'P5', 'P3', 'P1', 'Pz', 'P2',\n 'P4', 'P6', 'P8', 'Po7', 'Po3', 'Poz', 'Po4', 'Po8', 'O1', 'Oz', 'O2', 'Iz']\n else:\n return [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\", \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\", \"O1\", \"O2\"]\n\n\ndef get_involved_motifs(file, channel, motifs):\n \n with open(\"mfinder1.2/\"+file+\".txt\") as f:\n \n res = []\n\n lines = f.readlines()\n \n for idx,line in enumerate(lines):\n if \"subgraph id = \" in line:\n motif_id = int(''.join(i for i in line if i.isdigit()))\n num_members = int(''.join(i for i in lines[idx+1] if i.isdigit()))\n if motif_id in motifs:\n triplets = [[motif_id,triplet] for triplet in [[int(elem) for elem in row.split(\"\\t\")[:-1]] for row in lines[idx+5:idx+5+num_members]] if channel in triplet]\n res.extend(triplets)\n \n data = pd.DataFrame(res,columns = [\"Motif_Id\", \"Triplet\"])\n \n data.to_excel(\"results/\"+file[:9]+\"_node_\"+str(channel)+\"_motif.xlsx\")\n \n return\n \n \ndef get_coordinates():\n \n with 
open(\"../data/channel_locations.txt\") as f:\n \n coord = {}\n \n channels = [row.split(\" \") for row in f.readlines()[1:]]\n\n for elem in channels:\n coord[elem[1]] = (float(elem[2]), float(elem[3]))\n \n return coord", "id": "4092243", "language": "Python", "matching_score": 2.931304693222046, "max_stars_count": 0, "path": "HW2/part_3/utils.py" }, { "content": "import utils as ut\n\n\n# Creates the txt files needed for launching mfinder\nut.create_network_file(\"eo_pdc_20\")\nut.create_network_file(\"ec_pdc_20\")\n\n\n######### Part 3.1\n\n# Done with mfinder\n\n#Parameters used:\n\n# pval <0.01\n# mfactor = 1.1\n# U = 4\n# random graphs = 1000\n\n\n#Create a xls file with mfinder results - useful for the report\nut.parse_mfinder_output(\"eo_pdc_20_OUT_size3\")\nut.parse_mfinder_output(\"ec_pdc_20_OUT_size3\")\n\n\n\n######### Part 3.2\n\n# The motif id in mfinder for A->B<-C is 36\nmotif_id = 36\n\n#Get all the triplets that form a motif with pattern A->B<-C directly from the Mfinder output\n# and save a png of the subgraph composed by these triplets\n\nut.subgraph_by_motif(\"eo_pdc_20_MEMBERS_size3\",motif_id)\nut.subgraph_by_motif(\"ec_pdc_20_MEMBERS_size3\",motif_id)\n\n\n\n######### Part 3.3\n\n# Selecting the Parieto-Occipital central Channel, that correpond to the number 57 in our data\nchannel = 57\n\n# Selecting the motifs ids found by Mfinder, that were the same for both networks (EO and EC).\nmotifs_eo = [38,46,108,110]\nmotifs_ec = [38,46,108,110, 238]\n\n#Get triplets that contains the node 57 and save the subgraph induced by those motifs.\nut.get_involved_motifs(\"eo_pdc_20_MEMBERS_size3\", channel, motifs_eo)\nut.get_involved_motifs(\"ec_pdc_20_MEMBERS_size3\", channel, motifs_ec)\n\n\n######### Part 3.4\n\n# Done with Mfinder\n\n# Same parameters\n\nut.parse_mfinder_output(\"eo_pdc_20_OUT_size4\")\nut.parse_mfinder_output(\"ec_pdc_20_OUT_size4\")\n", "id": "6777979", "language": "Python", "matching_score": 0.847341001033783, "max_stars_count": 0, "path": "HW2/part_3/main.py" }, { "content": "import utils as ut\n\n# Read the graphs\nii = ut.read_graph(\"ii\")\nsgi = ut.read_graph(\"sgi\")\nui = ut.read_graph(\"ui\")\n\n# Compute global measures\nut.graph_global_measures(ii, \"ii\")\nut.graph_global_measures(sgi, \"sgi\")\nut.graph_global_measures(ui, \"ui\")\n\n\n# Compute LCC global measures\nut.graph_global_measures(ii, \"ii\", True)\nut.graph_global_measures(sgi, \"sgi\", True)\nut.graph_global_measures(ui, \"ui\", True)\n\n\n \n# Visualize graph \nut.viz_graph(sgi, 'sgi')\nut.viz_graph(ii, 'ii', cc=True)\nut.viz_graph(ui, 'ui', cc=True)", "id": "6809734", "language": "Python", "matching_score": 1.244889259338379, "max_stars_count": 0, "path": "HW1.2/part_1/main.py" }, { "content": "import networkx as nx\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\n\n\ndef read_graph(file):\n \n G = nx.Graph()\n \n with open(\"../interactomes/\"+file+\".tsv\", \"r\") as f:\n for row in f.readlines()[1:]:\n edge = row[:-1].split(\"\\t\")\n G.add_edge(edge[0],edge[1])\n \n return G\n\ndef net_centralization(graph):\n \n deg = dict(graph.degree())\n max_deg = max(deg.values())\n \n num = 0\n \n for elem in deg.values():\n num += (max_deg-elem)\n \n n = len(deg)\n \n return num/((n-1)*(n-2))\n\n\ndef graph_global_measures(graph, file, cc = False):\n \n if cc:\n graph = max(nx.connected_component_subgraphs(graph), key=len)\n with open(\"results/lcc/\"+file+\"_lcc.pickle\",\"wb\") as f:\n pickle.dump(graph, f)\n f.close()\n \n measures = [\"# of Nodes\", \"# of 
Edges\", \"Average Path Length\", \"Average Degree\", \"Average Clustering Coefficient\",\n \"Network Diameter\", \"Network Radius\", \"Centralization\"]\n path = \"results/lcc/\"+file+\"_lcc_global_measures.txt\"\n else:\n measures = [\"# of Nodes\", \"# of Edges\", \"# of Connected Components\", \"# of Isolated Nodes\", \"Average Path Length\", \"Average Degree\", \"Average Clustering Coefficient\",\n \"Network Diameter\", \"Network Radius\", \"Centralization\"]\n path = \"results/interactomes/\"+file+\"_global_measures.txt\"\n \n nodes = len(graph.nodes())\n \n if nodes < 20:\n with open(path, \"w\") as f:\n f.write(\"The graph has less than 20 nodes.\")\n f.close()\n return\n \n edges = len(graph.edges())\n \n num_cc = nx.number_connected_components(graph)\n isolated = len(list(nx.isolates(graph)))\n \n try:\n avg_sp = nx.average_shortest_path_length(graph)\n except:\n avg_sp = \"Graph not connected\"\n \n avg_dg = sum(dict(graph.degree).values())/len(graph.degree)\n \n avg_clust = nx.average_clustering(graph)\n \n try:\n diam = nx.diameter(graph)\n except:\n diam = \"Graph not connected\"\n \n try:\n radius = nx.radius(graph)\n except:\n radius = \"Graph not connected\"\n \n centralization = net_centralization(graph)\n \n if cc:\n values = [nodes, edges, avg_sp, avg_dg, avg_clust, diam, radius, centralization]\n \n else:\n values = [nodes, edges, num_cc, isolated, avg_sp, avg_dg, avg_clust, diam, radius, centralization]\n \n \n with open(path, \"w\") as f:\n f.writelines([str(elem[0])+\"\\t\"+str(elem[1])+\"\\n\" for elem in list(zip(measures,values))])\n f.close()\n \n if cc:\n lcc_local_measures(graph).to_excel(\"results/lcc/\"+file+\"_lcc_local_measures.xlsx\")\n\n\n\n\n\ndef lcc_local_measures(graph):\n res = {}\n deg = dict(graph.degree())\n b_cen = nx.betweenness_centrality(graph)\n e_cen = nx.eigenvector_centrality(graph)\n c_cen = nx.closeness_centrality(graph)\n \n for elem in deg.keys():\n res[elem] = [deg[elem]]\n res[elem].append(b_cen[elem])\n res[elem].append(e_cen[elem])\n res[elem].append(c_cen[elem])\n res[elem].append(b_cen[elem]/deg[elem])\n \n return pd.DataFrame.from_dict(res, orient = \"index\", columns = [\"degree\", \"betweenness\", \"eigenvector\", \"closeness\" ,\"ratio\"])\n\n\n \ndef viz_graph(G, filename, cc = False):\n \n if cc:\n G = max(nx.connected_component_subgraphs(G), key=len)\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, node_size=500, node_color='#8b9dc3')\n plt.savefig('results/images/' + filename + '.png')\n \n \n else:\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, with_labels=True, node_size=1000, font_size=18, node_color='#8b9dc3')\n plt.savefig('results/images/' + filename + '.png')\n \n ", "id": "4304074", "language": "Python", "matching_score": 4.490540504455566, "max_stars_count": 0, "path": "HW1.2/part_1/utils.py" }, { "content": "import operator\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\n\ndef save_highest_10(degree_lst, filename):\n df = pd.DataFrame(degree_lst, columns=['channel', 'degree'])\n df.to_csv('results/' + filename + '.csv')\n\n\n \ndef global_indeces(graph_names_lst):\n global_cc = []\n avg_path = []\n for g in graph_names_lst:\n\n adj = np.load('../part_1/results/npy/' + g + '.npy')\n G = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())\n \n if nx.is_weakly_connected(G):\n global_cc.append(nx.average_clustering(G))\n avg_path.append(nx.average_shortest_path_length(G))\n \n else:\n global_cc.append(nx.average_clustering(G))\n \n 
G_un = nx.from_numpy_matrix(adj)\n connected_components = nx.connected_component_subgraphs(G_un)\n subgraphs = map(lambda smallGraph : G.subgraph(smallGraph.nodes()), connected_components)\n avgShortestPaths = np.mean(list(map(lambda x: nx.average_shortest_path_length(x), subgraphs )))\n avg_path.append(avgShortestPaths)\n\n return(global_cc, avg_path)\n\n\n\ndef global_plot(cc_eo, cc_ec, avg_eo, avg_ec, filename):\n densities = [5, 10, 20, 30, 50]\n \n plt.figure(figsize = (20,10))\n \n plt.subplot(1,2,1)\n plt.plot(densities, cc_eo, 'o-', label='EO')\n plt.plot(densities, cc_ec, 'o-', label='EC')\n plt.legend(loc='lower right', prop={'size': 20})\n \n plt.subplot(1,2,2)\n plt.plot(densities, avg_eo, 'o-', label='EO')\n plt.plot(densities, avg_ec, 'o-', label='EC')\n plt.legend(loc='lower right', prop={'size': 20})\n\n plt.savefig('results/' + filename + '.png')\n \n\ndef get_coordinates():\n \n with open(\"../data/channel_locations.txt\") as f:\n \n coord = {}\n \n channels = [row.split(\" \") for row in f.readlines()[1:]]\n\n for elem in channels:\n coord[elem[1]] = (float(elem[2]), float(elem[3]))\n \n return coord\n \n \n \ndef get_labels_nodes(number_of_nodes = 64):\n if number_of_nodes == 64:\n return ['Fc5', 'Fc3', 'Fc1', 'Fcz', 'Fc2', 'Fc4', 'Fc6', 'C5', 'C3', 'C1',\n 'Cz', 'C2', 'C4', 'C6', 'Cp5', 'Cp3', 'Cp1', 'Cpz', 'Cp2', 'Cp4',\n 'Cp6', 'Fp1', 'Fpz', 'Fp2', 'Af7', 'Af3', 'Afz', 'Af4', 'Af8', 'F7',\n 'F5', 'F3', 'F1', 'Fz', 'F2', 'F4', 'F6', 'F8', 'Ft7', 'Ft8', 'T7',\n 'T8', 'T9', 'T10', 'Tp7', 'Tp8', 'P7', 'P5', 'P3', 'P1', 'Pz', 'P2',\n 'P4', 'P6', 'P8', 'Po7', 'Po3', 'Poz', 'Po4', 'Po8', 'O1', 'Oz', 'O2', 'Iz']\n else:\n return [\"Fp1\", \"Fp2\", \"F7\", \"F3\", \"Fz\", \"F4\", \"F8\", \"T7\", \"C3\", \"Cz\",\n \"C4\", \"T8\", \"P7\", \"P3\", \"Pz\", \"P4\", \"P8\", \"O1\", \"O2\"]\n \ndef topology(G, filename, adj):\n \n G= nx.relabel_nodes(G, dict(enumerate(get_labels_nodes(adj.shape[0]))))\n \n # color\n for node in G.nodes():\n if node in get_labels_nodes(number_of_nodes = 19):\n color = 'red'\n else:\n color = '#b9c1d1'\n G.node[node]['color'] = color\n\n # degree\n d = dict(nx.degree(G))\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, nodelist= list(d.keys()), node_size=[v*100 for v in d.values()], with_labels = True, \n pos = get_coordinates(), font_size=18, node_color=list(nx.get_node_attributes(G,'color').values()))\n plt.savefig('results/' + filename + '_degree' + '.png')\n \n # in degree\n d = dict(G.in_degree)\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, nodelist= list(d.keys()), node_size=[v*100 for v in d.values()], with_labels = True, \n pos = get_coordinates(), font_size=18, node_color=list(nx.get_node_attributes(G,'color').values()))\n plt.savefig('results/' + filename + '_in_degree' + '.png')\n \n # out degree\n d = dict(G.out_degree)\n plt.figure(num=None, figsize=(15,15), dpi=50)\n nx.draw(G, nodelist= list(d.keys()), node_size=[v*100 for v in d.values()], with_labels = True, \n pos = get_coordinates(), font_size=18, node_color=list(nx.get_node_attributes(G,'color').values()))\n #plt.show()\n plt.savefig('results/' + filename + '_out_degree' + '.png')", "id": "9868314", "language": "Python", "matching_score": 1.6420955657958984, "max_stars_count": 0, "path": "HW2/part_2/utils.py" }, { "content": "import pickle\nimport networkx as nx\nimport pandas as pd\nimport markov_clustering as mc\nimport community\nimport numpy as np\nfrom scipy.stats import hypergeom\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef 
read_graph(file):\n \n with open(\"../part_1/results/lcc/\" + file + \"_lcc.pickle\",\"rb\") as f:\n return pickle.load(f)\n\n\ndef mcl(graph, viz=False):\n \n mat = nx.to_numpy_matrix(graph)\n \n mod = -1\n \n for val in np.arange(1.2,3,0.1):\n \n res = mc.run_mcl(mat, inflation=val)\n clust = mc.get_clusters(res)\n q = mc.modularity(matrix=np.asmatrix(res), clusters=clust)\n if q > mod:\n clusters = clust\n \n if viz == False:\n \n labels = dict(zip(range(len(graph)),graph.nodes()))\n\n return[[labels.get(item) for item in clust] for clust in clusters]\n \n else:\n \n plt.figure(num=None, figsize=(20,20), dpi=50)\n pos = nx.spring_layout(graph)\n mc.draw_graph(mat, clusters, node_size=200, with_labels=False, edge_color=\"silver\")\n #plt.savefig('results/' + filename + '.png')\n\n\ndef louvain(G_lcc, filename, viz=False):\n \n partition = community.best_partition(G_lcc)\n clusters = [[nodes for nodes in partition.keys() if partition[nodes] == com] for com in set(partition.values())]\n \n if viz == False:\n \n return(clusters)\n \n else:\n \n plt.figure(num=None, figsize=(15,15), dpi=50)\n pos = nx.spring_layout(G_lcc)\n col = sns.color_palette(\"husl\", len(clusters))\n \n for idx,c in enumerate(clusters):\n nx.draw_networkx_nodes(G_lcc, pos, nodelist=clusters[idx], node_color=col[idx])\n \n nx.draw_networkx_edges(G_lcc, pos, alpha=0.5)\n plt.savefig('results/' + filename + '_louvain.png')\n \n \n\n\n\ndef check_length_mod(mod):\n if len(mod) >= 10:\n return True\n\ndef hypergeom_test(graph, mod):\n \n # Initialize the gene list\n with open(\"../../HW1/seed_genes.txt\",\"r\") as f:\n genes = [gene.rstrip() for gene in f.readlines()]\n \n M = len(graph.nodes())\n n = len(set(genes).intersection(set(graph.nodes())))\n N = len(mod)\n x = len(set(genes).intersection(set(mod)))\n \n pval = hypergeom.sf(x-1, M, n, N)\n \n return [x, N, set(genes).intersection(set(mod)), set(mod).difference(set(genes)), pval]\n\n\n\n \n \ndef create_table(file, lou_mod, mcl_mod):\n \n lou_mod = [[\"Louvain\"]+elem for elem in lou_mod]\n mcl_mod = [[\"MCL\"]+elem for elem in mcl_mod]\n \n mods = lou_mod + mcl_mod\n \n cols = [\"Clustering Algorithm\", \"Number of Seed Genes\", \"Number of Genes\", \"List of Seed Genes\", \"List of Non-Seed Genes\", \"P-Value\"]\n \n table = pd.DataFrame(data = mods, columns = cols)\n table[\"Id\"] = np.arange(1, table.shape[0]+1)\n \n table.to_excel(\"results/\"+file+\".xlsx\")\n \n \n put_mod = table[table[\"P-Value\"] < 0.05]\n \n if not put_mod.empty:\n \n for index, row in put_mod.iterrows():\n with open(\"../part_3/putative_disease_modules/\"+file+\"_\"+str(row[\"Id\"])+\".txt\", \"w\") as f:\n f.writelines(\"%s\\n\" % l for l in list(row[\"List of Seed Genes\"].union(row[\"List of Non-Seed Genes\"])))\n f.close()\n \n return table\n\n\n\n\n\n\n############### Reference\n\n# https://blog.alexlenail.me/understanding-and-implementing-the-hypergeometric-test-in-python-a7db688a7458\n", "id": "3944734", "language": "Python", "matching_score": 2.848735809326172, "max_stars_count": 0, "path": "HW1.2/part_2/utils.py" }, { "content": "import utils as ut\n\n\n### read data\n\nii = ut.read_graph(\"ii\")\nui = ut.read_graph(\"ui\")\n\n\n### Hypergeom Test\n\nii_lou = map(lambda x : ut.hypergeom_test(ii,x) ,filter(ut.check_length_mod, ut.louvain(ii)))\nii_mcl = map(lambda x : ut.hypergeom_test(ii,x) ,filter(ut.check_length_mod, ut.mcl(ii)))\n\n\nui_lou = map(lambda x : ut.hypergeom_test(ui,x) ,filter(ut.check_length_mod, ut.louvain(ui)))\nui_mcl = map(lambda x : ut.hypergeom_test(ui,x) 
,filter(ut.check_length_mod, ut.mcl(ui)))\n\n\n# Create tables\n\nii_mod = ut.create_table(\"ii_mod\", list(ii_lou), list(ii_mcl))\nui_mod = ut.create_table(\"ui_mod\", list(ui_lou), list(ui_mcl))\n\n\n\n# Visualize clusters \n\nut.louvain(ii, 'ii', viz=True)\nut.louvain(ui, 'ui', viz=True)\nut.mcl(ii, viz=True)\nut.mcl(ui, viz=True)\n\n", "id": "10449120", "language": "Python", "matching_score": 0.03699836507439613, "max_stars_count": 0, "path": "HW1.2/part_2/main.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport utils as ut\n\n\n# Initialize the gene list\nwith open(\"../seed_genes.txt\",\"r\") as f:\n genes = [gene.rstrip() for gene in f.readlines()]\n\n\n### Preprocessing Biogrid\n \n# Since it is requested to report also the Uniprot AC in the table, here we look up for them.\n\n# Load the file\nbiogrid = pd.read_csv(\"../part_3/results/biogrid.tsv\", sep = \"\\t\")\n\n# Take the genes involved in interactions\nunique_genes = list((biogrid[\"gene_1\"].append(biogrid[\"gene_2\"])).unique())\n\n# Create a dictionary gene:uniprot_ac\nground_truth = dict(zip(unique_genes,map(ut.query_uniprot, unique_genes)))\n\n#Assign the uniprot_ac according to the gene\nbiogrid[\"interactor_1_uniprot\"] = biogrid[\"gene_1\"].map(ground_truth)\nbiogrid[\"interactor_2_uniprot\"] = biogrid[\"gene_2\"].map(ground_truth)\n\n# Retrieving uniprots where there are more than 2 occurrencies and correcting them manually after a visit at Uniprot website\nac1_unknown = biogrid[\"gene_1\"].loc[(biogrid[\"interactor_1_uniprot\"].apply(type) == list) & (biogrid[\"interactor_1_uniprot\"].apply(len) > 1)].unique()\nac2_unknown = biogrid[\"gene_2\"].loc[(biogrid[\"interactor_2_uniprot\"].apply(type) == list) & (biogrid[\"interactor_2_uniprot\"].apply(len) > 1)].unique()\n\nac_unknown = list(np.union1d(ac1_unknown,ac2_unknown))\n\nground_truth[\"LAP2\"] = \"Q96RT1\"\nground_truth[\"MCM2\"] = \"P49736\"\nground_truth[\"MTF1\"] = \"Q14872\"\nground_truth[\"PRR3\"] = \"P79522\"\nground_truth[\"SLP1\"] = \"Q9UBI4\"\nground_truth[\"SP1\"] = ground_truth[\"Sp1\"] = \"P08047\"\n\n#Reassign the uniprot_ac according to the gene\nbiogrid[\"interactor_1_uniprot\"] = biogrid[\"gene_1\"].map(ground_truth)\nbiogrid[\"interactor_2_uniprot\"] = biogrid[\"gene_2\"].map(ground_truth)\n\n# Deleting interactions where no Uniprot AC were found, since they involve genes that are not realted to humans.\nbiogrid.drop(biogrid[(biogrid[\"interactor_1_uniprot\"].apply(type) == list) | (biogrid[\"interactor_2_uniprot\"].apply(type) == list)].index, inplace = True)\n\n# Adding DB information\nbiogrid[\"database\"] = [\"BioGrid\" for _ in range(biogrid.shape[0])]\n\nbiogrid.to_csv('../part_3/results/biogrid.tsv', sep='\\t', index=False)\n\n\n### Preprocessing IID\n\n#Since Uniprot AC were reported by the website of IID, here we add just the information of the DB\niid = pd.read_csv(\"../part_3/iid.txt\", sep = \"\\t\", usecols = [\"Query Symbol\", \"Partner Symbol\", \"Query UniProt\", \"Partner UniProt\"])\niid = iid[[\"Query Symbol\", \"Partner Symbol\", \"Query UniProt\", \"Partner UniProt\"]]\niid[\"database\"] = [\"IID\" for _ in range(iid.shape[0])]\niid.drop_duplicates(inplace = True)\n\n\n\n# Uniforming column names\nbiogrid.columns = iid.columns = [\"interactor_1\", \"interactor_2\", \"interactor_1_uniprot\", \"interactor_2_uniprot\", \"database\"]\n\n\n\n### Creation of tables\n\n### Seed Genes Interactome\n\nsgi_bio = biogrid.loc[biogrid[\"interactor_1\"].isin(genes) & biogrid[\"interactor_2\"].isin(genes)]\nsgi_iid = 
iid.loc[iid[\"interactor_1\"].isin(genes) & iid[\"interactor_2\"].isin(genes)]\n\nsgi = pd.concat([sgi_bio, sgi_iid])\n\nsgi.to_csv(\"results/sgi.tsv\", sep = \"\\t\", index = False)\n\n\n### Union Interactome\n\nui_bio = biogrid.loc[biogrid[\"interactor_1\"].isin(genes) | biogrid[\"interactor_2\"].isin(genes)]\nui_iid = iid.loc[iid[\"interactor_1\"].isin(genes) | iid[\"interactor_2\"].isin(genes)]\n\nui = pd.concat([ui_bio, ui_iid])\nui.to_csv(\"results/ui.tsv\", sep = \"\\t\", index = False)\n\n\n### Intersection Interactome\n\nbiogrid[\"set_genes\"] = biogrid.apply(lambda row: frozenset([row.interactor_1, row.interactor_2]), axis=1)\niid[\"set_genes\"] = iid.apply(lambda row: frozenset([row.interactor_1, row.interactor_2]), axis=1)\n\nii = pd.merge(biogrid, iid, how='inner', on=[\"set_genes\"])\nii = ii.iloc[:,:4]\nii.columns = [\"interactor_1\", \"interactor_2\", \"interactor_1_uniprot\", \"interactor_2_uniprot\"]\n\nii = ii.loc[ii[\"interactor_1\"].isin(genes) | ii[\"interactor_2\"].isin(genes)]\n\nii.to_csv(\"results/ii.tsv\", sep = \"\\t\", index = False)\n", "id": "11512680", "language": "Python", "matching_score": 4.360305309295654, "max_stars_count": 0, "path": "HW1/part_4/create_tables.py" }, { "content": "import pandas as pd\n\n# Initialize the gene list\nwith open(\"../seed_genes.txt\",\"r\") as f:\n genes = [gene.rstrip() for gene in f.readlines()]\n\n\n### BioGRID\n\n# Load the entire Biogrid Dataset\nbiogrid_all = pd.read_csv(\"biogrid_all.txt\", sep = \"\\t\", usecols = [\"Official Symbol Interactor A\", \"Official Symbol Interactor B\"])\n\n#Changing column names in a more useful way\nbiogrid_all.columns = ['gene_1', 'gene_2']\n\n#Selecting interactions where seed genes are involved\nsg_interactions = biogrid_all[biogrid_all['gene_1'].isin(genes) | biogrid_all['gene_2'].isin(genes)]\n\n#Selecting genes that are connected at least to one seed genes\nnew_genes = set(sg_interactions['gene_1']).union(set(sg_interactions['gene_2'])).difference(genes)\n\n#Selecting interactions among the genes that has at least an interaction with the seed genes\nother_interactions = biogrid_all[biogrid_all['gene_1'].isin(new_genes) & biogrid_all['gene_2'].isin(new_genes)]\n\n#Merging the two dataframes and droppin duplicates\nbiogrid = pd.concat([sg_interactions, other_interactions])\nbiogrid.drop_duplicates(inplace = True)\nbiogrid.to_csv('results/biogrid.tsv', sep='\\t', index=False)\n\n\n### IID\n\n#IID dataframes were obtained directly by the website. 
iid represents the one where the query was made with the names of the genes, iid_ac with\n#Uniprot accession number.\n\n#Below we just drop duplicates for these 2 datasets.\n\niid = pd.read_csv(\"iid.txt\", sep = \"\\t\", usecols = [\"Query Symbol\", \"Partner Symbol\", \"Query UniProt\", \"Partner UniProt\"])\niid.drop_duplicates(inplace = True)\niid.to_csv(\"results/iid.tsv\", sep = \"\\t\", index = False, columns = [\"Query Symbol\", \"Partner Symbol\"])\niid.to_csv(\"iid.txt\", sep = \"\\t\", index = False)\n\n\niid_ac = pd.read_csv(\"iid_ac.txt\", sep = \"\\t\", usecols = [\"Query Symbol\", \"Partner Symbol\", \"Query UniProt\", \"Partner UniProt\"])\niid_ac.drop_duplicates(inplace = True)\niid_ac.to_csv(\"results/iid_ac.tsv\", sep = \"\\t\", index = False, columns = [\"Query Symbol\", \"Partner Symbol\"])\niid_ac.to_csv(\"iid_ac.txt\", sep = \"\\t\", index = False)\n", "id": "4410328", "language": "Python", "matching_score": 2.098590135574341, "max_stars_count": 0, "path": "HW1/part_3/create_interactomes.py" }, { "content": "import pandas as pd\n\n#Seed Genes List\n\nsg = pd.read_csv(\"../part_2/results/sg_info.tsv\", sep = \"\\t\")\n\nsg_genes = list(set(sg['uniprot_ac']))\n\nwith open(\"sg_genes.txt\",\"w\") as f:\n for gene in sg_genes:\n f.write(gene+\"\\n\")\n f.close()\n\n\n# Union Interactome List\n\nui = pd.read_csv(\"../part_4/results/ui.tsv\", sep = \"\\t\")\n\nui_genes = list(set(ui['interactor_1_uniprot']).union(set(ui['interactor_2_uniprot'])))\n\nwith open(\"ui_genes.txt\",\"w\") as f:\n for gene in ui_genes:\n f.write(gene+\"\\n\")\n f.close()\n \n# Intersection interactome list\n\nii = pd.read_csv(\"../part_4/results/ii.tsv\", sep = \"\\t\")\n\nii_genes = list(set(ii['interactor_1_uniprot']).union(set(ii['interactor_2_uniprot'])))\n\nwith open(\"ii_genes.txt\",\"w\") as f:\n for gene in ii_genes:\n f.write(gene+\"\\n\")\n f.close()", "id": "8401154", "language": "Python", "matching_score": 0.6909782886505127, "max_stars_count": 0, "path": "HW1/part_5/get_lists.py" }, { "content": "import utils as ut\n\n# Initialize the gene list\nwith open(\"../seed_genes.txt\",\"r\") as f:\n genes = [gene.rstrip() for gene in f.readlines()]\n\n\n#### Official Gene Symbol - HGNC \nwith open('dataframes/hgnc.tsv', 'w') as results:\n results.write(\"gene\\tapproved_symbol\\n\")\n results.writelines(\"%s\\t%s\\n\" % line for line in zip(genes, map(ut.query_hgnc, genes))) \n\n\n\n#### Uniprot AC + Protein Name\nwith open(\"dataframes/uniprot.tsv\",\"w\") as results:\n results.write(\"gene\\tuniprot_ac\\tprotein_name\\tfunction\\n\")\n results.writelines(\"%s\\t%s\\n\" % line for line in zip(genes,map(ut.query_uniprot, genes))) \n\n\n\n#### Entrez Gene ID\n\n# Query to NCBI site\nncbi_results = dict(zip(genes,map(ut.query_ncbi, genes)))\n\n# Looking at results, we note that some queries retrieve more than a single ID, since queries are not based on an exact match on gene name,\n# but also on the aliases, thus we need to check manually on the site which IDs correspond to the given gene.\n\n# Retrieving discrepancies\ndiscrepancies = [k for k,v in ncbi_results.items() if len(v) != 1]\n\n# After a visit to NCBI site, we can correct this results!\n\nncbi_results[\"PCDH11Y\"] = [\"83259\"]\nncbi_results[\"RBMY1A1\"] = [\"5940\"]\nncbi_results[\"RBMY1D\"] = [\"378949\"]\nncbi_results[\"TSPY1\"] = [\"7258\"]\nncbi_results[\"TSPY10\"] = [\"100289087\"]\nncbi_results[\"TSPY3\"] = [\"728137\"]\nncbi_results[\"TSPY1\"] = [\"728403\"]\n\nwith open(\"dataframes/ncbi.tsv\",\"w\") as results:\n 
results.write(\"gene\\tid\\n\")\n results.writelines(\"%s\\t%s\\n\" % line for line in {k:v[0] for k,v in ncbi_results.items()}.items())\n\n \n### Merging all the dataframes\ndf_name_list = ['dataframes/uniprot.tsv', 'dataframes/hgnc.tsv', 'dataframes/ncbi.tsv']\ndf_final = ut.merge_dfs(df_name_list)\n\n\n\n\n\n\n\n\n\n\n\n", "id": "8469517", "language": "Python", "matching_score": 2.9754092693328857, "max_stars_count": 0, "path": "HW1/part_2/get_info.py" }, { "content": "import bioservices.uniprot as up\nfrom bioservices.hgnc import HGNC\nfrom Bio import Entrez\nfrom functools import reduce\nimport pandas as pd\n\n# Return the results of a query in HGCN\ndef query_hgnc(gene):\n h = HGNC()\n return h.search(gene)['response']['docs'][0]['symbol']\n\n\n# Return the results of a query of UniProt DB\ndef query_uniprot(gene):\n u = up.UniProt()\n return u.search(\"gene_exact:\"+gene+\"+AND+reviewed:yes+AND+organism:9606\",\n columns = \"id, protein names, comment(FUNCTION)\").split(\"\\n\")[1]\n \n\n# Return the results of a query of NCBI DB\ndef query_ncbi(gene):\n \n Entrez.email = \"<EMAIL>\"\n \n handle = Entrez.esearch(db= \"gene\", term = \"(\"+gene+\"[Gene Name]) AND \\\"Homo sapiens\\\"[porgn] AND (alive[prop])\")\n record = Entrez.read(handle)\n return record[\"IdList\"]\n \n\n# Merge the DFs: read data from a name list and merge the DFs\ndef merge_dfs(dfs_name_list):\n dfs_list = []\n for filename in dfs_name_list:\n df = pd.read_csv(filename, sep='\\t', index_col=False)\n dfs_list.append(df)\n \n df_final = reduce(lambda left,right: pd.merge(left,right,on='gene'), dfs_list)\n df_final.to_csv('results/sg_info.tsv', sep='\\t', index = False)\n\n\n\n\n\n", "id": "9461435", "language": "Python", "matching_score": 3.2746055126190186, "max_stars_count": 0, "path": "HW1/part_2/utils.py" }, { "content": "import bioservices.uniprot as up\n\ndef query_uniprot(gene): \n u = up.UniProt()\n res = u.search(\"gene_exact:\"+gene+\"+AND+reviewed:yes+AND+organism:9606\", \n columns = \"id\").split(\"\\n\")\n \n if len(res[1:-1]) == 1:\n return res[1]\n else:\n return res[1:-1]\n", "id": "1261814", "language": "Python", "matching_score": 2.1626105308532715, "max_stars_count": 0, "path": "HW1/part_4/utils.py" }, { "content": "import bioservices.uniprot as up\nimport pandas as pd\nimport os\n\n# Return the results of a query of UniProt DB\ndef query_uniprot(gene):\n u = up.UniProt()\n return u.search(\"gene_exact:\"+gene+\"+AND+reviewed:yes+AND+organism:9606\",\n columns = \"id\").split(\"\\n\")[1]\n \n \ndef translate(mod):\n with open(\"putative_disease_modules/\"+mod, \"r\") as f:\n genes = f.readlines()\n f.close()\n \n with open(\"putative_disease_modules/\"+mod[:-4]+\"_uniprot.txt\", \"w\") as f:\n f.writelines(\"%s\\n\" % l for l in map(query_uniprot, genes))\n \n\ndef mod_files(folder):\n return os.listdir(folder)\n\n\ndef top_ten(file):\n \n data = pd.read_csv(\"results/innate/\" + file, sep = \"\\t\")\n \n data.sort_values(by = [\"Pathway p-value (corrected)\"], inplace = True)\n \n data.iloc[:10,:].to_excel(\"results/\"+ file[:-4] +\".xlsx\")", "id": "10581413", "language": "Python", "matching_score": 3.07747220993042, "max_stars_count": 0, "path": "HW1.2/part_3/utils.py" }, { "content": "import utils as ut\n\nfor file in ut.mod_files(\"putative_disease_modules/\"):\n ut.translate(file)\n\n\nfor file in ut.mod_files(\"results/innate/\"):\n ut.top_ten(file)", "id": "3919119", "language": "Python", "matching_score": 0.992953896522522, "max_stars_count": 0, "path": "HW1.2/part_3/main.py" }, { 
"content": "import utils as ut\n\nut.create_network()\n\nut.translate(\"seed_genes\", \"sg_entrez\")\n\nut.join_files(\"sg_entrez\", \"diamond\", \"intersection_list\")\n\nut.top_ten(\"go_ora\")\nut.top_ten(\"path_ora\")", "id": "1395301", "language": "Python", "matching_score": 1.014219880104065, "max_stars_count": 0, "path": "HW1.2/part_4/main.py" }, { "content": "#! /usr/bin/env python\n\n\n\"\"\"\n# -----------------------------------------------------------------------\n# encoding: utf-8\n\n# DIAMOnD.py\n# <NAME>, <NAME>\n# Last Modified: 2014-12-05\n\n# This code runs the DIAMOnD algorithm as described in\n# \n# A DIseAse MOdule Detection (DIAMOnD) Algorithm derived from a\n# systematic analysis of connectivity patterns of disease proteins in\n# the Human Interactome\n#\n# by <NAME>, <NAME> & <NAME>\n# \n# \n# -----------------------------------------------------------------------\n\"\"\"\n\nimport time\nimport cPickle\nimport networkx as nx\nimport numpy as np\nimport copy\nimport scipy.stats\nfrom collections import defaultdict\nimport csv\nimport sys\n\n\n# =============================================================================\ndef print_usage():\n \n print ' '\n print ' usage: ./DIAMOnD network_file seed_file n alpha(optional) outfile_name (optional)'\n print ' -----------------------------------------------------------------'\n print ' network_file : The edgelist must be provided as any delimiter-separated'\n print ' table. Make sure the delimiter does not exit in gene IDs' \n print ' and is consistent across the file.' \n print ' The first two columns of the table will be'\n print ' interpreted as an interaction gene1 <==> gene2'\n print ' seed_file : table containing the seed genes (if table contains'\n print ' more than one column they must be tab-separated;'\n print ' the first column will be used only)'\n print ' n : desired number of DIAMOnD genes, 200 is a reasonable'\n print ' starting point.'\n print ' alpha : an integer representing weight of the seeds,default'\n print ' value is set to 1'\n print ' outfile_name : results will be saved under this file name'\n print ' by default the outfile_name is set to \"first_n_added_nodes_weight_alpha.txt\"'\n print ' '\n\n\n# =============================================================================\ndef check_input_style(input_list):\n try:\n network_edgelist_file = input_list[1]\n seeds_file = input_list[2]\n max_number_of_added_nodes = int(input_list[3])\n # if no input is given, print out a usage message and exit\n except:\n print_usage()\n sys.exit(0)\n return \n \n alpha = 1\n outfile_name = 'first_%d_added_nodes_weight_%d.txt'%(max_number_of_added_nodes,alpha)\n\n if len(input_list)==5:\n try:\n alpha = int(input_list[4])\n outfile_name = 'first_%d_added_weight_%d.txt'%(max_number_of_added_nodes,alpha)\n except:\n outfile_name = input_list[4] \n \n if len(input_list)==6: \n try:\n alpha = int(input_list[4])\n outfile_name = input_list[5]\n except:\n print_usage() \n sys.exit(0)\n return\n return network_edgelist_file,seeds_file,max_number_of_added_nodes,alpha,outfile_name\n\n# =============================================================================\ndef read_input(network_file,seed_file):\n \"\"\"\n Reads the network and the list of seed genes from external files.\n\n * The edgelist must be provided as a tab-separated table. The\n first two columns of the table will be interpreted as an\n interaction gene1 <==> gene2\n\n * The seed genes mus be provided as a table. 
If the table has more\n than one column, they must be tab-separated. The first column will\n be used only.\n\n * Lines that start with '#' will be ignored in both cases\n \"\"\"\n\n sniffer = csv.Sniffer()\n line_delimiter = None\n for line in open(network_file,'r'):\n if line[0]=='#':\n continue\n else:\n dialect = sniffer.sniff(line)\n line_delimiter = dialect.delimiter\n break\n if line_delimiter == None:\n print 'network_file format not correct'\n sys.exit(0)\n\n\n # read the network:\n G = nx.Graph()\n for line in open(network_file,'r'):\n # lines starting with '#' will be ignored\n if line[0]=='#':\n continue\n # The first two columns in the line will be interpreted as an\n # interaction gene1 <=> gene2\n #line_data = line.strip().split('\\t')\n line_data = line.strip().split(line_delimiter)\n node1 = line_data[0]\n node2 = line_data[1]\n G.add_edge(node1,node2)\n\n # read the seed genes:\n seed_genes = set()\n for line in open(seed_file,'r'):\n # lines starting with '#' will be ignored\n if line[0]=='#':\n continue\n # the first column in the line will be interpreted as a seed\n # gene:\n line_data = line.strip().split('\\t')\n seed_gene = line_data[0]\n seed_genes.add(seed_gene)\n\n return G,seed_genes\n\n\n# ================================================================================\ndef compute_all_gamma_ln(N):\n \"\"\"\n precomputes all logarithmic gammas \n \"\"\"\n gamma_ln = {}\n for i in range(1,N+1):\n gamma_ln[i] = scipy.special.gammaln(i)\n\n return gamma_ln\n\n# =============================================================================\ndef logchoose(n, k, gamma_ln):\n if n-k+1 <= 0:\n return scipy.infty\n lgn1 = gamma_ln[n+1] \n lgk1 = gamma_ln[k+1] \n lgnk1 = gamma_ln[n-k+1] \n return lgn1 - [lgnk1 + lgk1] \n\n# =============================================================================\ndef gauss_hypergeom(x, r, b, n, gamma_ln): \n return np.exp(logchoose(r, x, gamma_ln) +\n logchoose(b, n-x, gamma_ln) -\n logchoose(r+b, n, gamma_ln)) \n \n# =============================================================================\ndef pvalue(kb, k, N, s, gamma_ln): \n \"\"\" \n ------------------------------------------------------------------- \n Computes the p-value for a node that has kb out of k links to \n seeds, given that there's a total of s sees in a network of N nodes. 
\n \n p-val = \\sum_{n=kb}^{k} HypergemetricPDF(n,k,N,s) \n ------------------------------------------------------------------- \n \"\"\" \n p = 0.0 \n for n in range(kb,k+1): \n if n > s: \n break \n prob = gauss_hypergeom(n, s, N-s, k, gamma_ln) \n # print prob \n p += prob \n \n if p > 1: \n return 1 \n else: \n return p \n\n# =============================================================================\ndef get_neighbors_and_degrees(G):\n\n neighbors,all_degrees = {},{}\n for node in G.nodes():\n nn = set(G.neighbors(node))\n neighbors[node] = nn\n all_degrees[node] = G.degree(node)\n\n return neighbors,all_degrees\n\n# =============================================================================\n# Reduce number of calculations\n# =============================================================================\ndef reduce_not_in_cluster_nodes(all_degrees,neighbors,G,not_in_cluster,cluster_nodes,alpha): \n reduced_not_in_cluster = {} \n kb2k = defaultdict(dict) \n for node in not_in_cluster: \n \n k = all_degrees[node] \n kb = 0 \n # Going through all neighbors and counting the number of module neighbors \n for neighbor in neighbors[node]: \n if neighbor in cluster_nodes: \n kb += 1\n \n #adding wights to the the edges connected to seeds\n k += (alpha-1)*kb\n kb += (alpha-1)*kb\n kb2k[kb][k] =node\n\n # Going to choose the node with largest kb, given k \n k2kb = defaultdict(dict) \n for kb,k2node in kb2k.iteritems(): \n min_k = min(k2node.keys()) \n node = k2node[min_k] \n k2kb[min_k][kb] = node \n \n for k,kb2node in k2kb.iteritems(): \n max_kb = max(kb2node.keys()) \n node = kb2node[max_kb] \n reduced_not_in_cluster[node] =(max_kb,k) \n \n return reduced_not_in_cluster \n\n#======================================================================================\n# C O R E A L G <NAME>\n#======================================================================================\ndef diamond_iteration_of_first_X_nodes(G,S,X,alpha):\n \n \"\"\"\n\n Parameters: \n ---------- \n - G: graph\n - S: seeds \n - X: the number of iterations, i.e only the first X gened will be\n pulled in\n - alpha: seeds weight\n\n Returns: \n --------\n \n - added_nodes: ordered list of nodes in the order by which they\n are agglomerated. 
Each entry has 4 info:\n\n * name : dito\n * k : degree of the node\n * kb : number of +1 neighbors\n * p : p-value at agglomeration\n\n \"\"\"\n \n N = G.number_of_nodes()\n\n added_nodes = []\n\n\n # ------------------------------------------------------------------\n # Setting up dictionaries with all neighbor lists\n # and all degrees\n # ------------------------------------------------------------------\n neighbors,all_degrees = get_neighbors_and_degrees(G)\n\n # ------------------------------------------------------------------\n # Setting up initial set of nodes in cluster\n # ------------------------------------------------------------------\n \n cluster_nodes = set(S)\n not_in_cluster = set()\n s0 = len(cluster_nodes)\n \n s0 += (alpha-1)*s0\n N +=(alpha-1)*s0\n \n # ------------------------------------------------------------------\n # precompute the logarithmic gamma functions\n # ------------------------------------------------------------------\n gamma_ln = compute_all_gamma_ln(N+1)\n \n # ------------------------------------------------------------------\n # Setting initial set of nodes not in cluster\n # ------------------------------------------------------------------\n for node in cluster_nodes:\n not_in_cluster |= neighbors[node]\n not_in_cluster -= cluster_nodes\n\n\n # ------------------------------------------------------------------\n #\n # M A I N L O O P \n #\n # ------------------------------------------------------------------\n\n all_p = {}\n\n while len(added_nodes) < X: \n\n # ------------------------------------------------------------------\n #\n # Going through all nodes that are not in the cluster yet and\n # record k, kb and p \n #\n # ------------------------------------------------------------------\n \n info = {}\n \n pmin = 10\n next_node = 'nix'\n reduced_not_in_cluster = reduce_not_in_cluster_nodes(all_degrees,\n neighbors,G,\n not_in_cluster,\n cluster_nodes,alpha)\n \n for node,kbk in reduced_not_in_cluster.iteritems():\n # Getting the p-value of this kb,k\n # combination and save it in all_p, so computing it only once!\n kb,k = kbk\n try:\n p = all_p[(k,kb,s0)]\n except KeyError:\n p = pvalue(kb, k, N, s0, gamma_ln) \n all_p[(k,kb,s0)] = p\n \n # recording the node with smallest p-value\n if p < pmin:\n pmin = p\n next_node = node\n \n info[node] = (k,kb,p)\n\n # ---------------------------------------------------------------------\n # Adding node with smallest p-value to the list of aaglomerated nodes\n # ---------------------------------------------------------------------\n added_nodes.append((next_node,\n info[next_node][0],\n info[next_node][1],\n info[next_node][2]))\n\n # Updating the list of cluster nodes and s0\n cluster_nodes.add(next_node)\n s0 = len(cluster_nodes)\n not_in_cluster |= ( neighbors[next_node] - cluster_nodes )\n not_in_cluster.remove(next_node)\n\n return added_nodes\n\n# ===========================================================================\n#\n# M A I N D I A M O n D A L G O R I T H M\n# \n# ===========================================================================\ndef DIAMOnD(G_original,seed_genes,max_number_of_added_nodes,alpha,outfile = None):\n\n \"\"\"\n Runs the DIAMOnD algorithm\n\n Input:\n ------\n - G_original :\n The network\n - seed_genes : \n a set of seed genes \n - max_number_of_added_nodes:\n after how many added nodes should the algorithm stop\n - alpha:\n given weight to the sees\n - outfile:\n filename for the output generates by the algorithm,\n if not given the program will name it 
'first_x_added_nodes.txt'\n\n Returns:\n --------\n - added_nodes: A list with 4 entries at each element:\n * name : name of the node\n * k : degree of the node\n * kb : number of neighbors that are part of the module (at agglomeration)\n * p : connectivity p-value at agglomeration\n - \n \"\"\"\n \n # 1. throwing away the seed genes that are not in the network\n all_genes_in_network = set(G_original.nodes())\n seed_genes = set(seed_genes)\n disease_genes = seed_genes & all_genes_in_network\n\n if len(disease_genes) != len(seed_genes):\n print \"DIAMOnD(): ignoring %s of %s seed genes that are not in the network\" %(\n len(seed_genes - all_genes_in_network), len(seed_genes))\n \n\n\n \n # 2. agglomeration algorithm. \n added_nodes = diamond_iteration_of_first_X_nodes(G_original,\n disease_genes,\n max_number_of_added_nodes,alpha)\n # 3. saving the results \n with open(outfile,'w') as fout:\n print>>fout,'\\t'.join(['#rank','DIAMOnD_node'])\n rank = 0\n for DIAMOnD_node_info in added_nodes:\n rank += 1\n DIAMOnD_node = DIAMOnD_node_info[0]\n p = float(DIAMOnD_node_info[3])\n print>>fout,'\\t'.join(map(str,([rank,DIAMOnD_node])))\n\n return added_nodes\n\n\n# ===========================================================================\n#\n# \"Hey Ho, Let's go!\" -- The Ramones (1976)\n#\n# ===========================================================================\n\n\nif __name__ == '__main__':\n\n\n # -----------------------------------------------------\n # Checking for input from the command line:\n # -----------------------------------------------------\n #\n # [1] file providing the network in the form of an edgelist\n # (tab-separated table, columns 1 & 2 will be used)\n #\n # [2] file with the seed genes (if table contains more than one\n # column they must be tab-separated; the first column will be\n # used only)\n #\n # [3] number of desired iterations\n #\n # [4] (optional) seeds weight (integer), default value is 1 \n # [5] (optional) name for the results file \n\n #check if input style is correct\n input_list = sys.argv\n network_edgelist_file,seeds_file,max_number_of_added_nodes,alpha,outfile_name= check_input_style(input_list)\n \n # read the network and the seed genes:\n G_original,seed_genes = read_input(network_edgelist_file,seeds_file)\n \n # run DIAMOnD\n added_nodes = DIAMOnD(G_original,\n seed_genes,\n max_number_of_added_nodes,alpha,\n outfile=outfile_name)\n \n print \"\\n results have been saved to '%s' \\n\" %outfile_name\n\n\n\n", "id": "9283209", "language": "Python", "matching_score": 2.606104850769043, "max_stars_count": 0, "path": "HW1.2/part_4/DIAMOnD/DIAMOnD.py" }, { "content": "import pandas as pd\nfrom Bio import Entrez\n\n\ndef create_network():\n \n data = pd.read_csv(\"data/biogrid_all.txt\", sep = \"\\t\", usecols = [\"Entrez Gene Interactor A\", \"Entrez Gene Interactor B\"])\n \n data.to_csv(\"DIAMOnD/biogrid.txt\", index = False, header = False)\n\n\n# Return the results of a query of NCBI DB\ndef query_ncbi(gene):\n \n Entrez.email = \"<EMAIL>\"\n \n handle = Entrez.esearch(db= \"gene\", term = \"(\"+gene+\"[Gene Name]) AND \\\"Homo sapiens\\\"[porgn] AND (alive[prop])\")\n record = Entrez.read(handle)\n return int(record[\"IdList\"][0])\n\ndef translate(file, output_file):\n \n with open(\"data/\"+file+\".txt\", \"r\") as f:\n genes = [gene.rstrip() for gene in f.readlines()]\n \n with open(\"DIAMOnD/\"+output_file+\".txt\",\"w\") as g:\n g.writelines(\"%s\\n\" % line for line in map(query_ncbi, genes))\n \n \ndef join_files(file_1, file_2, 
output_file):\n with open(\"DIAMOnD/\"+file_1+\".txt\", \"r\") as f1:\n genes_1 = [gene.rstrip() for gene in f1.readlines()]\n \n file_2 = pd.read_csv(\"DIAMOnD/\"+file_2+\".txt\", sep = \"\\t\")\n genes_2 = list(file_2[\"DIAMOnD_node\"])\n \n \n with open(\"results/\"+output_file+\".txt\",\"w\") as g:\n g.writelines(\"%s\\n\" % line for line in list(set(genes_1+genes_2)))\n \n \ndef top_ten(file):\n \n data = pd.read_csv(\"results/\" + file + \".txt\", sep = \"\\t\")\n \n data.sort_values(by = [\"Pathway p-value (corrected)\"], inplace = True)\n \n data.iloc[:10,:].to_excel(\"results/\"+ file +\".xlsx\")\n \n ", "id": "4804105", "language": "Python", "matching_score": 3.0085511207580566, "max_stars_count": 0, "path": "HW1.2/part_4/utils.py" }, { "content": "import pandas as pd\n\ndef rank_dataset_go(file):\n data = pd.read_csv(file, sep = \"\\t\")\n data.sort_values([\"Pathway p-value (corrected)\"], inplace = True, ascending = False)\n \n cl= data[data[\"Source Name\"] == \"cellular component\"].iloc[:10]\n mf = data[data[\"Source Name\"] == \"molecular function\"].iloc[:10]\n bp = data[data[\"Source Name\"] == \"biological process\"].iloc[:10]\n \n return cl, mf, bp\n \n \ndef rank_dataset_path(file):\n data = pd.read_csv(file, sep = \"\\t\", encoding='latin-1')\n data.sort_values([\"Pathway p-value (corrected)\"], inplace = True, ascending = False)\n \n return data.iloc[:10]", "id": "11565796", "language": "Python", "matching_score": 1.8511714935302734, "max_stars_count": 0, "path": "HW1/part_5/utils.py" }, { "content": "import utils as ut\n\nsg_cl, sg_mf, sg_bp = ut.rank_dataset_go(\"innate/gene_ontology/sg_ora.txt\")\nui_cl, ui_mf, ui_bp = ut.rank_dataset_go(\"innate/gene_ontology/ui_ora.txt\")\nii_cl, ii_mf, ii_bp = ut.rank_dataset_go(\"innate/gene_ontology/ii_ora.txt\")\n\nsg_cl.to_csv(\"results/gene_ontology/sg_cl.tsv\", sep = \"\\t\", index = False)\nsg_mf.to_csv(\"results/gene_ontology/sg_mf.tsv\", sep = \"\\t\", index = False)\nsg_bp.to_csv(\"results/gene_ontology/sg_bp.tsv\", sep = \"\\t\", index = False)\n\nui_cl.to_csv(\"results/gene_ontology/ui_cl.tsv\", sep = \"\\t\", index = False)\nui_mf.to_csv(\"results/gene_ontology/ui_mf.tsv\", sep = \"\\t\", index = False)\nui_bp.to_csv(\"results/gene_ontology/ui_bp.tsv\", sep = \"\\t\", index = False)\n\nii_cl.to_csv(\"results/gene_ontology/ii_cl.tsv\", sep = \"\\t\", index = False)\nii_mf.to_csv(\"results/gene_ontology/ii_mf.tsv\", sep = \"\\t\", index = False)\nii_bp.to_csv(\"results/gene_ontology/ii_bp.tsv\", sep = \"\\t\", index = False)\n\n\nui_path = ut.rank_dataset_path(\"innate/pathway/ui_ora.txt\")\nii_path = ut.rank_dataset_path(\"innate/pathway/ii_ora.txt\")\n\nui_path.to_csv(\"results/pathway/ui_path.tsv\", sep = \"\\t\", index = False)\nii_path.to_csv(\"results/pathway/ii_path.tsv\", sep = \"\\t\", index = False)", "id": "4531075", "language": "Python", "matching_score": 1.5412452220916748, "max_stars_count": 0, "path": "HW1/part_5/get_rankings.py" } ]
2.162611
sunnykriplani
[ { "content": "from flask import Flask, request, redirect,jsonify\nimport requests\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# Tyndall specific Client and Secret ID\nCLIENT_ID = \"3f034891bb6540cdf2bbbc19be0bdb7fb9d11800691a0043534fe7e7be995274\"\nCUSTOMER_SECRET = \"<KEY>\"\nSTATE = \"dev\"\nACCOUNT_URL = \"https://account.withings.com\"\nWBSAPI_URL = \"https://wbsapi.withings.net\"\n\n# Azure hosting specific URL\nCALLBACK_URI = \"http://wsnhol.azurewebsites.net/get_token\"\nUSER = \"sunny\"\nPASS = \"<PASSWORD>\"\nacc_list = []\n\n\n\n# Function to check if requested user is available in \n# database\n@app.route(\"/login\")\ndef login():\n usrname = request.args.get('user')\n password = request.args.get('pass')\n print(usrname)\n print(password)\n print(usrname + password)\n if (usrname == USER) and (password == <PASSWORD>):\n return \"login successful\"\n else:\n return \"login failed\"\n\n\n# Function to take permission from user for OAUTH and redirects\n# to login page where user will approve the permissions this will \n# post the access code on success\n@app.route(\"/\")\ndef get_code():\n\n payload = {'response_type': 'code', # Specific to the Api\n 'client_id': CLIENT_ID,\n 'state': STATE,\n # permissions to be taken from user\n 'scope': 'user.info,user.metrics,user.activity', \n 'redirect_uri': CALLBACK_URI, # URL of this app\n }\n\n r_auth = requests.get(f'{ACCOUNT_URL}/oauth2_user/authorize2',\n params=payload)\n print(r_auth.url)\n return redirect(r_auth.url)\n\n\n# Server POST Request URL which will allow Withings to send\n# access code to the server and access code can be used to \n# fetch Access token/ Refresh Token\n@app.route(\"/get_token\")\ndef get_token():\n\n\n code = request.args.get('code')\n state = request.args.get('state')\n print(code)\n print(state)\n payload = {'grant_type': 'authorization_code',\n 'client_id': CLIENT_ID,\n 'client_secret': CUSTOMER_SECRET,\n 'code': code,\n 'redirect_uri': CALLBACK_URI\n }\n\n r_token = requests.post(f'{ACCOUNT_URL}/oauth2/token',\n data=payload).json()\n access_token = r_token.get('access_token', '')\n acc_list.append(access_token)\n print(access_token)\n # GET Some info with this token\n headers = {'Authorization': 'Bearer ' + access_token}\n payload = {'action': 'getdevice'}\n \n # Javascript to return to the user which will allow\n # user to jump to other activity upon success\n \n js = \"<title>Fetch Data</title> \\\n <script language=\\\"javascript\\\"> \\\n function sendtoAndroid() { \\\n var str = \\\"\" + str(access_token) + \"\\\";\\\n javascript_object.OpenActivity(str); } \\\n </script> \\\n <center> <h2>Successfully Captured the token</h2> \\\n <button onclick=\\\"sendtoAndroid()\\\">Fetch Data</button> \\\n </center> \"\n \n # List devices of returned user\n # r_getdevice = requests.get(f'{WBSAPI_URL}/v2/user',\n # headers=headers,\n # params=payload).json()\n \n \n return js\n\n\n# Additional function which allow user to request for Data\n@app.route(\"/get_data\")\ndef get_data():\n measure_type = request.args.get('type')\n print(acc_list)\n headers = {'Authorization': 'Bearer ' + acc_list[0]}\n payload = {'action': 'getmeas',\n 'meastypes': measure_type,\n 'category' : '1',\n }\n \n r_getdata = requests.get(f'{WBSAPI_URL}/measure',\n headers=headers,\n params=payload).json()\n return str(r_getdata)\n\n# main function to run the server \nif __name__ == '__main__':\n app.run()\n", "id": "1505286", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "app.py" } ]
0
ispamm
[ { "content": "\n#from evaluate_baseline_task2 import evaluate_model\nimport sys, os\nimport time\nimport json\nimport pickle\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import StepLR \nimport torch.utils.data as utils\nfrom models.SELD_Model import SELD_Model\nfrom utility_functions import save_array_to_csv,gen_submission_list_task2, readFile\nfrom metrics import location_sensitive_detection\nimport shutil\nfrom torchinfo import summary\nimport wandb\nfrom Dcase21_metrics import *\n\n\ndef save_model(model, optimizer, state, path,scheduler=None):\n if isinstance(model, torch.nn.DataParallel):\n model = model.module # save state dict of wrapped module\n if len(os.path.dirname(path)) > 0 and not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n if scheduler is not None:\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'state': state, # state of training loop (was 'step')\n 'scheduler_state_dict' : scheduler.state_dict(),\n 'random_states':(np.random.get_state(), torch.get_rng_state(), torch.cuda.get_rng_state() if torch.cuda.is_available() else None)\n }, path)\n else:\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'state': state, # state of training loop (was 'step')\n 'random_states':(np.random.get_state(), torch.get_rng_state(), torch.cuda.get_rng_state() if torch.cuda.is_available() else None)\n }, path)\n\ndef load_model(model, optimizer, path, cuda, device,scheduler=None):\n\n if isinstance(model, torch.nn.DataParallel):\n model = model.module # load state dict of wrapped module\n if cuda:\n checkpoint = torch.load(path, map_location=device)\n else:\n checkpoint = torch.load(path, map_location='cpu')\n try:\n model.load_state_dict(checkpoint['model_state_dict'])\n except:\n # work-around for loading checkpoints where DataParallel was saved instead of inner module\n from collections import OrderedDict\n model_state_dict_fixed = OrderedDict()\n prefix = 'module.'\n for k, v in checkpoint['model_state_dict'].items():\n if k.startswith(prefix):\n k = k[len(prefix):]\n model_state_dict_fixed[k] = v\n model.load_state_dict(model_state_dict_fixed)\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n if scheduler is not None:\n scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n if 'state' in checkpoint:\n state = checkpoint['state']\n else:\n # older checkpoints only store step, rest of state won't be there\n state = {'step': checkpoint['step']}\n \n np.random.set_state(checkpoint['random_states'][0])\n torch.set_rng_state(checkpoint['random_states'][1].cpu())\n if torch.cuda.is_available() and checkpoint['random_states'][2] is not None:\n torch.cuda.set_rng_state(checkpoint['random_states'][2].cpu())\n return state\n\n\ndef evaluate_test(model,device, dataloader,epoch=0,max_loc_value=2.,num_frames=600,spatial_threshold=2.):\n TP = 0\n FP = 0\n FN = 0\n count = 0\n output_classes=args.output_classes\n class_overlaps=args.class_overlaps\n\n model.eval()\n \n eval_metrics = SELDMetrics(nb_classes=output_classes, doa_threshold=args.Dcase21_metrics_DOA_threshold)\n \n with tqdm(total=len(dataloader) // 1) as pbar, torch.no_grad():\n for example_num, (x, target) in enumerate(dataloader):\n x = x.to(device)\n target = target.to(device)\n \n sed, doa = model(x)\n sed = 
sed.cpu().numpy().squeeze()\n doa = doa.cpu().numpy().squeeze()\n target = target.cpu().numpy().squeeze()\n #in the target matrices sed and doa are joint\n sed_target = target[:,:args.output_classes*args.class_overlaps]\n doa_target = target[:,args.output_classes*args.class_overlaps:]\n\n \n prediction,prediction_dict = gen_submission_list_task2(sed, doa,\n max_overlaps=class_overlaps,\n max_loc_value=max_loc_value)\n\n target,target_dict = gen_submission_list_task2(sed_target, doa_target,\n max_overlaps=class_overlaps,\n max_loc_value=max_loc_value)\n \n pred_labels =segment_labels(prediction_dict, num_frames)\n ref_labels =segment_labels(target_dict, num_frames)\n # Calculated scores\n eval_metrics.update_seld_scores(pred_labels, ref_labels)\n tp, fp, fn, _ = location_sensitive_detection(prediction, target, num_frames,\n spatial_threshold, False)\n TP += tp\n FP += fp\n FN += fn\n\n count += 1\n pbar.update(1)\n\n\n #compute total F score\n precision = TP / (TP + FP + sys.float_info.epsilon)\n recall = TP / (TP + FN + sys.float_info.epsilon)\n F_score = 2 * ((precision * recall) / (precision + recall + sys.float_info.epsilon))\n Nref=TP+FN\n Nsys=TP+FP\n ER_score = (max(Nref, Nsys) - TP) / (Nref + 0.0)\n \n ER_dcase21, F_dcase21, LE_dcase21, LR_dcase21 = eval_metrics.compute_seld_scores()\n\n SELD_dcase21 = np.mean([ER_dcase21,1 - F_dcase21, LE_dcase21/180,1 - LR_dcase21])\n SELD_L3DAS21_LRLE = np.mean([ER_score,1 - F_score, LE_dcase21/180,1 - LR_dcase21])\n CSL_score= np.mean([LE_dcase21/180,1 - LR_dcase21])\n LSD_score=np.mean([1-F_score,ER_score])\n test_results=[epoch,F_score,ER_score,precision,recall,TP,FP,FN,\n CSL_score,LSD_score,SELD_L3DAS21_LRLE,\n SELD_dcase21,ER_dcase21, F_dcase21, LE_dcase21, LR_dcase21]\n \n\n #visualize and save results\n print ('*******************************')\n print ('RESULTS')\n print ('TP: ' , TP)\n print ('FP: ' , FP)\n print ('FN: ' , FN)\n print ('******** SELD (F ER L3DAS21 - LE LR DCASE21) ***********')\n print ('Global SELD score: ', SELD_L3DAS21_LRLE)\n print ('LSD score: ', LSD_score)\n print ('CSL score: ', CSL_score)\n print ('F score: ', F_score)\n print ('ER score: ', ER_score)\n print ('LE: ', LE_dcase21)\n print ('LR: ', LR_dcase21)\n \n return test_results\n\ndef evaluate(model, device, criterion_sed, criterion_doa, dataloader):\n #compute loss without backprop\n model.eval()\n test_loss = 0.\n with tqdm(total=len(dataloader) // args.batch_size) as pbar, torch.no_grad():\n for example_num, (x, target) in enumerate(dataloader):\n target = target.to(device)\n x = x.to(device)\n t = time.time()\n # Compute loss for each instrument/model\n #sed, doa = model(x)\n loss = seld_loss(x, target, model, criterion_sed, criterion_doa)\n test_loss += (1. 
/ float(example_num + 1)) * (loss - test_loss)\n pbar.set_description(\"Current loss: {:.4f}\".format(test_loss))\n pbar.update(1)\n return test_loss\n\n\ndef seld_loss(x, target, model, criterion_sed, criterion_doa):\n '''\n compute seld loss as weighted sum of sed (BCE) and doa (MSE) losses\n '''\n #divide labels into sed and doa (which are joint from the preprocessing)\n target_sed = target[:,:,:args.output_classes*args.class_overlaps]\n target_doa = target[:,:,args.output_classes*args.class_overlaps:]\n\n #compute loss\n sed, doa = model(x)\n \n sed = torch.flatten(sed, start_dim=1)\n doa = torch.flatten(doa, start_dim=1)\n target_sed = torch.flatten(target_sed, start_dim=1)\n target_doa = torch.flatten(target_doa, start_dim=1)\n loss_sed = criterion_sed(sed, target_sed) * args.sed_loss_weight\n loss_doa = criterion_doa(doa, target_doa) * args.doa_loss_weight\n \n return loss_sed + loss_doa\n\n\ndef main(args):\n\n if args.use_cuda:\n device = 'cuda:' + str(args.gpu_id)\n else:\n device = 'cpu'\n\n if args.fixed_seed:\n seed = 1\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n #LOAD DATASET\n print ('\\nLoading dataset')\n\n with open(args.training_predictors_path, 'rb') as f:\n training_predictors = pickle.load(f)\n with open(args.training_target_path, 'rb') as f:\n training_target = pickle.load(f)\n with open(args.validation_predictors_path, 'rb') as f:\n validation_predictors = pickle.load(f)\n with open(args.validation_target_path, 'rb') as f:\n validation_target = pickle.load(f)\n with open(args.test_predictors_path, 'rb') as f:\n test_predictors = pickle.load(f)\n with open(args.test_target_path, 'rb') as f:\n test_target = pickle.load(f)\n\n phase_string='_Phase' if args.phase else ''\n dataset_string='L3DAS21_'+str(args.n_mics)+'Mics_Magnidute'+phase_string+'_'+str(args.input_channels)+'Ch'\n #####################################NORMALIZATION####################################\n if args.dataset_normalization not in {'False','false','None','none'}:\n print('\\nDataset_Normalization')\n if args.dataset_normalization in{'DQ_Normalization','UnitNormNormalization','UnitNorm'}:\n \n training_predictors = torch.tensor(training_predictors)\n training_target = torch.tensor(training_target)\n validation_predictors = torch.tensor(validation_predictors)\n validation_target = torch.tensor(validation_target)\n test_predictors = torch.tensor(test_predictors)\n test_target = torch.tensor(test_target)\n if args.n_mics==2:\n if args.domain in ['DQ','dq','dQ','Dual_Quaternion','dual_quaternion']:\n dataset_string+=' Dataset Normalization for 2Mic 8Ch Magnitude Dual Quaternion UnitNorm'\n print('Dataset Normalization for 2Mic 8Ch Magnitude Dual Quaternion UnitNorm')\n ## TRAINING PREDICTORS ##\n q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3 = torch.chunk(training_predictors[:,:8,:,:], chunks=8, dim=1)\n denominator_0 = q_0 ** 2 + q_1 ** 2 + q_2 ** 2 + q_3 ** 2\n denominator_1 = torch.sqrt(denominator_0)\n deno_cross = q_0 * p_0 + q_1 * p_1 + q_2 * p_2 + q_3 * p_3\n\n p_0 = p_0 - deno_cross / denominator_0 * q_0\n p_1 = p_1 - deno_cross / denominator_0 * q_1\n p_2 = p_2 - deno_cross / denominator_0 * q_2\n p_3 = p_3 - deno_cross / denominator_0 * q_3\n\n q_0 = q_0 / denominator_1\n q_1 = q_1 / denominator_1\n q_2 = q_2 / denominator_1\n q_3 = q_3 / denominator_1\n\n training_predictors[:,:8,:,:] = torch.cat([q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3], 
dim=1)\n\n ## VALIDATION PREDICTORS ##\n q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3 = torch.chunk(validation_predictors[:,:8,:,:], chunks=8, dim=1)\n denominator_0 = q_0 ** 2 + q_1 ** 2 + q_2 ** 2 + q_3 ** 2\n denominator_1 = torch.sqrt(denominator_0)\n deno_cross = q_0 * p_0 + q_1 * p_1 + q_2 * p_2 + q_3 * p_3\n\n p_0 = p_0 - deno_cross / denominator_0 * q_0\n p_1 = p_1 - deno_cross / denominator_0 * q_1\n p_2 = p_2 - deno_cross / denominator_0 * q_2\n p_3 = p_3 - deno_cross / denominator_0 * q_3\n\n q_0 = q_0 / denominator_1\n q_1 = q_1 / denominator_1\n q_2 = q_2 / denominator_1\n q_3 = q_3 / denominator_1\n\n validation_predictors[:,:8,:,:] = torch.cat([q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3], dim=1)\n\n ## TEST PREDICTORS ##\n q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3 = torch.chunk(test_predictors[:,:8,:,:], chunks=8, dim=1)\n denominator_0 = q_0 ** 2 + q_1 ** 2 + q_2 ** 2 + q_3 ** 2\n denominator_1 = torch.sqrt(denominator_0)\n deno_cross = q_0 * p_0 + q_1 * p_1 + q_2 * p_2 + q_3 * p_3\n\n p_0 = p_0 - deno_cross / denominator_0 * q_0\n p_1 = p_1 - deno_cross / denominator_0 * q_1\n p_2 = p_2 - deno_cross / denominator_0 * q_2\n p_3 = p_3 - deno_cross / denominator_0 * q_3\n\n q_0 = q_0 / denominator_1\n q_1 = q_1 / denominator_1\n q_2 = q_2 / denominator_1\n q_3 = q_3 / denominator_1\n\n test_predictors[:,:8,:,:] = torch.cat([q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3], dim=1) \n if args.phase:\n raise ValueError('DATASET NORMALIZATION FOR PHASE DUAL QUATERNION NOT YET IMPLEMENTED')\n print('Dataset Normalization for 2Mic 16Ch Magnitude-Phase Dual Quaternion ')\n training_predictors = np.array(training_predictors)\n training_target = np.array(training_target)\n validation_predictors = np.array(validation_predictors)\n validation_target = np.array(validation_target)\n test_predictors = np.array(test_predictors)\n test_target = np.array(test_target)\n\n print ('\\nShapes:')\n print ('Training predictors: ', training_predictors.shape)\n print ('Validation predictors: ', validation_predictors.shape)\n print ('Test predictors: ', test_predictors.shape)\n print ('Training target: ', training_target.shape)\n print ('Validation target: ', validation_target.shape)\n print ('Test target: ', test_target.shape)\n else:\n training_predictors = np.array(training_predictors)\n training_target = np.array(training_target)\n validation_predictors = np.array(validation_predictors)\n validation_target = np.array(validation_target)\n test_predictors = np.array(test_predictors)\n test_target = np.array(test_target)\n\n print ('\\nShapes:')\n print ('Training predictors: ', training_predictors.shape)\n print ('Validation predictors: ', validation_predictors.shape)\n print ('Test predictors: ', test_predictors.shape)\n print ('Training target: ', training_target.shape)\n print ('Validation target: ', validation_target.shape)\n print ('Test target: ', test_target.shape)\n if args.n_mics==1:\n dataset_string+=' Dataset Normalization for 1Mic 4Ch Magnitude'\n print('Dataset Normalization for 1Mic 4Ch Magnitude')\n # Normalize training predictors with mean 0 and std 1\n train_mag_min = np.mean(training_predictors[:,:4,:,:])\n train_mag_std = np.std(training_predictors[:,:4,:,:]) \n training_predictors[:,:4,:,:] -= train_mag_min\n training_predictors[:,:4,:,:] /= train_mag_std\n # Normalize validation predictors with mean 0 and std 1\n val_mag_min = np.mean(validation_predictors[:,:4,:,:])\n val_mag_std = np.std(validation_predictors[:,:4,:,:]) \n validation_predictors[:,:4,:,:] -= val_mag_min\n 
validation_predictors[:,:4,:,:] /= val_mag_std\n # Normalize test predictors with mean 0 and std 1\n test_mag_min = np.mean(test_predictors[:,:4,:,:])\n test_mag_std = np.std(test_predictors[:,:4,:,:]) \n test_predictors[:,:4,:,:] -= test_mag_min\n test_predictors[:,:4,:,:] /= test_mag_std\n if args.phase:\n dataset_string+=' Dataset Normalization for 1Mic 8Ch Magnitude-Phase'\n print('Dataset Normalization for 1Mic 8Ch Magnitude-Phase')\n train_phase_min = np.mean(training_predictors[:,4:,:,:])\n train_phase_std = np.std(training_predictors[:,4:,:,:])\n training_predictors[:,4:,:,:] -= train_phase_min\n training_predictors[:,4:,:,:] /= train_phase_std\n val_phase_min = np.mean(validation_predictors[:,4:,:,:])\n val_phase_std = np.std(validation_predictors[:,4:,:,:])\n validation_predictors[:,4:,:,:] -= val_phase_min\n validation_predictors[:,4:,:,:] /= val_phase_std\n test_phase_min = np.mean(test_predictors[:,4:,:,:])\n test_phase_std = np.std(test_predictors[:,4:,:,:])\n test_predictors[:,4:,:,:] -= test_phase_min\n test_predictors[:,4:,:,:] /= test_phase_std\n if args.n_mics==2:\n \n dataset_string+=' Dataset Normalization for 2Mic 8Ch Magnitude'\n print('Dataset Normalization for 2Mic 8Ch Magnitude')\n # Normalize training predictors with mean 0 and std 1\n train_mag_min = np.mean(training_predictors[:,:8,:,:])\n train_mag_std = np.std(training_predictors[:,:8,:,:]) \n training_predictors[:,:8,:,:] -= train_mag_min\n training_predictors[:,:8,:,:] /= train_mag_std\n # Normalize validation predictors with mean 0 and std 1\n val_mag_min = np.mean(validation_predictors[:,:8,:,:])\n val_mag_std = np.std(validation_predictors[:,:8,:,:]) \n validation_predictors[:,:8,:,:] -= val_mag_min\n validation_predictors[:,:8,:,:] /= val_mag_std\n # Normalize test predictors with mean 0 and std 1\n test_mag_min = np.mean(test_predictors[:,:8,:,:])\n test_mag_std = np.std(test_predictors[:,:8,:,:]) \n test_predictors[:,:8,:,:] -= test_mag_min\n test_predictors[:,:8,:,:] /= test_mag_std\n if args.phase:\n \n dataset_string+=' Dataset Normalization for 2Mic 16Ch Magnitude-Phase'\n print('Dataset Normalization for 2Mic 16Ch Magnitude-Phase')\n train_phase_min = np.mean(training_predictors[:,8:,:,:])\n train_phase_std = np.std(training_predictors[:,8:,:,:])\n training_predictors[:,8:,:,:] -= train_phase_min\n training_predictors[:,8:,:,:] /= train_phase_std\n val_phase_min = np.mean(validation_predictors[:,8:,:,:])\n val_phase_std = np.std(validation_predictors[:,8:,:,:])\n validation_predictors[:,8:,:,:] -= val_phase_min\n validation_predictors[:,8:,:,:] /= val_phase_std\n test_phase_min = np.mean(test_predictors[:,8:,:,:])\n test_phase_std = np.std(test_predictors[:,8:,:,:])\n test_predictors[:,8:,:,:] -= test_phase_min\n test_predictors[:,8:,:,:] /= test_phase_std\n else:\n training_predictors = np.array(training_predictors)\n training_target = np.array(training_target)\n validation_predictors = np.array(validation_predictors)\n validation_target = np.array(validation_target)\n test_predictors = np.array(test_predictors)\n test_target = np.array(test_target)\n\n print ('\\nShapes:')\n print ('Training predictors: ', training_predictors.shape)\n print ('Validation predictors: ', validation_predictors.shape)\n print ('Test predictors: ', test_predictors.shape)\n print ('Training target: ', training_target.shape)\n print ('Validation target: ', validation_target.shape)\n print ('Test target: ', test_target.shape)\n \n ###############################################################################\n\n\n 
features_dim = int(test_target.shape[-2] * test_target.shape[-1])\n\n #convert to tensor\n training_predictors = torch.tensor(training_predictors).float()\n validation_predictors = torch.tensor(validation_predictors).float()\n test_predictors = torch.tensor(test_predictors).float()\n training_target = torch.tensor(training_target).float()\n validation_target = torch.tensor(validation_target).float()\n test_target = torch.tensor(test_target).float()\n #build dataset from tensors\n tr_dataset = utils.TensorDataset(training_predictors, training_target)\n val_dataset = utils.TensorDataset(validation_predictors, validation_target)\n test_dataset = utils.TensorDataset(test_predictors, test_target)\n #build data loader from dataset\n tr_data = utils.DataLoader(tr_dataset, args.batch_size, shuffle=True, pin_memory=True)\n val_data = utils.DataLoader(val_dataset, args.batch_size, shuffle=False, pin_memory=True)\n test_data = utils.DataLoader(test_dataset, 1, shuffle=False, pin_memory=True)#(test_dataset, args.batch_size, shuffle=False, pin_memory=True\n\n #LOAD MODEL\n n_time_frames = test_predictors.shape[-1]\n\n ######################################################################################################################\n model=SELD_Model(time_dim=n_time_frames, freq_dim=args.freq_dim, input_channels=args.input_channels, output_classes=args.output_classes,\n domain=args.domain, domain_classifier=args.domain_classifier,\n cnn_filters=args.cnn_filters, kernel_size_cnn_blocks=args.kernel_size_cnn_blocks, pool_size=args.pool_size, pool_time=args.pool_time,\n D=args.D, dilation_mode=args.dilation_mode,G=args.G, U=args.U, kernel_size_dilated_conv=args.kernel_size_dilated_conv,\n spatial_dropout_rate=args.spatial_dropout_rate,V=args.V, V_kernel_size=args.V_kernel_size,\n fc_layers=args.fc_layers, fc_activations=args.fc_activations, fc_dropout=args.fc_dropout, dropout_perc=args.dropout_perc, \n class_overlaps=args.class_overlaps,\n use_bias_conv=args.use_bias_conv,use_bias_linear=args.use_bias_linear,batch_norm=args.batch_norm, parallel_ConvTC_block=args.parallel_ConvTC_block, parallel_magphase=args.parallel_magphase,\n extra_name=args.model_extra_name, verbose=False)\n \n \n architecture_dir='RESULTS/Task2/{}/'.format(args.architecture)\n if len(os.path.dirname(architecture_dir)) > 0 and not os.path.exists(os.path.dirname(architecture_dir)):\n os.makedirs(os.path.dirname(architecture_dir))\n model_dir=architecture_dir+model.model_name+'/'\n if len(os.path.dirname(model_dir)) > 0 and not os.path.exists(os.path.dirname(model_dir)):\n os.makedirs(os.path.dirname(model_dir))\n args.load_model=model_dir+'checkpoint'\n unique_name=model_dir+model.model_name\n \n '''if not args.wandb_id=='none': \n wandb.init(project=args.wandb_project, entity=args.wandb_entity,resume='allow',id=args.wandb_id,name=model.model_name)############################################################################################ WANDB\n else:\n wandb.init(project=args.wandb_project,entity=args.wandb_entity,resume='allow',name=model.model_name)\n config = wandb.config\n wandb.watch(model)\n wandb.config.update(args, allow_val_change=True)\n wandb.config.ReceptiveField=model.receptive_field\n wandb.config.n_ResBlocks=model.total_n_resblocks'''\n \n print(dataset_string)\n print(model.model_name)\n \n summary(model, input_size=(args.batch_size,args.input_channels,args.freq_dim,n_time_frames)) ##################################################\n if not args.architecture == 'seldnet_vanilla' and not args.architecture == 
'seldnet_augmented': \n print('\\nReceptive Field: ',model.receptive_field,'\\nNumber of ResBlocks: ', model.total_n_resblocks)\n #######################################################################################################################\n if args.use_cuda:\n print(\"Moving model to gpu\")\n model = model.to(device)\n\n #compute number of parameters\n model_params = sum([np.prod(p.size()) for p in model.parameters()])\n print ('Total paramters: ' + str(model_params))\n '''\n wandb.config.n_Parameters=model_params'''\n\n #set up the loss functions\n criterion_sed = nn.BCELoss()\n criterion_doa = nn.MSELoss()\n\n #set up optimizer\n optimizer = Adam(params=model.parameters(), lr=args.lr)\n \n ################################################################### DYNAMIC LEARNING RATE\n if args.use_lr_scheduler:\n scheduler = StepLR(optimizer, step_size=args.lr_scheduler_step_size, gamma=args.lr_scheduler_gamma, verbose=True)\n else:\n scheduler=None\n ###################################################################\n #set up training state dict that will also be saved into checkpoints\n state = {\"step\" : 0,\n \"worse_epochs\" : 0,\n \"epochs\" : 0,\n \"best_loss\" : np.Inf,\n \"best_epoch\" : 0,\n \"best_test_epoch\":0,\n \"torch_seed_state\":torch.get_rng_state(),\n \"numpy_seed_state\":np.random.get_state()\n \n }\n epoch =0\n best_loss_checkpoint=np.inf\n best_test_metric=1\n #load model checkpoint if desired\n if args.load_model is not None and os.path.isfile(args.load_model) :####################################### added \"and os.path.isfile(args.load_model)\"\n print(\"Continuing training full model from checkpoint \" + str(args.load_model))\n state = load_model(model, optimizer, args.load_model, args.use_cuda,device,scheduler)\n epoch=state[\"epochs\"]#######################################################################\n new_best=False\n test_best_results=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n best_epoch_checkpoint = epoch\n \n\n #TRAIN MODEL\n print('TRAINING START')\n train_loss_hist = []\n val_loss_hist = []\n while state[\"worse_epochs\"] < args.patience or epoch<args.min_n_epochs:\n epoch += 1\n state[\"epochs\"] += 1\n print(\"Training epoch \" + str(epoch) +' of '+model.model_name, ' with lr ', optimizer.param_groups[0]['lr'])\n avg_time = 0.\n model.train()\n train_loss = 0.\n with tqdm(total=len(tr_dataset) // args.batch_size) as pbar:\n for example_num, (x, target) in enumerate(tr_data):\n target = target.to(device)\n #print(x.shape)\n x = x.to(device)\n t = time.time()\n # Compute loss for each instrument/model\n optimizer.zero_grad()\n #print(x.shape)\n #sed, doa = model(x)\n #print(x.shape)\n loss = seld_loss(x, target, model, criterion_sed, criterion_doa)\n loss.backward()\n\n train_loss += (1. / float(example_num + 1)) * (loss - train_loss)\n optimizer.step()\n state[\"step\"] += 1\n t = time.time() - t\n avg_time += (1. 
/ float(example_num + 1)) * (t - avg_time)\n\n pbar.update(1)\n\n #PASS VALIDATION DATA\n val_loss = evaluate(model, device, criterion_sed, criterion_doa, val_data)\n \n if args.use_lr_scheduler and optimizer.param_groups[0]['lr']>args.min_lr:\n scheduler.step()######################################################################Dynamic learning rate\n \n\n # EARLY STOPPING CHECK\n #############################################################################\n \n checkpoint_path = os.path.join(model_dir, \"checkpoint\")\n checkpoint_best_model_path = os.path.join(model_dir, \"checkpoint_best_model\")\n checkpoint_best_model_checkpoint_path = os.path.join(model_dir, \"checkpoint_best_model_of_checkpoint\")\n\n\n \n #state[\"worse_epochs\"] = 200\n train_loss_hist.append(train_loss.cpu().detach().numpy())\n val_loss_hist.append(val_loss.cpu().detach().numpy())\n\n\n if val_loss >= state[\"best_loss\"]:\n state[\"worse_epochs\"] += 1\n \n else:\n if new_best==True:\n best_loss_checkpoint =state[\"best_loss\"] \n best_epoch_checkpoint = state[\"best_epoch\"]\n shutil.copyfile(checkpoint_best_model_path, checkpoint_best_model_checkpoint_path)\n \n print(\"MODEL IMPROVED ON VALIDATION SET!\")\n state[\"worse_epochs\"] = 0\n state[\"best_loss\"] = val_loss\n state[\"best_epoch\"] = epoch\n state[\"best_checkpoint\"] = checkpoint_best_model_path\n new_best=True\n\n # CHECKPOINT\n print(\"Saving best model...\")\n save_model(model, optimizer, state, checkpoint_best_model_path,scheduler)\n\n if val_loss < best_loss_checkpoint and (val_loss!=state[\"best_loss\"] or best_loss_checkpoint==np.inf):\n best_loss_checkpoint = val_loss\n print(\"Saving best model checkpoint...\")\n save_model(model, optimizer, state, checkpoint_best_model_checkpoint_path,scheduler)\n best_epoch_checkpoint = epoch\n\n\n print(\"Saving model...\")\n save_model(model, optimizer, state, checkpoint_path,scheduler)\n print(\"VALIDATION FINISHED: TRAIN_LOSS: {} VAL_LOSS: {}\".format(str(train_loss.cpu().detach().numpy().round(4)), str(val_loss.cpu().detach().numpy().round(4))))\n print(\"Best epoch at: {} Best loss: {}\".format(state['best_epoch'],str(state['best_loss'].cpu().detach().numpy().round(4))))\n\n plot_array=[epoch, train_loss.cpu().detach().numpy(), val_loss.cpu().detach().numpy()]\n save_array_to_csv(\"{}_training_metrics.csv\".format(unique_name), plot_array)###################################\n \n '''wandb.log({\"train loss\": train_loss.cpu().detach().numpy()},step=epoch)#################################################### WANDB\n wandb.log({\"val loss\":val_loss.cpu().detach().numpy()},step=epoch)\n '''\n\n #TEST############################################################################################################\n if epoch%args.test_step==0:\n if args.test_mode=='test_best':\n if new_best:\n print ('\\n***************TEST BEST MODEL AT EPOCH {}****************'.format(state[\"best_epoch\"]))\n state = load_model(model, optimizer, checkpoint_best_model_path, args.use_cuda,device,scheduler)\n test_best_results=evaluate_test(model,device, test_data,epoch=state['best_epoch'],max_loc_value=args.max_loc_value,num_frames=args.num_frames,spatial_threshold=args.spatial_threshold)\n save_array_to_csv(\"{}_test_metrics.csv\".format(unique_name), test_best_results)\n else:\n print ('\\n***************TEST MODEL AT EPOCH {}****************'.format(best_epoch_checkpoint))\n state = load_model(model, optimizer, checkpoint_best_model_checkpoint_path, args.use_cuda,device,scheduler)\n 
test_best_results=evaluate_test(model,device, test_data,epoch=best_epoch_checkpoint,max_loc_value=args.max_loc_value,num_frames=args.num_frames,spatial_threshold=args.spatial_threshold)\n save_array_to_csv(\"{}_test_metrics.csv\".format(unique_name), test_best_results)\n else:\n print ('\\n***************TEST MODEL AT EPOCH {}****************'.format(epoch))\n test_best_results=evaluate_test(model,device, test_data,epoch=epoch,max_loc_value=args.max_loc_value,num_frames=args.num_frames,spatial_threshold=args.spatial_threshold)\n save_array_to_csv(\"{}_test_metrics.csv\".format(unique_name), test_best_results)\n '''\n wandb.log({\"F-Score\": test_best_results[1]},step=epoch)#################################################### WANDB\n wandb.log({\"ER-Score\": test_best_results[2]},step=epoch)\n wandb.log({\"Precision\": test_best_results[3]},step=epoch)\n wandb.log({\"Recall\": test_best_results[4]},step=epoch)\n wandb.log({\"LR Localization Recall (DCASE21)\": test_best_results[-1]},step=epoch)\n wandb.log({\"LE Localization Error (DCASE21)\": test_best_results[-2]},step=epoch)\n wandb.log({\"F (DCASE21)\": test_best_results[-3]},step=epoch)\n wandb.log({\"ER (DCASE21)\": test_best_results[-4]},step=epoch)\n wandb.log({\"SELD Score (DCASE21)\": test_best_results[-5]},step=epoch) \n wandb.log({\"Global SELD (F ER L3DAS21 - LE LR DCASE21)\": test_best_results[-6]},step=epoch) \n wandb.log({\"LSD score\": test_best_results[-7]},step=epoch) \n wandb.log({\"CSL score\": test_best_results[-8]},step=epoch) ''' \n \n if args.test_mode=='test_best':\n state = load_model(model, optimizer, args.load_model, args.use_cuda,device,scheduler)\n if new_best:\n new_best=False \n \n if epoch% args.checkpoint_step==0:\n checkpoint_dir=model_dir+'checkpoint_epoch_{}/'.format(epoch)\n if len(os.path.dirname(checkpoint_dir)) > 0 and not os.path.exists(os.path.dirname(checkpoint_dir)):\n os.makedirs(os.path.dirname(checkpoint_dir))\n print ('\\n***************CHECKPOINT EPOCH {}****************'.format(epoch))\n shutil.copyfile(checkpoint_best_model_path, checkpoint_dir+\"checkpoint_best_epoch_{}\".format(state[\"best_epoch\"])) \n shutil.copyfile(checkpoint_path, checkpoint_dir+\"checkpoint_epoch_{}\".format(epoch)) \n shutil.copyfile(checkpoint_path+'_best_model_on_Test', checkpoint_dir+\"checkpoint_best_model_on_Test_epoch_{}\".format(state[\"best_epoch\"])) \n \n shutil.copyfile(checkpoint_best_model_checkpoint_path, checkpoint_dir+\"checkpoint_best_model_checkpoint_epoch_{}\".format(best_epoch_checkpoint))\n \n shutil.copyfile(\"{}_training_metrics.csv\".format(unique_name), checkpoint_dir+model.model_name+\"_training_metrics_at_epoch_{}.csv\".format(epoch))\n shutil.copyfile(\"{}_test_metrics.csv\".format(unique_name), checkpoint_dir+model.model_name+\"_test_metrics_at_epoch_{}.csv\".format(epoch))\n \n ########################################################################################################################################################\n \n #LOAD BEST MODEL AND COMPUTE LOSS FOR ALL SETS\n print(\"TESTING\")\n # Load best model based on validation loss\n state = load_model(model, None, checkpoint_path+'_best_model_on_Test', args.use_cuda,device,scheduler)\n #compute loss on all set_output_size\n train_loss = evaluate(model, device, criterion_sed, criterion_doa, tr_data)\n val_loss = evaluate(model, device, criterion_sed, criterion_doa, val_data)\n test_loss = evaluate(model, device, criterion_sed, criterion_doa, test_data)\n\n #PRINT AND SAVE RESULTS\n results = {'train_loss': 
train_loss.cpu().detach().numpy(),\n 'val_loss': val_loss.cpu().detach().numpy(),\n 'test_loss': test_loss.cpu().detach().numpy(),\n 'train_loss_hist': train_loss_hist,\n 'val_loss_hist': val_loss_hist}\n\n print(model.model_name)\n print ('RESULTS')\n for i in results:\n if 'hist' not in i:\n print (i, results[i])\n out_path = os.path.join(args.results_path, 'results_dict.json')\n np.save(out_path, results)\n print('*********** TEST BEST MODEL (epoch {}) ************'.format(state['best_test_epoch']))\n test_best_results=evaluate_test(model,device, test_data,epoch=state['best_test_epoch'],max_loc_value=args.max_loc_value,num_frames=args.num_frames,spatial_threshold=args.spatial_threshold)\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n #saving/loading parameters\n parser.add_argument('--results_path', type=str, default='RESULTS/Task2',\n help='Folder to write results dicts into')\n parser.add_argument('--checkpoint_dir', type=str, default='RESULTS/Task2',\n help='Folder to write checkpoints into')\n parser.add_argument('--load_model', type=str, default=None,#'RESULTS/Task2/checkpoint',\n help='Reload a previously trained model (whole task model)')\n #dataset parameters\n parser.add_argument('--training_predictors_path', type=str,default='/var/datasets/L3DAS21/processed/task2_predictors_train.pkl')\n parser.add_argument('--training_target_path', type=str,default='/var/datasets/L3DAS21/processed/task2_target_train.pkl')\n parser.add_argument('--validation_predictors_path', type=str, default='/var/datasets/L3DAS21/processed/task2_predictors_validation.pkl')\n parser.add_argument('--validation_target_path', type=str, default='/var/datasets/L3DAS21/processed/task2_target_validation.pkl')\n parser.add_argument('--test_predictors_path', type=str, default='/var/datasets/L3DAS21/processed/task2_predictors_test.pkl')\n parser.add_argument('--test_target_path', type=str, default='/var/datasets/L3DAS21/processed/task2_target_test.pkl')\n #training parameters\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--use_cuda', type=str, default='True')\n parser.add_argument('--early_stopping', type=str, default='True')\n parser.add_argument('--fixed_seed', type=str, default='True')\n\n parser.add_argument('--lr', type=float, default=0.0001)\n parser.add_argument('--batch_size', type=int, default=1,\n help=\"Batch size\")\n parser.add_argument('--sr', type=int, default=32000,\n help=\"Sampling rate\")\n parser.add_argument('--patience', type=int, default=250,\n help=\"Patience for early stopping on validation set\")\n\n #model parameters\n #the following parameters produce a prediction for each 100-msecs frame\n parser.add_argument('--architecture', type=str, default='DualQSELD-TCN',\n help=\"model's architecture, can be seldnet_vanilla or seldnet_augmented\")\n parser.add_argument('--input_channels', type=int, default=4,\n help=\"4/8 for 1/2 mics, multiply x2 if using also phase information\")\n parser.add_argument('--n_mics', type=int, default=1)\n parser.add_argument('--phase', type=str, default='False')\n parser.add_argument('--class_overlaps', type=int, default=3,\n help= 'max number of simultaneous sounds of the same class')\n parser.add_argument('--time_dim', type=int, default=4800)\n parser.add_argument('--freq_dim', type=int, default=256)\n parser.add_argument('--output_classes', type=int, default=14)\n parser.add_argument('--pool_size', type=str, default='[[8,2],[8,2],[2,2],[1,1]]')\n parser.add_argument('--cnn_filters', type=str, 
default='[64,64,64]')\n parser.add_argument('--pool_time', type=str, default='True')\n parser.add_argument('--dropout_perc', type=float, default=0.3)\n parser.add_argument('--D', type=str, default='[10]')\n parser.add_argument('--G', type=int, default=128)\n parser.add_argument('--U', type=int, default=128)\n parser.add_argument('--V', type=str, default='[128,128]')\n parser.add_argument('--spatial_dropout_rate', type=float, default=0.5)\n parser.add_argument('--batch_norm', type=str, default='BN')\n parser.add_argument('--dilation_mode', type=str, default='fibonacci')\n parser.add_argument('--model_extra_name', type=str, default='')\n parser.add_argument('--test_mode', type=str, default='test_best')\n parser.add_argument('--use_lr_scheduler', type=str, default='True')\n parser.add_argument('--lr_scheduler_step_size', type=int, default=150)\n parser.add_argument('--lr_scheduler_gamma', type=float, default=0.5)\n parser.add_argument('--min_lr', type=float, default=0.000005) \n parser.add_argument('--dataset_normalization', type=str, default='True') \n parser.add_argument('--kernel_size_cnn_blocks', type=int, default=3) \n parser.add_argument('--kernel_size_dilated_conv', type=int, default=3) \n parser.add_argument('--use_tcn', type=str, default='True') \n parser.add_argument('--use_bias_conv', type=str, default='True') \n parser.add_argument('--use_bias_linear', type=str, default='True') \n parser.add_argument('--verbose', type=str, default='False')\n parser.add_argument('--sed_loss_weight', type=float, default=1.)\n parser.add_argument('--doa_loss_weight', type=float, default=5.)\n parser.add_argument('--domain_classifier', type=str, default='same') \n parser.add_argument('--domain', type=str, default='DQ') \n parser.add_argument('--fc_activations', type=str, default='Linear') \n parser.add_argument('--fc_dropout', type=str, default='Last') \n parser.add_argument('--fc_layers', type=str, default='[128]') \n parser.add_argument('--V_kernel_size', type=int, default=3) \n parser.add_argument('--use_time_distributed', type=str, default='False') \n parser.add_argument('--parallel_ConvTC_block', type=str, default='False') \n\n '''parser.add_argument('--wandb_id', type=str, default='none')\n parser.add_argument('--wandb_project', type=str, default='')\n parser.add_argument('--wandb_entity', type=str, default='')'''\n ############## TEST ###################\n parser.add_argument('--max_loc_value', type=float, default=2.,\n help='max value of target loc labels (to rescale model\\'s output since the models has tanh in the output loc layer)')\n parser.add_argument('--num_frames', type=int, default=600,\n help='total number of time frames in the predicted seld matrices. 
(600 for 1-minute sounds with 100msecs frames)')\n parser.add_argument('--spatial_threshold', type=float, default=2.,\n help='max cartesian distance withn consider a true positive')\n ########################################\n\n ######################### CHECKPOINT ####################################################\n parser.add_argument('--checkpoint_step', type=int, default=100,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--test_step', type=int, default=10,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--min_n_epochs', type=int, default=1000,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--Dcase21_metrics_DOA_threshold', type=int, default=20) \n parser.add_argument('--parallel_magphase', type=str, default='False') \n\n parser.add_argument('--TextArgs', type=str, default='config/Test.txt', help='Path to text with training settings')#'config/PHC-SELD-TCN-S1_BN.txt'\n parse_list = readFile(parser.parse_args().TextArgs)\n args = parser.parse_args(parse_list)\n \n #eval string bools and lists\n args.use_cuda = eval(args.use_cuda)\n args.early_stopping = eval(args.early_stopping)\n args.fixed_seed = eval(args.fixed_seed)\n args.pool_size= eval(args.pool_size)\n args.cnn_filters = eval(args.cnn_filters)\n args.verbose = eval(args.verbose)\n args.D=eval(args.D)\n args.V=eval(args.V)\n args.use_lr_scheduler=eval(args.use_lr_scheduler)\n #args.dataset_normalization=eval(args.dataset_normalization)\n args.phase=eval(args.phase)\n args.use_tcn=eval(args.use_tcn)\n args.use_bias_conv=eval(args.use_bias_conv)\n args.use_bias_linear=eval(args.use_bias_linear)\n args.fc_layers = eval(args.fc_layers)\n args.parallel_magphase = eval(args.parallel_magphase)\n\n main(args)\n", "id": "5196451", "language": "Python", "matching_score": 7.736778736114502, "max_stars_count": 0, "path": "train_model.py" }, { "content": "import sys, os\nimport pickle\nimport argparse\nfrom matplotlib.image import pil_to_array\nfrom tqdm import tqdm\nimport numpy as np\nimport soundfile as sf\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as utils\nfrom metrics import location_sensitive_detection\nfrom models.SELD_Model import SELD_Model\nfrom utility_functions import load_model, save_model, gen_submission_list_task2,save_array_to_csv\nfrom torchinfo import summary\nfrom Dcase21_metrics import *\n'''\nLoad pretrained model and compute the metrics for Task 2\nof the L3DAS21 challenge. 
The metric is F score computed with the\nlocation sensitive detection: https://ieeexplore.ieee.org/document/8937220.\nCommand line arguments define the model parameters, the dataset to use and\nwhere to save the obtained results.\n'''\n\ndef load_model(model, optimizer, path, cuda, device,scheduler=None):\n\n if isinstance(model, torch.nn.DataParallel):\n model = model.module # load state dict of wrapped module\n if cuda:\n checkpoint = torch.load(path, map_location=device)\n else:\n checkpoint = torch.load(path, map_location='cpu')\n try:\n model.load_state_dict(checkpoint['model_state_dict'])\n except:\n # work-around for loading checkpoints where DataParallel was saved instead of inner module\n from collections import OrderedDict\n model_state_dict_fixed = OrderedDict()\n prefix = 'module.'\n for k, v in checkpoint['model_state_dict'].items():\n if k.startswith(prefix):\n k = k[len(prefix):]\n model_state_dict_fixed[k] = v\n model.load_state_dict(model_state_dict_fixed)\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n if scheduler is not None:\n scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n if 'state' in checkpoint:\n state = checkpoint['state']\n else:\n # older checkpoints only store step, rest of state won't be there\n state = {'step': checkpoint['step']}\n \n np.random.set_state(checkpoint['random_states'][0])\n torch.set_rng_state(checkpoint['random_states'][1].cpu())\n if torch.cuda.is_available() and checkpoint['random_states'][2] is not None:\n torch.cuda.set_rng_state(checkpoint['random_states'][2].cpu())\n return state\n\ndef main(args):\n \n model_path='RESULTS/Task2/{}/checkpoint'.format(args.architecture)#########\n\n if args.use_cuda:\n device = 'cuda:' + str(args.gpu_id)\n else:\n device = 'cpu'\n \n\n print ('\\nLoading dataset')\n #LOAD DATASET\n with open(args.predictors_path, 'rb') as f:\n predictors = pickle.load(f)\n with open(args.target_path, 'rb') as f:\n target = pickle.load(f)\n\n\n phase_string='_Phase' if args.phase else ''\n dataset_string='L3DAS21_'+str(args.n_mics)+'Mics_Magnidute'+phase_string+'_'+str(args.input_channels)+'Ch'\n #####################################NORMALIZATION####################################\n if args.dataset_normalization not in {'False','false','None','none'}:\n print('\\nDataset_Normalization')\n if args.dataset_normalization in{'DQ_Normalization','UnitNormNormalization','UnitNorm'}:\n predictors = torch.tensor(predictors)\n target = torch.tensor(target)\n if args.n_mics==2:\n if args.domain in ['DQ','dq','dQ','Dual_Quaternion','dual_quaternion']:\n dataset_string+=' Dataset Normalization for 2Mic 8Ch Magnitude Dual Quaternion UnitNorm'\n print('Dataset Normalization for 2Mic 8Ch Magnitude Dual Quaternion UnitNorm')\n ## TEST PREDICTORS ##\n q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3 = torch.chunk(predictors[:,:8,:,:], chunks=8, dim=1)\n denominator_0 = q_0 ** 2 + q_1 ** 2 + q_2 ** 2 + q_3 ** 2\n denominator_1 = torch.sqrt(denominator_0)\n deno_cross = q_0 * p_0 + q_1 * p_1 + q_2 * p_2 + q_3 * p_3\n\n p_0 = p_0 - deno_cross / denominator_0 * q_0\n p_1 = p_1 - deno_cross / denominator_0 * q_1\n p_2 = p_2 - deno_cross / denominator_0 * q_2\n p_3 = p_3 - deno_cross / denominator_0 * q_3\n\n q_0 = q_0 / denominator_1\n q_1 = q_1 / denominator_1\n q_2 = q_2 / denominator_1\n q_3 = q_3 / denominator_1\n\n predictors[:,:8,:,:] = torch.cat([q_0, q_1, q_2, q_3, p_0, p_1, p_2, p_3], dim=1) \n if args.phase:\n raise ValueError('DATASET NORMALIZATION FOR PHASE DUAL QUATERNION NOT YET 
IMPLEMENTED')\n print('Dataset Normalization for 2Mic 16Ch Magnitude-Phase Dual Quaternion ')\n predictors = np.array(predictors)\n target = np.array(target)\n print ('\\nShapes:')\n print ('Test predictors: ', predictors.shape)\n print ('Test target: ',target.shape)\n else:\n predictors = np.array(predictors)\n target = np.array(target)\n print ('\\nShapes:')\n print ('Test predictors: ', predictors.shape)\n print ('Test target: ', target.shape)\n if args.n_mics==1:\n dataset_string+=' Dataset Normalization for 1Mic 4Ch Magnitude'\n print('Dataset Normalization for 1Mic 4Ch Magnitude')\n # Normalize test predictors with mean 0 and std 1\n test_mag_min = np.mean(predictors[:,:4,:,:])\n test_mag_std = np.std(predictors[:,:4,:,:]) \n predictors[:,:4,:,:] -= test_mag_min\n predictors[:,:4,:,:] /= test_mag_std\n if args.phase:\n dataset_string+=' Dataset Normalization for 1Mic 8Ch Magnitude-Phase'\n print('Dataset Normalization for 1Mic 8Ch Magnitude-Phase')\n test_phase_min = np.mean(predictors[:,4:,:,:])\n test_phase_std = np.std(predictors[:,4:,:,:])\n predictors[:,4:,:,:] -= test_phase_min\n predictors[:,4:,:,:] /= test_phase_std\n if args.n_mics==2:\n dataset_string+=' Dataset Normalization for 2Mic 8Ch Magnitude'\n print('Dataset Normalization for 2Mic 8Ch Magnitude')\n # Normalize test predictors with mean 0 and std 1\n test_mag_min = np.mean(predictors[:,:8,:,:])\n test_mag_std = np.std(predictors[:,:8,:,:]) \n predictors[:,:8,:,:] -= test_mag_min\n predictors[:,:8,:,:] /= test_mag_std\n if args.phase:\n dataset_string+=' Dataset Normalization for 2Mic 16Ch Magnitude-Phase'\n print('Dataset Normalization for 2Mic 16Ch Magnitude-Phase')\n test_phase_min = np.mean(predictors[:,8:,:,:])\n test_phase_std = np.std(predictors[:,8:,:,:])\n predictors[:,8:,:,:] -= test_phase_min\n predictors[:,8:,:,:] /= test_phase_std\n else:\n predictors = np.array(predictors)\n target = np.array(target)\n print ('\\nShapes:')\n print ('Test predictors: ', predictors.shape)\n print ('Test target: ', target.shape)\n \n #convert to tensor\n predictors = torch.tensor(predictors).float()\n target = torch.tensor(target).float()\n #build dataset from tensors\n dataset_ = utils.TensorDataset(predictors, target)\n #build data loader from dataset\n dataloader = utils.DataLoader(dataset_, 1, shuffle=False, pin_memory=True)\n\n if not os.path.exists(args.results_path):\n os.makedirs(args.results_path)\n\n #LOAD MODEL\n n_time_frames = predictors.shape[-1]\n\n model=SELD_Model(time_dim=n_time_frames, freq_dim=args.freq_dim, input_channels=args.input_channels, output_classes=args.output_classes,\n domain=args.domain, domain_classifier=args.domain_classifier,\n cnn_filters=args.cnn_filters, kernel_size_cnn_blocks=args.kernel_size_cnn_blocks, pool_size=args.pool_size, pool_time=args.pool_time,\n D=args.D, dilation_mode=args.dilation_mode,G=args.G, U=args.U, kernel_size_dilated_conv=args.kernel_size_dilated_conv,\n spatial_dropout_rate=args.spatial_dropout_rate,V=args.V, V_kernel_size=args.V_kernel_size,\n fc_layers=args.fc_layers, fc_activations=args.fc_activations, fc_dropout=args.fc_dropout, dropout_perc=args.dropout_perc, \n class_overlaps=args.class_overlaps,\n use_bias_conv=args.use_bias_conv,use_bias_linear=args.use_bias_linear,batch_norm=args.batch_norm, parallel_ConvTC_block=args.parallel_ConvTC_block, parallel_magphase=args.parallel_magphase,\n extra_name=args.model_extra_name, verbose=False)\n \n architecture_dir='RESULTS/Task2/{}/'.format(args.architecture)\n if len(os.path.dirname(architecture_dir)) > 0 and not 
os.path.exists(os.path.dirname(architecture_dir)):\n os.makedirs(os.path.dirname(architecture_dir))\n model_dir=architecture_dir+model.model_name+'/'\n if len(os.path.dirname(model_dir)) > 0 and not os.path.exists(os.path.dirname(model_dir)):\n os.makedirs(os.path.dirname(model_dir))\n args.load_model=model_dir+'checkpoint_best_model_on_Test'\n unique_name=model_dir+model.model_name\n print(model.model_name)\n #summary(model, input_size=(args.batch_size,args.input_channels,args.freq_dim,n_time_frames)) ##################################################\n \n if args.use_cuda:\n print(\"Moving model to gpu\")\n model = model.to(device)\n\n #load checkpoint\n if args.load_model is not None and os.path.isfile(args.load_model) :####################################### added \"and os.path.isfile(args.load_model)\"\n print(\"Loading Model\")\n state = load_model(model, None, args.load_model, args.use_cuda,device,None)\n \n #COMPUTING METRICS\n print(\"COMPUTING TASK 2 METRICS\")\n TP = 0\n FP = 0\n FN = 0\n output_classes=args.output_classes\n class_overlaps=args.class_overlaps\n\n count = 0\n model.eval()\n eval_metrics = SELDMetrics(nb_classes=output_classes, doa_threshold=args.Dcase21_metrics_DOA_threshold)\n \n with tqdm(total=len(dataloader) // 1) as pbar, torch.no_grad():\n for example_num, (x, target) in enumerate(dataloader):\n x = x.to(device)\n target = target.to(device)\n \n sed, doa = model(x)\n sed = sed.cpu().numpy().squeeze()\n doa = doa.cpu().numpy().squeeze()\n target = target.cpu().numpy().squeeze()\n #in the target matrices sed and doa are joint\n sed_target = target[:,:args.output_classes*args.class_overlaps]\n doa_target = target[:,args.output_classes*args.class_overlaps:]\n\n prediction,prediction_dict = gen_submission_list_task2(sed, doa,\n max_overlaps=args.class_overlaps,\n max_loc_value=args.max_loc_value)\n\n target,target_dict = gen_submission_list_task2(sed_target, doa_target,\n max_overlaps=args.class_overlaps,\n max_loc_value=args.max_loc_value)\n\n\n pred_labels =segment_labels(prediction_dict, args.num_frames)\n ref_labels =segment_labels(target_dict, args.num_frames)\n eval_metrics.update_seld_scores(pred_labels, ref_labels)\n \n \n tp, fp, fn, _ = location_sensitive_detection(prediction, target, args.num_frames,\n args.spatial_threshold, False)\n\n TP += tp\n FP += fp\n FN += fn\n\n count += 1\n pbar.update(1)\n\n #compute total F score\n precision = TP / (TP + FP + sys.float_info.epsilon)\n recall = TP / (TP + FN + sys.float_info.epsilon)\n F_score = 2 * ((precision * recall) / (precision + recall + sys.float_info.epsilon))\n Nref=TP+FN\n Nsys=TP+FP\n ER_score = (max(Nref, Nsys) - TP) / (Nref + 0.0)################ from evaluation_metrics.py SELDnet\n \n ER_dcase21, F_dcase21, LE_dcase21, LR_dcase21 = eval_metrics.compute_seld_scores()\n\n #SELD_dcase21 = np.mean([ER_dcase21,1 - F_dcase21, LE_dcase21/180,1 - LR_dcase21])\n SELD_L3DAS21_LRLE = np.mean([ER_score,1 - F_score, LE_dcase21/180,1 - LR_dcase21])\n CSL_score= np.mean([LE_dcase21/180,1 - LR_dcase21])\n LSD_score=np.mean([1-F_score,ER_score])\n \n\n #visualize and save results\n results = {'precision': precision,\n 'recall': recall,\n 'F score': F_score,\n 'ER score': ER_score,\n 'LE': LE_dcase21,\n 'LR': LR_dcase21,\n 'CSL score': CSL_score,\n 'LSD score': LSD_score,\n 'Global SELD score': SELD_L3DAS21_LRLE\n }\n print ('*******************************')\n print ('RESULTS')\n print ('TP: ' , TP)\n print ('FP: ' , FP)\n print ('FN: ' , FN)\n print ('******** SELD (F ER L3DAS21 - LE LR DCASE21) 
***********')\n print ('Global SELD score: ', SELD_L3DAS21_LRLE)\n print ('LSD score: ', LSD_score)\n print ('CSL score: ', CSL_score)\n print ('F score: ', F_score)\n print ('ER score: ', ER_score)\n print ('LE: ', LE_dcase21)\n print ()\n \n out_path = os.path.join(args.results_path, 'task2_metrics_dict.json')\n np.save(out_path, results)\n\n\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n #saving/loading parameters\n parser.add_argument('--results_path', type=str, default='RESULTS/Task2',\n help='Folder to write results dicts into')\n parser.add_argument('--checkpoint_dir', type=str, default='RESULTS/Task2',\n help='Folder to write checkpoints into')\n parser.add_argument('--load_model', type=str, default=None,#'RESULTS/Task2/checkpoint',\n help='Reload a previously trained model (whole task model)')\n #dataset parameters\n parser.add_argument('--training_predictors_path', type=str,default='/var/datasets/L3DAS21/processed/task2_predictors_train.pkl')\n parser.add_argument('--training_target_path', type=str,default='/var/datasets/L3DAS21/processed/task2_target_train.pkl')\n parser.add_argument('--validation_predictors_path', type=str, default='/var/datasets/L3DAS21/processed/task2_predictors_validation.pkl')\n parser.add_argument('--validation_target_path', type=str, default='/var/datasets/L3DAS21/processed/task2_target_validation.pkl')\n parser.add_argument('--test_predictors_path', type=str, default='/var/datasets/L3DAS21/processed/task2_predictors_test.pkl')\n parser.add_argument('--test_target_path', type=str, default='/var/datasets/L3DAS21/processed/task2_target_test.pkl')\n #training parameters\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--use_cuda', type=str, default='True')\n parser.add_argument('--early_stopping', type=str, default='True')\n parser.add_argument('--fixed_seed', type=str, default='True')\n\n parser.add_argument('--lr', type=float, default=0.0001)\n parser.add_argument('--batch_size', type=int, default=1,\n help=\"Batch size\")\n parser.add_argument('--sr', type=int, default=32000,\n help=\"Sampling rate\")\n parser.add_argument('--patience', type=int, default=250,\n help=\"Patience for early stopping on validation set\")\n\n #model parameters\n #the following parameters produce a prediction for each 100-msecs frame\n parser.add_argument('--architecture', type=str, default='DualQSELD-TCN',\n help=\"model's architecture, can be seldnet_vanilla or seldnet_augmented\")\n parser.add_argument('--input_channels', type=int, default=4,\n help=\"4/8 for 1/2 mics, multiply x2 if using also phase information\")\n parser.add_argument('--n_mics', type=int, default=1)\n parser.add_argument('--phase', type=str, default='False')\n parser.add_argument('--class_overlaps', type=int, default=3,\n help= 'max number of simultaneous sounds of the same class')\n parser.add_argument('--time_dim', type=int, default=4800)\n parser.add_argument('--freq_dim', type=int, default=256)\n parser.add_argument('--output_classes', type=int, default=14)\n parser.add_argument('--pool_size', type=str, default='[[8,2],[8,2],[2,2],[1,1]]')\n parser.add_argument('--cnn_filters', type=str, default='[64,64,64]')\n parser.add_argument('--pool_time', type=str, default='True')\n parser.add_argument('--dropout_perc', type=float, default=0.3)\n parser.add_argument('--D', type=str, default='[10]')\n parser.add_argument('--G', type=int, default=128)\n parser.add_argument('--U', type=int, default=128)\n parser.add_argument('--V', type=str, default='[128,128]')\n 
parser.add_argument('--spatial_dropout_rate', type=float, default=0.5)\n parser.add_argument('--batch_norm', type=str, default='BN')\n parser.add_argument('--dilation_mode', type=str, default='fibonacci')\n parser.add_argument('--model_extra_name', type=str, default='')\n parser.add_argument('--test_mode', type=str, default='test_best')\n parser.add_argument('--use_lr_scheduler', type=str, default='True')\n parser.add_argument('--lr_scheduler_step_size', type=int, default=150)\n parser.add_argument('--lr_scheduler_gamma', type=float, default=0.5)\n parser.add_argument('--min_lr', type=float, default=0.000005) \n parser.add_argument('--dataset_normalization', type=str, default='True') \n parser.add_argument('--kernel_size_cnn_blocks', type=int, default=3) \n parser.add_argument('--kernel_size_dilated_conv', type=int, default=3) \n parser.add_argument('--use_tcn', type=str, default='True') \n parser.add_argument('--use_bias_conv', type=str, default='True') \n parser.add_argument('--use_bias_linear', type=str, default='True') \n parser.add_argument('--verbose', type=str, default='False')\n parser.add_argument('--sed_loss_weight', type=float, default=1.)\n parser.add_argument('--doa_loss_weight', type=float, default=5.)\n parser.add_argument('--domain_classifier', type=str, default='same') \n parser.add_argument('--domain', type=str, default='DQ') \n parser.add_argument('--fc_activations', type=str, default='Linear') \n parser.add_argument('--fc_dropout', type=str, default='Last') \n parser.add_argument('--fc_layers', type=str, default='[128]') \n parser.add_argument('--V_kernel_size', type=int, default=3) \n parser.add_argument('--use_time_distributed', type=str, default='False') \n parser.add_argument('--parallel_ConvTC_block', type=str, default='False') \n\n '''parser.add_argument('--wandb_id', type=str, default='none')\n parser.add_argument('--wandb_project', type=str, default='')\n parser.add_argument('--wandb_entity', type=str, default='')'''\n ############## TEST ###################\n parser.add_argument('--max_loc_value', type=float, default=2.,\n help='max value of target loc labels (to rescale model\\'s output since the models has tanh in the output loc layer)')\n parser.add_argument('--num_frames', type=int, default=600,\n help='total number of time frames in the predicted seld matrices. 
(600 for 1-minute sounds with 100msecs frames)')\n parser.add_argument('--spatial_threshold', type=float, default=2.,\n help='max cartesian distance withn consider a true positive')\n ########################################\n\n ######################### CHECKPOINT ####################################################\n parser.add_argument('--checkpoint_step', type=int, default=100,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--test_step', type=int, default=10,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--min_n_epochs', type=int, default=1000,\n help=\"Save and test models every checkpoint_step epochs\")\n parser.add_argument('--Dcase21_metrics_DOA_threshold', type=int, default=20) \n parser.add_argument('--parallel_magphase', type=str, default='False') \n\n parser.add_argument('--TextArgs', type=str, default='config/Test.txt', help='Path to text with training settings')#'config/PHC-SELD-TCN-S1_BN.txt'\n parse_list = readFile(parser.parse_args().TextArgs)\n args = parser.parse_args(parse_list)\n \n #eval string bools and lists\n args.use_cuda = eval(args.use_cuda)\n args.early_stopping = eval(args.early_stopping)\n args.fixed_seed = eval(args.fixed_seed)\n args.pool_size= eval(args.pool_size)\n args.cnn_filters = eval(args.cnn_filters)\n args.verbose = eval(args.verbose)\n args.D=eval(args.D)\n args.V=eval(args.V)\n args.use_lr_scheduler=eval(args.use_lr_scheduler)\n #args.dataset_normalization=eval(args.dataset_normalization)\n args.phase=eval(args.phase)\n args.use_tcn=eval(args.use_tcn)\n args.use_bias_conv=eval(args.use_bias_conv)\n args.use_bias_linear=eval(args.use_bias_linear)\n args.fc_layers = eval(args.fc_layers)\n args.parallel_magphase = eval(args.parallel_magphase)\n\n main(args)\n\n", "id": "3093596", "language": "Python", "matching_score": 5.971781253814697, "max_stars_count": 0, "path": "evaluate_model.py" }, { "content": "## https://arxiv.org/abs/2204.01851\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport os\r\nimport numpy as np\r\nimport utility_functions as uf\r\nfrom torchinfo import summary\r\nfrom models.dual_quaternion_layers.dual_quat_layers import * \r\nfrom models.quaternion_layers.quaternion_layers import *\r\nimport math\r\n\r\n\r\nclass ResBlock(nn.Module):\r\n def __init__(self, in_channels, \r\n domain='DQ',\r\n G=128,U=128, kernel_size_dilated_conv=3, dilation=1, stride=1,\r\n spatial_dropout_rate=0.5, use_bias_conv=True,batch_norm='BN',verbose=False):\r\n\r\n super(ResBlock, self).__init__()\r\n self.verbose = verbose\r\n self.batch_norm=batch_norm\r\n self.spatial_dropout_rate=spatial_dropout_rate\r\n self.domain = domain\r\n padding=int(((kernel_size_dilated_conv-1) * dilation)/2)\r\n L=in_channels\r\n if self.domain=='Q':\r\n self.conv1_filter = QuaternionConv(L, G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, padding=padding,\r\n dilatation=dilation, bias=use_bias_conv, operation='convolution1d')\r\n self.conv1_gate = QuaternionConv(L, G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, padding=padding,\r\n dilatation=dilation, bias=use_bias_conv, operation='convolution1d')\r\n elif self.domain=='DQ':\r\n self.conv1_filter = DualQuaternionConv(L,G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, padding=padding,\r\n dilatation=dilation, bias=use_bias_conv, operation='convolution1d')\r\n self.conv1_gate = DualQuaternionConv(L, G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, 
padding=padding,\r\n dilatation=dilation, bias=use_bias_conv, operation='convolution1d')\r\n else:\r\n self.conv1_filter = nn.Conv1d(L,G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, padding=padding,\r\n dilation=dilation,bias=use_bias_conv)\r\n self.conv1_gate = nn.Conv1d(L,G, kernel_size=kernel_size_dilated_conv,\r\n stride=stride, padding=padding,\r\n dilation=dilation,bias=use_bias_conv)\r\n\r\n if batch_norm=='BN'or batch_norm=='BN_on_TCN'or batch_norm=='BNonTCN':\r\n self.batch_filter=nn.BatchNorm1d(G)\r\n self.batch_gate=nn.BatchNorm1d(G)\r\n \r\n self.tanh = nn.Tanh()\r\n self.sigmoid = nn.Sigmoid()\r\n if(not spatial_dropout_rate==0):\r\n self.dropout = nn.Dropout2d(p=spatial_dropout_rate)\r\n\r\n if self.domain=='Q':\r\n self.conv2_skip = QuaternionConv(G,U, kernel_size=1, stride=1, bias=use_bias_conv, operation='convolution1d')\r\n self.conv2_residual= QuaternionConv(G,L, kernel_size=1, stride=1, bias=use_bias_conv, operation='convolution1d')\r\n elif self.domain=='DQ':\r\n self.conv2_skip = DualQuaternionConv(G,U, kernel_size=1, stride=1, bias=use_bias_conv, operation='convolution1d')\r\n self.conv2_residual= DualQuaternionConv(G,L, kernel_size=1, stride=1, bias=use_bias_conv, operation='convolution1d')\r\n else:\r\n self.conv2_skip = nn.Conv1d(G,U, kernel_size=1, stride=1,bias=use_bias_conv)\r\n self.conv2_residual= nn.Conv1d(G,L, kernel_size=1, stride=1,bias=use_bias_conv)\r\n \r\n def forward(self, x):\r\n\r\n y_f=self.conv1_filter(x)\r\n y_g=self.conv1_gate(x)\r\n if self.batch_norm in {'BN','BN_on_TCN','BNonTCN'}:\r\n y_f=self.batch_filter(y_f)\r\n y_g=self.batch_gate(y_g)\r\n y=self.tanh(y_f)*self.sigmoid(y_g)\r\n if(not self.spatial_dropout_rate==0):\r\n y=self.dropout(y)\r\n y_skip=self.conv2_skip(y)\r\n y_residual=self.conv2_residual(y)\r\n return x+y_residual,y_skip\r\n\r\nclass TC_Block(nn.Module):\r\n def __init__(self, in_channels, domain='DQ', \r\n G=128,U=128, V=[128,128], V_kernel_size=3,pool_size=[[8,2],[8,2],[2,2]], D=[10], \r\n spatial_dropout_rate=0.5, use_bias_conv=True,dilation_mode='fibonacci', pool_time='TCN',batch_norm='BN',\r\n kernel_size_dilated_conv=3,verbose=False):\r\n super(TC_Block, self).__init__()\r\n self.verbose = verbose\r\n self.ResBlocks = nn.ModuleList()\r\n self.D=D\r\n self.pool_time=pool_time\r\n self.domain = domain\r\n\r\n for n_resblock in D:\r\n dilation=1\r\n prec_1=1\r\n prec_2=0\r\n if type(n_resblock)==list:\r\n for d in (n_resblock):\r\n dilation=d\r\n \r\n self.ResBlocks.append(ResBlock(in_channels=in_channels, \r\n domain=domain,\r\n G=G,U=U,kernel_size_dilated_conv=kernel_size_dilated_conv, \r\n dilation=dilation, spatial_dropout_rate=spatial_dropout_rate, \r\n use_bias_conv=use_bias_conv,batch_norm=batch_norm,verbose=verbose))\r\n else:\r\n for d in range(n_resblock):\r\n if dilation_mode=='fibonacci':\r\n if d==0:\r\n dilation=1\r\n else:\r\n dilation=prec_1+prec_2\r\n prec_2=prec_1\r\n prec_1=dilation\r\n else:\r\n dilation=2**d\r\n self.ResBlocks.append(ResBlock(in_channels=in_channels, \r\n domain=domain,\r\n G=G,U=U,kernel_size_dilated_conv=kernel_size_dilated_conv, \r\n dilation=dilation, spatial_dropout_rate=spatial_dropout_rate, \r\n use_bias_conv=use_bias_conv,batch_norm=batch_norm,verbose=verbose))\r\n\r\n self.relu1=nn.ReLU()\r\n\r\n if self.pool_time=='TCN':\r\n self.maxpool1=nn.MaxPool1d(pool_size[0][1])\r\n \r\n \r\n \r\n if self.domain=='Q':\r\n self.conv1 = QuaternionConv(in_channels, V[0], kernel_size=V_kernel_size, stride=1,padding=1, bias=use_bias_conv, operation='convolution1d')\r\n elif 
self.domain=='DQ':\r\n self.conv1 = DualQuaternionConv(in_channels, V[0], kernel_size=V_kernel_size, stride=1,padding=1, bias=use_bias_conv, operation='convolution1d')\r\n else:\r\n self.conv1 = nn.Conv1d(in_channels,V[0], kernel_size=V_kernel_size, stride=1,padding=1,bias=use_bias_conv)\r\n self.relu2=nn.ReLU()\r\n\r\n if self.pool_time=='TCN':\r\n self.maxpool2=nn.MaxPool1d(pool_size[1][1])\r\n \r\n if self.domain=='Q':\r\n self.conv2 = QuaternionConv(V[0], V[1], kernel_size=V_kernel_size, stride=1,padding=1, bias=use_bias_conv, operation='convolution1d')\r\n elif self.domain=='DQ':\r\n self.conv2 = DualQuaternionConv(V[0],V[1], kernel_size=V_kernel_size, stride=1,padding=1, bias=use_bias_conv, operation='convolution1d')\r\n else:\r\n self.conv2 = nn.Conv1d(V[0],V[1], kernel_size=V_kernel_size, stride=1,padding=1,bias=use_bias_conv)\r\n\r\n self.tanh=nn.Tanh()\r\n if self.pool_time=='TCN':\r\n self.maxpool3=nn.MaxPool1d(pool_size[2][1])\r\n \r\n def forward(self, residual):\r\n skip_connections=[]\r\n for i in range(len(self.ResBlocks)):\r\n residual,skip=self.ResBlocks[i](residual)\r\n skip_connections.append(skip)\r\n sum_skip=skip_connections[0]\r\n for i in range(1,len(skip_connections)):\r\n sum_skip+=skip_connections[i]\r\n out= self.relu1(sum_skip)\r\n if self.pool_time=='TCN':\r\n out=self.maxpool1(out)\r\n out= self.conv1(out)\r\n out= self.relu2(out)\r\n if self.pool_time=='TCN':\r\n out=self.maxpool2(out)\r\n out= self.conv2(out)\r\n out= self.tanh(out)\r\n if self.pool_time=='TCN':\r\n out=self.maxpool3(out)\r\n return out\r\n \r\n\r\nclass ConvTC_Block(nn.Module):\r\n def __init__(self, time_dim, freq_dim=256, input_channels=4, \r\n domain='DQ',\r\n cnn_filters=[64,64,64], kernel_size_cnn_blocks=3, pool_size=[[8,2],[8,2],[2,2]], pool_time='TCN',\r\n D=[10], dilation_mode='fibonacci',G=128, U=128, kernel_size_dilated_conv=3,spatial_dropout_rate=0.5,\r\n V=[128,128], V_kernel_size=3,\r\n dropout_perc=0.3, use_bias_conv=True,batch_norm='noBN',\r\n verbose=False):\r\n super(ConvTC_Block, self).__init__()\r\n self.time_dim = time_dim\r\n self.freq_dim = freq_dim\r\n self.domain = domain\r\n self.verbose = verbose\r\n self.D=D\r\n self.kernel_size_dilated_conv=kernel_size_dilated_conv\r\n self.dilation_mode=dilation_mode\r\n\r\n if pool_time=='CNN':\r\n self.time_pooled_size = int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\r\n else:\r\n self.time_pooled_size = time_dim\r\n #building CNN feature extractor\r\n conv_layers = []\r\n layers_list=[]\r\n in_chans = input_channels\r\n \r\n for i, (p,c) in enumerate(zip(pool_size, np.array(cnn_filters))):\r\n curr_chans = c\r\n\r\n if pool_time=='CNN':\r\n pool = [p[0],p[1]]\r\n else:\r\n pool = [p[0],1]\r\n\r\n if self.domain=='Q':\r\n layers_list.append(QuaternionConv(in_chans, out_channels=curr_chans, kernel_size=kernel_size_cnn_blocks,\r\n stride=1, padding=1, operation='convolution2d', bias=use_bias_conv))\r\n elif self.domain=='DQ':\r\n layers_list.append(DualQuaternionConv(in_chans, out_channels=curr_chans, kernel_size=kernel_size_cnn_blocks,\r\n stride=1, padding=1, operation='convolution2d', bias=use_bias_conv))\r\n else:\r\n layers_list.append(nn.Conv2d(in_chans, out_channels=curr_chans, kernel_size=kernel_size_cnn_blocks,stride=1, padding=1,bias=use_bias_conv))\r\n \r\n if batch_norm=='BN'or batch_norm=='BN_on_CNN'or batch_norm=='BNonCNN':\r\n layers_list.append(nn.BatchNorm2d(c))\r\n layers_list.append(nn.ReLU())\r\n layers_list.append(nn.MaxPool2d(pool))\r\n layers_list.append(nn.Dropout(dropout_perc))\r\n 
conv_layers.append(nn.Sequential(*layers_list))\r\n layers_list=[]\r\n in_chans = curr_chans\r\n \r\n self.cnn = nn.Sequential(*conv_layers)\r\n\r\n L = int(freq_dim / np.prod(np.array(pool_size), axis=0)[0]*cnn_filters[-1])#input dimension for QTCN Block\r\n\r\n self.tcn=TC_Block(in_channels=L, domain=domain,\r\n G=G,U=U,V=V,V_kernel_size=V_kernel_size, pool_size=pool_size, D=D, \r\n spatial_dropout_rate=spatial_dropout_rate, use_bias_conv=use_bias_conv,\r\n dilation_mode=dilation_mode, pool_time=pool_time,batch_norm=batch_norm,\r\n kernel_size_dilated_conv=kernel_size_dilated_conv,verbose=verbose)\r\n\r\n \r\n def forward(self, x):\r\n \r\n x = self.cnn(x)\r\n if self.verbose:\r\n print ('cnn out ', x.shape) \r\n\r\n x = x.permute(0,3,1,2) \r\n if self.verbose:\r\n print ('permuted: ', x.shape) \r\n\r\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\r\n if self.verbose:\r\n print ('reshaped: ', x.shape) \r\n\r\n x = x.permute(0,2,1)\r\n if self.verbose:\r\n print ('permute2: ', x.shape) \r\n \r\n x= self.tcn(x)\r\n \r\n if self.verbose:\r\n print ('tcn out: ', x.shape) \r\n x = x.permute(0,2,1) \r\n if self.verbose:\r\n print ('permute3: ', x.shape) \r\n return x\r\n\r\n\r\nclass SELD_Model(nn.Module):\r\n def __init__(self, time_dim, freq_dim=256, input_channels=4, output_classes=14,\r\n domain='DQ',domain_classifier='same', \r\n cnn_filters=[64,64,64], kernel_size_cnn_blocks=3, pool_size=[[8,2],[8,2],[2,2]], pool_time='TCN',\r\n D=[10], dilation_mode='fibonacci',G=128, U=128, kernel_size_dilated_conv=3,spatial_dropout_rate=0.5,V=[128,128], V_kernel_size=3,\r\n fc_layers=[128], fc_activations='Linear', fc_dropout='all', dropout_perc=0.3, \r\n class_overlaps=3.,\r\n use_bias_conv=False,use_bias_linear=True,batch_norm='BN',parallel_ConvTC_block='False',parallel_magphase=False,\r\n extra_name='', verbose=False):\r\n super(SELD_Model, self).__init__()\r\n self.input_channels=input_channels\r\n self.time_dim = time_dim\r\n self.freq_dim = freq_dim\r\n self.domain = domain\r\n self.verbose = verbose\r\n self.D=D\r\n self.kernel_size_dilated_conv=kernel_size_dilated_conv\r\n self.dilation_mode=dilation_mode\r\n self.parallel_magphase=parallel_magphase\r\n self.domain_classifier=domain if domain_classifier=='same' else domain_classifier\r\n self.receptive_field, self.total_n_resblocks=self.calculate_receptive_field()\r\n self.parallel_ConvTC_block=parallel_ConvTC_block\r\n\r\n if domain in{'q','Q','quaternion','Quaternion'}:\r\n self.model_name='Q'\r\n elif domain in{'dq','dQ','DQ','dual_quaternion','Dual_Quaternion'}:\r\n self.model_name='DualQ'\r\n else:\r\n self.model_name=''\r\n self.model_name+='SELD'\r\n self.model_name+='-TCN'\r\n if dilation_mode=='fibonacci':\r\n self.model_name+='-PHI'\r\n self.model_name+='-'\r\n if len(D)>1:\r\n if D[0]<D[1]:\r\n self.model_name+='I'\r\n self.model_name+='S'+str(len(D))\r\n #self.model_name+='_D_'+str(D)\r\n if parallel_ConvTC_block not in {'False','false','None','none'}:\r\n self.model_name+='_'+parallel_ConvTC_block\r\n self.model_name+='_'+batch_norm\r\n #if pool_time=='TCN':\r\n # self.model_name+='_ptTCN'\r\n if pool_time=='CNN':\r\n self.model_name+='_pooltCNN'\r\n self.model_name+='_RF{}_{}RB'.format(self.receptive_field,self.total_n_resblocks)\r\n \r\n self.model_name+=extra_name\r\n\r\n sed_output_size = int(output_classes * class_overlaps) #here 3 is the max number of simultaneus sounds from the same class\r\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\r\n\r\n if parallel_ConvTC_block in 
{'2Parallel','2BParallel','2ParallelBranches','2PB'}:\r\n self.branch_A=ConvTC_Block(time_dim=time_dim, freq_dim=freq_dim, input_channels=input_channels//2, \r\n domain=domain,\r\n cnn_filters=cnn_filters, kernel_size_cnn_blocks=kernel_size_cnn_blocks, pool_size=pool_size, pool_time=pool_time,\r\n D=D, dilation_mode=dilation_mode,G=G, U=U, kernel_size_dilated_conv=kernel_size_dilated_conv,spatial_dropout_rate=spatial_dropout_rate,\r\n V=V, V_kernel_size=V_kernel_size,\r\n dropout_perc=dropout_perc,use_bias_conv=use_bias_conv,batch_norm=batch_norm,verbose=False)\r\n self.branch_B=ConvTC_Block(time_dim=time_dim, freq_dim=freq_dim, input_channels=input_channels//2, \r\n domain=domain,\r\n cnn_filters=cnn_filters, kernel_size_cnn_blocks=kernel_size_cnn_blocks, pool_size=pool_size, pool_time=pool_time,\r\n D=D, dilation_mode=dilation_mode,G=G, U=U, kernel_size_dilated_conv=kernel_size_dilated_conv,spatial_dropout_rate=spatial_dropout_rate,\r\n V=V, V_kernel_size=V_kernel_size,\r\n dropout_perc=dropout_perc,use_bias_conv=use_bias_conv,batch_norm=batch_norm,verbose=False)\r\n fc_input_size=V[-1]*2\r\n else:\r\n self.seld_block=ConvTC_Block(time_dim=time_dim, freq_dim=freq_dim, input_channels=input_channels, \r\n domain=domain, \r\n cnn_filters=cnn_filters, kernel_size_cnn_blocks=kernel_size_cnn_blocks, pool_size=pool_size, pool_time=pool_time,\r\n D=D, dilation_mode=dilation_mode,G=G, U=U, kernel_size_dilated_conv=kernel_size_dilated_conv,spatial_dropout_rate=spatial_dropout_rate,\r\n V=V, V_kernel_size=V_kernel_size,\r\n dropout_perc=dropout_perc,use_bias_conv=use_bias_conv,batch_norm=batch_norm,verbose=False)\r\n fc_input_size=V[-1]\r\n fc_sed_list = []\r\n fc_doa_list = []\r\n\r\n for fc_layer in fc_layers:\r\n \r\n if self.domain_classifier=='Q':\r\n fc_sed_list.append(QuaternionLinear(fc_input_size, fc_layer, bias=use_bias_linear))\r\n fc_doa_list.append(QuaternionLinear(fc_input_size, fc_layer, bias=use_bias_linear))\r\n elif self.domain_classifier=='DQ':\r\n fc_sed_list.append(DualQuaternionLinear(fc_input_size, fc_layer, bias=use_bias_linear))\r\n fc_doa_list.append(DualQuaternionLinear(fc_input_size, fc_layer, bias=use_bias_linear))\r\n else:\r\n fc_sed_list.append(nn.Linear(fc_input_size, fc_layer,bias=use_bias_linear))\r\n fc_doa_list.append(nn.Linear(fc_input_size, fc_layer,bias=use_bias_linear))\r\n \r\n if fc_activations in {'relu','ReLU','RELU'}:\r\n fc_sed_list.append(nn.ReLU())\r\n fc_doa_list.append(nn.ReLU())\r\n if fc_dropout in {'all','ALL','True'}:\r\n fc_sed_list.append(nn.Dropout(dropout_perc))\r\n fc_doa_list.append(nn.Dropout(dropout_perc))\r\n fc_input_size=fc_layer\r\n if fc_dropout in {'last','Last','LAST'}:\r\n fc_sed_list.append(nn.Dropout(dropout_perc))\r\n fc_doa_list.append(nn.Dropout(dropout_perc))\r\n \r\n self.sed = nn.Sequential(*fc_sed_list,\r\n nn.Linear(fc_layers[-1], sed_output_size, bias=use_bias_linear),\r\n nn.Sigmoid())\r\n\r\n self.doa = nn.Sequential(*fc_doa_list,\r\n nn.Linear(fc_layers[-1], doa_output_size, bias=use_bias_linear),\r\n nn.Tanh())\r\n\r\n def forward(self, x):\r\n if self.parallel_ConvTC_block in {'2Parallel','2BParallel','2ParallelBranches','2PB'}:\r\n if self.parallel_magphase:\r\n x_A=torch.cat((x[:,:4,:,:],x[:,8:12,:,:]),1)####X_A MicA mag-phase\r\n x_B=torch.cat((x[:,4:8,:,:],x[:,12:,:,:]),1)####X_B MicB mag-phase\r\n else:\r\n x_A=x[:,:self.input_channels//2,:,:]\r\n x_B=x[:,self.input_channels//2:,:,:]\r\n branch_A=self.branch_A(x_A)\r\n branch_B=self.branch_B(x_B)\r\n x=torch.cat((branch_A,branch_B), 2)\r\n else: \r\n x = 
self.seld_block(x)\r\n sed = self.sed(x)\r\n doa = self.doa(x)\r\n if self.verbose:\r\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\r\n print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\r\n\r\n return sed, doa\r\n\r\n def calculate_receptive_field(self,verbose=0):\r\n receptive_field=1\r\n tcn_block=self.D\r\n kernel_size=self.kernel_size_dilated_conv\r\n total_n_resblocks=0\r\n for i, n_resblock in enumerate(tcn_block):\r\n dilation=1\r\n prec_1=1\r\n prec_2=0\r\n if type(n_resblock)==list:\r\n res_count=0\r\n for d in n_resblock:\r\n res_count+=1\r\n total_n_resblocks+=1\r\n dilation=d\r\n receptive_field+=(kernel_size-1)*(dilation)\r\n if verbose==2:\r\n print('stack ',i+1,' resblock ',res_count,': ',receptive_field)\r\n else:\r\n for d in range(n_resblock):\r\n total_n_resblocks+=1\r\n if self.dilation_mode=='fibonacci':\r\n if d==0:\r\n dilation=1\r\n else:\r\n dilation=prec_1+prec_2\r\n prec_2=prec_1\r\n prec_1=dilation\r\n else:\r\n dilation=2**d\r\n receptive_field+=(kernel_size-1)*(dilation)\r\n if verbose==2:\r\n print('stack ',i+1,' resblock ',d+1,': ',receptive_field)\r\n if verbose==1 or verbose==2:\r\n print(tcn_block,' Receptive field:',receptive_field,', Total number of Resblocks:',total_n_resblocks)\r\n return receptive_field, total_n_resblocks\r\n \r\n\r\n\r\n\r\ndef test_model():\r\n '''\r\n Test model's i/o shapes with the default prepocessing parameters\r\n '''\r\n #create dummy input spectrogram\r\n in_chans = 8\r\n sample = np.ones((in_chans,32000*60))\r\n nperseg = 512\r\n noverlap = 112\r\n sp = uf.spectrum_fast(sample, nperseg=nperseg, noverlap=noverlap, output_phase=False)\r\n sp = torch.tensor(sp.reshape(1,sp.shape[0],sp.shape[1],sp.shape[2])).float()\r\n #create model\r\n #the dimension of the input spectrogram and the pooling/processing dimension of the model\r\n #create 1 prediction (sed and doa) for each 100-milliseconds label frame\r\n\r\n model= SELD_Model(time_dim=sp.shape[-1], freq_dim=256, input_channels=4, output_classes=14,\r\n domain='DQ',domain_classifier='same',\r\n cnn_filters=[64,64,64], kernel_size_cnn_blocks=3, pool_size=[[8,2],[8,2],[2,2]], pool_time='TCN',\r\n D=[10], dilation_mode='fibonacci',G=128, U=128, kernel_size_dilated_conv=3,spatial_dropout_rate=0.5,V=[128,128], V_kernel_size=3,\r\n fc_layers=[128], fc_activations='Linear', fc_dropout='all', dropout_perc=0.3, \r\n class_overlaps=3.,\r\n use_bias_conv=False,use_bias_linear=True,batch_norm='BN',parallel_ConvTC_block='False',parallel_magphase=False,\r\n extra_name='', verbose=False)\r\n print ('\\nTesting model '+ model.model_name)\r\n print ('Input shape: ', sp.shape)\r\n sp=sp.to('cuda:0')\r\n model=model.to('cuda:0')\r\n sed, doa = model(sp)\r\n print ('SED shape: ', sed.shape, \"| DOA shape: \", doa.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\r\n summary(model, input_size=(1,in_chans,256,4800))\r\n print ('\\nTesting model:', model.model_name)\r\n #torch.onnx.export(model, sp, model.model_name+\".onnx\")\r\n \r\nif __name__ == '__main__':\r\n test_model()\r\n", "id": "10814037", "language": "Python", "matching_score": 1.3539434671401978, "max_stars_count": 0, "path": "models/SELD_Model.py" }, { "content": "import numpy as np\r\nimport csv\r\nimport sys\r\nimport pandas as pd\r\nimport torch\r\nimport jiwer\r\nimport librosa\r\nfrom pystoi import stoi\r\nimport transformers\r\nfrom transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer\r\nimport sys, os\r\nimport 
warnings\r\n\r\n'''\r\nFunctions to compute the metrics for the 2 tasks of the L3DAS21 challenge.\r\n- task1_metric returns the metric for task 1.\r\n- location_sensitive_detection returns the metric for task 2.\r\nBoth functions require numpy matrices as input and can compute only 1 batch at a time.\r\nPlease, have a look at the \"evaluation_baseline_taskX.py\" scripts for detailed examples\r\non the use of these functions.\r\n'''\r\n\r\n\r\n#TASK 1 METRICS\r\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\r\ntransformers.logging.set_verbosity_error()\r\n#wer_tokenizer = Wav2Vec2Tokenizer.from_pretrained(\"facebook/wav2vec2-base-960h\");\r\n#wer_model = Wav2Vec2ForMaskedLM.from_pretrained(\"facebook/wav2vec2-base-960h\");\r\n\r\ndef wer(clean_speech, denoised_speech):\r\n \"\"\"\r\n computes the word error rate (WER) score for 1 single data point\r\n \"\"\"\r\n def _transcription(clean_speech, denoised_speech):\r\n\r\n # transcribe clean audio\r\n input_values = wer_tokenizer(clean_speech, return_tensors=\"pt\").input_values;\r\n logits = wer_model(input_values).logits;\r\n predicted_ids = torch.argmax(logits, dim=-1);\r\n transcript_clean = wer_tokenizer.batch_decode(predicted_ids)[0];\r\n\r\n # transcribe denoised audio\r\n input_values = wer_tokenizer(denoised_speech, return_tensors=\"pt\").input_values;\r\n logits = wer_model(input_values).logits;\r\n predicted_ids = torch.argmax(logits, dim=-1);\r\n transcript_estimate = wer_tokenizer.batch_decode(predicted_ids)[0];\r\n\r\n return [transcript_clean, transcript_estimate]\r\n\r\n transcript = _transcription(clean_speech, denoised_speech);\r\n try: #if no words are predicted\r\n wer_val = jiwer.wer(transcript[0], transcript[1])\r\n except ValueError:\r\n wer_val = None\r\n\r\n return wer_val\r\n\r\ndef task1_metric(clean_speech, denoised_speech, sr=16000):\r\n '''\r\n Compute evaluation metric for task 1 as (stoi+(1-word error rate)/2)\r\n This function computes such measure for 1 single datapoint\r\n '''\r\n WER = wer(clean_speech, denoised_speech)\r\n if WER is not None: #if there is no speech in the segment\r\n STOI = stoi(clean_speech, denoised_speech, sr, extended=False)\r\n WER = np.clip(WER, 0., 1.)\r\n STOI = np.clip(STOI, 0., 1.)\r\n metric = (STOI + (1. 
- WER)) / 2.\r\n else:\r\n metric = None\r\n STOI = None\r\n return metric, WER, STOI\r\n\r\ndef compute_se_metrics(predicted_folder, truth_folder, fs=16000):\r\n '''\r\n Load all submitted sounds for task 1 and compute the average metric\r\n '''\r\n METRIC = []\r\n WER = []\r\n STOI = []\r\n predicted_list = [s for s in os.listdir(predicted_folder) if '.wav' in s]\r\n truth_list = [s for s in os.listdir(truth_folder) if '.wav' in s]\r\n n_sounds = len(predicted_list)\r\n for i in range(n_sounds):\r\n name = str(i) + '.wav'\r\n predicted_temp_path = os.path.join(predicted_folder, name)\r\n truth_temp_path = os.path.join(truth_folder, name)\r\n predicted, _ = librosa.load(predicted_temp_path, sr=fs)\r\n truth, _ = librosa.load(truth_temp_path, sr=fs)\r\n metric, wer, stoi = task1_metric(truth, predicted)\r\n METRIC.append(metric)\r\n WER.append(wer)\r\n STOI.append(stoi)\r\n\r\n average_metric = np.mean(METRIC)\r\n average_wer = np.mean(WER)\r\n average_stoi = np.mean(STOI)\r\n\r\n print ('*******************************')\r\n print ('Task 1 metric: ', average_metric)\r\n print ('Word error rate: ', average_wer)\r\n print ('Stoi: ', average_stoi)\r\n\r\n return average_metric\r\n\r\n\r\n#TASK 2 METRICS\r\nsound_classes_dict_task2 = {'Chink_and_clink':0,\r\n 'Computer_keyboard':1,\r\n 'Cupboard_open_or_close':2,\r\n 'Drawer_open_or_close':3,\r\n 'Female_speech_and_woman_speaking':4,\r\n 'Finger_snapping':5,\r\n 'Keys_jangling':6,\r\n 'Knock':7,\r\n 'Laughter':8,\r\n 'Male_speech_and_man_speaking':9,\r\n 'Printer':10,\r\n 'Scissors':11,\r\n 'Telephone':12,\r\n 'Writing':13}\r\n\r\ndef location_sensitive_detection(pred, true, n_frames=100, spatial_threshold=2.,\r\n from_csv=False, verbose=False):\r\n '''\r\n Compute TP, FP, FN of a single data point using\r\n location sensitive detection\r\n '''\r\n TP = 0 #true positives\r\n FP = 0 #false positives\r\n FN = 0 #false negatives\r\n #read csv files into numpy matrices if required\r\n if from_csv:\r\n pred = pd.read_csv(pred, sep=',',header=None)\r\n true = pd.read_csv(true, sep=',',header=None)\r\n pred = pred.values\r\n true = true.values\r\n #build empty dict with a key for each time frame\r\n frames = {}\r\n for i in range(n_frames):\r\n frames[i] = {'p':[], 't':[]}\r\n #fill each time frame key with predicted and true entries for that frame\r\n for i in pred:\r\n frames[i[0]]['p'].append(i)\r\n for i in true:\r\n frames[i[0]]['t'].append(i)\r\n #iterate each time frame:\r\n for frame in range(n_frames):\r\n t = frames[frame]['t'] #all true events for frame i\r\n p = frames[frame]['p'] #all predicted events for frame i\r\n matched = 0 #counts the matching events\r\n\r\n if len(t) == 0: #if there are PREDICTED but not TRUE events\r\n FP += len(p) #all predicted are false positive\r\n elif len(p) == 0: #if there are TRUE but not PREDICTED events\r\n FN += len(t) #all true events are false negatives\r\n\r\n else:\r\n for i_t in range(len(t)): #iterate all true events\r\n match = False #flag for matching events\r\n #count if in each true event there is or not a matching predicted event\r\n true_class = t[i_t][1] #true class\r\n true_coord = t[i_t][-3:] #true coordinates\r\n for i_p in range(len(p)): #compare each true event with all predicted events\r\n pred_class = p[i_p][1] #predicted class\r\n pred_coord = p[i_p][-3:] #predicted coordinates\r\n spat_error = np.linalg.norm(true_coord-pred_coord) #cartesian distance between spatial coords\r\n if true_class == pred_class and spat_error < spatial_threshold: #if prediction is correct (same label + not 
exceeding spatial error threshold)\r\n match = True\r\n if match:\r\n matched += 1 #for each true event, match only once comparing all predicted events\r\n\r\n num_true_items = len(t)\r\n num_pred_items = len(p)\r\n fn = num_true_items - matched\r\n fp = num_pred_items - matched\r\n\r\n #add to counts\r\n TP += matched #number of matches are directly true positives\r\n FN += fn\r\n FP += fp\r\n\r\n precision = TP / (TP + FP + sys.float_info.epsilon)\r\n recall = TP / (TP + FN + sys.float_info.epsilon)\r\n F_score = 2 * ((precision * recall) / (precision + recall + sys.float_info.epsilon))\r\n\r\n results = {'precision': precision,\r\n 'recall': recall,\r\n 'F score': F_score\r\n }\r\n\r\n\r\n if verbose:\r\n print ('true positives: ', TP)\r\n print ('false positives: ', FP)\r\n print ('false negatives: ', FN)\r\n print ('---------------------')\r\n\r\n\r\n print ('*******************************')\r\n print ('F score: ', F_score)\r\n print ('Precision: ', precision)\r\n print ('Recall: ', recall)\r\n print ('TP: ' , TP)\r\n print ('FP: ' , FP)\r\n print ('FN: ' , FN)\r\n\r\n return TP, FP, FN, F_score\r\n\r\n\r\ndef sed_score_computation(pred, true, n_frames=100, spatial_threshold=2., \r\n from_csv=False, verbose=False):\r\n '''\r\n Compute TP, FP, FN of a single data point using\r\n location sensitive detection\r\n '''\r\n TP = 0 #true positives\r\n FP = 0 #false positives\r\n FN = 0 #false negatives\r\n #read csv files into numpy matrices if required\r\n if from_csv:\r\n pred = pd.read_csv(pred, sep=',',header=None)\r\n true = pd.read_csv(true, sep=',',header=None)\r\n pred = pred.values\r\n true = true.values\r\n #build empty dict with a key for each time frame\r\n frames = {}\r\n for i in range(n_frames):\r\n frames[i] = {'p':[], 't':[]}\r\n #fill each time frame key with predicted and true entries for that frame\r\n for i in pred:\r\n frames[i[0]]['p'].append(i)\r\n for i in true:\r\n frames[i[0]]['t'].append(i)\r\n #iterate each time frame:\r\n for frame in range(n_frames):\r\n t = frames[frame]['t'] #all true events for frame i\r\n p = frames[frame]['p'] #all predicted events for frame i\r\n matched = 0 #counts the matching events\r\n\r\n if len(t) == 0: #if there are PREDICTED but not TRUE events\r\n FP += len(p) #all predicted are false positive\r\n elif len(p) == 0: #if there are TRUE but not PREDICTED events\r\n FN += len(t) #all predicted are false negative\r\n\r\n else:\r\n for i_t in range(len(t)): #iterate all true events\r\n match = False #flag for matching events\r\n #count if in each true event there is or not a matching predicted event\r\n true_class = t[i_t][1] #true class\r\n# true_coord = t[i_t][-3:] #true coordinates\r\n for i_p in range(len(p)): #compare each true event with all predicted events\r\n pred_class = p[i_p][1] #predicted class\r\n# pred_coord = p[i_p][-3:] #predicted coordinates\r\n# spat_error = np.linalg.norm(true_coord-pred_coord) #cartesian distance between spatial coords\r\n if true_class == pred_class: #and spat_error < spatial_threshold: #if predicton is correct (same label + not exceeding spatial error threshold)\r\n match = True\r\n if match:\r\n matched += 1 #for each true event, match only once comparing all predicted events\r\n\r\n num_true_items = len(t)\r\n num_pred_items = len(p)\r\n fn = num_true_items - matched\r\n fp = num_pred_items - matched\r\n\r\n #add to counts\r\n TP += matched #number of matches are directly true positives\r\n FN += fn\r\n FP += fp\r\n\r\n precision = TP / (TP + FP + sys.float_info.epsilon)\r\n recall = TP / (TP 
+ FN + sys.float_info.epsilon)\r\n F_score = 2 * ((precision * recall) / (precision + recall + sys.float_info.epsilon))\r\n Nref=TP+FN\r\n Nsys=TP+FP\r\n ER_score = (max(Nref, Nsys) - TP) / (Nref + 0.0)################ from evaluation_metrics.py SELD\r\n sed_score=np.mean([1-F_score,ER_score])\r\n\r\n results = {'precision': precision,\r\n 'recall': recall,\r\n 'F score': F_score\r\n }\r\n\r\n\r\n if verbose:\r\n print ('SED score: ' , sed_score)\r\n\r\n return TP, FP, FN, sed_score\r\ndef compute_seld_metrics(predicted_folder, truth_folder, n_frames=100, spatial_threshold=0.3):\r\n '''\r\n compute F1 score from results folder of submitted results based on the\r\n location sensitive detection metric\r\n '''\r\n TP = 0\r\n FP = 0\r\n FN = 0\r\n predicted_list = [s for s in os.listdir(predicted_folder) if '.csv' in s]\r\n truth_list = [s for s in os.listdir(truth_folder) if '.csv' in s]\r\n n_files = len(predicted_list)\r\n #iterrate each submitted file\r\n for i in range(n_files):\r\n name = predicted_list[i]\r\n predicted_temp_path = os.path.join(predicted_folder, name)\r\n truth_temp_path = os.path.join(truth_folder, name)\r\n #compute tp,fp,fn for each file\r\n tp, fp, fn = location_sensitive_detection(predicted_temp_path,\r\n truth_temp_path,\r\n n_frames,\r\n spatial_threshold)\r\n TP += tp\r\n FP += fp\r\n FN += fn\r\n\r\n #compute total F score\r\n precision = TP / (TP + FP + sys.float_info.epsilon)\r\n recall = TP / (TP + FN + sys.float_info.epsilon)\r\n\r\n print ('*******************************')\r\n F_score = (2 * precision * recall) / (precision + recall + sys.float_info.epsilon)\r\n print ('F score: ', F_score)\r\n print ('Precision: ', precision)\r\n print ('Recall: ', recall)\r\n\r\n return F_score\r\n\r\n\r\n#gen_dummy_seld_results('./prova')\r\n#compute_seld_metric('./prova/pred', './prova/truth')\r\n", "id": "10529562", "language": "Python", "matching_score": 0.30385395884513855, "max_stars_count": 0, "path": "metrics.py" }, { "content": "import math\r\nimport pdb\r\nimport sys\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom numpy.random import RandomState\r\nfrom scipy.stats import chi\r\nfrom torch.autograd import Variable\r\nfrom torch.nn import Module, init\r\nfrom torch.nn.parameter import Parameter\r\n\r\n\r\n\r\nclass DualQuaternionConv(Module):\r\n r\"\"\"Applies a Quaternion Convolution to the incoming data.\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, kernel_size, stride,\r\n dilatation=1, padding=0, groups=1, bias=True, init_criterion='glorot',\r\n weight_init='quaternion', seed=None, operation='convolution2d', rotation=False, quaternion_format=True, scale=False):\r\n\r\n super(DualQuaternionConv, self).__init__()\r\n\r\n self.in_channels = in_channels // 8\r\n self.out_channels = out_channels // 8\r\n self.stride = stride\r\n self.padding = padding\r\n self.groups = groups\r\n self.dilatation = dilatation\r\n self.init_criterion = init_criterion\r\n self.weight_init = weight_init\r\n self.seed = seed if seed is not None else np.random.randint(0,1234)\r\n self.rng = RandomState(self.seed)\r\n self.operation = operation\r\n self.rotation = rotation\r\n self.quaternion_format = quaternion_format\r\n self.winit = {'quaternion': quaternion_init,\r\n 'unitary' : unitary_init,\r\n 'random' : random_init}[self.weight_init]\r\n self.scale = scale\r\n\r\n\r\n (self.kernel_size, self.w_shape) = get_kernel_and_weight_shape( self.operation,\r\n self.in_channels, self.out_channels, kernel_size 
)\r\n\r\n # quaternion 1\r\n self.r_weight = Parameter(torch.Tensor(*self.w_shape))\r\n self.i_weight = Parameter(torch.Tensor(*self.w_shape))\r\n self.j_weight = Parameter(torch.Tensor(*self.w_shape))\r\n self.k_weight = Parameter(torch.Tensor(*self.w_shape))\r\n # quaternion 2\r\n self.r_weight_2 = Parameter(torch.Tensor(*self.w_shape))\r\n self.i_weight_2 = Parameter(torch.Tensor(*self.w_shape))\r\n self.j_weight_2 = Parameter(torch.Tensor(*self.w_shape))\r\n self.k_weight_2 = Parameter(torch.Tensor(*self.w_shape))\r\n\r\n if self.scale:\r\n self.scale_param = Parameter(torch.Tensor(self.r_weight.shape))\r\n else:\r\n self.scale_param = None\r\n\r\n if self.rotation:\r\n self.zero_kernel = Parameter(torch.zeros(self.r_weight.shape), requires_grad=False)\r\n if bias:\r\n self.bias = Parameter(torch.Tensor(out_channels))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n affect_init_conv(self.r_weight, self.i_weight, self.j_weight, self.k_weight,\r\n self.kernel_size, self.winit, self.rng, self.init_criterion, \\\r\n self.r_weight_2, self.i_weight_2, self.j_weight_2, self.k_weight_2)\r\n if self.scale_param is not None:\r\n torch.nn.init.xavier_uniform_(self.scale_param.data)\r\n if self.bias is not None:\r\n self.bias.data.zero_()\r\n\r\n def forward(self, input):\r\n\r\n return dual_quaternion_conv(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, \\\r\n self.r_weight_2, self.i_weight_2, self.j_weight_2, self.k_weight_2, \\\r\n self.bias, self.stride, self.padding, self.groups, self.dilatation)\r\n\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + '(' \\\r\n + 'in_channels=' + str(self.in_channels) \\\r\n + ', out_channels=' + str(self.out_channels) \\\r\n + ', bias=' + str(self.bias is not None) \\\r\n + ', kernel_size=' + str(self.kernel_size) \\\r\n + ', stride=' + str(self.stride) \\\r\n + ', padding=' + str(self.padding) \\\r\n + ', init_criterion=' + str(self.init_criterion) \\\r\n + ', weight_init=' + str(self.weight_init) \\\r\n + ', seed=' + str(self.seed) \\\r\n + ', rotation=' + str(self.rotation) \\\r\n + ', q_format=' + str(self.quaternion_format) \\\r\n + ', operation=' + str(self.operation) + ')'\r\n\r\n\r\n\r\nclass DualQuaternionLinear(Module):\r\n r\"\"\"Applies a quaternion linear transformation to the incoming data.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, bias=True,\r\n init_criterion='he', weight_init='quaternion',\r\n seed=None):\r\n\r\n super(DualQuaternionLinear, self).__init__()\r\n self.in_features = in_features // 8\r\n self.out_features = out_features // 8\r\n # quaternion 1\r\n self.r_weight = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.i_weight = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.j_weight = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.k_weight = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n # quaternion 2\r\n self.r_weight_2 = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.i_weight_2 = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.j_weight_2 = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n self.k_weight_2 = Parameter(torch.Tensor(self.in_features, self.out_features))\r\n\r\n if bias:\r\n self.bias = Parameter(torch.Tensor(self.out_features*8))\r\n else:\r\n self.register_parameter('bias', None)\r\n\r\n self.init_criterion = init_criterion\r\n self.weight_init = 
weight_init\r\n self.seed = seed if seed is not None else np.random.randint(0,1234)\r\n self.rng = RandomState(self.seed)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n winit = {'quaternion': quaternion_init,\r\n 'unitary': unitary_init}[self.weight_init]\r\n if self.bias is not None:\r\n self.bias.data.fill_(0)\r\n affect_init(self.r_weight, self.i_weight, self.j_weight, self.k_weight, \\\r\n self.r_weight_2, self.i_weight_2, self.j_weight_2, self.k_weight_2, \\\r\n winit, self.rng, self.init_criterion)\r\n\r\n def forward(self, input):\r\n # See the autograd section for explanation of what happens here.\r\n if input.dim() == 3:\r\n T, N, C = input.size()\r\n input = input.view(T * N, C)\r\n output = dual_quaternion_linear(input=input, r_weight=self.r_weight, i_weight=self.i_weight, j_weight=self.j_weight, k_weight=self.k_weight, \\\r\n r_weight_2=self.r_weight_2, i_weight_2=self.i_weight_2, j_weight_2=self.j_weight_2, k_weight_2=self.k_weight_2, bias=self.bias)\r\n # output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)\r\n output = output.view(T, N, output.size(1))\r\n elif input.dim() == 2:\r\n output = dual_quaternion_linear(input=input, r_weight=self.r_weight, i_weight=self.i_weight, j_weight=self.j_weight, k_weight=self.k_weight, \\\r\n r_weight_2=self.r_weight_2, i_weight_2=self.i_weight_2, j_weight_2=self.j_weight_2, k_weight_2=self.k_weight_2, bias=self.bias)\r\n # output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)\r\n else:\r\n raise NotImplementedError\r\n\r\n return output\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + '(' \\\r\n + 'in_features=' + str(self.in_features) \\\r\n + ', out_features=' + str(self.out_features) \\\r\n + ', bias=' + str(self.bias is not None) \\\r\n + ', init_criterion=' + str(self.init_criterion) \\\r\n + ', weight_init=' + str(self.weight_init) \\\r\n + ', seed=' + str(self.seed) + ')'\r\n\r\n\r\n#####################################\r\n########## DUAL QUAT OPS ############\r\n#####################################\r\n\r\ndef dual_quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, \\\r\n r_weight_2, i_weight_2, j_weight_2, k_weight_2, bias, stride,\r\n padding, groups, dilatation):\r\n \"\"\"\r\n Applies a dual quaternion convolution to the incoming data:\r\n | q 0 |\r\n | |\r\n | q_e q |\r\n \"\"\"\r\n # quaternion 1\r\n cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)\r\n cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)\r\n cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)\r\n cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)\r\n # quaternion 2\r\n cat_kernels_4_r_2 = torch.cat([r_weight_2, -i_weight_2, -j_weight_2, -k_weight_2], dim=1)\r\n cat_kernels_4_i_2 = torch.cat([i_weight_2, r_weight_2, -k_weight_2, j_weight_2], dim=1)\r\n cat_kernels_4_j_2 = torch.cat([j_weight_2, k_weight_2, r_weight_2, -i_weight_2], dim=1)\r\n cat_kernels_4_k_2 = torch.cat([k_weight_2, -j_weight_2, i_weight_2, r_weight_2], dim=1)\r\n\r\n\r\n # Build the block elements of the weight matrix\r\n cat_kernels_4_quaternion_diagonal_element = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)\r\n cat_kernels_4_quaternion_diagonal_element_2 = torch.cat([cat_kernels_4_r_2, cat_kernels_4_i_2, cat_kernels_4_j_2, cat_kernels_4_k_2], dim=0)\r\n 
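    # The lines below stack these blocks into the block-lower-triangular
    # dual-quaternion weight
    #     | Q    0 |
    #     | Q_e  Q |
    # so the single real-valued convolution at the end of this function realises the
    # dual-quaternion product (q + eps*q_e)(p + eps*p_e) = q*p + eps*(q*p_e + q_e*p):
    # the primal half of the output channels sees only the primal input through Q,
    # while the dual half mixes the primal input (via Q_e) and the dual input (via Q).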
zero_kernels_top_right = torch.zeros_like(cat_kernels_4_quaternion_diagonal_element, requires_grad=False)\r\n row_1 = torch.cat([cat_kernels_4_quaternion_diagonal_element, zero_kernels_top_right], dim=1)\r\n row_2 = torch.cat([cat_kernels_4_quaternion_diagonal_element_2, cat_kernels_4_quaternion_diagonal_element], dim=1)\r\n\r\n weight_matrix = torch.cat([row_1, row_2], dim=0)\r\n\r\n if input.dim() == 3:\r\n convfunc = F.conv1d\r\n elif input.dim() == 4:\r\n convfunc = F.conv2d\r\n elif input.dim() == 5:\r\n convfunc = F.conv3d\r\n else:\r\n raise Exception(\"The convolutional input is either 3, 4 or 5 dimensions.\"\r\n \" input.dim = \" + str(input.dim()))\r\n\r\n return convfunc(input, weight_matrix, bias, stride, padding, dilatation, groups)\r\n\r\n\r\ndef dual_quaternion_linear(input, r_weight, i_weight, j_weight, k_weight, \\\r\n r_weight_2, i_weight_2, j_weight_2, k_weight_2, bias=True):\r\n \"\"\"\r\n Applies a quaternion linear transformation to the incoming data:\r\n\r\n It is important to notice that the forward phase of a QNN is defined\r\n as W * Inputs (with * equal to the Hamilton product). The constructed\r\n cat_kernels_4_quaternion is a modified version of the quaternion representation\r\n so when we do torch.mm(Input,W) it's equivalent to W * Inputs.\r\n\r\n \"\"\"\r\n\r\n # quaternion 1\r\n cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)\r\n cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)\r\n cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)\r\n cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)\r\n # quaternion 2\r\n cat_kernels_4_r_2 = torch.cat([r_weight_2, -i_weight_2, -j_weight_2, -k_weight_2], dim=1)\r\n cat_kernels_4_i_2 = torch.cat([i_weight_2, r_weight_2, -k_weight_2, j_weight_2], dim=1)\r\n cat_kernels_4_j_2 = torch.cat([j_weight_2, k_weight_2, r_weight_2, -i_weight_2], dim=1)\r\n cat_kernels_4_k_2 = torch.cat([k_weight_2, -j_weight_2, i_weight_2, r_weight_2], dim=1)\r\n\r\n\r\n # Build the block elements of the weight matrix\r\n cat_kernels_4_quaternion_diagonal_element = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)\r\n cat_kernels_4_quaternion_diagonal_element_2 = torch.cat([cat_kernels_4_r_2, cat_kernels_4_i_2, cat_kernels_4_j_2, cat_kernels_4_k_2], dim=0)\r\n zero_kernels_top_right = torch.zeros_like(cat_kernels_4_quaternion_diagonal_element, requires_grad=False)\r\n row_1 = torch.cat([cat_kernels_4_quaternion_diagonal_element, zero_kernels_top_right], dim=1)\r\n row_2 = torch.cat([cat_kernels_4_quaternion_diagonal_element_2, cat_kernels_4_quaternion_diagonal_element], dim=1)\r\n\r\n weight_matrix = torch.cat([row_1, row_2], dim=0)\r\n\r\n if input.dim() == 2 :\r\n\r\n if bias is not None:\r\n return torch.addmm(bias, input, weight_matrix)\r\n else:\r\n return torch.mm(input, weight_matrix)\r\n else:\r\n output = torch.matmul(input, weight_matrix)\r\n if bias is not None:\r\n return output+bias\r\n else:\r\n return output\r\n\r\n\r\ndef q_normalize(input, channel=1):\r\n \r\n r = get_r(input)\r\n i = get_i(input)\r\n j = get_j(input)\r\n k = get_k(input)\r\n\r\n norm = torch.sqrt(r*r + i*i + j*j + k*k + 0.0001)\r\n r = r / norm\r\n i = i / norm\r\n j = j / norm\r\n k = k / norm\r\n\r\n return torch.cat([r,i,j,k], dim=channel)\r\n\r\n\r\ndef check_input(input):\r\n\r\n if input.dim() not in {2, 3, 4, 5}:\r\n raise RuntimeError(\r\n \"Quaternion linear accepts only input of dimension 2 or 3. 
Quaternion conv accepts up to 5 dim \"\r\n \" input.dim = \" + str(input.dim())\r\n )\r\n\r\n if input.dim() < 4:\r\n nb_hidden = input.size()[-1]\r\n else:\r\n nb_hidden = input.size()[1]\r\n\r\n if nb_hidden % 4 != 0:\r\n raise RuntimeError(\r\n \"Quaternion Tensors must be divisible by 4.\"\r\n \" input.size()[1] = \" + str(nb_hidden)\r\n )\r\n#\r\n# Getters\r\n#\r\ndef get_r(input):\r\n check_input(input)\r\n if input.dim() < 4:\r\n nb_hidden = input.size()[-1]\r\n else:\r\n nb_hidden = input.size()[1]\r\n\r\n if input.dim() == 2:\r\n return input.narrow(1, 0, nb_hidden // 4)\r\n if input.dim() == 3:\r\n return input.narrow(2, 0, nb_hidden // 4)\r\n if input.dim() >= 4:\r\n return input.narrow(1, 0, nb_hidden // 4)\r\n\r\n\r\ndef get_i(input):\r\n if input.dim() < 4:\r\n nb_hidden = input.size()[-1]\r\n else:\r\n nb_hidden = input.size()[1]\r\n if input.dim() == 2:\r\n return input.narrow(1, nb_hidden // 4, nb_hidden // 4)\r\n if input.dim() == 3:\r\n return input.narrow(2, nb_hidden // 4, nb_hidden // 4)\r\n if input.dim() >= 4:\r\n return input.narrow(1, nb_hidden // 4, nb_hidden // 4)\r\n\r\ndef get_j(input):\r\n check_input(input)\r\n if input.dim() < 4:\r\n nb_hidden = input.size()[-1]\r\n else:\r\n nb_hidden = input.size()[1]\r\n if input.dim() == 2:\r\n return input.narrow(1, nb_hidden // 2, nb_hidden // 4)\r\n if input.dim() == 3:\r\n return input.narrow(2, nb_hidden // 2, nb_hidden // 4)\r\n if input.dim() >= 4:\r\n return input.narrow(1, nb_hidden // 2, nb_hidden // 4)\r\n\r\ndef get_k(input):\r\n check_input(input)\r\n if input.dim() < 4:\r\n nb_hidden = input.size()[-1]\r\n else:\r\n nb_hidden = input.size()[1]\r\n if input.dim() == 2:\r\n return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)\r\n if input.dim() == 3:\r\n return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4)\r\n if input.dim() >= 4:\r\n return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)\r\n\r\n\r\ndef get_modulus(input, vector_form=False):\r\n check_input(input)\r\n r = get_r(input)\r\n i = get_i(input)\r\n j = get_j(input)\r\n k = get_k(input)\r\n if vector_form:\r\n return torch.sqrt(r * r + i * i + j * j + k * k)\r\n else:\r\n return torch.sqrt((r * r + i * i + j * j + k * k).sum(dim=0))\r\n\r\n\r\ndef get_normalized(input, eps=0.0001):\r\n check_input(input)\r\n data_modulus = get_modulus(input)\r\n if input.dim() == 2:\r\n data_modulus_repeated = data_modulus.repeat(1, 4)\r\n elif input.dim() == 3:\r\n data_modulus_repeated = data_modulus.repeat(1, 1, 4)\r\n return input / (data_modulus_repeated.expand_as(input) + eps)\r\n\r\n\r\ndef quaternion_exp(input):\r\n\r\n r = get_r(input)\r\n i = get_i(input)\r\n j = get_j(input)\r\n k = get_k(input)\r\n\r\n\r\n norm_v = torch.sqrt(i*i+j*j+k*k) + 0.0001\r\n exp = torch.exp(r)\r\n\r\n r = torch.cos(norm_v)\r\n i = (i / norm_v) * torch.sin(norm_v)\r\n j = (j / norm_v) * torch.sin(norm_v)\r\n k = (k / norm_v) * torch.sin(norm_v)\r\n\r\n\r\n return torch.cat([exp*r, exp*i, exp*j, exp*k], dim=1)\r\n\r\n\r\n\r\n# Custom AUTOGRAD for lower VRAM consumption\r\nclass DualQuaternionLinearFunction(torch.autograd.Function):\r\n\r\n @staticmethod\r\n def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, r_weight_2, i_weight_2, j_weight_2, k_weight_2, bias=None):\r\n ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, r_weight_2, i_weight_2, j_weight_2, k_weight_2, bias)\r\n check_input(input)\r\n # quaternion 1\r\n cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)\r\n 
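    # The three concatenations that follow complete the real 4x4 block representation
    # of the Hamilton product for the first (primal) quaternion weight; the same
    # pattern is repeated just below for the second (dual) quaternion weight, and the
    # two blocks are then combined into the | Q 0 ; Q_e Q | dual-quaternion matrix,
    # exactly as in dual_quaternion_conv above.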
cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)\r\n cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)\r\n cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)\r\n # quaternion 2\r\n cat_kernels_4_r_2 = torch.cat([r_weight_2, -i_weight_2, -j_weight_2, -k_weight_2], dim=1)\r\n cat_kernels_4_i_2 = torch.cat([i_weight_2, r_weight_2, -k_weight_2, j_weight_2], dim=1)\r\n cat_kernels_4_j_2 = torch.cat([j_weight_2, k_weight_2, r_weight_2, -i_weight_2], dim=1)\r\n cat_kernels_4_k_2 = torch.cat([k_weight_2, -j_weight_2, i_weight_2, r_weight_2], dim=1)\r\n\r\n\r\n # Build the block elements of the weight matrix\r\n cat_kernels_4_quaternion_diagonal_element = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)\r\n cat_kernels_4_quaternion_diagonal_element_2 = torch.cat([cat_kernels_4_r_2, cat_kernels_4_i_2, cat_kernels_4_j_2, cat_kernels_4_k_2], dim=0)\r\n zero_kernels_top_right = torch.zeros_like(cat_kernels_4_quaternion_diagonal_element)\r\n row_1 = torch.cat([cat_kernels_4_quaternion_diagonal_element, zero_kernels_top_right], dim=1)\r\n row_2 = torch.cat([cat_kernels_4_quaternion_diagonal_element_2, cat_kernels_4_quaternion_diagonal_element], dim=1)\r\n\r\n weight_matrix = torch.cat([row_1, row_2], dim=0)\r\n\r\n if input.dim() == 2 :\r\n if bias is not None:\r\n return torch.addmm(bias, input, weight_matrix)\r\n else:\r\n return torch.mm(input, weight_matrix)\r\n else:\r\n output = torch.matmul(input, weight_matrix)\r\n if bias is not None:\r\n return output+bias\r\n else:\r\n return output\r\n\r\n # This function has only a single output, so it gets only one gradient\r\n @staticmethod\r\n def backward(ctx, grad_output, grad_output_2):\r\n\r\n input, r_weight, i_weight, j_weight, k_weight, r_weight_2, i_weight_2, j_weight_2, k_weight_2, bias = ctx.saved_tensors\r\n grad_input = grad_weight_r = grad_weight_i = grad_weight_j = grad_weight_k = grad_bias = None\r\n grad_weight_r_2 = grad_weight_i_2 = grad_weight_j_2 = grad_weight_k_2 = None\r\n\r\n # quaternion 1\r\n input_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)\r\n input_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)\r\n input_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)\r\n input_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)\r\n\r\n # quaternion 2\r\n input_r_2 = torch.cat([r_weight_2, -i_weight_2, -j_weight_2, -k_weight_2], dim=1)\r\n input_i_2 = torch.cat([i_weight_2, r_weight_2, -k_weight_2, j_weight_2], dim=1)\r\n input_j_2 = torch.cat([j_weight_2, k_weight_2, r_weight_2, -i_weight_2], dim=1)\r\n input_k_2 = torch.cat([k_weight_2, -j_weight_2, i_weight_2, r_weight_2], dim=1)\r\n\r\n cat_kernels_4_quaternion_diagonal_element_b = torch.cat([input_r, input_i, input_j, input_k], dim=1)\r\n cat_kernels_4_quaternion_diagonal_element_2_b = torch.cat([input_r_2, input_i_2, input_j_2, input_k_2], dim=1)\r\n\r\n zero_kernels_top_right_b = torch.zeros_like(cat_kernels_4_quaternion_diagonal_element_b)\r\n row_1_b = torch.cat([cat_kernels_4_quaternion_diagonal_element_b, zero_kernels_top_right_b], dim=1)\r\n row_2_b = torch.cat([cat_kernels_4_quaternion_diagonal_element_2_b, cat_kernels_4_quaternion_diagonal_element_b], dim=1)\r\n\r\n weight_matrix_b = Variable(torch.cat([row_1_b, row_2_b], dim=0).permute(1,0), requires_grad=False)\r\n\r\n\r\n r = get_r(input)\r\n i = get_i(input)\r\n j = get_j(input)\r\n k = get_k(input)\r\n r_2 = get_r(input)\r\n i_2 = 
get_i(input)\r\n j_2 = get_j(input)\r\n k_2 = get_k(input)\r\n\r\n input_r = torch.cat([r, -i, -j, -k], dim=0)\r\n input_i = torch.cat([i, r, -k, j], dim=0)\r\n input_j = torch.cat([j, k, r, -i], dim=0)\r\n input_k = torch.cat([k, -j, i, r], dim=0)\r\n input_mat = Variable(torch.cat([input_r, input_i, input_j, input_k], dim=1), requires_grad=False)\r\n input_r_2 = torch.cat([r_2, -i_2, -j_2, -k_2], dim=0)\r\n input_i_2 = torch.cat([i_2, r_2, -k_2, j_2], dim=0)\r\n input_j_2 = torch.cat([j_2, k_2, r_2, -i_2], dim=0)\r\n input_k_2 = torch.cat([k_2, -j_2, i_2, r_2], dim=0)\r\n input_mat_2 = Variable(torch.cat([input_r_2, input_i_2, input_j_2, input_k_2], dim=1), requires_grad=False)\r\n\r\n\r\n r = get_r(grad_output)\r\n i = get_i(grad_output)\r\n j = get_j(grad_output)\r\n k = get_k(grad_output)\r\n r_2 = get_r(grad_output_2)\r\n i_2 = get_i(grad_output_2)\r\n j_2 = get_j(grad_output_2)\r\n k_2 = get_k(grad_output_2)\r\n\r\n input_r = torch.cat([r, i, j, k], dim=1)\r\n input_i = torch.cat([-i, r, k, -j], dim=1)\r\n input_j = torch.cat([-j, -k, r, i], dim=1)\r\n input_k = torch.cat([-k, j, -i, r], dim=1)\r\n grad_mat = torch.cat([input_r, input_i, input_j, input_k], dim=0)\r\n input_r_2 = torch.cat([r_2, i_2, j_2, k_2], dim=1)\r\n input_i_2 = torch.cat([-i_2, r_2, k_2, -j_2], dim=1)\r\n input_j_2 = torch.cat([-j_2, -k_2, r_2, i_2], dim=1)\r\n input_k_2 = torch.cat([-k_2, j_2, -i_2, r_2], dim=1)\r\n grad_mat_2 = torch.cat([input_r_2, input_i_2, input_j_2, input_k_2], dim=0)\r\n\r\n if ctx.needs_input_grad[0]:\r\n grad_input = grad_output.mm(weight_matrix_b)\r\n if ctx.needs_input_grad[1]:\r\n grad_weight = grad_mat.permute(1,0).mm(input_mat).permute(1,0)\r\n unit_size_x = r_weight.size(0)\r\n unit_size_y = r_weight.size(1)\r\n grad_weight_r = grad_weight.narrow(0,0,unit_size_x).narrow(1,0,unit_size_y)\r\n grad_weight_i = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y,unit_size_y)\r\n grad_weight_j = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y*2,unit_size_y)\r\n grad_weight_k = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y*3,unit_size_y)\r\n if ctx.needs_input_grad[5]:\r\n grad_bias = grad_output.sum(0).squeeze(0)\r\n\r\n return grad_input, grad_weight_r, grad_weight_i, grad_weight_j, grad_weight_k, grad_bias\r\n\r\n\r\ndef hamilton_product(q0, q1):\r\n \"\"\"\r\n Applies a Hamilton product q0 * q1:\r\n Shape:\r\n - q0, q1 should be (batch_size, quaternion_number)\r\n (rr' - xx' - yy' - zz') +\r\n (rx' + xr' + yz' - zy')i +\r\n (ry' - xz' + yr' + zx')j +\r\n (rz' + xy' - yx' + zr')k +\r\n \"\"\"\r\n\r\n q1_r = get_r(q1)\r\n q1_i = get_i(q1)\r\n q1_j = get_j(q1)\r\n q1_k = get_k(q1)\r\n\r\n # rr', xx', yy', and zz'\r\n r_base = torch.mul(q0, q1)\r\n # (rr' - xx' - yy' - zz')\r\n r = get_r(r_base) - get_i(r_base) - get_j(r_base) - get_k(r_base)\r\n\r\n # rx', xr', yz', and zy'\r\n i_base = torch.mul(q0, torch.cat([q1_i, q1_r, q1_k, q1_j], dim=1))\r\n # (rx' + xr' + yz' - zy')\r\n i = get_r(i_base) + get_i(i_base) + get_j(i_base) - get_k(i_base)\r\n\r\n # ry', xz', yr', and zx'\r\n j_base = torch.mul(q0, torch.cat([q1_j, q1_k, q1_r, q1_i], dim=1))\r\n # (rx' + xr' + yz' - zy')\r\n j = get_r(j_base) - get_i(j_base) + get_j(j_base) + get_k(j_base)\r\n\r\n # rz', xy', yx', and zr'\r\n k_base = torch.mul(q0, torch.cat([q1_k, q1_j, q1_i, q1_r], dim=1))\r\n # (rx' + xr' + yz' - zy')\r\n k = get_r(k_base) + get_i(k_base) - get_j(k_base) + get_k(k_base)\r\n\r\n return torch.cat([r, i, j, k], dim=1)\r\n\r\n#\r\n# PARAMETERS INITIALIZATION\r\n#\r\n\r\ndef 
unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'):\r\n\r\n if kernel_size is not None:\r\n receptive_field = np.prod(kernel_size)\r\n fan_in = in_features * receptive_field\r\n fan_out = out_features * receptive_field\r\n else:\r\n fan_in = in_features\r\n fan_out = out_features\r\n\r\n\r\n if kernel_size is None:\r\n kernel_shape = (in_features, out_features)\r\n else:\r\n if type(kernel_size) is int:\r\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\r\n else:\r\n kernel_shape = (out_features, in_features) + (*kernel_size,)\r\n\r\n number_of_weights = np.prod(kernel_shape)\r\n v_r = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_i = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_j = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_k = np.random.uniform(-1.0,1.0,number_of_weights)\r\n\r\n # Unitary quaternion\r\n for i in range(0, number_of_weights):\r\n norm = np.sqrt(v_r[i]**2 + v_i[i]**2 + v_j[i]**2 + v_k[i]**2)+0.0001\r\n v_r[i]/= norm\r\n v_i[i]/= norm\r\n v_j[i]/= norm\r\n v_k[i]/= norm\r\n v_r = v_r.reshape(kernel_shape)\r\n v_i = v_i.reshape(kernel_shape)\r\n v_j = v_j.reshape(kernel_shape)\r\n v_k = v_k.reshape(kernel_shape)\r\n\r\n return (v_r, v_i, v_j, v_k)\r\n\r\ndef random_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\r\n\r\n if kernel_size is not None:\r\n receptive_field = np.prod(kernel_size)\r\n fan_in = in_features * receptive_field\r\n fan_out = out_features * receptive_field\r\n else:\r\n fan_in = in_features\r\n fan_out = out_features\r\n\r\n if criterion == 'glorot':\r\n s = 1. / np.sqrt(2*(fan_in + fan_out))\r\n elif criterion == 'he':\r\n s = 1. / np.sqrt(2*fan_in)\r\n else:\r\n raise ValueError('Invalid criterion: ' + criterion)\r\n\r\n if kernel_size is None:\r\n kernel_shape = (in_features, out_features)\r\n else:\r\n if type(kernel_size) is int:\r\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\r\n else:\r\n kernel_shape = (out_features, in_features) + (*kernel_size,)\r\n\r\n number_of_weights = np.prod(kernel_shape)\r\n v_r = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_i = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_j = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_k = np.random.uniform(-1.0,1.0,number_of_weights)\r\n\r\n\r\n\r\n v_r = v_r.reshape(kernel_shape)\r\n v_i = v_i.reshape(kernel_shape)\r\n v_j = v_j.reshape(kernel_shape)\r\n v_k = v_k.reshape(kernel_shape)\r\n\r\n weight_r = v_r\r\n weight_i = v_i\r\n weight_j = v_j\r\n weight_k = v_k\r\n return (weight_r, weight_i, weight_j, weight_k)\r\n\r\n\r\ndef quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\r\n\r\n if kernel_size is not None:\r\n receptive_field = np.prod(kernel_size)\r\n fan_in = in_features * receptive_field\r\n fan_out = out_features * receptive_field\r\n else:\r\n fan_in = in_features\r\n fan_out = out_features\r\n\r\n if criterion == 'glorot':\r\n s = 1. / np.sqrt(2*(fan_in + fan_out))\r\n elif criterion == 'he':\r\n s = 1. 
/ np.sqrt(2*fan_in)\r\n else:\r\n raise ValueError('Invalid criterion: ' + criterion)\r\n\r\n rng = RandomState(np.random.randint(1,1234))\r\n\r\n # Generating randoms and purely imaginary quaternions :\r\n if kernel_size is None:\r\n kernel_shape = (in_features, out_features)\r\n else:\r\n if type(kernel_size) is int:\r\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\r\n else:\r\n kernel_shape = (out_features, in_features) + (*kernel_size,)\r\n\r\n modulus = chi.rvs(4,loc=0,scale=s,size=kernel_shape)\r\n number_of_weights = np.prod(kernel_shape)\r\n v_i = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_j = np.random.uniform(-1.0,1.0,number_of_weights)\r\n v_k = np.random.uniform(-1.0,1.0,number_of_weights)\r\n\r\n # Purely imaginary quaternions unitary\r\n for i in range(0, number_of_weights):\r\n norm = np.sqrt(v_i[i]**2 + v_j[i]**2 + v_k[i]**2 +0.0001)\r\n v_i[i]/= norm\r\n v_j[i]/= norm\r\n v_k[i]/= norm\r\n v_i = v_i.reshape(kernel_shape)\r\n v_j = v_j.reshape(kernel_shape)\r\n v_k = v_k.reshape(kernel_shape)\r\n\r\n phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)\r\n\r\n weight_r = modulus * np.cos(phase)\r\n weight_i = modulus * v_i*np.sin(phase)\r\n weight_j = modulus * v_j*np.sin(phase)\r\n weight_k = modulus * v_k*np.sin(phase)\r\n\r\n return (weight_r, weight_i, weight_j, weight_k)\r\n\r\ndef create_dropout_mask(dropout_p, size, rng, as_type, operation='linear'):\r\n if operation == 'linear':\r\n mask = rng.binomial(n=1, p=1-dropout_p, size=size)\r\n return Variable(torch.from_numpy(mask).type(as_type))\r\n else:\r\n raise Exception(\"create_dropout_mask accepts only 'linear'. Found operation = \"\r\n + str(operation))\r\n\r\ndef affect_init(r_weight, i_weight, j_weight, k_weight, \\\r\n r_weight_2, i_weight_2, j_weight_2, k_weight_2, \\\r\n init_func, rng, init_criterion):\r\n if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\r\n r_weight.size() != k_weight.size() :\r\n raise ValueError('The real and imaginary weights '\r\n 'should have the same size . Found: r:'\r\n + str(r_weight.size()) +' i:'\r\n + str(i_weight.size()) +' j:'\r\n + str(j_weight.size()) +' k:'\r\n + str(k_weight.size()))\r\n\r\n elif r_weight.dim() != 2:\r\n raise Exception('affect_init accepts only matrices. 
Found dimension = '\r\n + str(r_weight.dim()))\r\n kernel_size = None\r\n r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng, kernel_size, init_criterion)\r\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\r\n r_weight.data = r.type_as(r_weight.data)\r\n i_weight.data = i.type_as(i_weight.data)\r\n j_weight.data = j.type_as(j_weight.data)\r\n k_weight.data = k.type_as(k_weight.data)\r\n\r\n r_2, i_2, j_2, k_2 = init_func(r_weight_2.size(0), r_weight_2.size(1), rng, kernel_size, init_criterion)\r\n r_2, i_2, j_2, k_2 = torch.from_numpy(r_2), torch.from_numpy(i_2), torch.from_numpy(j_2), torch.from_numpy(k_2)\r\n r_weight_2.data = r_2.type_as(r_weight_2.data)\r\n i_weight_2.data = i_2.type_as(i_weight_2.data)\r\n j_weight_2.data = j_2.type_as(j_weight_2.data)\r\n k_weight_2.data = k_2.type_as(k_weight_2.data)\r\n\r\n\r\n\r\ndef affect_init_conv(r_weight, i_weight, j_weight, k_weight, kernel_size, init_func, rng,\r\n init_criterion, r_weight_2=None, i_weight_2=None, j_weight_2=None, k_weight_2=None):\r\n if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\r\n r_weight.size() != k_weight.size() :\r\n raise ValueError('The real and imaginary weights '\r\n 'should have the same size . Found: r:'\r\n + str(r_weight.size()) +' i:'\r\n + str(i_weight.size()) +' j:'\r\n + str(j_weight.size()) +' k:'\r\n + str(k_weight.size()))\r\n\r\n elif 2 >= r_weight.dim():\r\n raise Exception('affect_conv_init accepts only tensors that have more than 2 dimensions. Found dimension = '\r\n + str(real_weight.dim()))\r\n\r\n r, i, j, k = init_func(\r\n r_weight.size(1),\r\n r_weight.size(0),\r\n rng=rng,\r\n kernel_size=kernel_size,\r\n criterion=init_criterion\r\n )\r\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\r\n r_weight.data = r.type_as(r_weight.data)\r\n i_weight.data = i.type_as(i_weight.data)\r\n j_weight.data = j.type_as(j_weight.data)\r\n k_weight.data = k.type_as(k_weight.data)\r\n\r\n if r_weight_2 != None:\r\n\r\n r_2, i_2, j_2, k_2 = init_func(\r\n r_weight_2.size(1),\r\n r_weight_2.size(0),\r\n rng=rng,\r\n kernel_size=kernel_size,\r\n criterion=init_criterion\r\n )\r\n r_2, i_2, j_2, k_2 = torch.from_numpy(r_2), torch.from_numpy(i_2), torch.from_numpy(j_2), torch.from_numpy(k_2)\r\n r_weight_2.data = r_2.type_as(r_weight_2.data)\r\n i_weight_2.data = i_2.type_as(i_weight_2.data)\r\n j_weight_2.data = j_2.type_as(j_weight_2.data)\r\n k_weight_2.data = k_2.type_as(k_weight_2.data)\r\n\r\n\r\n\r\ndef get_kernel_and_weight_shape(operation, in_channels, out_channels, kernel_size):\r\n if operation == 'convolution1d':\r\n if type(kernel_size) is not int:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 1d convolution. The kernel size\r\n must be integer in the case. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n else:\r\n ks = kernel_size\r\n w_shape = (out_channels, in_channels) + tuple((ks,))\r\n # w_shape = (out_channels, in_channels) + (ks,)\r\n else:# in case it is 2d or 3d.\r\n if operation == 'convolution2d' and type(kernel_size) is int:\r\n ks = (kernel_size, kernel_size)\r\n elif operation == 'convolution3d' and type(kernel_size) is int:\r\n ks = (kernel_size, kernel_size, kernel_size)\r\n elif type(kernel_size) is not int:\r\n if operation == 'convolution2d' and len(kernel_size) != 2:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 2d convolution. 
The kernel size\r\n must be either an integer or a tuple of 2. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n elif operation == 'convolution3d' and len(kernel_size) != 3:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 3d convolution. The kernel size\r\n must be either an integer or a tuple of 3. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n else:\r\n ks = kernel_size\r\n w_shape = (out_channels, in_channels) + (*ks,)\r\n return ks, w_shape\r\n\r\ndef get_kernel_and_weight_shape_dual(operation, in_channels, out_channels, kernel_size):\r\n if operation == 'convolution1d':\r\n if type(kernel_size) is not int:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 1d convolution. The kernel size\r\n must be integer in the case. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n else:\r\n ks = kernel_size\r\n # w_shape = (out_channels, in_channels) + tuple((ks,))\r\n w_shape = (out_channels, in_channels) + (ks,)\r\n else:# in case it is 2d or 3d.\r\n if operation == 'convolution2d' and type(kernel_size) is int:\r\n ks = (kernel_size, kernel_size)\r\n elif operation == 'convolution3d' and type(kernel_size) is int:\r\n ks = (kernel_size, kernel_size, kernel_size)\r\n elif type(kernel_size) is not int:\r\n if operation == 'convolution2d' and len(kernel_size) != 2:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 2d convolution. The kernel size\r\n must be either an integer or a tuple of 2. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n elif operation == 'convolution3d' and len(kernel_size) != 3:\r\n raise ValueError(\r\n \"\"\"An invalid kernel_size was supplied for a 3d convolution. The kernel size\r\n must be either an integer or a tuple of 3. Found kernel_size = \"\"\" + str(kernel_size)\r\n )\r\n else:\r\n ks = kernel_size\r\n w_shape = (out_channels, in_channels) + (*ks,)\r\n return ks, w_shape", "id": "7004639", "language": "Python", "matching_score": 5.265717029571533, "max_stars_count": 0, "path": "models/dual_quaternion_layers/dual_quat_layers.py" }, { "content": "##########################################################\n# pytorch-qnn v1.0 \n# <NAME>\n# LIA, Université d'Avignon et des Pays du Vaucluse\n# ORKIS, Aix-en-provence\n# October 2018\n##########################################################\n\nimport torch\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom numpy.random import RandomState\n\n\ndef check_input(input):\n if input.dim() not in {2, 3}:\n raise RuntimeError(\n \"quaternion linear accepts only input of dimension 2 or 3.\"\n \" input.dim = \" + str(input.dim())\n )\n\n nb_hidden = input.size()[-1]\n\n if nb_hidden % 4 != 0:\n raise RuntimeError(\n \"Quaternion Tensors must be divisible by 4.\"\n \" input.size()[1] = \" + str(nb_hidden)\n )\n\n\n# Getters #\n\ndef get_r(input):\n check_input(input)\n nb_hidden = input.size()[-1]\n if input.dim() == 2:\n return input.narrow(1, 0, nb_hidden // 4)\n elif input.dim() == 3:\n return input.narrow(2, 0, nb_hidden // 4)\n\n\ndef get_i(input):\n check_input(input)\n nb_hidden = input.size()[-1]\n if input.dim() == 2:\n return input.narrow(1, nb_hidden // 4, nb_hidden // 4)\n if input.dim() == 3:\n return input.narrow(2, nb_hidden // 4, nb_hidden // 4)\n\n\ndef get_j(input):\n check_input(input)\n nb_hidden = input.size()[-1]\n if input.dim() == 2:\n return input.narrow(1, nb_hidden // 2, nb_hidden // 4)\n if input.dim() == 3:\n return input.narrow(2, nb_hidden // 2, nb_hidden // 4)\n\n\ndef get_k(input):\n 
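    # Component layout: a quaternion tensor stores [r | i | j | k] along its last
    # feature dimension (dim 1 for 2-D inputs, dim 2 for 3-D inputs), so the k part
    # is the final quarter selected by narrow() below, mirroring get_r/get_i/get_j.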
check_input(input)\n nb_hidden = input.size()[-1]\n if input.dim() == 2:\n return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)\n if input.dim() == 3:\n return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4)\n\n\ndef get_modulus(input, vector_form=False):\n check_input(input)\n r = get_r(input)\n i = get_i(input)\n j = get_j(input)\n k = get_k(input)\n if vector_form:\n return torch.sqrt(r * r + i * i + j * j + k * k)\n else:\n return torch.sqrt((r * r + i * i + j * j + k * k).sum(dim=0))\n\n\ndef get_normalized(input, eps=0.0001):\n check_input(input)\n data_modulus = get_modulus(input)\n if input.dim() == 2:\n data_modulus_repeated = data_modulus.repeat(1, 4)\n elif input.dim() == 3:\n data_modulus_repeated = data_modulus.repeat(1, 1, 4)\n return input / (data_modulus_repeated.expand_as(input) + eps)\n\n\ndef quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,\n padding, groups, dilatation):\n \"\"\"\n Applies a quaternion convolution to the incoming data:\n \"\"\"\n\n cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=1)\n cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=1)\n cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=1)\n cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=1)\n cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k), dim=0)\n \n if input.dim() == 3:\n conv_func = F.conv1d\n elif input.dim() == 4:\n conv_func = F.conv2d\n elif input.dim() == 5:\n conv_func = F.conv3d\n else:\n raise Exception(\"The convolutional input is either 3, 4 or 5 dimensions.\"\n \" input.dim = \" + str(input.dim()))\n\n return conv_func(input, cat_kernels_4_quaternion, bias, stride, padding, dilatation, groups)\n\n\ndef quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,\n padding, output_padding, groups, dilatation):\n \"\"\"\n Applies a quaternion transposed convolution to the incoming data:\n\n \"\"\"\n\n cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=1)\n cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=1)\n cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=1)\n cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=1)\n cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k), dim=0)\n\n if input.dim() == 3:\n conv_func = F.conv_transpose1d\n elif input.dim() == 4:\n conv_func = F.conv_transpose2d\n elif input.dim() == 5:\n conv_func = F.conv_transpose3d\n else:\n raise Exception(\"The convolutional input is either 3, 4 or 5 dimensions.\"\n \" input.dim = \" + str(input.dim()))\n\n return conv_func(input, cat_kernels_4_quaternion, bias, stride, padding, output_padding, groups, dilatation)\n\n\ndef quaternion_conv_rotation(input, r_weight, i_weight, j_weight, k_weight, bias, stride,\n padding, groups, dilatation, quaternion_format):\n \"\"\"\n Applies a quaternion rotation and convolution transformation to the incoming data:\n\n The rotation W*x*W^t can be replaced by R*x following:\n https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation\n\n Works for unitary and non unitary weights.\n\n The initial size of the input must be a multiple of 3 if quaternion_format = False and\n 4 if quaternion_format = True.\n \"\"\"\n\n square_r = (r_weight * r_weight)\n square_i = (i_weight * i_weight)\n square_j = 
(j_weight * j_weight)\n square_k = (k_weight * k_weight)\n\n norm = torch.sqrt(square_r + square_i + square_j + square_k)\n norm_factor = 2.0 * norm\n\n square_i = norm_factor * (i_weight * i_weight)\n square_j = norm_factor * (j_weight * j_weight)\n square_k = norm_factor * (k_weight * k_weight)\n\n ri = (norm_factor * r_weight * i_weight)\n rj = (norm_factor * r_weight * j_weight)\n rk = (norm_factor * r_weight * k_weight)\n\n ij = (norm_factor * i_weight * j_weight)\n ik = (norm_factor * i_weight * k_weight)\n\n jk = (norm_factor * j_weight * k_weight)\n\n if quaternion_format:\n zero_kernel = torch.zeros(r_weight.shape)\n rot_kernel_1 = torch.cat((zero_kernel, 1.0 - (square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((zero_kernel, ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((zero_kernel, ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n\n zero_kernel2 = torch.zeros(rot_kernel_1.shape)\n global_rot_kernel = torch.cat((zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3), dim=1)\n else:\n rot_kernel_1 = torch.cat((1.0 - (square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n global_rot_kernel = torch.cat((rot_kernel_1, rot_kernel_2, rot_kernel_3), dim=1)\n\n if input.dim() == 3:\n conv_func = F.conv1d\n elif input.dim() == 4:\n conv_func = F.conv2d\n elif input.dim() == 5:\n conv_func = F.conv3d\n else:\n raise Exception(\"The convolutional input is either 3, 4 or 5 dimensions.\"\n \" input.dim = \" + str(input.dim()))\n\n return conv_func(input, global_rot_kernel, bias, stride, padding, dilatation, groups)\n\n\ndef quaternion_transpose_conv_rotation(input, r_weight, i_weight, j_weight, k_weight, bias, stride,\n padding, output_padding, groups, dilatation, quaternion_format):\n \"\"\"\n Applies a quaternion rotation and transposed convolution transformation to the incoming data:\n\n The rotation W*x*W^t can be replaced by R*x following:\n https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation\n\n Works for unitary and non unitary weights.\n\n The initial size of the input must be a multiple of 3 if quaternion_format = False and\n 4 if quaternion_format = True.\n\n \"\"\"\n\n square_r = (r_weight * r_weight)\n square_i = (i_weight * i_weight)\n square_j = (j_weight * j_weight)\n square_k = (k_weight * k_weight)\n\n norm = torch.sqrt(square_r + square_i + square_j + square_k)\n norm_factor = 2.0 * norm\n\n square_i = norm_factor * (i_weight * i_weight)\n square_j = norm_factor * (j_weight * j_weight)\n square_k = norm_factor * (k_weight * k_weight)\n\n ri = (norm_factor * r_weight * i_weight)\n rj = (norm_factor * r_weight * j_weight)\n rk = (norm_factor * r_weight * k_weight)\n\n ij = (norm_factor * i_weight * j_weight)\n ik = (norm_factor * i_weight * k_weight)\n\n jk = (norm_factor * j_weight * k_weight)\n\n if quaternion_format:\n zero_kernel = torch.zeros(r_weight.shape)\n rot_kernel_1 = torch.cat((zero_kernel, 1.0 - (square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((zero_kernel, ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((zero_kernel, ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n\n zero_kernel2 = torch.zeros(rot_kernel_1.shape)\n global_rot_kernel = torch.cat((zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3), dim=1)\n else:\n rot_kernel_1 = torch.cat((1.0 - 
(square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n global_rot_kernel = torch.cat((rot_kernel_1, rot_kernel_2, rot_kernel_3), dim=1)\n\n if input.dim() == 3:\n conv_func = F.conv_transpose1d\n elif input.dim() == 4:\n conv_func = F.conv_transpose2d\n elif input.dim() == 5:\n conv_func = F.conv_transpose3d\n else:\n raise Exception(\"The convolutional input is either 3, 4 or 5 dimensions.\"\n \" input.dim = \" + str(input.dim()))\n\n return conv_func(input, global_rot_kernel, bias, stride, padding, output_padding, groups, dilatation)\n\n\ndef quaternion_linear(input, r_weight, i_weight, j_weight, k_weight, bias=True):\n \"\"\"\n Applies a quaternion linear transformation to the incoming data:\n\n It is important to notice that the forward phase of a QNN is defined\n as W * Inputs (with * equal to the Hamilton product). The constructed\n cat_kernels_4_quaternion is a modified version of the quaternion representation\n so when we do torch.mm(Input,W) it's equivalent to W * Inputs.\n\n \"\"\"\n\n cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=0)\n cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0)\n cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=0)\n cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0)\n cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k), dim=1)\n\n if input.dim() == 2:\n\n if bias is not None:\n return torch.addmm(bias, input, cat_kernels_4_quaternion)\n else:\n return torch.mm(input, cat_kernels_4_quaternion)\n else:\n output = torch.matmul(input, cat_kernels_4_quaternion)\n if bias is not None:\n return output + bias\n else:\n return output\n\n\ndef quaternion_linear_rotation(input, r_weight, i_weight, j_weight, k_weight, bias=None, quaternion_format=False):\n \"\"\"\n Applies a quaternion rotation transformation to the incoming data:\n\n The rotation W*x*W^t can be replaced by R*x following:\n https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation\n\n Works for unitary and non unitary weights.\n\n The initial size of the input must be a multiple of 3 if quaternion_format = False and\n 4 if quaternion_format = True.\n \"\"\"\n\n square_r = (r_weight * r_weight)\n square_i = (i_weight * i_weight)\n square_j = (j_weight * j_weight)\n square_k = (k_weight * k_weight)\n\n norm = torch.sqrt(square_r + square_i + square_j + square_k)\n norm_factor = 2.0 * norm\n\n square_i = norm_factor * (i_weight * i_weight)\n square_j = norm_factor * (j_weight * j_weight)\n square_k = norm_factor * (k_weight * k_weight)\n\n ri = (norm_factor * r_weight * i_weight)\n rj = (norm_factor * r_weight * j_weight)\n rk = (norm_factor * r_weight * k_weight)\n\n ij = (norm_factor * i_weight * j_weight)\n ik = (norm_factor * i_weight * k_weight)\n\n jk = (norm_factor * j_weight * k_weight)\n\n if quaternion_format:\n zero_kernel = torch.zeros(r_weight.shape)\n rot_kernel_1 = torch.cat((zero_kernel, 1.0 - (square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((zero_kernel, ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((zero_kernel, ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n\n zero_kernel2 = torch.zeros(rot_kernel_1.shape)\n global_rot_kernel = torch.cat((zero_kernel2, rot_kernel_1, rot_kernel_2, 
rot_kernel_3), dim=1)\n else:\n rot_kernel_1 = torch.cat((1.0 - (square_j + square_k), ij - rk, ik + rj), dim=0)\n rot_kernel_2 = torch.cat((ij + rk, 1.0 - (square_i + square_k), jk - ri), dim=0)\n rot_kernel_3 = torch.cat((ik - rj, jk + ri, 1.0 - (square_i + square_j)), dim=0)\n global_rot_kernel = torch.cat((rot_kernel_1, rot_kernel_2, rot_kernel_3), dim=1)\n\n if input.dim() == 2:\n if bias is not None:\n return torch.addmm(bias, input, global_rot_kernel)\n else:\n return torch.mm(input, global_rot_kernel)\n else:\n output = torch.matmul(input, global_rot_kernel)\n if bias is not None:\n return output + bias\n else:\n return output\n\n\n# Custom AUTOGRAD for lower VRAM consumption\nclass QuaternionLinearFunction(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):\n ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)\n check_input(input)\n cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=0)\n cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0)\n cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=0)\n cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0)\n cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k),\n dim=1)\n if input.dim() == 2:\n if bias is not None:\n return torch.addmm(bias, input, cat_kernels_4_quaternion)\n else:\n return torch.mm(input, cat_kernels_4_quaternion)\n else:\n output = torch.matmul(input, cat_kernels_4_quaternion)\n if bias is not None:\n return output + bias\n else:\n return output\n\n # This function has only a single output, so it gets only one gradient\n @staticmethod\n def backward(ctx, grad_output):\n\n input, r_weight, i_weight, j_weight, k_weight, bias = ctx.saved_tensors\n grad_input = grad_weight_r = grad_weight_i = grad_weight_j = grad_weight_k = grad_bias = None\n\n input_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=0)\n input_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0)\n input_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=0)\n input_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0)\n cat_kernels_4_quaternion_T = torch.cat((input_r, input_i, input_j, input_k), dim=1).permute(1, 0)\n cat_kernels_4_quaternion_T.requires_grad_(False)\n\n r = get_r(input)\n i = get_i(input)\n j = get_j(input)\n k = get_k(input)\n input_r = torch.cat((r, -i, -j, -k), dim=0)\n input_i = torch.cat((i, r, -k, j), dim=0)\n input_j = torch.cat((j, k, r, -i), dim=0)\n input_k = torch.cat((k, -j, i, r), dim=0)\n input_mat = torch.cat((input_r, input_i, input_j, input_k), dim=1)\n input_mat.requires_grad_(False)\n\n r = get_r(grad_output)\n i = get_i(grad_output)\n j = get_j(grad_output)\n k = get_k(grad_output)\n input_r = torch.cat((r, i, j, k), dim=1)\n input_i = torch.cat((-i, r, k, -j), dim=1)\n input_j = torch.cat((-j, -k, r, i), dim=1)\n input_k = torch.cat((-k, j, -i, r), dim=1)\n grad_mat = torch.cat((input_r, input_i, input_j, input_k), dim=0)\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.mm(cat_kernels_4_quaternion_T)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0)\n unit_size_x = r_weight.size(0)\n unit_size_y = r_weight.size(1)\n grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(1, 0, unit_size_y)\n grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y, 
unit_size_y)\n grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 2, unit_size_y)\n grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 3, unit_size_y)\n if ctx.needs_input_grad[5]:\n grad_bias = grad_output.sum(0).squeeze(0)\n\n return grad_input, grad_weight_r, grad_weight_i, grad_weight_j, grad_weight_k, grad_bias\n\n\ndef hamilton_product(q0, q1):\n \"\"\"\n Applies a Hamilton product q0 * q1:\n Shape:\n - q0, q1 should be (batch_size, quaternion_number)\n (rr' - xx' - yy' - zz') +\n (rx' + xr' + yz' - zy')i +\n (ry' - xz' + yr' + zx')j +\n (rz' + xy' - yx' + zr')k +\n \"\"\"\n\n q1_r = get_r(q1)\n q1_i = get_i(q1)\n q1_j = get_j(q1)\n q1_k = get_k(q1)\n\n # rr', xx', yy', and zz'\n r_base = torch.mul(q0, q1)\n # (rr' - xx' - yy' - zz')\n r = get_r(r_base) - get_i(r_base) - get_j(r_base) - get_k(r_base)\n\n # rx', xr', yz', and zy'\n i_base = torch.mul(q0, torch.cat((q1_i, q1_r, q1_k, q1_j), dim=1))\n # (rx' + xr' + yz' - zy')\n i = get_r(i_base) + get_i(i_base) + get_j(i_base) - get_k(i_base)\n\n # ry', xz', yr', and zx'\n j_base = torch.mul(q0, torch.cat((q1_j, q1_k, q1_r, q1_i), dim=1))\n # (rx' + xr' + yz' - zy')\n j = get_r(j_base) - get_i(j_base) + get_j(j_base) + get_k(j_base)\n\n # rz', xy', yx', and zr'\n k_base = torch.mul(q0, torch.cat((q1_k, q1_j, q1_i, q1_r), dim=1))\n # (rx' + xr' + yz' - zy')\n k = get_r(k_base) + get_i(k_base) - get_j(k_base) + get_k(k_base)\n\n return torch.cat((r, i, j, k), dim=1)\n\n\n# PARAMETERS INITIALIZATION #\n\ndef unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'):\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2 * (fan_in + fan_out))\n elif criterion == 'he':\n s = 1. / np.sqrt(2 * fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.normal(0.0, s, number_of_weights)\n v_i = np.random.normal(0.0, s, number_of_weights)\n v_j = np.random.normal(0.0, s, number_of_weights)\n v_k = np.random.normal(0.0, s, number_of_weights)\n\n # Unitary quaternion\n for i in range(0, number_of_weights):\n norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001\n v_r[i] /= norm\n v_i[i] /= norm\n v_j[i] /= norm\n v_k[i] /= norm\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n return v_r, v_i, v_j, v_k\n\n\ndef random_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2 * (fan_in + fan_out))\n elif criterion == 'he':\n s = 1. 
/ np.sqrt(2 * fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n number_of_weights = np.prod(kernel_shape)\n v_r = np.random.uniform(0.0, 1.0, number_of_weights)\n v_i = np.random.uniform(0.0, 1.0, number_of_weights)\n v_j = np.random.uniform(0.0, 1.0, number_of_weights)\n v_k = np.random.uniform(0.0, 1.0, number_of_weights)\n\n v_r = v_r.reshape(kernel_shape)\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n weight_r = v_r * s\n weight_i = v_i * s\n weight_j = v_j * s\n weight_k = v_k * s\n return weight_r, weight_i, weight_j, weight_k\n\n\ndef quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):\n if kernel_size is not None:\n receptive_field = np.prod(kernel_size)\n fan_in = in_features * receptive_field\n fan_out = out_features * receptive_field\n else:\n fan_in = in_features\n fan_out = out_features\n\n if criterion == 'glorot':\n s = 1. / np.sqrt(2 * (fan_in + fan_out))\n elif criterion == 'he':\n s = 1. / np.sqrt(2 * fan_in)\n else:\n raise ValueError('Invalid criterion: ' + criterion)\n rng = RandomState(123)\n\n # Generating randoms and purely imaginary quaternions :\n if kernel_size is None:\n kernel_shape = (in_features, out_features)\n else:\n if type(kernel_size) is int:\n kernel_shape = (out_features, in_features) + tuple((kernel_size,))\n else:\n kernel_shape = (out_features, in_features) + (*kernel_size,)\n\n number_of_weights = np.prod(kernel_shape)\n v_i = np.random.normal(0.0, s, number_of_weights)\n v_j = np.random.normal(0.0, s, number_of_weights)\n v_k = np.random.normal(0.0, s, number_of_weights)\n\n # Purely imaginary quaternions unitary\n for i in range(0, number_of_weights):\n norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001\n v_i[i] /= norm\n v_j[i] /= norm\n v_k[i] /= norm\n v_i = v_i.reshape(kernel_shape)\n v_j = v_j.reshape(kernel_shape)\n v_k = v_k.reshape(kernel_shape)\n\n modulus = rng.uniform(low=-s, high=s, size=kernel_shape)\n phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)\n\n weight_r = modulus * np.cos(phase)\n weight_i = modulus * v_i * np.sin(phase)\n weight_j = modulus * v_j * np.sin(phase)\n weight_k = modulus * v_k * np.sin(phase)\n\n return weight_r, weight_i, weight_j, weight_k\n\n\ndef create_dropout_mask(dropout_p, size, rng, as_type, operation='linear'):\n if operation == 'linear':\n mask = rng.binomial(n=1, p=1 - dropout_p, size=size)\n return torch.from_numpy(mask).type(as_type)\n else:\n raise Exception(\"create_dropout_mask accepts only 'linear'. Found operation = \" + str(operation))\n\n\ndef affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng, init_criterion):\n if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\n r_weight.size() != k_weight.size():\n raise ValueError('The real and imaginary weights '\n 'should have the same size. Found:'\n + ' r:' + str(r_weight.size())\n + ' i:' + str(i_weight.size())\n + ' j:' + str(j_weight.size())\n + ' k:' + str(k_weight.size()))\n\n elif r_weight.dim() != 2:\n raise Exception('affect_init accepts only matrices. 
Found dimension = ' + str(r_weight.dim()))\n kernel_size = None\n r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng, kernel_size, init_criterion)\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\n r_weight.data = r.type_as(r_weight.data)\n i_weight.data = i.type_as(i_weight.data)\n j_weight.data = j.type_as(j_weight.data)\n k_weight.data = k.type_as(k_weight.data)\n\n\ndef affect_init_conv(r_weight, i_weight, j_weight, k_weight, kernel_size, init_func, rng,\n init_criterion):\n if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size() or \\\n r_weight.size() != k_weight.size():\n raise ValueError('The real and imaginary weights '\n 'should have the same size. Found:'\n + ' r:' + str(r_weight.size())\n + ' i:' + str(i_weight.size())\n + ' j:' + str(j_weight.size())\n + ' k:' + str(k_weight.size()))\n\n elif r_weight.dim() <= 2:\n raise Exception('affect_conv_init accepts only tensors that have more than 2 dimensions. Found dimension = '\n + str(r_weight.dim()))\n\n r, i, j, k = init_func(\n r_weight.size(1),\n r_weight.size(0),\n rng=rng,\n kernel_size=kernel_size,\n criterion=init_criterion\n )\n r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k)\n r_weight.data = r.type_as(r_weight.data)\n i_weight.data = i.type_as(i_weight.data)\n j_weight.data = j.type_as(j_weight.data)\n k_weight.data = k.type_as(k_weight.data)\n\n\ndef get_kernel_and_weight_shape(operation, in_channels, out_channels, kernel_size):\n if operation == 'convolution1d':\n if type(kernel_size) is not int:\n raise ValueError(\n \"\"\"An invalid kernel_size was supplied for a 1d convolution. The kernel size\n must be integer in the case. Found kernel_size = \"\"\" + str(kernel_size)\n )\n else:\n ks = kernel_size\n w_shape = (out_channels, in_channels) + tuple((ks,))\n else: # in case it is 2d or 3d.\n if operation == 'convolution2d' and type(kernel_size) is int:\n ks = (kernel_size, kernel_size)\n elif operation == 'convolution3d' and type(kernel_size) is int:\n ks = (kernel_size, kernel_size, kernel_size)\n elif type(kernel_size) is not int:\n if operation == 'convolution2d' and len(kernel_size) != 2:\n raise ValueError(\n \"\"\"An invalid kernel_size was supplied for a 2d convolution. The kernel size\n must be either an integer or a tuple of 2. Found kernel_size = \"\"\" + str(kernel_size)\n )\n elif operation == 'convolution3d' and len(kernel_size) != 3:\n raise ValueError(\n \"\"\"An invalid kernel_size was supplied for a 3d convolution. The kernel size\n must be either an integer or a tuple of 3. Found kernel_size = \"\"\" + str(kernel_size)\n )\n else:\n ks = kernel_size\n w_shape = (out_channels, in_channels) + (*ks,)\n return ks, w_shape\n", "id": "6268215", "language": "Python", "matching_score": 1.6234058141708374, "max_stars_count": 0, "path": "models/quaternion_layers/quaternion_ops.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.init import normal_\nimport torch.nn.functional as F\n\n\nclass KAF(nn.Module):\n \"\"\" Implementation of the kernel activation function.\n \n Parameters\n ----------\n num_parameters: int\n Size of the layer (number of neurons).\n D: int, optional\n Size of the dictionary for each neuron. Default to 20.\n conv: bool, optional\n True if this is a convolutive layer, False for a feedforward layer. 
Default to False.\n boundary: float, optional\n Dictionary elements are sampled uniformly in [-boundary, boundary]. Default to 4.0.\n init_fcn: None or func, optional\n If None, elements are initialized randomly. Otherwise, elements are initialized to approximate given function.\n kernel: {'gauss', 'relu', 'softplus'}, optional\n Kernel function to be used. Defaults to 'gaussian'.\n \n Example\n ----------\n Neural network with one hidden layer with KAF nonlinearities:\n \n >>> net = Sequential([nn.Linear(10, 20), KAF(20), nn.Linear(20, 1)])\n \n References\n ----------\n [1] <NAME>., <NAME>., <NAME>. and <NAME>., 2019. \n Kafnets: kernel-based non-parametric activation functions for neural networks. \n Neural Networks, 110, pp. 19-32.\n [2] <NAME>., <NAME>., <NAME>. and <NAME>., 2018. \n Learning Neuron Non-Linearities with Kernel-Based Deep Neural Networks. \n arXiv preprint arXiv:1807.06302.\n \"\"\"\n\n def __init__(self, num_parameters, D=20, conv=False, boundary=4.0, init_fcn=None, kernel='gaussian'):\n\n super().__init__()\n self.num_parameters, self.D, self.conv = num_parameters, D, conv\n \n # Initialize the dictionary (NumPy)\n self.dict_numpy = np.linspace(-boundary, boundary, self.D).astype(np.float32).reshape(-1, 1)\n \n # Save the dictionary\n if self.conv:\n self.register_buffer('dict', torch.from_numpy(self.dict_numpy).view(1, 1, 1, 1, -1))\n self.unsqueeze_dim = 4\n else:\n self.register_buffer('dict', torch.from_numpy(self.dict_numpy).view(1, -1))\n self.unsqueeze_dim = 2\n\n # Select appropriate kernel function\n if not (kernel in ['gaussian', 'relu', 'softplus']):\n raise ValueError('Kernel not recognized (must be {gaussian, relu, softplus})')\n \n if kernel == 'gaussian':\n self.kernel_fcn = self.gaussian_kernel\n # Rule of thumb for gamma (only needed for Gaussian kernel)\n interval = (self.dict_numpy[1] - self.dict_numpy[0])\n sigma = 2 * interval # empirically chosen\n self.gamma_init = float(0.5 / np.square(sigma))\n \n # Initialize gamma\n if self.conv:\n self.register_buffer('gamma', torch.from_numpy(np.ones((1, 1, 1, 1, self.D), dtype=np.float32)*self.gamma_init))\n else:\n self.register_buffer('gamma', torch.from_numpy(np.ones((1, 1, self.D), dtype=np.float32)*self.gamma_init))\n \n elif kernel == 'relu':\n self.kernel_fcn = self.relu_kernel\n else:\n self.kernel_fcn = self.softplus_kernel\n\n # Initialize mixing coefficients\n if self.conv:\n self.alpha = Parameter(torch.FloatTensor(1, self.num_parameters, 1, 1, self.D))\n else:\n self.alpha = Parameter(torch.FloatTensor(1, self.num_parameters, self.D))\n \n # Eventually: initialization with kernel ridge regression\n self.init_fcn = init_fcn\n if init_fcn != None:\n \n if kernel == 'gaussian':\n K = np.exp(- self.gamma_init*(self.dict_numpy - self.dict_numpy.T) ** 2)\n elif kernel == 'softplus':\n K = np.log(np.exp(self.dict_numpy - self.dict_numpy.T) + 1.0)\n else:\n #K = np.maximum(self.dict_numpy - self.dict_numpy.T, 0)\n raise ValueError('Cannot perform kernel ridge regression with ReLU kernel (singular matrix)')\n \n self.alpha_init = np.linalg.solve(K + 1e-4 * np.eye(self.D), self.init_fcn(self.dict_numpy)).reshape(-1).astype(np.float32)\n \n else: \n self.alpha_init = None\n \n # Reset the parameters\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.init_fcn != None:\n if self.conv:\n self.alpha.data = torch.from_numpy(self.alpha_init).repeat(1, self.num_parameters, 1, 1, 1)\n else:\n self.alpha.data = torch.from_numpy(self.alpha_init).repeat(1, self.num_parameters, 1)\n else:\n 
normal_(self.alpha.data, std=0.8)\n \n def gaussian_kernel(self, input):\n return torch.exp(- torch.mul((torch.add(input.unsqueeze(self.unsqueeze_dim), - self.dict))**2, self.gamma))\n \n def relu_kernel(self, input):\n return F.relu(input.unsqueeze(self.unsqueeze_dim) - self.dict)\n \n def softplus_kernel(self, input):\n return F.softplus(input.unsqueeze(self.unsqueeze_dim) - self.dict)\n \n def forward(self, input):\n K = self.kernel_fcn(input)\n y = torch.sum(K*self.alpha, self.unsqueeze_dim)\n return y\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.num_parameters) + ')'", "id": "2124848", "language": "Python", "matching_score": 5.430643081665039, "max_stars_count": 63, "path": "pytorch/kafnets.py" }, { "content": "import numpy as np\nfrom keras.layers import Layer\nfrom keras import backend as K\n\nclass KAF(Layer):\n \"\"\" Implementation of the kernel activation function.\n \n Parameters\n ----------\n num_parameters: int\n Size of the layer (number of neurons).\n D: int, optional\n Size of the dictionary for each neuron. Default to 20.\n conv: bool, optional\n True if this is a convolutive layer, False for a feedforward layer. Default to False.\n boundary: float, optional\n Dictionary elements are sampled uniformly in [-boundary, boundary]. Default to 4.0.\n init_fcn: None or func, optional\n If None, elements are initialized randomly. Otherwise, elements are initialized to approximate given function.\n kernel: {'gauss', 'relu', 'softplus'}, optional\n Kernel function to be used. Defaults to 'gaussian'.\n \n Example\n ----------\n Neural network with one hidden layer with KAF nonlinearities:\n \n >>> net = Sequential([Dense(10), KAF(10), Dense(10, 1)])\n \n References\n ----------\n [1] <NAME>., <NAME>., <NAME>. and <NAME>., 2019. \n Kafnets: kernel-based non-parametric activation functions for neural networks. \n Neural Networks, 110, pp. 19-32.\n [2] <NAME>., <NAME>., <NAME>. and <NAME>., 2018. \n Learning Neuron Non-Linearities with Kernel-Based Deep Neural Networks. 
\n arXiv preprint arXiv:1807.06302.\n \"\"\"\n\n def __init__(self, num_parameters, D=20, boundary=3.0, conv=False, init_fcn=None, kernel='gaussian', **kwargs):\n self.num_parameters = num_parameters\n self.D = D\n self.boundary = boundary\n self.init_fcn = init_fcn\n self.conv = conv\n if self.conv:\n self.unsqueeze_dim = 4\n else:\n self.unsqueeze_dim = 2\n self.kernel = kernel\n if not (kernel in ['gaussian', 'relu', 'softplus']):\n raise ValueError('Kernel not recognized (must be {gaussian, relu, softplus})')\n super().__init__(**kwargs)\n \n def build(self, input_shape):\n\n # Initialize the fixed dictionary\n d = np.linspace(-self.boundary, self.boundary, self.D).astype(np.float32).reshape(-1, 1)\n \n if self.conv:\n self.dict = self.add_weight(name='dict', \n shape=(1, 1, 1, 1, self.D),\n initializer='uniform',\n trainable=False)\n K.set_value(self.dict, d.reshape(1, 1, 1, 1, -1))\n else:\n self.dict = self.add_weight(name='dict', \n shape=(1, 1, self.D),\n initializer='uniform',\n trainable=False)\n K.set_value(self.dict, d.reshape(1, 1, -1))\n \n if self.kernel == 'gaussian':\n self.kernel_fcn = self.gaussian_kernel\n # Rule of thumb for gamma\n interval = (d[1] - d[0])\n sigma = 2 * interval # empirically chosen\n self.gamma = 0.5 / np.square(sigma)\n elif self.kernel == 'softplus':\n self.kernel_fcn = self.softplus_kernel\n else:\n self.kernel_fcn = self.relu_kernel\n \n \n # Mixing coefficients\n if self.conv:\n self.alpha = self.add_weight(name='alpha', \n shape=(1, 1, 1, self.num_parameters, self.D),\n initializer='normal',\n trainable=True)\n else:\n self.alpha = self.add_weight(name='alpha', \n shape=(1, self.num_parameters, self.D),\n initializer='normal',\n trainable=True)\n\n # Optional initialization with kernel ridge regression\n if self.init_fcn is not None:\n if self.kernel == 'gaussian':\n kernel_matrix = np.exp(- self.gamma*(d - d.T) ** 2)\n elif self.kernel == 'softplus':\n kernel_matrix = np.log(np.exp(d - d.T) + 1.0)\n else:\n raise ValueError('Cannot perform kernel ridge regression with ReLU kernel (singular matrix)')\n \n alpha_init = np.linalg.solve(kernel_matrix + 1e-5*np.eye(self.D), self.init_fcn(d)).reshape(-1)\n if self.conv:\n K.set_value(self.alpha, np.repeat(alpha_init.reshape(1, 1, 1, 1, -1), self.num_parameters, axis=3))\n else:\n K.set_value(self.alpha, np.repeat(alpha_init.reshape(1, 1, -1), self.num_parameters, axis=1))\n \n super(KAF, self).build(input_shape)\n \n def gaussian_kernel(self, x):\n return K.exp(- self.gamma * (K.expand_dims(x, axis=self.unsqueeze_dim) - self.dict) ** 2.0)\n \n def softplus_kernel(self, x):\n return K.softplus(K.expand_dims(x, axis=self.unsqueeze_dim) - self.dict)\n \n def relu_kernel(self, x):\n return K.relu(K.expand_dims(x, axis=self.unsqueeze_dim) - self.dict)\n \n def call(self, x):\n kernel_matrix = self.kernel_fcn(x)\n return K.sum(kernel_matrix * self.alpha, axis=self.unsqueeze_dim)\n \n def get_config(self):\n return {'num_parameters': self.num_parameters,\n 'D': self.D,\n 'boundary': self.boundary,\n 'conv': self.conv,\n 'init_fcn': self.init_fcn,\n 'kernel': self.kernel\n }\n", "id": "5487758", "language": "Python", "matching_score": 3.5805907249450684, "max_stars_count": 63, "path": "keras/kafnets.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport autograd.numpy as np\n\ndef init_kaf_nn(layer_sizes, scale=0.01, rs=np.random.RandomState(0), dict_size=20, boundary=3.0):\n \"\"\" \n Initialize the parameters of a KAF feedforward network.\n - dict_size: the size of the dictionary for every neuron.\n - 
boundary: the boundary for the activation functions.\n \"\"\"\n \n # Initialize the dictionary\n D = np.linspace(-boundary, boundary, dict_size).reshape(-1, 1)\n \n # Rule of thumb for gamma\n interval = D[1,0] - D[0,0];\n gamma = 0.5/np.square(2*interval)\n D = D.reshape(1, 1, -1)\n \n # Initialize a list of parameters for the layer\n w = [(rs.randn(insize, outsize) * scale, # Weight matrix\n rs.randn(outsize) * scale, # Bias vector\n rs.randn(1, outsize, dict_size) * 0.5) # Mixing coefficients\n for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]\n \n return w, (D, gamma)\n\ndef predict_kaf_nn(w, X, info):\n \"\"\"\n Compute the outputs of a KAF feedforward network.\n \"\"\"\n \n D, gamma = info\n for W, b, alpha in w:\n outputs = np.dot(X, W) + b\n K = gauss_kernel(outputs, D, gamma)\n X = np.sum(K*alpha, axis=2)\n return X\n\ndef gauss_kernel(X, D, gamma=1.0):\n \"\"\"\n Compute the 1D Gaussian kernel between all elements of a \n NxH matrix and a fixed L-dimensional dictionary, resulting in a NxHxL matrix of kernel\n values.\n \"\"\"\n return np.exp(- gamma*np.square(X.reshape(-1, X.shape[1], 1) - D))", "id": "1562485", "language": "Python", "matching_score": 0.30618607997894287, "max_stars_count": 63, "path": "autograd/kafnets.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple demo using kernel activation functions with convolutional networks on the MNIST dataset.\n\"\"\"\n\n# Import TensorFlow\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\ntf.enable_eager_execution()\n\n# Keras imports\nfrom tensorflow.keras import datasets\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\n\n# Custom imports\nfrom kafnets import KAF\nimport tqdm\n\n# Load Breast Cancer dataset\n(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()\n\n# Preprocessing is taken from here:\n# https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n \nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n\n# Initialize a KAF neural network\nkafnet = Sequential()\nkafnet.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))\nkafnet.add(KAF(32, conv=True))\nkafnet.add(Conv2D(32, (3, 3)))\nkafnet.add(KAF(32, conv=True))\nkafnet.add(MaxPooling2D(pool_size=(2, 2)))\nkafnet.add(Flatten())\nkafnet.add(Dense(100))\nkafnet.add(KAF(100))\nkafnet.add(Dense(10, activation='softmax'))\n\n# Use tf.data DataLoader\ntrain_data = tf.data.Dataset.from_tensor_slices((X_train.astype(np.float32), y_train.astype(np.int64)))\ntest_data = tf.data.Dataset.from_tensor_slices((X_test.astype(np.float32), y_test.astype(np.int64)))\n\n# Optimizer\nopt = tf.train.AdamOptimizer()\n\n# Training\nfor e in tqdm.trange(5, desc='Training'):\n \n for xb, yb in train_data.shuffle(1000).batch(32):\n \n with tfe.GradientTape() as tape:\n loss = tf.losses.sparse_softmax_cross_entropy(yb, kafnet(xb))\n g = tape.gradient(loss, kafnet.variables)\n opt.apply_gradients(zip(g, kafnet.variables))\n\n # Evaluation\n acc = tfe.metrics.Accuracy()\n for xb, yb in test_data.batch(32):\n acc(yb, tf.argmax(kafnet(xb), axis=1))\n tqdm.tqdm.write('Test accuracy after epoch {} is: '.format(e+1) + str(acc.result()))", "id": "3054765", "language": "Python", "matching_score": 6.970926761627197, "max_stars_count": 63, "path": "tensorflow/demo_kaf_convolutional.py" }, { 
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple demo using kernel activation functions on a basic regression dataset.\n\"\"\"\n\n# Import TensorFlow\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\ntf.enable_eager_execution()\n\n# Keras imports\nfrom tensorflow.keras import datasets\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n# Custom imports\nfrom kafnets import KAF\nimport tqdm\n\n# Load Breast Cancer dataset\n(X_train, y_train), (X_test, y_test) = datasets.boston_housing.load_data()\n\n# Initialize a KAF neural network\nkafnet = Sequential([\n Dense(20, input_shape=(13,)),\n KAF(20),\n Dense(1),\n])\n\n#Uncomment to use KAF with Softplus kernel\n#kafnet = Sequential([\n# Dense(20, input_shape=(13,)),\n# KAF(20, kernel='softplus', D=5),\n# Dense(1),\n#])\n\n# Use tf.data DataLoader\ntrain_data = tf.data.Dataset.from_tensor_slices((X_train.astype(np.float32), y_train.reshape(-1, 1)))\ntest_data = tf.data.Dataset.from_tensor_slices((X_test.astype(np.float32), y_test.astype(np.float32).reshape(-1, 1)))\n\n# Optimizer\nopt = tf.train.AdamOptimizer()\n\n# Training\nfor e in tqdm.trange(300, desc='Training'):\n \n for xb, yb in train_data.shuffle(1000).batch(32):\n \n with tfe.GradientTape() as tape:\n loss = tf.losses.mean_squared_error(yb, kafnet(xb))\n g = tape.gradient(loss, kafnet.variables)\n opt.apply_gradients(zip(g, kafnet.variables))\n\n# Evaluation\nerr = tfe.metrics.Mean()\nfor xb, yb in test_data.batch(32):\n err((yb - kafnet(xb))**2)\nprint('Final error is: ' + str(err.result()))\n", "id": "3656371", "language": "Python", "matching_score": 3.6740403175354004, "max_stars_count": 63, "path": "tensorflow/demo_kaf_feedforward.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple demo using kernel activation functions with convolutional networks on the MNIST dataset.\n\"\"\"\n\n# Keras imports\nfrom keras import datasets\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\nfrom keras.utils import to_categorical\nimport keras.backend as K\n\n# Custom imports\nfrom kafnets import KAF\n\n# Load Breast Cancer dataset\n(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()\n\n# Preprocessing is taken from here:\n# https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py\nif K.image_data_format() == 'channels_first':\n X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)\n X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)\nelse:\n X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n \nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n# convert class vectors to binary class matrices\ny_train = to_categorical(y_train, 10)\ny_test = to_categorical(y_test, 10)\n\n# Initialize a KAF neural network\nkafnet = Sequential()\nkafnet.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))\nkafnet.add(KAF(32, conv=True))\nkafnet.add(Conv2D(32, (3, 3)))\nkafnet.add(KAF(32, conv=True))\nkafnet.add(MaxPooling2D(pool_size=(2, 2)))\nkafnet.add(Flatten())\nkafnet.add(Dense(100))\nkafnet.add(KAF(100))\nkafnet.add(Dense(10, activation='softmax'))\n\n# Training\nkafnet.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nkafnet.summary()\nkafnet.fit(X_train, y_train, epochs=5, batch_size=32, verbose=1)\n\n# Evaluation\nprint('Final accuracy is: ' + str(kafnet.evaluate(X_test, y_test, 
batch_size=64)[1]))", "id": "3748538", "language": "Python", "matching_score": 4.537148475646973, "max_stars_count": 63, "path": "keras/demo_kaf_convolutional.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple demo using kernel activation functions on a basic regression dataset.\n\"\"\"\n\n# Keras imports\nfrom keras import datasets\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Custom imports\nfrom kafnets import KAF\n\n# Load Breast Cancer dataset\n(X_train, y_train), (X_test, y_test) = datasets.boston_housing.load_data()\n\n# Initialize a KAF neural network\nkafnet = Sequential([\n Dense(20, input_shape=(13,)),\n KAF(20),\n Dense(1),\n])\n\n#Uncomment to use KAF with Softplus kernel\n#kafnet = Sequential([\n# Dense(20, input_shape=(13,)),\n# KAF(20, kernel='softplus', D=5),\n# Dense(1),\n#])\n\n# Training\nkafnet.compile(optimizer='adam', loss='mse')\nkafnet.summary()\nkafnet.fit(X_train, y_train, epochs=250, batch_size=32, verbose=0)\n\n# Evaluation\nprint('Final error is: ' + str(kafnet.evaluate(X_test, y_test, batch_size=64)))", "id": "7764669", "language": "Python", "matching_score": 1.642938256263733, "max_stars_count": 63, "path": "keras/demo_kaf_feedforward.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple demo using kernel activation functions with convolutional layers on the MNIST dataset.\n\"\"\"\n\n# Imports from Python libraries\nimport numpy as np\nimport tqdm\n\n# PyTorch imports\nimport torch\nimport torch.utils.data\nfrom torchvision import datasets, transforms\nfrom torch.nn import Module\n\n# Custom imports\nfrom kafnets import KAF\n\n# Set seed for PRNG\nnp.random.seed(1)\ntorch.manual_seed(1)\n\n# Enable CUDA (optional)\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n# Load MNIST dataset\ntrain_loader = torch.utils.data.DataLoader(datasets.MNIST('data/MNIST', train=True, download=True,\n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=32, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(datasets.MNIST('data/MNIST', train=False, transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=32, shuffle=True)\n\nclass Flatten(Module):\n \"\"\"\n Simple flatten module, see this discussion:\n https://discuss.pytorch.org/t/flatten-layer-of-pytorch-build-by-sequential-container/5983\n \"\"\"\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n# Initialize a KAF neural network\nkafnet = torch.nn.Sequential(\n torch.nn.Conv2d(1, 20, kernel_size=5, padding=(2,2)),\n torch.nn.MaxPool2d(3),\n KAF(20, conv=True),\n torch.nn.Conv2d(20, 20, kernel_size=5, padding=(2,2)),\n torch.nn.MaxPool2d(3),\n KAF(20, conv=True),\n Flatten(),\n torch.nn.Linear(180, 10),\n)\n\n# Reset parameters\nfor m in kafnet:\n if len(m._parameters) > 0:\n m.reset_parameters()\n\nprint('Training: **KAFNET**', flush=True)\n\n# Loss function\nloss_fn = torch.nn.CrossEntropyLoss()\n\n# Build optimizer\noptimizer = torch.optim.Adam(kafnet.parameters(), weight_decay=1e-4)\n\n# Put model on GPU if needed\nkafnet.to(device)\n\nmax_epochs = 10\nfor idx_epoch in range(max_epochs):\n\n print('Epoch #', idx_epoch, ' of #', max_epochs)\n kafnet.train()\n\n for (X_batch, y_batch) in tqdm.tqdm(train_loader):\n\n # Eventually move mini-batch to GPU\n X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n\n # Forward pass: compute predicted y by passing x to the model.\n y_pred = kafnet(X_batch)\n\n # Compute 
loss.\n loss = loss_fn(y_pred, y_batch)\n\n # Zeroes out all gradients\n optimizer.zero_grad()\n\n # Backward pass\n loss.backward()\n\n # Update parameters\n optimizer.step()\n\n# Compute final test score\nwith torch.no_grad():\n print('Computing test score for: **KAFNET**', flush=True)\n kafnet.eval()\n acc = 0\n for _, (X_batch, y_batch) in enumerate(test_loader):\n # Eventually move mini-batch to GPU\n X_batch = X_batch.to(device)\n acc += np.sum(y_batch.numpy() == np.argmax(kafnet(X_batch).cpu().numpy(), axis=1))\n print('Final score on test set: ', acc / test_loader.dataset.__len__())\n", "id": "6139761", "language": "Python", "matching_score": 1.7665845155715942, "max_stars_count": 63, "path": "pytorch/demo_kaf_convolutional.py" }, { "content": "__license__ = \"MIT\"\r\n\r\nimport math\r\nimport torch\r\nfrom typing import List\r\nfrom torch.nn import ModuleList, Dropout, ReLU, Linear\r\nfrom torch_geometric.nn import GCNConv\r\nfrom torch_geometric.data import Data, InMemoryDataset\r\nfrom torch_geometric.nn.conv import MessagePassing\r\nfrom torch_geometric.utils.dropout import dropout_adj \r\n\r\n\r\nclass AdaptivePropagation(MessagePassing):\r\n def __init__(self, niter: int, h_size: int, bias = True, **kwargs):\r\n super(AdaptivePropagation, self).__init__(aggr='add', **kwargs)\r\n\r\n self.niter = niter\r\n self.halt = Linear(h_size,1)\r\n self.reg_params = list(self.halt.parameters())\r\n self.dropout = Dropout()\r\n self.reset_parameters()\r\n \r\n def reset_parameters(self):\r\n self.halt.reset_parameters()\r\n x = (self.niter+1) // 1\r\n b = math.log((1/x)/(1-(1/x)))\r\n self.halt.bias.data.fill_(b)\r\n\r\n def forward(self, local_preds: torch.FloatTensor, edge_index):\r\n sz = local_preds.size(0)\r\n steps = torch.ones(sz).to(local_preds.device)\r\n sum_h = torch.zeros(sz).to(local_preds.device)\r\n continue_mask = torch.ones(sz, dtype=torch.bool).to(local_preds.device)\r\n x = torch.zeros_like(local_preds).to(local_preds.device)\r\n\r\n prop = self.dropout(local_preds)\r\n for i in range(0, self.niter):\r\n \r\n old_prop = prop\r\n continue_fmask = continue_mask.type('torch.FloatTensor').to(local_preds.device)\r\n \r\n drop_edge_index, _ = dropout_adj(edge_index, training=self.training)\r\n drop_edge_index, drop_norm = GCNConv.norm(drop_edge_index,sz) \r\n\r\n prop = self.propagate(drop_edge_index, x=prop, norm=drop_norm) \r\n\r\n h = torch.sigmoid(self.halt(prop)).t().squeeze()\r\n prob_mask = (((sum_h+h) < 0.99) & continue_mask).squeeze()\r\n prob_fmask = prob_mask.type('torch.FloatTensor').to(local_preds.device)\r\n\r\n steps = steps + prob_fmask \r\n sum_h = sum_h + prob_fmask * h \r\n\r\n final_iter = steps <= self.niter\r\n \r\n condition = prob_mask & final_iter\r\n p = torch.where(condition, sum_h, 1-sum_h)\r\n \r\n to_update = self.dropout(continue_fmask)[:,None]\r\n x = x + (prop * p[:,None] +\r\n old_prop * (1-p)[:,None])*to_update\r\n \r\n continue_mask = continue_mask & prob_mask\r\n\r\n if (~continue_mask).all():\r\n break\r\n\r\n x = x / steps[:,None]\r\n \r\n return x, (steps-1), (1-sum_h)\r\n\r\n def message(self, x_j, norm):\r\n return norm.view(-1, 1) * x_j\r\n\r\n \r\n\r\nclass APGCN(torch.nn.Module):\r\n def __init__(self,\r\n dataset: InMemoryDataset,\r\n niter: float = 10,\r\n prop_penalty: float = 0.005,\r\n hidden: List[int] = [64],\r\n dropout: float = 0.5):\r\n super(APGCN, self).__init__()\r\n\r\n num_features = [dataset.data.x.shape[1]] + hidden + [dataset.num_classes]\r\n \r\n layers = []\r\n for in_features, out_features in 
zip(num_features[:-1], num_features[1:]):\r\n layers.append(Linear(in_features, out_features))\r\n \r\n self.prop = AdaptivePropagation(niter,dataset.num_classes)\r\n self.prop_penalty = prop_penalty\r\n self.layers = ModuleList(layers)\r\n self.reg_params = list(layers[0].parameters())\r\n self.non_reg_params = list([p for l in layers[1:] for p in l.parameters()])\r\n\r\n self.dropout = Dropout(p=dropout)\r\n self.act_fn = ReLU()\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n self.prop.reset_parameters()\r\n for layer in self.layers:\r\n layer.reset_parameters()\r\n\r\n def forward(self, data: Data):\r\n \r\n x, edge_index = data.x, data.edge_index\r\n for i, layer in enumerate(self.layers):\r\n x = layer(self.dropout(x))\r\n\r\n if i == len(self.layers) - 1:\r\n break\r\n\r\n x = self.act_fn(x)\r\n\r\n x, steps, reminders = self.prop(x, edge_index)\r\n\r\n return torch.nn.functional.log_softmax(x, dim=1), steps, reminders\r\n\r\n", "id": "286006", "language": "Python", "matching_score": 1.3167338371276855, "max_stars_count": 1, "path": "models.py" }, { "content": "__author__ = \"<NAME> and <NAME>\"\r\n__license__ = \"MIT\"\r\n\r\nimport os\r\n\r\nimport numpy as np\r\nfrom scipy.linalg import expm\r\n\r\nimport torch\r\nfrom torch_geometric.data import Data, InMemoryDataset\r\nfrom torch_geometric.datasets import Planetoid, Amazon, Coauthor\r\nfrom io_data import load_dataset\r\nfrom seeds import development_seed\r\nimport scipy.sparse as sp\r\nimport scipy.sparse.linalg as spla\r\n\r\nDATA_PATH = 'data'\r\n\r\ndef normalize_attributes(attr_matrix):\r\n epsilon = 1e-12\r\n if isinstance(attr_matrix, sp.csr_matrix):\r\n attr_norms = spla.norm(attr_matrix, ord=1, axis=1)\r\n attr_invnorms = 1 / np.maximum(attr_norms, epsilon)\r\n attr_mat_norm = attr_matrix.multiply(attr_invnorms[:, np.newaxis])\r\n else:\r\n attr_norms = np.linalg.norm(attr_matrix, ord=1, axis=1)\r\n attr_invnorms = 1 / np.maximum(attr_norms, epsilon)\r\n attr_mat_norm = attr_matrix * attr_invnorms[:, np.newaxis]\r\n return attr_mat_norm\r\n\r\n\r\ndef get_dataset(name: str, use_lcc: bool = True) -> InMemoryDataset:\r\n dataset = InMemoryDataset\r\n graph = load_dataset(name)\r\n graph.standardize(select_lcc=True)\r\n new_y = torch.LongTensor(graph.labels)\r\n data = Data(\r\n x=torch.FloatTensor(normalize_attributes(graph.attr_matrix).toarray()),\r\n edge_index=torch.LongTensor(graph.get_edgeid_to_idx_array().T),\r\n y=new_y,\r\n train_mask=torch.zeros(new_y.size(0), dtype=torch.bool),\r\n test_mask=torch.zeros(new_y.size(0), dtype=torch.bool),\r\n val_mask=torch.zeros(new_y.size(0), dtype=torch.bool)\r\n )\r\n dataset.data = data\r\n dataset.num_classes =len(np.unique(new_y))\r\n return dataset\r\n\r\n\r\ndef set_train_val_test_split(\r\n seed: int,\r\n data: Data,\r\n num_development: int = 1500,\r\n num_per_class: int = 20) -> Data:\r\n rnd_state = np.random.RandomState(development_seed)\r\n num_nodes = data.y.shape[0]\r\n development_idx = rnd_state.choice(num_nodes, num_development, replace=False)\r\n test_idx = [i for i in np.arange(num_nodes) if i not in development_idx]\r\n\r\n train_idx = []\r\n rnd_state = np.random.RandomState(seed)\r\n for c in range(data.y.max() + 1):\r\n class_idx = development_idx[np.where(data.y[development_idx].cpu() == c)[0]]\r\n train_idx.extend(rnd_state.choice(class_idx, num_per_class, replace=False))\r\n\r\n val_idx_tmp = [i for i in development_idx if i not in train_idx]\r\n\r\n val_idx = rnd_state.choice(val_idx_tmp, 500, replace=False)\r\n def 
get_mask(idx):\r\n mask = torch.zeros(num_nodes, dtype=torch.bool)\r\n mask[idx] = 1\r\n return mask\r\n\r\n data.train_mask = get_mask(train_idx)\r\n data.val_mask = get_mask(val_idx)\r\n data.test_mask = get_mask(test_idx)\r\n\r\n return data\r\n\r\n", "id": "2229123", "language": "Python", "matching_score": 1.5201315879821777, "max_stars_count": 15, "path": "data.py" }, { "content": "import numpy as np\r\n\r\ndef gen_seeds(size: int = None) -> np.ndarray:\r\n max_uint32 = np.iinfo(np.uint32).max\r\n return np.random.randint(\r\n max_uint32+1, size=size, dtype=np.uint32)\r\n\r\nquick_seeds = [2144199730, 794209841]\r\n\r\ntest_seeds = [2144199730, 794209841, 2985733717, 2282690970, 1901557222,\r\n 2009332812, 2266730407, 635625077, 3538425002, 960893189,\r\n 497096336, 3940842554, 3594628340, 948012117, 3305901371,\r\n 3644534211, 2297033685, 4092258879, 2590091101, 1694925034]\r\n\r\ndevelopment_seed = 4143496719\r\n", "id": "7071074", "language": "Python", "matching_score": 0.6064414381980896, "max_stars_count": 15, "path": "seeds.py" } ]
1.766585
ramanakshay
[ { "content": "##########################\n### EXAMPLE: returning a tuple\n##########################\n#def quotient_and_remainder(x, y):\n# q = x // y\n# r = x % y\n# return (q, r)\n# \n#(quot, rem) = quotient_and_remainder(4,3)\n#print(quot)\n#print(rem)\n\n##\n##########################\n### EXAMPLE: iterating over tuples\n##########################\ndef get_data(aTuple):\n \"\"\"\n aTuple, tuple of tuples (int, string)\n Extracts all integers from aTuple and sets \n them as elements in a new tuple. \n Extracts all unique strings from from aTuple \n and sets them as elements in a new tuple.\n Returns a tuple of the minimum integer, the\n maximum integer, and the number of unique strings\n \"\"\"\n nums = () # empty tuple\n words = ()\n for t in aTuple:\n # concatenating with a singleton tuple\n nums = nums + (t[0],) \n # only add words haven't added before\n if t[1] not in words: \n words = words + (t[1],)\n min_n = min(nums)\n max_n = max(nums)\n unique_words = len(words)\n return (min_n, max_n, unique_words)\n#\n#test = ((1,\"a\"),(2, \"b\"),\n# (1,\"a\"),(7,\"b\"))\n#(a, b, c) = get_data(test)\n#print(\"a:\",a,\"b:\",b,\"c:\",c)\n##\n# apply to any data you want!\n#tswift = ((2014,\"Katy\"),\n# (2014, \"Harry\"),\n# (2012,\"Jake\"), \n# (2010,\"Jake\"), \n# (2008,\"Joe\")) \n#(min_year, max_year, num_people) = get_data(tswift)\n#print(\"From\", min_year, \"to\", max_year, \\\n# \"<NAME> wrote songs about\", num_people, \"people!\")\n#\n##########################\n### EXAMPLE: sum of elements in a list\n##########################\n#def sum_elem_method1(L):\n# total = 0 \n# for i in range(len(L)): \n# total += L[i] \n# return total\n# \n#def sum_elem_method2(L):\n# total = 0 \n# for i in L: \n# total += i \n# return total\n# \n#print(sum_elem_method1([1,2,3,4]))\n#print(sum_elem_method2([1,2,3,4]))\n\n\n##########################\n### EXAMPLE: various list operations\n### put print(L) at different locations to see how it gets mutated\n##########################\n#L1 = [2,1,3]\n#L2 = [4,5,6]\n#L3 = L1 + L2\n#L1.extend([0,6])\n#\n#L = [2,1,3,6,3,7,0]\n#L.remove(2)\n#L.remove(3)\n#del(L[1])\n#print(L.pop())\n#\n#s = \"I<3 cs\"\n#print(list(s))\n#print((s.split('<'))[0:1])\n#L = ['a', 'b', 'c']\n#print(''.join(L))\n#print('_'.join(L))\n#\n#L=[9,6,0,3]\n#print(sorted(L))\n#print(L)\n#L.sort()\n#L.reverse()\n#print(L)\n\n\n##########################\n### EXAMPLE: aliasing\n##########################\n#a = 1\n#b = a\n#print(a)\n#print(b)\n#\n#warm = ['red', 'yellow', 'orange']\n#hot = warm\n#warm.append('pink')\n#print(hot)\n#print(warm)\n\n##########################\n### EXAMPLE: cloning\n##########################\n#cool = ['blue', 'green', 'grey']\n#chill = cool[:]\n#chill.append('black')\n#print(chill)\n#print(cool)\n\n##########################\n### EXAMPLE: sorting with/without mutation\n##########################\n#warm = ['red', 'yellow', 'orange']\n#sortedwarm = warm.sort()\n#print(warm)\n#print(sortedwarm)\n#\n#cool = ['grey', 'green', 'blue']\n#sortedcool = sorted(cool)\n#print(cool)\n#print(sortedcool)\n\n##########################\n### EXAMPLE: lists of lists of lists...\n##########################\n#warm = ['yellow', 'orange']\n#hot = ['red']\n#brightcolors = [warm]\n#brightcolors.append(hot)\n#print(brightcolors)\n#hot.append('pink')\n#print(hot)\n#print(brightcolors)\n#\n\n################################\n### EXAMPLE: mutating a list while iterating over it\n################################\n#def remove_dups(L1, L2):\n# for e in L1:\n# if e in L2:\n# L1.remove(e) #python has 
its internal counter running for the indices\n# \n#def remove_dups_new(L1, L2):\n# L1_copy = L1[:]\n# for e in L1_copy:\n# if e in L2:\n# L1.remove(e)\n#\n#L1 = [1, 2, 3, 4]\n#L2 = [1, 2, 5, 6]\n#remove_dups(L1, L2)\n#print(L1, L2)\n#\n#L1 = [1, 2, 3, 4]\n#L2 = [1, 2, 5, 6]\n#remove_dups_new(L1, L2)\n#print(L1, L2)\n\n################################\n### EXERCISE: Test yourself by predicting what the output is and \n### what gets mutated then check with the Python Tutor\n################################\n#cool = ['blue', 'green']\n#warm = ['red', 'yellow', 'orange']\n#print(cool)\n#print(warm)\n#\n#colors1 = [cool]\n#print(colors1)\n#colors1.append(warm)\n#print('colors1 = ', colors1)\n#\n#colors2 = [['blue', 'green'],\n# ['red', 'yellow', 'orange']]\n#print('colors2 =', colors2)\n#\n#warm.remove('red') \n#print('colors1 = ', colors1)\n#print('colors2 =', colors2)\n#\n#for e in colors1:\n# print('e =', e)\n#\n#for e in colors1:\n# if type(e) == list:\n# for e1 in e:\n# print(e1)\n# else:\n# print(e)\n#\n#flat = cool + warm\n#print('flat =', flat)\n#\n#print(flat.sort())\n#print('flat =', flat)\n#\n#new_flat = sorted(flat, reverse = True)\n#print('flat =', flat)\n#print('new_flat =', new_flat)\n#\n#cool[1] = 'black'\n#print(cool)\n#print(colors1)\n", "id": "9017540", "language": "Python", "matching_score": 1.1436258554458618, "max_stars_count": 3, "path": "6.0001/ps2/tuples,lists.py" }, { "content": "# Problem Set 4A\r\n# Name: <your name here>\r\n# Collaborators:\r\n# Time Spent: x:xx\r\n\r\ndef get_permutations(sequence):\r\n '''\r\n Enumerate all permutations of a given string\r\n\r\n sequence (string): an arbitrary string to permute. Assume that it is a\r\n non-empty string. \r\n\r\n You MUST use recursion for this part. Non-recursive solutions will not be\r\n accepted.\r\n\r\n Returns: a list of all permutations of sequence\r\n\r\n Example:\r\n >>> get_permutations('abc')\r\n ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']\r\n\r\n Note: depending on your implementation, you may return the permutations in\r\n a different order than what is listed here.\r\n '''\r\n l = []\r\n if len(sequence)==1:\r\n return [sequence]\r\n else:\r\n l1 = get_permutations(sequence[1:])\r\n for i in l1:\r\n a= i+sequence[0] \r\n if a not in l:\r\n l.append(a)\r\n for j in range(len(i)):\r\n a = i[0:j]+sequence[0]+i[j:]\r\n if a not in l:\r\n l.append(a)\r\n\r\n return l\r\n \r\n\r\nif __name__ == '__main__':\r\n# #EXAMPLE\r\n# example_input = 'abc'\r\n# print('Input:', example_input)\r\n# print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])\r\n# print('Actual Output:', get_permutations(example_input))\r\n \r\n# # Put three example test cases here (for your sanity, limit your inputs\r\n# to be three characters or fewer as you will have n! 
permutations for a \r\n# sequence of length n)\r\n print(get_permutations('abcd'))\r\n print(get_permutations('uuss'))\r\n print(get_permutations('buss'))\r\n\r\n", "id": "11824646", "language": "Python", "matching_score": 1.146442174911499, "max_stars_count": 3, "path": "6.0001/ps4/permutations.py" }, { "content": "#########################\r\n## EXAMPLE: combinations of print and return\r\n## Python Tutor link: http://www.pythontutor.com/visualize.html#code=def%20is_even_with_return(%20i%20%29%3A%0A%20%20%20%20%22%22%22%20%0A%20%20%20%20Input%3A%20i,%20a%20positive%20int%0A%20%20%20%20Returns%20True%20if%20i%20is%20even,%20otherwise%20False%0A%20%20%20%20%22%22%22%0A%20%20%20%20print('with%20return'%29%0A%20%20%20%20remainder%20%3D%20i%20%25%202%0A%20%20%20%20return%20remainder%20%3D%3D%200%0A%0Ais_even_with_return(3%29%20%0Aprint(is_even_with_return(3%29%20%29%0A%0Adef%20is_even_without_return(%20i%20%29%3A%0A%20%20%20%20%22%22%22%20%0A%20%20%20%20Input%3A%20i,%20a%20positive%20int%0A%20%20%20%20Does%20not%20return%20anything%0A%20%20%20%20%22%22%22%0A%20%20%20%20print('without%20return'%29%0A%0Ais_even_without_return(3%29%0Aprint(is_even_without_return(3%29%20%29%0A&cumulative=false&curInstr=0&heapPrimitives=false&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\r\n#########################\r\n#def is_even_with_return( i ):\r\n# \"\"\" \r\n# Input: i, a positive int\r\n# Returns True if i is even, otherwise False\r\n# \"\"\"\r\n# print('with return')\r\n# remainder = i % 2\r\n# return remainder == 0\r\n#\r\n#is_even_with_return(3) \r\n#print(is_even_with_return(3) )\r\n###\r\n#def is_even_without_return( i ):\r\n# \"\"\" \r\n# Input: i, a positive int\r\n# Does not return anything\r\n# \"\"\"\r\n# print('without return')\r\n# remainder = i % 2\r\n#\r\n#is_even_without_return(3)\r\n#print(is_even_without_return(3) )\r\n##\r\n# Simple is_even function definition\r\n#def is_even( i ):\r\n# \"\"\" \r\n# Input: i, a positive int\r\n# Returns True if i is even, otherwise False\r\n# \"\"\"\r\n# remainder = i % 2\r\n# return remainder == 0\r\n#\r\n## Use the is_even function later on in the code\r\n#print(\"All numbers between 0 and 20: even or not\")\r\n#for i in range(20):\r\n# if is_even(i):\r\n# print(i, \"even\")\r\n# else:\r\n# print(i, \"odd\")\r\n#\r\n##########################\r\n### EXAMPLE: applying functions to repeat same task many times\r\n#########################\r\n#def bisection_cuberoot_approx(x, epsilon):\r\n# \"\"\"\r\n# Input: x, an integer\r\n# Uses bisection to approximate the cube root of x to within epsilon\r\n# Returns: a float approximating the cube root of x\r\n# \"\"\"\r\n# low = 0.0\r\n# high = x\r\n# guess = (high + low)/2.0\r\n# while abs(guess**3 - x) >= epsilon:\r\n# if guess**3 < x:\r\n# low = guess\r\n# else:\r\n# high = guess\r\n# guess = (high + low)/2.0\r\n# return guess\r\n#\r\n#x = 1\r\n#while x <= 10000:\r\n# approx = bisection_cuberoot_approx(x, 0.001)\r\n# print(approx, \"is close to cube root of\", x)\r\n# x *= 10\r\n\r\n\r\n##########################\r\n### EXAMPLE: functions as arguments\r\n### Python Tutor link: 
http://www.pythontutor.com/visualize.html#code=def%20func_a(%29%3A%0A%20%20%20%20print('inside%20func_a'%29%0A%0Adef%20func_b(y%29%3A%0A%20%20%20%20print('inside%20func_b'%29%0A%20%20%20%20return%20y%0A%0Adef%20func_c(z%29%3A%0A%20%20%20%20print('inside%20func_c'%29%0A%20%20%20%20return%20z(%29%0A%0Aprint(func_a(%29%29%0Aprint(5%2Bfunc_b(2%29%29%0Aprint(func_c(func_a%29%29%0A&cumulative=false&curInstr=0&heapPrimitives=false&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\r\n##########################\r\n#def func_a():\r\n# print('inside func_a')\r\n#\r\n#def func_b(y):\r\n# print('inside func_b')\r\n# return y\r\n#\r\n#def func_c(z):\r\n# print('inside func_c')\r\n# return z()\r\n#\r\n#print(func_a())\r\n#print(5+func_b(2))\r\n#print(func_c(func_a))\r\n\r\n#\r\n##########################\r\n### EXAMPLE: returning function objects\r\n### Python Tutor link: http://www.pythontutor.com/visualize.html#code=def%20f(%29%3A%0A%20%20%20%20def%20x(a,%20b%29%3A%0A%20%20%20%20%20%20%20%20return%20a%2Bb%0A%20%20%20%20return%20x%0A%20%20%20%20%0Aval%20%3D%20f(%29(3,4%29%0Aprint(val%29%0A&cumulative=false&curInstr=0&heapPrimitives=false&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\r\n##########################\r\n#def f():\r\n# def x(a, b):\r\n# return a+b\r\n# return x\r\n# \r\n## the first part, f(), returns a function object\r\n## then apply that function with parameters 3 and 4\r\n#val = f()(3,4)\r\n#print(val)\r\n\r\n#\r\n#\r\n##########################\r\n### EXAMPLE: shows accessing variables outside scope\r\n##########################\r\n#def f(y):\r\n# x = 1\r\n# x += 1\r\n# print(x)\r\n#x = 5\r\n#f(x)\r\n#print(x)\r\n#\r\n#def g(y):\r\n# print(x)\r\n# print(x+1)\r\n#x = 5\r\n#g(x)\r\n#print(x)\r\n\r\n#def h(y):\r\n# pass\r\n# x += 1 #leads to an error without line `global x` inside h\r\n#x = 5\r\n#h(x)\r\n#print(x)\r\n\r\n\r\n##########################\r\n### EXAMPLE: hader scope example from slides\r\n### Python Tutor link: http://www.pythontutor.com/visualize.html#code=def%20g(x%29%3A%0A%20%20%20%20def%20h(%29%3A%0A%20%20%20%20%20%20%20%20x%20%3D%20'abc'%0A%20%20%20%20x%20%3D%20x%20%2B%201%0A%20%20%20%20print('in%20g(x%29%3A%20x%20%3D',%20x%29%0A%20%20%20%20h(%29%0A%20%20%20%20return%20x%0A%0Ax%20%3D%203%0Az%20%3D%20g(x%29&cumulative=false&curInstr=0&heapPrimitives=false&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\r\n##########################\r\n#def g(x):\r\n# def h():\r\n# x = 'abc'\r\n# x = x + 1\r\n# print('in g(x): x =', x)\r\n# h()\r\n# return x\r\n#\r\n#x = 3\r\n#z = g(x)\r\n\r\n##########################\r\n### EXAMPLE: complicated scope, test yourself!\r\n### Python Tutor link: 
http://www.pythontutor.com/visualize.html#code=def%20f(x%29%3A%0A%20%20%20x%20%3D%20x%20%2B%201%0A%20%20%20print('in%20f(x%29%3A%20x%20%3D',%20x%29%0A%20%20%20return%20x%0A%0Ax%20%3D%203%0Az%20%3D%20f(x%29%0Aprint('in%20main%20program%20scope%3A%20z%20%3D',%20z%29%0Aprint('in%20main%20program%20scope%3A%20x%20%3D',%20x%29%0A%0Adef%20g(x%29%3A%0A%20%20%20%20def%20h(x%29%3A%0A%20%20%20%20%20%20%20%20x%20%3D%20x%2B1%0A%20%20%20%20%20%20%20%20print(%22in%20h(x%29%3A%20x%20%3D%20%22,%20x%29%0A%20%20%20%20x%20%3D%20x%20%2B%201%0A%20%20%20%20print('in%20g(x%29%3A%20x%20%3D%20',%20x%29%0A%20%20%20%20h(x%29%0A%20%20%20%20return%20x%0A%0Ax%20%3D%203%0Az%20%3D%20g(x%29%0Aprint('in%20main%20program%20scope%3A%20x%20%3D%20',%20x%29%0Aprint('in%20main%20program%20scope%3A%20z%20%3D%20',%20z%29%0A&cumulative=false&curInstr=0&heapPrimitives=false&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false\r\n##########################\r\n#def f(x):\r\n# x = x + 1\r\n# print('in f(x): x =', x)\r\n# return x\r\n#\r\n#x = 3\r\n#z = f(x)\r\n#print('in main program scope: z =', z)\r\n#print('in main program scope: x =', x)\r\n#\r\n#def g(x):\r\n# def h(x):\r\n# x = x+1\r\n# print(\"in h(x): x = \", x)\r\n# x = x + 1\r\n# print('in g(x): x = ', x)\r\n# h(x)\r\n# return x\r\n#\r\n#x = 3\r\n#z = g(x)\r\n#print('in main program scope: x = ', x)\r\n#print('in main program scope: z = ', z)\r\n", "id": "3371183", "language": "Python", "matching_score": 2.285518169403076, "max_stars_count": 3, "path": "6.0001/ps2/scopesandfunctions.py" }, { "content": "#################\n## EXAMPLE: simple Coordinate class\n#################\nclass Coordinate(object):\n \"\"\" A coordinate made up of an x and y value \"\"\"\n def __init__(self, x, y):\n \"\"\" Sets the x and y values \"\"\"\n self.x = x\n self.y = y\n def __str__(self):\n \"\"\" Returns a string representation of self \"\"\"\n return \"<\" + str(self.x) + \",\" + str(self.y) + \">\"\n def distance(self, other):\n \"\"\" Returns the euclidean distance between two points \"\"\"\n x_diff_sq = (self.x-other.x)**2\n y_diff_sq = (self.y-other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5\n\n\nc = Coordinate(3,4)\norigin = Coordinate(0,0)\nprint(c.x, origin.x)\nprint(c.distance(origin))\nprint(Coordinate.distance(c, origin))\nprint(origin.distance(c))\nprint(c)\n\n\n#################\n## EXAMPLE: simple class to represent fractions\n## Try adding more built-in operations like multiply, divide\n### Try adding a reduce method to reduce the fraction (use gcd)\n#################\nclass Fraction(object):\n \"\"\"\n A number represented as a fraction\n \"\"\"\n def __init__(self, num, denom):\n \"\"\" num and denom are integers \"\"\"\n assert type(num) == int and type(denom) == int, \"ints not used\"\n self.num = num\n self.denom = denom\n def __str__(self):\n \"\"\" Retunrs a string representation of self \"\"\"\n return str(self.num) + \"/\" + str(self.denom)\n def __add__(self, other):\n \"\"\" Returns a new fraction representing the addition \"\"\"\n top = self.num*other.denom + self.denom*other.num\n bott = self.denom*other.denom\n return Fraction(top, bott)\n def __sub__(self, other):\n \"\"\" Returns a new fraction representing the subtraction \"\"\"\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return Fraction(top, bott)\n def __float__(self):\n \"\"\" Returns a float value of the fraction \"\"\"\n return self.num/self.denom\n def inverse(self):\n \"\"\" Returns a new fraction representing 1/self \"\"\"\n 
return Fraction(self.denom, self.num)\n \n\na = Fraction(1,4)\nb = Fraction(3,4)\nc = a + b # c is a Fraction object\nprint(c)\nprint(float(c))\nprint(Fraction.__float__(c))\nprint(float(b.inverse()))\n##c = Fraction(3.14, 2.7) # assertion error\n##print a*b # error, did not define how to multiply two Fraction objects\n\n\n##############\n## EXAMPLE: a set of integers as class\n##############\nclass intSet(object):\n \"\"\"\n An intSet is a set of integers\n The value is represented by a list of ints, self.vals\n Each int in the set occurs in self.vals exactly once\n \"\"\"\n def __init__(self):\n \"\"\" Create an empty set of integers \"\"\"\n self.vals = []\n\n def insert(self, e):\n \"\"\" Assumes e is an integer and inserts e into self \"\"\"\n if not e in self.vals:\n self.vals.append(e)\n\n def member(self, e):\n \"\"\" Assumes e is an integer\n Returns True if e is in self, and False otherwise \"\"\"\n return e in self.vals\n\n def remove(self, e):\n \"\"\" Assumes e is an integer and removes e from self\n Raises ValueError if e is not in self \"\"\"\n try:\n self.vals.remove(e)\n except:\n raise ValueError(str(e) + ' not found')\n\n def __str__(self):\n \"\"\" Returns a string representation of self \"\"\"\n self.vals.sort()\n return '{' + ','.join([str(e) for e in self.vals]) + '}'\n\n\ns = intSet()\nprint(s)\ns.insert(3)\ns.insert(4)\ns.insert(3)\nprint(s)\ns.member(3)\ns.member(5)\ns.insert(6)\nprint(s)\ns.remove(3) \nprint(s)\ns.remove(3) # leads to an error\n", "id": "2197247", "language": "Python", "matching_score": 1.0345209836959839, "max_stars_count": 3, "path": "6.0001/ps4/coordinate_class.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 21 11:52:34 2016\n\n@author: WELG\n\"\"\"\n\n#####################################\n# EXAMPLE: Towers of Hanoi\n#####################################\n\ndef printMove(fr, to):\n print('move from ' + str(fr) + ' to ' + str(to))\n\ndef Towers(n, fr, to, spare):\n if n == 1:\n printMove(fr, to)\n else:\n Towers(n-1, fr, spare, to)\n Towers(1, fr, to, spare)\n Towers(n-1, spare, to, fr)\n\n#print(Towers(4, 'P1', 'P2', 'P3'))\n\n#####################################\n# EXAMPLE: fibonacci\n#####################################\n\ndef fib(x):\n \"\"\"assumes x an int >= 0\n returns Fibonacci of x\"\"\"\n if x == 0 or x == 1:\n return 1\n else:\n return fib(x-1) + fib(x-2)\n\n#####################################\n# EXAMPLE: testing for palindromes\n#####################################\n \ndef isPalindrome(s):\n\n def toChars(s):\n s = s.lower()\n ans = ''\n for c in s:\n if c in 'abcdefghijklmnopqrstuvwxyz':\n ans = ans + c\n return ans\n\n def isPal(s):\n if len(s) <= 1:\n return True\n else:\n return s[0] == s[-1] and isPal(s[1:-1])\n\n return isPal(toChars(s))\n\n#print(isPalindrome('eve'))\n#\n#print(isPalindrome('Able was I, ere I saw Elba'))\n#\n#print(isPalindrome('Is this a palindrome'))\n\n#####################################\n# EXAMPLE: using dictionaries\n# counting frequencies of words in song lyrics\n#####################################\n\ndef lyrics_to_frequencies(lyrics):\n myDict = {}\n for word in lyrics:\n if word in myDict:\n myDict[word] += 1\n else:\n myDict[word] = 1\n return myDict\n \n \nshe_loves_you = ['she', 'loves', 'you', 'yeah', 'yeah', \n'yeah','she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n\n'you', 'think', \"you've\", 'lost', 'your', 'love',\n'well', 'i', 'saw', 'her', 'yesterday-yi-yay',\n\"it's\", 'you', \"she's\", 'thinking', 
'of',\n'and', 'she', 'told', 'me', 'what', 'to', 'say-yi-yay',\n\n'she', 'says', 'she', 'loves', 'you',\n'and', 'you', 'know', 'that', \"can't\", 'be', 'bad',\n'yes', 'she', 'loves', 'you',\n'and', 'you', 'know', 'you', 'should', 'be', 'glad',\n\n'she', 'said', 'you', 'hurt', 'her', 'so',\n'she', 'almost', 'lost', 'her', 'mind',\n'and', 'now', 'she', 'says', 'she', 'knows',\n\"you're\", 'not', 'the', 'hurting', 'kind',\n\n'she', 'says', 'she', 'loves', 'you',\n'and', 'you', 'know', 'that', \"can't\", 'be', 'bad',\n'yes', 'she', 'loves', 'you',\n'and', 'you', 'know', 'you', 'should', 'be', 'glad',\n\n'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n'with', 'a', 'love', 'like', 'that',\n'you', 'know', 'you', 'should', 'be', 'glad',\n\n'you', 'know', \"it's\", 'up', 'to', 'you',\n'i', 'think', \"it's\", 'only', 'fair',\n'pride', 'can', 'hurt', 'you', 'too',\n'pologize', 'to', 'her',\n\n'Because', 'she', 'loves', 'you',\n'and', 'you', 'know', 'that', \"can't\", 'be', 'bad',\n'Yes', 'she', 'loves', 'you',\n'and', 'you', 'know', 'you', 'should', 'be', 'glad',\n\n'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',\n'with', 'a', 'love', 'like', 'that',\n'you', 'know', 'you', 'should', 'be', 'glad',\n'with', 'a', 'love', 'like', 'that',\n'you', 'know', 'you', 'should', 'be', 'glad',\n'with', 'a', 'love', 'like', 'that',\n'you', 'know', 'you', 'should', 'be', 'glad',\n'yeah', 'yeah', 'yeah',\n'yeah', 'yeah', 'yeah', 'yeah'\n]\n\nbeatles = lyrics_to_frequencies(she_loves_you)\n\n\ndef most_common_words(freqs):\n best = max(freqs.values())\n words = []\n for k in freqs:\n if freqs[k] == best:\n words.append(k)\n return (words, best)\n \ndef words_often(freqs, minTimes):\n result = []\n done = False\n while not done:\n temp = most_common_words(freqs)\n if temp[1] >= minTimes:\n result.append(temp)\n for w in temp[0]:\n del(freqs[w]) #remove word from dict\n else:\n done = True\n return result\n \n#print(words_often(beatles, 10))\n\n#####################################\n# EXAMPLE: comparing fibonacci using memoization\n#####################################\n\n\ndef fib(n):\n if n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n print(fib(n-1) + fib(n-2))\n return fib(n-1) + fib(n-2)\n\n\ndef fib_efficient(n, d):\n if n in d:\n return d[n]\n else:\n ans = fib_efficient(n-1, d)+fib_efficient(n-2, d)\n d[n] = ans\n return ans\n \nd = {1:1, 2:2}\n\nargToUse = 34\nprint(\"\")\nprint('using fib')\nprint(fib(argToUse))\nprint(\"\")\nprint('using fib_efficient')\nprint(fib_efficient(argToUse, d))\n", "id": "426701", "language": "Python", "matching_score": 0.7163735628128052, "max_stars_count": 3, "path": "6.0001/ps3/she_loves_you.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 20 09:42:33 2019\r\n\r\n@author: aksha\r\n\"\"\"\r\n#printing examples\r\n#print(\"Hello World\")\r\n#\r\n#print(\"The Lion King\")\r\n#print (\"was released in 1996\")\r\n#print(\"by Disney.\")\r\n#\r\n#print(\"4*8 is\", 4*8)\r\n#print(\"2**2**2 is\", 2**2**2)\r\n#print(\"1+2+3+4+5 is\", 1+2+3+4+5)\r\n\r\n\r\n#input and variables\r\n#print(\"Halt!\")\r\n#user_input = input(\"Who goes there? 
\")\r\n#print(\"You may pass,\", user_input)\r\n#\r\n#a=123.4\r\n#b1 = 'spam'\r\n#first_name = 'bill'\r\n#b = 432\r\n#c = a+b\r\n#print('a+b =', c)\r\n#print('first name is', first_name)\r\n#print('Emails have a lot of', b1)\r\n\r\n\r\n#assignment\r\n#a = 1\r\n#print(a)\r\n#a = a + 1\r\n#print(a)\r\n#a = a*3\r\n#print(a)\r\n\r\n#number = float(input(\"Type in a number: \"))\r\n#integer = int(input(\"Type in an integer: \"))\r\n#text = input(\"Type in a string: \")\r\n#print(\"Number is\", number)\r\n#print(\"number is a\", type(number))\r\n#print(\"number*2 =\", number*2)\r\n#print('integer is', integer)\r\n#print('it is a', type(integer))\r\n#print('integer*2 =', integer*2)\r\n#print(text)\r\n#print(type(text))\r\n#print(text*5)\r\n\r\n#F to C\r\n#fahr_temp = float(input(\"Fahranheit Temperature: \"))\r\n#celc_temp = (fahr_temp - 32)*(5/9)\r\n#print(\"Celcius Temperature: \", celc_temp)\r\n\r\n \r\n#math\r\n#weightkg = float(input('Enter weight in kilograms: '))\r\n#stonemass = round(weightkg*2.2/14)\r\n#print('you weigh', stonemass,'stones. ')\r\n\r\n\r\n#variables\r\n#variablesareboxes = 1\r\n#print(variablesareboxes)\r\n#variablesareboxes = 288\r\n#print(variablesareboxes)\r\n\r\n#red = 10\r\n#blue = 8\r\n#print(red, blue)\r\n#red = blue\r\n#print(red, blue)\r\n#yellow = red + blue\r\n#print(yellow)\r\n#red = red+3\r\n#print(yellow)\r\n#print(red)\r\n\r\n#stringsandvariables\r\n#question = \"What did you have for breakfast?\"\r\n#print(question)\r\n#answer = input()\r\n#print(\"nice. you had \" + answer) #concatenation\r\n", "id": "159907", "language": "Python", "matching_score": 3.0340137481689453, "max_stars_count": 3, "path": "6.0001/ps0/helloworld.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 20 10:51:19 2019\r\n\r\n@author: aksha\r\n\"\"\"\r\nimport numpy\r\nx = float(input(\"Enter a number x: \"))\r\ny = float(input(\"Enter a number y: \"))\r\nprint(\"x to the power of y is\", x**y)\r\nprint(\"log2(x) =\", round(numpy.log2(x),10))\r\n", "id": "8193414", "language": "Python", "matching_score": 0.12596668303012848, "max_stars_count": 3, "path": "6.0001/ps0/ps0.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 23 14:39:19 2019\r\n\r\n@author: aksha\r\n\"\"\"\r\nannual_salary = int(input('Enter your annual salary: '))\r\nannual_salary1 = annual_salary\r\ntotal_cost = 1000000\r\nsemi_annual_raise = 0.07\r\ncurrent_savings = 0.0\r\nlow = 0\r\nhigh = 10000\r\nguess = 5000\r\nnumberofsteps = 0\r\nwhile abs(current_savings-total_cost*0.25)>=100 and guess != 9999 and abs(low-high)>=2:\r\n current_savings = 0.0\r\n annual_salary = annual_salary1\r\n for months in range(1,37):\r\n if months%6==1 and months != 1:\r\n annual_salary += annual_salary*semi_annual_raise\r\n current_savings += annual_salary/12*(guess/10000) + current_savings*0.04/12\r\n if current_savings<(total_cost*0.25):\r\n low = guess\r\n else:\r\n high = guess\r\n guess =int((low+high)/2)\r\n numberofsteps += 1 \r\n\r\nif guess==9999:\r\n print('It is not possible to pay the down payment in 3 years')\r\nelif guess ==0:\r\n print('The portion size cannot be computed as it is very less(less than 0.0001)')\r\nelse:\r\n print('Best Savings Rate: ',guess/10000)\r\n print('Steps in bisection search: ',numberofsteps)", "id": "334313", "language": "Python", "matching_score": 3.2656471729278564, "max_stars_count": 3, "path": "6.0001/ps1/ps1c.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 21 10:58:15 2019\r\n\r\n@author: 
aksha\r\n\"\"\"\r\n\r\nannual_salary = int(input('Enter your annual salary: '))\r\nportion_saved = float(input('Enter the percent of your salary to be saved, as a decimal: '))\r\ntotal_cost = int(input('Enter cost your dream home: '))\r\nsemi_annual_raise = float(input('Enter the semi-annual raise, as a decimal: '))\r\ncurrent_savings = 0.0\r\nmonths = 0\r\nwhile current_savings<=(total_cost*0.25):\r\n months +=1\r\n if months%6==1 and months != 1:\r\n annual_salary += annual_salary*semi_annual_raise\r\n \r\n current_savings += annual_salary/12*portion_saved + current_savings*0.04/12\r\nprint('Number of months:',months) ", "id": "12094903", "language": "Python", "matching_score": 3.460653066635132, "max_stars_count": 3, "path": "6.0001/ps1/ps1b.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 21 10:14:13 2019\r\n\r\n@author: aksha\r\n\"\"\"\r\nannual_salary = int(input('Enter your annual salary: '))\r\nportion_saved = float(input('Enter the percent of your salary to be saved, as a decimal: '))\r\ntotal_cost = int(input('Enter cost your dream home: '))\r\ncurrent_savings = 0.0\r\nmonths = 0\r\nwhile current_savings<=(total_cost*0.25):\r\n months +=1\r\n current_savings += annual_salary/12*portion_saved + current_savings*0.04/12\r\nprint('Number of months:',months) ", "id": "11730084", "language": "Python", "matching_score": 3.568878412246704, "max_stars_count": 3, "path": "6.0001/ps1/ps1a.py" } ]
1.71598
axevalley
[ { "content": "\"\"\"The RotorController class.\"\"\"\n\nimport logging\nfrom string import ascii_uppercase as alphabet\nfrom typing import Optional, Sequence\n\nfrom .reflector import Reflector\nfrom .rotor import Rotor\n\nlogging.basicConfig(\n filename=\"test.log\",\n level=logging.INFO,\n format=\"%(asctime)s:%(levelname)s:%(message)s\",\n)\n\n\nclass RotorMechanism:\n \"\"\"Controller for Enigma's rotors.\"\"\"\n\n def __init__(\n self, *, rotors: Sequence[Rotor], reflector: Optional[Reflector] = None\n ):\n \"\"\"Set up enimga's rotors.\"\"\"\n self.rotors = rotors\n if reflector is None:\n reflector = Reflector(wiring=alphabet)\n self.reflector = reflector\n\n def encode(self, value: str) -> str:\n \"\"\"Return value encoded by rotors.\"\"\"\n self.update_rotor_positions()\n first_pass_value = self.encode_rotor_right_left(len(self.rotors) - 1, value)\n reflector_value = self.reflector.encode(first_pass_value)\n logging.debug(\"Reflector encoded {} to {}\".format(value, reflector_value))\n encoded_value = self.encode_rotor_left_right(0, reflector_value)\n logging.info(\"Encoded {} to {}\".format(value, encoded_value))\n return encoded_value\n\n def encode_rotor_right_left(self, rotor_position: int, value: str) -> str:\n \"\"\"Return right to left encoding of the rotor.\"\"\"\n encoded_value = self.rotors[rotor_position].encode(value)\n logging.debug(\n \"Rotor {} encoded {} < {}\".format(rotor_position, value, encoded_value)\n )\n if rotor_position == 0:\n return encoded_value\n return self.encode_rotor_right_left(rotor_position - 1, encoded_value)\n\n def encode_rotor_left_right(self, rotor_position: int, value: str) -> str:\n \"\"\"Return left to right encoding of the rotor.\"\"\"\n encoded_value = self.rotors[rotor_position].encode(value, reverse=True)\n logging.debug(\n \"Rotor {} encoded {} > {}\".format(rotor_position, value, encoded_value)\n )\n if rotor_position == len(self.rotors) - 1:\n return encoded_value\n return self.encode_rotor_left_right(rotor_position + 1, encoded_value)\n\n def update_rotor_positions(self) -> None:\n \"\"\"Update the rotation of the rotors.\"\"\"\n self.rotors[-1].rotate()\n for rotor_position in reversed(range(0, len(self.rotors) - 1)):\n if self.rotors[rotor_position + 1].rotate_next_rotor():\n self.rotors[rotor_position].rotate()\n", "id": "8890619", "language": "Python", "matching_score": 2.077636480331421, "max_stars_count": 0, "path": "enigma/rotor/rotor_mechanism.py" }, { "content": "\"\"\"Tests for engima's rotor mechanism.\"\"\"\n\nimport string\nimport unittest\n\nfrom enigma import Reflector, Rotor, RotorMechanism\n\n\nclass TestRotorMechanism(unittest.TestCase):\n \"\"\"Test class for enigma's rotor mechanism.\"\"\"\n\n def get_rotors(self, settings):\n \"\"\"Return rotor set.\"\"\"\n return [Rotor(**setting) for setting in settings]\n\n def get_rotor_mechanism(\n self,\n rotors=[\n {\n \"wiring\": \"EKMFLGDQVZNTOWYHXUSPAIBRCJ\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"R\"],\n },\n {\n \"wiring\": \"AJDKSIRUXBLHWTMCQGZNPYFVOE\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"F\"],\n },\n {\n \"wiring\": \"BDFHJLCPRTXVZNYEIWGAKMUSQO\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"W\"],\n },\n ],\n reflector=Reflector(\"YRUHQSLDPXNGOKMIEBFZCWVJAT\"),\n ):\n \"\"\"Return rotor mechanism.\"\"\"\n rotors = self.get_rotors(rotors)\n return RotorMechanism(rotors=rotors, reflector=reflector)\n\n def test_default_position_encoding(self):\n \"\"\"\n Test 
the rotor mechanism returns correct.\n\n Input string: 'AAA'\n Rotor position 1: Rotor I\n Rotor position 2: Rotor II\n Rotor position 3: Rotor III\n\n All rotors in position A with ring setting of 01.\n \"\"\"\n rotors = self.get_rotor_mechanism()\n self.assertEqual(\"\".join([rotors.encode(char) for char in \"AAA\"]), \"BDZ\")\n\n def test_first_rotor_rotates(self):\n \"\"\"Test that the first rotor rotates after a keypress.\"\"\"\n rotors = self.get_rotor_mechanism()\n self.assertEqual(rotors.rotors[2].position, \"A\")\n rotors.encode(\"A\")\n self.assertEqual(rotors.rotors[2].position, \"B\")\n rotors.encode(\"A\")\n self.assertEqual(rotors.rotors[2].position, \"C\")\n\n def test_second_rotor_rotates(self):\n \"\"\"Test that the second rotor rotates.\"\"\"\n rotors = self.get_rotor_mechanism()\n rotors.rotors[2].set_position(\"V\")\n self.assertEqual(rotors.rotors[2].turnover_positions, [\"W\"])\n self.assertEqual(rotors.rotors[1].position, \"A\")\n rotors.encode(\"A\")\n self.assertEqual(rotors.rotors[2].position, \"W\")\n self.assertEqual(rotors.rotors[1].position, \"B\")\n\n def test_rotor_mechanism_without_a_reflector(self):\n \"\"\"Test a rotor mechanism can be instaniated without a reflector.\"\"\"\n rotors = self.get_rotor_mechanism(reflector=None)\n reflector = rotors.reflector\n for letter in string.ascii_uppercase:\n self.assertEqual(reflector.encode(letter), letter)\n", "id": "1751120", "language": "Python", "matching_score": 3.8133740425109863, "max_stars_count": 0, "path": "tests/test_rotor/test_rotor_mechanism.py" }, { "content": "\"\"\"Tests for the engima's reflectors.\"\"\"\n\nimport unittest\n\nfrom enigma import Reflector\n\n\nclass TestReflector(unittest.TestCase):\n \"\"\"Test class for testing reflectors.\"\"\"\n\n def test_reflector_output(self):\n \"\"\"Test refelectors give a correct encoding.\"\"\"\n rotor = Reflector(wiring=\"YRUHQSLDPXNGOKMIEBFZCWVJAT\")\n self.assertEqual(rotor.encode(\"A\"), \"Y\")\n self.assertEqual(rotor.encode(\"Q\"), \"E\")\n self.assertEqual(rotor.encode(\"H\"), \"D\")\n", "id": "598859", "language": "Python", "matching_score": 1.7790615558624268, "max_stars_count": 0, "path": "tests/test_rotor/test_reflector.py" }, { "content": "\"\"\"Tests for enigma.rotor.wiring.Wiring class.\"\"\"\n\nimport unittest\n\nfrom enigma.rotor.wiring import Wiring\n\nALPHA = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n\nclass TestWiring(unittest.TestCase):\n \"\"\"Tests for enigma.rotor.wiring.Wiring class.\"\"\"\n\n def test_wiring_pins(self):\n \"\"\"Test Wiring can be instanicated.\"\"\"\n wiring = Wiring(ALPHA)\n output = [wiring.left_pin(i) for i in range(len(ALPHA))]\n self.assertEqual(output, list(range(len(ALPHA))))\n\n def test_wiring_to_sting(self):\n \"\"\"Test Wiring classes __repr__ method.\"\"\"\n wiring = Wiring(ALPHA)\n self.assertEqual(str(wiring), ALPHA)\n", "id": "4448835", "language": "Python", "matching_score": 1.7075273990631104, "max_stars_count": 0, "path": "tests/test_rotor/test_wiring.py" }, { "content": "\"\"\"The wiring class.\"\"\"\nfrom string import ascii_uppercase as alphabet\n\n\nclass Wiring:\n \"\"\"Find pin connections for an Enima rotor.\"\"\"\n\n def __init__(self, wiring: str):\n \"\"\"\n Set wiring.\n\n Args:\n wiring:\n String containing the letters the pins on the right side\n will connect to for each left hand pin when the rotor is in\n position 'A'.\n \"\"\"\n self.pins = [alphabet.index(char) for char in wiring]\n\n def __repr__(self) -> str:\n return \"\".join([alphabet[pin] for pin in self.pins])\n\n def __len__(self) 
-> int:\n return len(self.pins)\n\n def left_pin(self, right_pin: int) -> int:\n \"\"\"Return the pin position on the left for one on the right.\"\"\"\n return self.pins[right_pin]\n\n def right_pin(self, left_pin: int) -> int:\n \"\"\"Return the pin position on the right for one on the left.\"\"\"\n return self.pins.index(left_pin)\n", "id": "741474", "language": "Python", "matching_score": 1.5206884145736694, "max_stars_count": 0, "path": "enigma/rotor/wiring.py" }, { "content": "\"\"\"The Encoder class.\"\"\"\n\nfrom .wiring import Wiring\n\n\nclass Encoder:\n \"\"\"Base class for encoders.\"\"\"\n\n name = None\n\n def __init__(self, wiring: str = \"YRUHQSLDPXNGOKMIEBFZCWVJAT\"):\n \"\"\"Set wiring and position encodings.\"\"\"\n self.wiring = Wiring(wiring)\n", "id": "3286995", "language": "Python", "matching_score": 0.9870892763137817, "max_stars_count": 0, "path": "enigma/rotor/encoder.py" }, { "content": "\"\"\"Tests for enigma.rotor.encoder.Encoder.\"\"\"\n\nimport unittest\nfrom string import ascii_uppercase\n\nfrom enigma.rotor.encoder import Encoder\n\n\nclass TestEncoder(unittest.TestCase):\n \"\"\"Test for the Encoder class.\"\"\"\n\n def test_encoder(self):\n \"\"\"Test encoder can be instanicated.\"\"\"\n Encoder(ascii_uppercase)\n", "id": "11383176", "language": "Python", "matching_score": 1.867051601409912, "max_stars_count": 0, "path": "tests/test_rotor/test_encoder.py" }, { "content": "\"\"\"Tests for enigma's rotors.\"\"\"\n", "id": "924675", "language": "Python", "matching_score": 0.12080029398202896, "max_stars_count": 0, "path": "tests/test_rotor/__init__.py" }, { "content": "\"\"\"Encoders for enigma's rotor mechanism.\"\"\"\n\nfrom string import ascii_uppercase as alphabet\nfrom typing import Sequence\n\nfrom .encoder import Encoder\n\n\nclass Rotor(Encoder):\n \"\"\"\n Enigma's Rotors.\n\n Attributes:\n wriring:\n The pin connections of the rotor.\n\n ring_setting:\n The ring setting of the rotor.\n\n position:\n The current position of the rotor.\n\n \"\"\"\n\n def __init__(\n self,\n wiring: str = \"EKMFLGDQVZNTOWYHXUSPAIBRCJ\",\n ring_setting: int = 1,\n position: str = \"A\",\n turnover_positions: Sequence[str] = [\"R\"],\n ):\n \"\"\"\n Set the initial settings of the rotor.\n\n Kwargs:\n\n wiring:\n String containing the letters the pins on the right side\n will connect to for each left hand pin when the rotor is in\n position 'A'.\n\n ring_setting:\n The offset of the letters on the rotor as a letter or number.\n Default: 'A'.\n\n position:\n The starting position of the rotor as a letter or number.\n Default: 'A'.\n\n turnover_positions:\n String or list of strings containig the letter position at\n which the rotor will cause the next to rotate. 
Default: 'A'.\n\n \"\"\"\n super().__init__(wiring)\n self.start_position = position\n self.turnover_positions = turnover_positions\n self.ring_setting = ring_setting\n self.rotation = 0\n self.set_position(self.start_position)\n\n def encode(self, letter: str, reverse: bool = False) -> str:\n \"\"\"\n Return the letter position currently connected to annother.\n\n The letter positions are those of a rotor in position 'A' with a ring\n setting of 01.\n\n Args:\n letter:\n The input position.\n\n Kwargs:\n reverse:\n If True the encoding is left to right, otherwise right to left.\n Default: False.\n\n Returns:\n The letter position of the pin connected to the one at the passed\n letter postion.\n\n \"\"\"\n pin_number = self._find_pin(letter)\n if reverse is True:\n pin_location = self.wiring.right_pin(pin_number)\n else:\n pin_location = self.wiring.left_pin(pin_number)\n return self._find_letter(pin_location)\n\n def rotate(self, turns: int = 1) -> None:\n \"\"\"\n Rotate the rotor.\n\n Args:\n turns: Number of times to rotate the rotor. Default: 1.\n \"\"\"\n self.rotation += turns\n if self.rotation >= len(self.wiring):\n self.rotation = 0\n\n def set_position(self, letter_position: str) -> None:\n \"\"\"Turn the rotor to a given position.\"\"\"\n numeric_position = alphabet.index(letter_position) + 1\n offset = numeric_position - self.ring_setting\n if offset < 0:\n offset += len(self.wiring)\n self.rotation = offset\n\n def rotate_next_rotor(self) -> bool:\n \"\"\"Return True if the next rotor should rotate.\"\"\"\n if self.position in self.turnover_positions:\n return True\n return False\n\n @property\n def position(self) -> str:\n \"\"\"Return the current position of the rotor.\"\"\"\n offset = self.rotation + self.ring_setting - 1\n if offset >= len(self.wiring):\n offset -= len(self.wiring)\n return alphabet[offset]\n\n def _find_pin(self, pin_letter: str) -> int:\n \"\"\"Return the pin number for a given letter input.\"\"\"\n pin_position = alphabet.index(pin_letter)\n offset = pin_position + self.rotation\n if offset >= len(self.wiring):\n offset -= len(self.wiring)\n return offset\n\n def _find_letter(self, pin_number: int) -> str:\n \"\"\"Find the letter position for a given pin number.\"\"\"\n offset = pin_number - self.rotation\n return alphabet[offset]\n", "id": "9888677", "language": "Python", "matching_score": 4.421316623687744, "max_stars_count": 0, "path": "enigma/rotor/rotor.py" }, { "content": "\"\"\"Reflectors for enigma.\"\"\"\n\nfrom string import ascii_uppercase as alphabet\n\nfrom .encoder import Encoder\n\n\nclass Reflector(Encoder):\n \"\"\"Base class for reflectors.\"\"\"\n\n def encode(self, input: str) -> str:\n \"\"\"\n Return the letter position currently connected to annother.\n\n The letter positions are those of a rotor in position 'A' with a ring\n setting of 01.\n\n Args:\n letter:\n The input position.\n\n Kwargs:\n reverse:\n If True the encoding is left to right, otherwise right to left.\n Default: False.\n\n Returns:\n The letter position of the pin connected to the one at the passed\n letter postion.\n\n \"\"\"\n input_pin = self._find_pin(input)\n output_pin = self.wiring.right_pin(input_pin)\n return self._find_letter(output_pin)\n\n def _find_pin(self, pin_letter: str) -> int:\n \"\"\"Return the pin number for a given letter input.\"\"\"\n return alphabet.index(pin_letter)\n\n def _find_letter(self, pin_number: int) -> str:\n \"\"\"Find the letter position for a given pin number.\"\"\"\n return alphabet[pin_number]\n", "id": "9540922", 
"language": "Python", "matching_score": 1.872698426246643, "max_stars_count": 0, "path": "enigma/rotor/reflector.py" }, { "content": "\"\"\"Tests for enigma's rotors.\"\"\"\n\nfrom string import ascii_uppercase\n\nfrom enigma import Rotor\n\nfrom .rotor_test import RotorTest\n\n\nclass TestRotors(RotorTest):\n \"\"\"Test class for testing rotors.\"\"\"\n\n def rotor_position_test(self, positions, ring_setting=1):\n \"\"\"Test initial position with given positions.\"\"\"\n rotor = self.get_rotor()\n set_positions = []\n for position in positions:\n rotor = self.get_rotor(position=position, ring_setting=ring_setting)\n set_positions.append(rotor.position)\n return set_positions\n\n def rotor_turnover_positions_test(self, turnover_positionses, ring_setting=1):\n \"\"\"Test initial position with given positions.\"\"\"\n rotor = self.get_rotor()\n set_notches = []\n for turnover_positions in turnover_positionses:\n rotor = self.get_rotor(\n turnover_positions=turnover_positions, ring_setting=ring_setting\n )\n set_notches.append(rotor.turnover_positions)\n return set_notches\n\n def test_straight_rotor(self):\n \"\"\"Test rotor encoding does not change with straight wiring.\"\"\"\n for position in ascii_uppercase:\n for ring_setting in range(1, len(ascii_uppercase) + 1):\n for reverse in (True, False):\n rotor = Rotor(\n wiring=ascii_uppercase,\n position=position,\n ring_setting=ring_setting,\n )\n output = [\n rotor.encode(letter, reverse=reverse)\n for letter in ascii_uppercase\n ]\n self.assertEqual(output, list(ascii_uppercase))\n\n def test_set_rotor_position(self):\n \"\"\"Test initial position with ring setting.\"\"\"\n self.assertEqual(self.rotor_position_test([\"A\", \"Q\", \"Z\"]), [\"A\", \"Q\", \"Z\"])\n\n def test_set_rotor_position_with_ring_setting(self):\n \"\"\"Test that rotor.set_position sets rotor position.\"\"\"\n self.assertEqual(self.rotor_position_test([\"A\", \"Q\", \"Z\"]), [\"A\", \"Q\", \"Z\"])\n\n def test_set_rotor_turnover_positions(self):\n \"\"\"Test initial turnover positions with ring setting.\"\"\"\n self.assertEqual(\n self.rotor_turnover_positions_test([[\"A\"], [\"Q\"], [\"Z\"]]),\n [[\"A\"], [\"Q\"], [\"Z\"]],\n )\n\n def test_set_multiple_rotor_turnover_positions(self):\n \"\"\"Test rotor accepts a list of turnover position settings.\"\"\"\n self.assertEqual(self.rotor_turnover_positions_test([\"A\", \"Q\"]), [\"A\", \"Q\"])\n\n def test_rotor_starting_position(self):\n \"\"\"Test rotors starting position is correct for passed argument.\"\"\"\n self.assertEqual(self.get_rotor(position=\"A\").position, \"A\")\n self.assertEqual(self.get_rotor(position=\"Q\").position, \"Q\")\n\n def test_rotor_complete_rotation(self):\n \"\"\"Test that a rotor can complete a full rotation.\"\"\"\n rotor = self.get_rotor(ring_setting=2)\n initial_position = rotor.position\n for position in range(len(rotor.wiring)):\n rotor.rotate()\n self.assertEqual(initial_position, rotor.position)\n\n def test_rotor_position_updates_on_rotation(self):\n \"\"\"Test rotor position is correct after rotation.\"\"\"\n rotor = self.get_rotor()\n self.assertEqual(rotor.position, \"A\")\n rotor.rotate()\n self.assertEqual(rotor.position, \"B\")\n rotor.rotate()\n self.assertEqual(rotor.position, \"C\")\n rotor = self.get_rotor()\n rotor.rotate(turns=3)\n self.assertEqual(rotor.position, \"D\")\n\n def test_get_ring_setting(self):\n \"\"\"Test rotor sets and reports ring setting correctly.\"\"\"\n rotor = self.get_rotor(ring_setting=2)\n self.assertEqual(rotor.ring_setting, 2)\n\n def 
test_ring_setting(self):\n \"\"\"Test rotor encoding with ring setting.\"\"\"\n rotor = self.get_rotor(ring_setting=2)\n self.assertEqual(rotor.encode(\"A\"), \"K\")\n self.assertEqual(rotor.encode(\"Q\"), \"I\")\n self.assertEqual(rotor.encode(\"H\"), \"E\")\n rotor.rotate()\n self.assertEqual(rotor.encode(\"A\"), \"E\")\n\n def test_rotor_rotate_next_rotor(self):\n \"\"\"Test rotor.get_next_rotor works correctly.\"\"\"\n rotor = self.get_rotor(position=\"A\", turnover_positions=[\"C\"])\n self.assertFalse(rotor.rotate_next_rotor())\n rotor.rotate()\n self.assertFalse(rotor.rotate_next_rotor())\n rotor.rotate()\n self.assertEqual(rotor.position, \"C\")\n self.assertEqual(rotor.turnover_positions, [\"C\"])\n self.assertTrue(rotor.rotate_next_rotor())\n\n def test_rotor_I_position_A_input_A(self):\n \"\"\"Test rotor encoding right to left without rotation.\"\"\"\n rotor = self.get_rotor()\n self.assertEqual(rotor.encode(\"A\"), \"E\")\n\n def test_rotor_I_position_A_after_rotation(self):\n \"\"\"Test rotor encoding right to left with a single rotation.\"\"\"\n rotor = self.get_rotor()\n rotor.rotate()\n self.assertEqual(rotor.encode(\"A\"), \"J\")\n self.assertEqual(rotor.encode(\"Q\"), \"T\")\n self.assertEqual(rotor.encode(\"H\"), \"U\")\n\n def test_rotor_III(self):\n \"\"\"Test rotor encoding right to left with multiple rotations.\"\"\"\n rotor = self.get_rotor(wiring=\"BDFHJLCPRTXVZNYEIWGAKMUSQO\", position=\"D\")\n self.assertEqual(rotor.encode(\"A\"), \"E\")\n self.assertEqual(rotor.encode(\"Q\"), \"X\")\n self.assertEqual(rotor.encode(\"H\"), \"U\")\n\n def test_rotor_I_position_A_input_A_reverse(self):\n \"\"\"Test rotor encoding left to right without rotation.\"\"\"\n rotor = self.get_rotor(wiring=\"EKMFLGDQVZNTOWYHXUSPAIBRCJ\", position=\"A\")\n self.assertEqual(rotor.encode(\"A\", reverse=True), \"U\")\n\n def test_rotor_I_position_A_after_rotation_reverse(self):\n \"\"\"Test rotor encoding left to right with a single rotation.\"\"\"\n rotor = self.get_rotor(wiring=\"EKMFLGDQVZNTOWYHXUSPAIBRCJ\", position=\"A\")\n rotor.rotate()\n self.assertEqual(rotor.encode(\"A\", reverse=True), \"V\")\n self.assertEqual(rotor.encode(\"Q\", reverse=True), \"W\")\n self.assertEqual(rotor.encode(\"H\", reverse=True), \"U\")\n\n def test_rotor_III_reverse(self):\n \"\"\"Test rotor encoding left to right with multiple rotations.\"\"\"\n rotor = self.get_rotor(wiring=\"BDFHJLCPRTXVZNYEIWGAKMUSQO\", position=\"D\")\n self.assertEqual(rotor.encode(\"A\", reverse=True), \"Y\")\n self.assertEqual(rotor.encode(\"Q\", reverse=True), \"G\")\n self.assertEqual(rotor.encode(\"H\", reverse=True), \"R\")\n", "id": "8816275", "language": "Python", "matching_score": 3.025941848754883, "max_stars_count": 0, "path": "tests/test_rotor/test_rotor.py" }, { "content": "\"\"\"Base class for rotor tests.\"\"\"\n\nimport unittest\n\nfrom enigma.rotor.reflector import Reflector\nfrom enigma.rotor.rotor import Rotor\n\n\nclass RotorTest(unittest.TestCase):\n \"\"\"Provides tools testing rotors.\"\"\"\n\n def get_rotor(\n self,\n wiring=\"EKMFLGDQVZNTOWYHXUSPAIBRCJ\",\n ring_setting=1,\n position=\"A\",\n turnover_positions=[\"R\"],\n ):\n \"\"\"Return Rotor object.\"\"\"\n return Rotor(\n wiring=wiring,\n ring_setting=ring_setting,\n position=position,\n turnover_positions=turnover_positions,\n )\n\n def get_reflector(self, wiring=\"YRUHQSLDPXNGOKMIEBFZCWVJAT\"):\n \"\"\"Return Reflector object.\"\"\"\n return Reflector(wiring=wiring)\n", "id": "22050", "language": "Python", "matching_score": 2.759105920791626, 
"max_stars_count": 0, "path": "tests/test_rotor/rotor_test.py" }, { "content": "\"\"\"Tests for the enigma module.\"\"\"\n\nimport unittest\n\nfrom enigma import Enigma, Plugboard, Reflector, Rotor\n\n\nclass TestEnigma(unittest.TestCase):\n \"\"\"Tests for the enigma class.\"\"\"\n\n def get_enigma(\n self,\n rotors=[\n {\n \"wiring\": \"EKMFLGDQVZNTOWYHXUSPAIBRCJ\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"R\"],\n },\n {\n \"wiring\": \"AJDKSIRUXBLHWTMCQGZNPYFVOE\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"F\"],\n },\n {\n \"wiring\": \"BDFHJLCPRTXVZNYEIWGAKMUSQO\",\n \"position\": \"A\",\n \"ring_setting\": 1,\n \"turnover_positions\": [\"W\"],\n },\n ],\n reflector=\"YRUHQSLDPXNGOKMIEBFZCWVJAT\",\n plugboard=[],\n ):\n \"\"\"Test enigma object can be created.\"\"\"\n used_rotors = [\n Rotor(\n wiring=_[\"wiring\"],\n position=_[\"position\"],\n ring_setting=_[\"ring_setting\"],\n turnover_positions=_[\"turnover_positions\"],\n )\n for _ in rotors\n ]\n used_reflector = Reflector(wiring=reflector)\n used_plugboard = Plugboard(plugboard)\n return Enigma(\n rotors=used_rotors, reflector=used_reflector, plugboard=used_plugboard\n )\n\n def test_enigma_default_encoding(self):\n \"\"\"Test enigma encoding with default setup.\"\"\"\n enigma = self.get_enigma()\n self.assertEqual(enigma.encode(\"HELL OWOR LD\"), \"ILBD AAMT AZ\")\n", "id": "822018", "language": "Python", "matching_score": 2.476884603500366, "max_stars_count": 0, "path": "tests/test_enigma/test_enigma.py" }, { "content": "\"\"\"Tests for Engma's plugboard.\"\"\"\n\nimport unittest\n\nfrom enigma import Plugboard\nfrom enigma.exceptions import InvalidPlugboard\n\n\nclass TestPlugboard(unittest.TestCase):\n \"\"\"Tests for Enigma's plugboard.\"\"\"\n\n def test_empty_plugboard(self):\n \"\"\"Test empty plugboard does not change encoding.\"\"\"\n board = Plugboard([])\n self.assertEqual(board.encode(\"A\"), \"A\")\n self.assertEqual(board.encode(\"Q\"), \"Q\")\n\n def test_plugboard_encoding(self):\n \"\"\"Test that the plugboard can encode values.\"\"\"\n board = Plugboard([(\"A\", \"Q\"), (\"N\", \"V\")])\n self.assertEqual(board.encode(\"A\"), \"Q\")\n self.assertEqual(board.encode(\"Q\"), \"A\")\n self.assertEqual(board.encode(\"N\"), \"V\")\n self.assertEqual(board.encode(\"V\"), \"N\")\n self.assertEqual(board.encode(\"L\"), \"L\")\n\n def test_plugboard_raises_for_invalid_connection(self):\n \"\"\"Test a plugboard cannot be created with an invalid setup.\"\"\"\n with self.assertRaises(InvalidPlugboard):\n Plugboard([(\"A\", \"A\")])\n\n def test_plugboard_raises_for_invalid_argument(self):\n \"\"\"Test a plugboard raises for a connection has the wrong number of arguments.\"\"\"\n with self.assertRaises(InvalidPlugboard):\n Plugboard([(\"A\")])\n with self.assertRaises(InvalidPlugboard):\n Plugboard([(\"A\", \"B\", \"C\")])\n with self.assertRaises(InvalidPlugboard):\n Plugboard([(\"@\", \"B\")])\n", "id": "1129957", "language": "Python", "matching_score": 1.7460845708847046, "max_stars_count": 0, "path": "tests/test_plugboard/test_plugboard.py" }, { "content": "\"\"\"Enigma's Plugboard.\"\"\"\nfrom string import ascii_uppercase as alphabet\nfrom typing import Dict, Optional, Sequence, Tuple\n\n\nclass Plugboard:\n \"\"\"Enigma's Plugboard.\"\"\"\n\n def __init__(self, connections: Optional[Sequence[Tuple[str, str]]] = None):\n \"\"\"\n Set up plugboard connections.\n\n Args:\n connections:\n A list of tuples of two letters to be connected on the 
plugboard.\n E.g. [('A', 'B')].\n\n \"\"\"\n self.connections: Dict[str, str] = {}\n if connections is not None:\n for connection in connections:\n self.validate_connection(connection)\n letter_1, letter_2 = connection\n self.connections[letter_1] = letter_2\n self.connections[letter_2] = letter_1\n\n def encode(self, value: str) -> str:\n \"\"\"Return encoded value for value.\"\"\"\n if value in self.connections:\n return self.connections[value]\n return value\n\n def validate_connection(self, connection: Tuple[str, str]) -> None:\n \"\"\"Raise error if connection is not valid.\"\"\"\n if len(connection) != 2:\n raise InvalidPlugboard(connection)\n if connection[0] == connection[1]:\n raise InvalidPlugboard(connection)\n if connection[0] not in alphabet or connection[1] not in alphabet:\n raise InvalidPlugboard(connection)\n\n\nclass InvalidPlugboard(ValueError):\n \"\"\"Exception for invalid plugboard setups.\"\"\"\n\n def __init__(self, connection: Tuple[str, str]):\n \"\"\"Raise exception.\"\"\"\n super().__init__(\"{} is an invalid plugboard connection.\".format(connection))\n", "id": "5203602", "language": "Python", "matching_score": 2.1125776767730713, "max_stars_count": 0, "path": "enigma/plugboard.py" }, { "content": "\"\"\"Exceptions for enigma.\"\"\"\n\nfrom .plugboard import InvalidPlugboard # NOQA\n", "id": "4009742", "language": "Python", "matching_score": 0.3202422261238098, "max_stars_count": 0, "path": "enigma/exceptions.py" }, { "content": "\"\"\"Base classes for specific models of Enigma machine.\"\"\"\n\nfrom typing import Dict, Sequence, Tuple, Type\n\nfrom enigma import Enigma, Plugboard, Reflector, Rotor\n\n\nclass PresetRotor(Rotor):\n \"\"\"Base class for preset rotors.\"\"\"\n\n set_wiring: str\n turnover_positions: Sequence[str]\n\n def __init__(self, position: str, ring_setting: int):\n \"\"\"\n Set up rotor.\n\n Kwargs:\n ring_setting:\n The offset of the letters on the rotor as a letter or number.\n Default: 'A'.\n\n position:\n The starting position of the rotor as a letter or number.\n Default: 'A'.\n \"\"\"\n super().__init__(\n wiring=self.set_wiring,\n turnover_positions=self.turnover_positions,\n position=position,\n ring_setting=ring_setting,\n )\n\n\nclass PresetReflector(Reflector):\n \"\"\"Base class for preset reflectors.\"\"\"\n\n set_wiring: str\n\n def __init__(self) -> None:\n \"\"\"Set up reflector.\"\"\"\n super().__init__(wiring=self.set_wiring)\n\n\nclass EnigmaModel(Enigma):\n \"\"\"Base class for enigma models.\"\"\"\n\n ROTORS = \"rotors\"\n POSITIONS = \"positions\"\n RING_SETTINGS = \"ring_settings\"\n REFLECTOR = \"reflector\"\n PLUGBOARD_PAIRS = \"plugboard_pairs\"\n\n available_rotors: Dict[str, Type[PresetRotor]]\n available_reflectors: Dict[str, Type[PresetReflector]]\n\n rotors: Sequence[Rotor] = []\n reflectors: Sequence[Reflector] = []\n\n def __init__(\n self,\n *,\n rotors: Sequence[str],\n positions: Sequence[str],\n ring_settings: Sequence[str],\n plugboard_pairs: Sequence[Tuple[str, str]],\n reflector: str\n ):\n \"\"\"\n Set up Enigma object.\n\n Kwargs:\n rotors:\n Iterable containing the names of the rotors to use.\n E.g. ('I', 'II', 'III').\n positions:\n Iterable containng the initial letter positions of the\n rotors. E.g. 'AAA'.\n ring_settings:\n Iterable containing the ring settings of the rotors as str.\n E.g ('01', '01', '01')\n plugboard_pairs:\n Iterable containing two character strings of letter pairs for\n the plugboard. E.g. 
['AB', 'QZ']\n reflector:\n Name of the reflector to use.\n \"\"\"\n positions = positions\n ring_settings = ring_settings\n used_reflector = self.available_reflectors[reflector]()\n plugboard_pairs = plugboard_pairs\n used_rotors = []\n for i in range(3):\n rotor_class = self.available_rotors[rotors[i]]\n rotor = rotor_class(\n position=positions[i], ring_setting=int(ring_settings[i])\n )\n used_rotors.append(rotor)\n plugboard = Plugboard(plugboard_pairs)\n super().__init__(\n rotors=used_rotors, reflector=used_reflector, plugboard=plugboard\n )\n", "id": "6459877", "language": "Python", "matching_score": 3.4859323501586914, "max_stars_count": 0, "path": "enigma/models/enigma_model.py" }, { "content": "\"\"\"Specific Enigma models.\"\"\"\n\n\nfrom .enigma_model import EnigmaModel, PresetReflector, PresetRotor\n\n\nclass RotorI(PresetRotor):\n \"\"\"Enigma M3 Rotor I.\"\"\"\n\n set_wiring = \"EKMFLGDQVZNTOWYHXUSPAIBRCJ\"\n turnover_positions = [\"R\"]\n\n\nclass RotorII(PresetRotor):\n \"\"\"Enigma M3 Rotor II.\"\"\"\n\n set_wiring = \"AJDKSIRUXBLHWTMCQGZNPYFVOE\"\n turnover_positions = [\"F\"]\n\n\nclass RotorIII(PresetRotor):\n \"\"\"Enigma M3 Rotor III.\"\"\"\n\n set_wiring = \"BDFHJLCPRTXVZNYEIWGAKMUSQO\"\n turnover_positions = [\"W\"]\n\n\nclass RotorIV(PresetRotor):\n \"\"\"Enigma M3 Rotor IV.\"\"\"\n\n set_wiring = \"ESOVPZJAYQUIRHXLNFTGKDCMWB\"\n turnover_positions = [\"K\"]\n\n\nclass RotorV(PresetRotor):\n \"\"\"Enigma M3 Rotor V.\"\"\"\n\n set_wiring = \"VZBRGITYUPSDNHLXAWMJQOFECK\"\n turnover_positions = [\"A\"]\n\n\nclass RotorVI(PresetRotor):\n \"\"\"Enigma M3 Rotor VI.\"\"\"\n\n set_wiring = \"JPGVOUMFYQBENHZRDKASXLICTW\"\n turnover_positions = [\"A\", \"N\"]\n\n\nclass RotorVII(PresetRotor):\n \"\"\"Enigma M3 Rotor VII.\"\"\"\n\n set_wiring = \"NZJHGRCXMYSWBOUFAIVLPEKQDT\"\n turnover_positions = [\"A\", \"N\"]\n\n\nclass RotorVIII(PresetRotor):\n \"\"\"Enigma M3 Rotor VIII.\"\"\"\n\n set_wiring = \"FKQHTLXOCBJSPDZRAMEWNIUYGV\"\n turnover_positions = [\"A\", \"N\"]\n\n\nclass ReflectorA(PresetReflector):\n \"\"\"Enigma M3 Reflector A.\"\"\"\n\n set_wiring = \"EJMZALYXVBWFCRQUONTSPIKHGD\"\n\n\nclass ReflectorB(PresetReflector):\n \"\"\"Enigma M3 Reflector B.\"\"\"\n\n set_wiring = \"YRUHQSLDPXNGOKMIEBFZCWVJAT\"\n\n\nclass ReflectorC(PresetReflector):\n \"\"\"Enigma M3 Reflector C.\"\"\"\n\n set_wiring = \"FVPJIAOYEDRZXWGCTKUQSBNMHL\"\n\n\nclass M3(EnigmaModel):\n \"\"\"The Army M3 Enigma.\"\"\"\n\n available_rotors = {\n \"I\": RotorI,\n \"II\": RotorII,\n \"III\": RotorIII,\n \"IV\": RotorIV,\n \"V\": RotorV,\n \"VI\": RotorVI,\n \"VII\": RotorVII,\n \"VIII\": RotorVIII,\n }\n\n available_reflectors = {\"A\": ReflectorA, \"B\": ReflectorB, \"C\": ReflectorC}\n", "id": "7423595", "language": "Python", "matching_score": 1.4264549016952515, "max_stars_count": 0, "path": "enigma/models/m3.py" }, { "content": "\"\"\"Models of enigma.\"\"\"\n\nfrom .m3 import M3 # NOQA\n", "id": "9949271", "language": "Python", "matching_score": 1.0916913747787476, "max_stars_count": 0, "path": "enigma/models/__init__.py" }, { "content": "\"\"\"Tests for Enigma models.\"\"\"\n", "id": "11691589", "language": "Python", "matching_score": 0.9309605956077576, "max_stars_count": 0, "path": "tests/test_models/__init__.py" }, { "content": "\"\"\"Tests for Enigma models.\"\"\"\n\nimport unittest\n\nfrom enigma.models import M3\n\n\nclass TestM3(unittest.TestCase):\n \"\"\"Tests for the M3 model Enigma machine.\"\"\"\n\n def test_M3(self):\n \"\"\"Test message encoding with Enigma model M3.\"\"\"\n 
message = (\n \"EDPUD NRGYS ZRCXN UYTPO MRMBO FKTBZ REZKM LXLVE FGUEY SIOZV\"\n \"EQMIK UBPMM YLKLT TDEIS MDICA GYKUA CTCDO MOHWX MUUIA UBSTS LRNBZ\"\n \"SZWNR FXWFY SSXJZ VIJHI DISHP RKLKA YUPAD TXQSP INQMA TLPIF SVKDA\"\n \"SCTAC DPBOP VHJK\"\n )\n enigma = M3(\n rotors=(\"II\", \"IV\", \"V\"),\n positions=\"BLA\",\n ring_settings=(2, 21, 12),\n reflector=\"B\",\n plugboard_pairs=(\n \"AV\",\n \"BS\",\n \"CG\",\n \"DL\",\n \"FU\",\n \"HZ\",\n \"IN\",\n \"KM\",\n \"OW\",\n \"RX\",\n ),\n )\n expected_output = (\n \"AUFKL XABTE ILUNG XVONX KURTI NOWAX KURTI NOWAX NORDW ESTLX \"\n \"SEBEZ XSEBE ZXUAF FLIEG ERSTR ASZER IQTUN GXDUB ROWKI XDUBR \"\n \"OWKIX OPOTS CHKAX OPOTS CHKAX UMXEI NSAQT DREIN ULLXU HRANG \"\n \"ETRET ENXAN GRIFF XINFX RGTX\"\n )\n output = enigma.encode(message, blocks=5)\n self.assertEqual(output, expected_output)\n", "id": "5391419", "language": "Python", "matching_score": 1.7404066324234009, "max_stars_count": 0, "path": "tests/test_models/test_m3.py" }, { "content": "\"\"\"Base class for Enigma machines.\"\"\"\nfrom string import ascii_uppercase as alphabet\nfrom typing import List, Sequence\n\nfrom .plugboard import Plugboard\nfrom .rotor import Reflector, Rotor, RotorMechanism\n\n\nclass Enigma:\n \"\"\"Base class for Enigma machines.\"\"\"\n\n def __init__(\n self, *, rotors: Sequence[Rotor], reflector: Reflector, plugboard: Plugboard\n ):\n \"\"\"Create enigma setup.\"\"\"\n self.rotor_mechanism = RotorMechanism(rotors=rotors, reflector=reflector)\n self.plugboard = plugboard\n\n def encode(self, plain_text: str, blocks: int = 4) -> str:\n \"\"\"Encode message.\"\"\"\n prepared_plain_text = self.prepare_input(plain_text)\n raw_output = [self.encode_letter(letter) for letter in prepared_plain_text]\n output = self.format_output(raw_output, blocks=blocks)\n return output\n\n def encode_letter(self, letter: str) -> str:\n \"\"\"Encypher a single letter with the enigma machine.\"\"\"\n return self.plugboard.encode(\n self.rotor_mechanism.encode(self.plugboard.encode(letter))\n )\n\n def prepare_input(self, input: str) -> str:\n \"\"\"Format input message as a string of uppercase letters.\"\"\"\n input = input.upper()\n invalid_characters = [letter for letter in input if letter not in alphabet]\n for letter in invalid_characters:\n input = input.replace(letter, \"\")\n return input\n\n def format_output(self, output: List[str], blocks: int = 4) -> str:\n \"\"\"Format the output message into blocks of four uppercase letters.\"\"\"\n block_text = self.split_into_blocks(output, blocks)\n return block_text.upper()\n\n def split_into_blocks(self, input: List[str], block_size: int) -> str:\n \"\"\"Split iterable into blocks.\"\"\"\n blocks = [input[i : i + block_size] for i in range(0, len(input), block_size)]\n text_blocks = [\"\".join(block) for block in blocks]\n text = \" \".join(text_blocks)\n return text\n", "id": "11083925", "language": "Python", "matching_score": 2.066964864730835, "max_stars_count": 0, "path": "enigma/enigma.py" }, { "content": "\"\"\"Enigma's rotors.\"\"\"\n\nfrom .reflector import Reflector # NOQA\nfrom .rotor import Rotor # NOQA\nfrom .rotor_mechanism import RotorMechanism # NOQA\nfrom .wiring import Wiring # NOQA\n", "id": "7845895", "language": "Python", "matching_score": 2.7437219619750977, "max_stars_count": 0, "path": "enigma/rotor/__init__.py" }, { "content": "\"\"\"Enigma, an enimga emulator.\"\"\"\n\nfrom .enigma import Enigma # NOQA\nfrom .plugboard import Plugboard # NOQA\nfrom .rotor import Reflector, Rotor, RotorMechanism, Wiring # 
NOQA\n", "id": "2347275", "language": "Python", "matching_score": 1.4301847219467163, "max_stars_count": 0, "path": "enigma/__init__.py" }, { "content": "\"\"\"Tests for Enigma's plugboard.\"\"\"\n", "id": "104559", "language": "Python", "matching_score": 1.5764014720916748, "max_stars_count": 0, "path": "tests/test_plugboard/__init__.py" }, { "content": "\"\"\"Tests for the enigma module.\"\"\"\n", "id": "11882641", "language": "Python", "matching_score": 0.17149290442466736, "max_stars_count": 0, "path": "tests/test_enigma/__init__.py" }, { "content": "\"\"\"Test the enigma package.\"\"\"\n\nimport unittest\n\nfrom enigma import __version__\n\n\nclass TestPackage(unittest.TestCase):\n \"\"\"Test the enigma package.\"\"\"\n\n def test_version_file_attributes(self):\n \"\"\"Test the __version__ file has the necessary attributes.\"\"\"\n self.assertTrue(hasattr(__version__, \"__title__\"))\n self.assertTrue(hasattr(__version__, \"__description__\"))\n self.assertTrue(hasattr(__version__, \"__url__\"))\n self.assertTrue(hasattr(__version__, \"__version__\"))\n self.assertTrue(hasattr(__version__, \"__author__\"))\n self.assertTrue(hasattr(__version__, \"__author_email__\"))\n self.assertTrue(hasattr(__version__, \"__license__\"))\n self.assertTrue(hasattr(__version__, \"__copyright__\"))\n", "id": "10771540", "language": "Python", "matching_score": 3.4107344150543213, "max_stars_count": 0, "path": "tests/test_enigma/test_package.py" }, { "content": "\"\"\"Tabler.\"\"\"\n\n__title__ = \"enigma\"\n__description__ = \"Enigma emulator\"\n__url__ = \"\"\n__version__ = \"0.1\"\n__author__ = \"<NAME>\"\n__author_email__ = \"<EMAIL>\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2019 <NAME>\"\n", "id": "12777250", "language": "Python", "matching_score": 0.7318246960639954, "max_stars_count": 0, "path": "enigma/__version__.py" }, { "content": "import datetime\nimport requests\n\nBASE_URL = 'http://api.fixer.io'\n\n\ndef get_rates(\n day=datetime.datetime.today(),\n symbols=[], base=None):\n request_url = get_url_for_date(day)\n params = get_params(symbols, base)\n request = requests.get(request_url, params=params)\n rates = request.json()['rates']\n return rates\n\n\ndef get_url_for_date(day):\n day_string = day.strftime('%Y-%m-%d')\n request_url = '/'.join((BASE_URL, day_string))\n return request_url\n\n\ndef get_params(symbols=[], base=None):\n params = {}\n if len(symbols) > 0:\n symbols_string = ','.join(symbols)\n params['symbols'] = symbols_string\n if base is not None:\n params['base'] = base.upper()\n return params\n", "id": "10999738", "language": "Python", "matching_score": 1.091853380203247, "max_stars_count": 0, "path": "exchange_rates/__init__.py" }, { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='exchange_rates',\n version='1.1',\n description='Retrives currency exchange rates from http://api.fixer.io/',\n author='<NAME>',\n author_email='<EMAIL>',\n url='http://exchange_rates.lukeshiner.com',\n keywords=['exchange rates', 'currency', 'api'],\n install_requires=['requests'],\n packages=find_packages(),\n )\n", "id": "7188284", "language": "Python", "matching_score": 2.9667398929595947, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='template_to_pdf',\n version='1.1',\n description='Converts jinja2 templates to HTML and PDF',\n author='<NAME>',\n author_email='<EMAIL>',\n url='http://www.lukeshiner.com',\n keywords=['template', 
'html', 'pdf'],\n install_requires=['jinja2', 'pdfkit'],\n packages=find_packages(),\n )\n", "id": "113405", "language": "Python", "matching_score": 1.4881868362426758, "max_stars_count": 0, "path": "setup.py" }, { "content": "from jinja2 import Template\nimport pdfkit\n\n\ndef save_pdf(html, output_path):\n pdfkit.from_string(html, output_path)\n\n\ndef get_html(template_name, variables):\n template_path = template_name\n with open(template_path, 'r') as template_file:\n template = Template(template_file.read())\n html = template.render(variables)\n return html\n\n\ndef save_pdf_from_tamplate(template_name, variables, output_path):\n html = get_html(template_name, variables)\n save_pdf(html, output_path)\n", "id": "10273483", "language": "Python", "matching_score": 0.199288010597229, "max_stars_count": 0, "path": "template_to_pdf/__init__.py" }, { "content": "\"\"\"This module contains preset label formats.\"\"\"\n\nfrom reportlab.graphics import shapes\nfrom reportlab.graphics.barcode import eanbc\n\nfrom .label_format import LabelFormat\n\n\nclass DefaultLabelFormat(LabelFormat):\n \"\"\"Labeler's default label format.\"\"\"\n\n font = \"Helvetica-Bold\"\n vertical_margin = 10\n horizontal_margin = 5\n max_font_size = 18\n\n def get_text_height(self):\n \"\"\"Return the height of the text in ponts.\"\"\"\n return int((self.height / 100) * 16)\n\n def get_horizontal_location(self):\n \"\"\"Return the horizontal position of the text in ponts.\"\"\"\n return self.width / 2\n\n @staticmethod\n def wrap(text, line_length):\n \"\"\"Wrap the text if necessary.\"\"\"\n if line_length < len(text):\n return [text]\n split_index = text[:line_length].rfind(\" \")\n return [text[:split_index], text[split_index:]]\n\n\nclass BarcodeLabelFormat(DefaultLabelFormat):\n \"\"\"A label format for barcodes.\"\"\"\n\n def make_label(self, label, lines):\n \"\"\"Add text to label.\"\"\"\n text = lines[1]\n vertical_location = 15\n for line in self.wrap(text, 30):\n label.add(\n shapes.String(\n self.get_horizontal_location(),\n vertical_location,\n line,\n fontSize=8,\n fontName=self.font,\n textAnchor=self.text_anchor,\n )\n )\n vertical_location -= 10\n x_location = 15\n y_location = 25\n widget_class = eanbc.Ean13BarcodeWidget\n barcode = lines[0]\n bar_height = self.height * 0.5\n barcode_drawing = widget_class(\n barcode, x=x_location, y=y_location, barHeight=bar_height\n )\n label.add(barcode_drawing)\n\n\nclass AddressLabelFormat(LabelFormat):\n \"\"\"Labeler's default label format.\"\"\"\n\n font = \"Helvetica-Bold\"\n vertical_margin = 40\n horizontal_margin = 20\n max_font_size = 48\n\n def get_text_height(self):\n \"\"\"Return the height of the text in ponts.\"\"\"\n return self.max_font_size\n\n def get_horizontal_location(self):\n \"\"\"Return the horizontal position of the text in ponts.\"\"\"\n return self.width / 2\n\n def get_line_gap(self, lines):\n \"\"\"Return the gap between each line of text.\"\"\"\n available_height = self.get_usable_height()\n text_height = self.get_text_height()\n remaining_space_after_text = available_height - (text_height * len(lines))\n line_gap = int(remaining_space_after_text / len(lines))\n return text_height + line_gap\n\n\nclass SmallLabelFormat(LabelFormat):\n \"\"\"Format for generic text on a small label.\"\"\"\n\n font = \"Helvetica-Bold\"\n vertical_margin = 10\n horizontal_margin = 5\n max_font_size = 18\n\n def get_horizontal_location(self):\n \"\"\"Return the horizontal position of the text in ponts.\"\"\"\n return self.width / 2\n\n def 
get_text_height(self):\n \"\"\"Return the height of the text in ponts.\"\"\"\n return self.max_font_size\n\n def get_line_gap(self, lines):\n \"\"\"Return the gap between each line of text.\"\"\"\n available_height = self.get_usable_height()\n text_height = self.get_text_height()\n remaining_space_after_text = available_height - (text_height * len(lines))\n line_gap = int(remaining_space_after_text / len(lines))\n total_line_height = text_height + line_gap\n return total_line_height\n\n\nclass BayLabelFormat(LabelFormat):\n \"\"\"Format for Axminster bay labels.\"\"\"\n\n font = \"Helvetica-Bold\"\n vertical_margin = 40\n horizontal_margin = 20\n max_font_size = 72\n\n def get_text_height(self):\n \"\"\"Return the height of the text in ponts.\"\"\"\n return self.max_font_size\n\n def get_horizontal_location(self):\n \"\"\"Return the horizontal position of the text in ponts.\"\"\"\n return self.width / 2\n\n def get_line_gap(self, lines):\n \"\"\"Return the gap between each line of text.\"\"\"\n available_height = self.get_usable_height()\n text_height = self.get_text_height()\n remaining_space_after_text = available_height - (text_height * len(lines))\n line_gap = int(remaining_space_after_text / len(lines))\n return text_height + line_gap\n", "id": "2167027", "language": "Python", "matching_score": 4.561336994171143, "max_stars_count": 0, "path": "labeler/label_formats.py" }, { "content": "\"\"\"This module contains the LabelFormat class.\"\"\"\n\nfrom reportlab.graphics import shapes\nfrom reportlab.pdfbase.pdfmetrics import stringWidth\n\n\nclass LabelFormat:\n \"\"\"\n Base class for label formats.\n\n Contains the necessary information render text onto a single label.\n\n Takes the size and margin size for a label, along with one or more lines\n of text and renders the label with the text fitting between the margins.\n\n Attributes:\n font:\n The name of the font to use for label text.\n width:\n The width of the label in points.\n height:\n The hight of the label in points.\n lines:\n List of strings where each string is a line of text to render on\n the label.\n vertical_margin:\n The gap to leave between the top and bottom of the label and the\n text in points.\n horizontal_margin:\n The gap to leave between the sides of the label and the text in\n points.\n max_font_size:\n The largest font size to use when fitting text. If this is 0 the\n font size will be the largest that can fit.\n\n \"\"\"\n\n font = None\n width = 0\n height = 0\n vertical_margin = 0\n horizontal_margin = 0\n max_font_size = 0\n text_anchor = \"middle\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Create new Label Format.\n\n Kwargs:\n font:\n The name of the font to use for label text.\n width:\n The width of the label in points.\n height:\n The hight of the label in points.\n lines:\n List of strings where each string is a line of text to render\n on the label.\n vertical_margin:\n The gap to leave between the top and bottom of the label and\n the text in points.\n horizontal_margin:\n The gap to leave between the sides of the label and the text in\n points.\n max_font_size:\n The largest font size to use when fitting text. 
If this is 0\n the font size will be the largest that can fit.\n\n \"\"\"\n self.font = kwargs.get(\"font\", self.font)\n self.width = kwargs.get(\"width\", self.width)\n self.height = kwargs.get(\"height\", self.height)\n self.vertical_margin = kwargs.get(\"vertical_margin\", self.vertical_margin)\n self.horizontal_margin = kwargs.get(\"horizontal_margin\", self.horizontal_margin)\n self.max_font_size = kwargs.get(\"max_font_size\", self.max_font_size)\n self.text_anchor = kwargs.get(\"text_anchor\", self.text_anchor)\n\n def get_text_height(self):\n \"\"\"\n Return the height of the text.\n\n Sub-classes must implement this to set the height of the text.\n \"\"\"\n raise NotImplementedError()\n\n def get_horizontal_location(self):\n \"\"\"\n Return the horiziontal location of the text.\n\n Sub-classes must implement this to set the horizontal location of the\n text.\n \"\"\"\n raise NotImplementedError\n\n def get_usable_height(self):\n \"\"\"Return the maximum gap between the top and bottom margins.\"\"\"\n return self.height - (self.vertical_margin * 2)\n\n def get_usable_width(self):\n \"\"\"Return the maximum gap between the left and right margins.\"\"\"\n return self.width - (self.horizontal_margin * 2)\n\n def get_line_gap(self, lines):\n \"\"\"Return the gap between each line of text.\"\"\"\n return self.get_usable_height() - (self.get_text_height() * len(lines))\n\n def calculate_max_font_size(self, text):\n \"\"\"Return the maximum size of text that can fit on the label.\"\"\"\n font_size = self.max_font_size\n string_width = stringWidth(text, self.font, font_size)\n while string_width > self.get_usable_width():\n font_size *= 0.8\n string_width = stringWidth(text, self.font, font_size)\n return font_size\n\n def make_label(self, label, lines):\n \"\"\"\n Add text to label.\n\n Args:\n label:\n reportlab.graphics.shapes.Drawing object to use as label.\n lines:\n List containing each line of text as a string.\n\n \"\"\"\n horizontal_location = self.get_horizontal_location()\n vertical_location = self.vertical_margin\n for line in reversed(lines):\n font_size = self.calculate_max_font_size(line)\n label.add(\n shapes.String(\n horizontal_location,\n vertical_location,\n line,\n fontSize=font_size,\n fontName=self.font,\n textAnchor=self.text_anchor,\n )\n )\n vertical_location = vertical_location + self.get_line_gap(lines)\n", "id": "10910434", "language": "Python", "matching_score": 2.649066686630249, "max_stars_count": 0, "path": "labeler/label_format.py" }, { "content": "\"\"\"This module contains the LabelSize class, the base class for label sizes.\"\"\"\n\n\nclass LabelSize:\n \"\"\"\n Contains information about the size of a label.\n\n Attributes:\n width:\n The width of the label.\n height:\n The height of the label.\n\n \"\"\"\n\n width = 0\n height = 0\n\n def __init__(self, **kwargs):\n \"\"\"\n Create a label size.\n\n Kwargs:\n width:\n The width of the label.\n height:\n The height of the label.\n\n \"\"\"\n self.width = kwargs.get(\"width\", self.width)\n self.height = kwargs.get(\"height\", self.height)\n", "id": "6048651", "language": "Python", "matching_score": 2.576418399810791, "max_stars_count": 0, "path": "labeler/label_size.py" }, { "content": "\"\"\"This module contains preset label sizes.\"\"\"\n\nfrom .label_size import LabelSize\n\n\nclass DefaultLabelSize(LabelSize):\n \"\"\"The default label size.\"\"\"\n\n width = 45.7\n height = 25.4\n\n\nclass Thermal6x4Label(LabelSize):\n \"\"\"Label size for 6x4 inch thermal address labels.\"\"\"\n\n width = 
152.4\n height = 101.6\n", "id": "10969604", "language": "Python", "matching_score": 1.78040611743927, "max_stars_count": 0, "path": "labeler/label_sizes.py" }, { "content": "\"\"\"This module contains the PaperSize class, the base class for paper sizes.\"\"\"\n\n\nclass PaperSize:\n \"\"\"\n Contains information about the size prining paper.\n\n Attributes:\n width:\n The width of the label.\n height:\n The height of the label.\n\n \"\"\"\n\n width = 210\n height = 297\n\n def __init__(self, **kwargs):\n \"\"\"\n Create a paper size.\n\n Kwargs:\n width:\n The width of the label.\n height:\n The height of the label.\n\n \"\"\"\n self.width = kwargs.get(\"width\", self.width)\n self.height = kwargs.get(\"height\", self.height)\n", "id": "12118845", "language": "Python", "matching_score": 2.6243836879730225, "max_stars_count": 0, "path": "labeler/paper_size.py" }, { "content": "\"\"\"This module contains preset paper sizes.\"\"\"\n\nfrom .paper_size import PaperSize\n\n\nclass A4(PaperSize):\n \"\"\"PaperSize for A4 paper.\"\"\"\n\n width = 210\n height = 297\n\n\nclass Thermal6x4Paper(PaperSize):\n \"\"\"Paper size for 6x4 inch thermal address labels.\"\"\"\n\n width = 152.4\n height = 101.6\n", "id": "418969", "language": "Python", "matching_score": 0.6694716811180115, "max_stars_count": 0, "path": "labeler/paper_sizes.py" }, { "content": "\"\"\"This module contains LabelSheet, the base class for sheets of labels.\"\"\"\n\nimport labels\nfrom reportlab.graphics import renderPDF\nfrom reportlab.pdfgen.canvas import Canvas\n\nfrom .label_formats import DefaultLabelFormat\nfrom .label_sizes import DefaultLabelSize\nfrom .paper_sizes import A4\n\n\nclass LabelSheet:\n \"\"\"\n Base class for label sheets.\n\n Contains the necessary information to create a sheet of labels.\n\n Attributes:\n paper_size:\n The size of paper the labels will be printed on.\n (labeler.PaperSize).\n label_size:\n The size of the labels (labler.LabelSize)\n columns:\n The number of columns of labels per sheet.\n rows:\n The number of rows of labels per sheet.\n left_margin:\n The gap to leave between the left side of the label and the\n padding.\n right_margin:\n The gap to leave between the right side of the label and the\n padding.\n left_padding:\n The gap between the left margin and the text.\n right_padding:\n The gap between the right margin and the text.\n top_padding:\n The gap between the top of the label and the text.\n bottom_padding:\n The gap between the top of the label and the text.\n corner_radius:\n Radius at which to round the corner.\n border:\n Draw the border of the label. 
Usefull for testing.\n\n \"\"\"\n\n paper_size = A4()\n label_size = DefaultLabelSize()\n label_format = DefaultLabelFormat()\n left_margin = 0\n right_margin = 0\n top_margin = 0\n bottom_margin = 0\n left_padding = 0\n right_padding = 0\n top_padding = 0\n bottom_padding = 0\n corner_radius = 0\n border = False\n columns = 4\n rows = 10\n\n def __init__(self, **kwargs):\n \"\"\"\n Create a label format.\n\n Kwargs:\n paper_size:\n The size of paper the labels will be printed on.\n (labeler.PaperSize).\n label_size:\n The size of the labels (labler.LabelSize)\n columns:\n The number of columns of labels per sheet.\n rows:\n The number of rows of labels per sheet.\n left_margin:\n The gap to leave between the left side of the label and the\n padding.\n right_margin:\n The gap to leave between the right side of the label and the\n padding.\n left_padding:\n The gap between the left margin and the text.\n right_padding:\n The gap between the right margin and the text.\n top_padding:\n The gap between the top of the label and the text.\n bottom_padding:\n The gap between the top of the label and the text.\n corner_radius:\n Radius at which to round the corner.\n border:\n Draw the border of the label. Usefull for testing.\n\n \"\"\"\n self.paper_size = kwargs.get(\"paper_size\", self.paper_size)\n self.label_size = kwargs.get(\"label_size\", self.label_size)\n self.label_format = kwargs.get(\"label_format\", self.label_format)\n self.left_margin = kwargs.get(\"left_margin\", self.left_margin)\n self.right_margin = kwargs.get(\"right_margin\", self.right_margin)\n self.top_margin = kwargs.get(\"top_margin\", self.top_margin)\n self.bottom_margin = kwargs.get(\"bottom_margin\", self.bottom_margin)\n self.left_padding = kwargs.get(\"left_padding\", self.left_padding)\n self.right_padding = kwargs.get(\"right_padding\", self.right_padding)\n self.top_padding = kwargs.get(\"top_padding\", self.top_padding)\n self.bottom_padding = kwargs.get(\"bottom_padding\", self.bottom_padding)\n self.corner_radius = kwargs.get(\"corner_radius\", self.corner_radius)\n self.border = kwargs.get(\"border\", self.border)\n self.columns = kwargs.get(\"columns\", self.columns)\n self.rows = kwargs.get(\"rows\", self.rows)\n\n def generate_PDF_from_data(self, data):\n \"\"\"Return generated labels as a reportlab.pdfgen.canvas.Canvas.\"\"\"\n specs = labels.Specification(\n self.paper_size.width,\n self.paper_size.height,\n self.columns,\n self.rows,\n self.label_size.width,\n self.label_size.height,\n left_margin=self.left_margin,\n right_margin=self.right_margin,\n top_margin=self.top_margin,\n bottom_margin=self.bottom_margin,\n left_padding=self.left_padding,\n right_padding=self.right_padding,\n top_padding=self.top_padding,\n bottom_padding=self.bottom_padding,\n corner_radius=self.corner_radius,\n )\n\n def draw_label(label, width, height, lines):\n label_format = self.label_format(width=width, height=height)\n label_format.make_label(label, lines)\n\n sheet = labels.Sheet(specs, draw_label, border=self.border)\n for item in data:\n sheet.add_label(item)\n canvas = Canvas(None, pagesize=sheet._pagesize)\n for page in sheet._pages:\n renderPDF.draw(page, canvas, 0, 0)\n canvas.showPage()\n return canvas\n", "id": "7734056", "language": "Python", "matching_score": 3.965128183364868, "max_stars_count": 0, "path": "labeler/label_sheet.py" }, { "content": "\"\"\"This module contains preset label sheets.\"\"\"\n\nfrom . 
import paper_sizes\nfrom .label_sheet import LabelSheet\nfrom .label_size import LabelSize\nfrom .paper_size import PaperSize\n\n\nclass STW046025PO(LabelSheet):\n \"\"\"Label sheet preset for STW046025PO label paper.\"\"\"\n\n paper_size = paper_sizes.A4\n label_size = LabelSize(width=45, height=25)\n columns = 4\n rows = 10\n left_margin = 9\n right_margin = 9\n top_margin = 21\n bottom_margin = 21\n corner_radius = 2\n left_padding = 0\n right_padding = 0\n padding_top = 0\n padding_bottom = 0\n\n\nclass ThermalAddressLabel4x6Sheet(LabelSheet):\n \"\"\"Label sheet preset for 4x6 thermal address labels.\"\"\"\n\n paper_size = PaperSize(width=152, height=101)\n label_size = LabelSize(width=152, height=101)\n columns = 1\n rows = 1\n left_margin = 0\n right_margin = 0\n top_margin = 0\n bottom_margin = 0\n corner_radius = 2\n left_padding = 0\n right_padding = 0\n padding_top = 0\n padding_bottom = 0\n\n\nclass BayLabelSheet(LabelSheet):\n \"\"\"Label sheet preset for A4 2x8 labels.\"\"\"\n\n paper_size = paper_sizes.A4\n label_size = LabelSize(width=100, height=34)\n columns = 2\n rows = 8\n left_margin = 5\n right_margin = 5\n top_margin = 15\n bottom_margin = 0\n corner_radius = 2\n left_padding = 0\n right_padding = 0\n padding_top = 0\n padding_bottom = 0\n", "id": "7767880", "language": "Python", "matching_score": 2.3490312099456787, "max_stars_count": 0, "path": "labeler/label_sheets.py" }, { "content": "\"\"\"\nThe Labeler package.\n\nGenerate PDF files for printing tiled labels.\n\"\"\"\n\nfrom .label_format import LabelFormat # NOQA\nfrom .label_formats import * # NOQA\nfrom .label_sheet import LabelSheet # NOQA\nfrom .label_sheets import * # NOQA\nfrom .label_size import LabelSize # NOQA\nfrom .label_sizes import * # NOQA\nfrom .paper_size import PaperSize # NOQA\nfrom .paper_sizes import * # NOQA\n", "id": "6283019", "language": "Python", "matching_score": 1.8600873947143555, "max_stars_count": 0, "path": "labeler/__init__.py" }, { "content": "from pathlib import Path\n\nimport labeler\nfrom labeler import __version__\n\n\ndef test_version_file_attributes():\n assert hasattr(__version__, \"__title__\")\n assert hasattr(__version__, \"__description__\")\n assert hasattr(__version__, \"__url__\")\n assert hasattr(__version__, \"__version__\")\n assert hasattr(__version__, \"__author__\")\n assert hasattr(__version__, \"__author_email__\")\n assert hasattr(__version__, \"__license__\")\n assert hasattr(__version__, \"__copyright__\")\n\n\ndef test_default_format(tmpdir):\n data = [\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n ]\n\n label_format = labeler.DefaultLabelFormat\n sheet = labeler.STW046025PO(label_format=label_format)\n canvas = sheet.generate_PDF_from_data(data)\n canvas._filename = str(Path(tmpdir / \"test.pdf\"))\n canvas.save()\n\n\ndef test_address_label_format(tmpdir):\n data = [\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n ['38\" Regular Tall', 
\"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n [\n '38\" Regular Tall',\n \"Grey Shoulders, Blue Body\",\n \"45632\",\n \"more words\",\n \"and some more\",\n \"and more\",\n \"and more\",\n ],\n [\"86759\", \"PAW PATROL SKYE\"],\n ]\n\n label_format = labeler.AddressLabelFormat\n sheet = labeler.ThermalAddressLabel4x6Sheet(label_format=label_format)\n canvas = sheet.generate_PDF_from_data(data)\n canvas._filename = str(Path(tmpdir / \"test.pdf\"))\n canvas.save()\n\n\ndef test_small_label_format(tmpdir):\n data = [\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n [\"UK 12\", \"Pink Cat Slipper\", \"FW987\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n ['38\" Regular Tall', \"Grey Shoulders, Blue Body\", \"45632\"],\n [\"Medium\", \"Grey\", \"64535\"],\n [\n '38\" Regular Tall',\n \"Grey Shoulders, Blue Body\",\n \"45632\",\n \"more words\",\n \"and some more\",\n \"and more\",\n \"and more\",\n ],\n [\"86759\", \"PAW PATROL SKYE\"],\n ]\n\n label_format = labeler.SmallLabelFormat\n sheet = labeler.STW046025PO(label_format=label_format)\n canvas = sheet.generate_PDF_from_data(data)\n canvas._filename = str(Path(tmpdir / \"test.pdf\"))\n canvas.save()\n", "id": "3352056", "language": "Python", "matching_score": 3.488619089126587, "max_stars_count": 0, "path": "tests/test_labeler.py" }, { "content": "\"\"\"Tabler.\"\"\"\n\n__title__ = \"labeler\"\n__description__ = \"Easily create PDF documents for printing on label paper\"\n__url__ = \"http://github.com/lukeshiner/labeler.git\"\n__version__ = \"0.0.1\"\n__author__ = \"<NAME>\"\n__author_email__ = \"<EMAIL>\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2018 <NAME>\"\n", "id": "4203356", "language": "Python", "matching_score": 4.328059196472168, "max_stars_count": 0, "path": "labeler/__version__.py" }, { "content": "\"\"\"Tabler.\"\"\"\n\n__title__ = \"tabler\"\n__description__ = \"Simple interface for tabulated data and .csv files\"\n__url__ = \"http://github.com/lukeshiner/tabler.git\"\n__version__ = \"2.4.2\"\n__author__ = \"<NAME>\"\n__author_email__ = \"<EMAIL>\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2018 <NAME>\"\n", "id": "9088077", "language": "Python", "matching_score": 3.102220058441162, "max_stars_count": 0, "path": "tabler/__version__.py" }, { "content": "\"\"\"\nTabler package.\n\nThe tabler package provides the :class:`tabler.Table` class for simple and\nintutive accessing, manipulation and writing of tablulated data.\n\n Basic Usage::\n\n >>> from tabler import Table\n >>> table = Table('somefile.csv')\n >>> table.open('Path/To/Input_File.csv')\n >>> table[0]['Price']\n '29.99'\n >>> table[0]['Price'] = 15.50\n >>> table[0]['Price']\n '15.5'\n >>> table.write('Path/To/Output_File')\n Writen 3 lines to file Path/To/Output_File.csv\n\n\"\"\"\n\nfrom .__version__ import (\n __author__,\n __author_email__,\n __copyright__,\n __description__,\n __license__,\n __title__,\n __url__,\n __version__,\n)\nfrom .table import Table\nfrom .tabletypes import CSV, CSVURL, HTML, ODS, XLSX\n\n__all__ = [\n \"Table\",\n \"CSV\",\n \"CSVURL\",\n \"HTML\",\n \"ODS\",\n \"XLSX\",\n \"__author_email__\",\n \"__author__\",\n \"__copyright__\",\n \"__description__\",\n \"__license__\",\n \"__title__\",\n \"__url__\",\n \"__version__\",\n]\n", "id": "1400829", "language": "Python", "matching_score": 2.5903658866882324, 
"max_stars_count": 0, "path": "tabler/__init__.py" }, { "content": "r\"\"\"tabletypes package.\n\nThe tabletypes package provides the :class:`tabler.tabletypes.BaseTableType`\nclass which can be subclassed to provided open and write methods to\n:class:`tabler.Table`.\n\nThey can be customised by providing paramaters to the __init__ method.\n\nAlso provides subclasses of :class:`table.tabletypes.BaseTableType` for\ncommon file types.\n\n- :class:`tabler.tabletypes.CSV` Open and write .csv files.\n- :class:`tabler.tabletypes.CSVURL` Open .csv over HTTP.\n- :class:`tabler.tabletypes.HTML`: Save table as .html file.\n- :class:`tabler.tabletypes.ODS`: Open and save Open Document\n Spreadsheed (.ods) files.\n- :class:`tabler.tabletypes.XLSX`: Open and save Microsoft Excel (.xlsx) files.\n\nBasic Usage::\n\n from tabler import Table\n from tabler.tabletypes import CSV\n\n table = Table('path/to/open.csv', table_type=CSV())\n table.save('path/to/save.csv', table_type=CSV())\n\nUsage with paramaters::\n\n from tabler import Table\n from tabler.tabletypes import CSV\n\n table = Table('path/to/open.csv', table_type=CSV(\n extension='.txt', delimiter='\\t', encoding='latin'))\n table.save('path/to/save.csv', table_type=CSV(\n extension='.txt', delimiter='\\t', encoding='latin'))\n\nAlternate Usage::\n\n from tabler import Table\n from tabler.tabletypes import CSV\n\n csv = CSV(delimiter='\\t', delimiter='\\t', encoding='latin')\n table = Table('path/to/open.csv', table_type=csv)\n table.save('path/to/save.csv', table_type=csv)\n\"\"\"\nfrom .basetabletype import BaseTableType\nfrom .csv import CSV, CSVURL\nfrom .html import HTML\nfrom .ods import ODS\nfrom .xlsx import XLSX\n\n__all__ = [\"BaseTableType\", \"CSV\", \"CSVURL\", \"ODS\", \"HTML\", \"XLSX\"]\n", "id": "12356903", "language": "Python", "matching_score": 1.5324406623840332, "max_stars_count": 0, "path": "tabler/tabletypes/__init__.py" }, { "content": "\"\"\"Convert tabler tables to HTML.\"\"\"\n\nfrom .tohtml import ToHTML\n\n__all__ = [\"ToHTML\"]\n", "id": "9752275", "language": "Python", "matching_score": 0.3926316797733307, "max_stars_count": 0, "path": "tabler/tohtml/__init__.py" }, { "content": "\"\"\"Convert tabler tables to HTML.\"\"\"\n\nimport os\nfrom typing import TYPE_CHECKING, Any, Dict, Optional\n\nfrom jinja2 import Template\n\nif TYPE_CHECKING:\n from tabler.table import Table\n\n\nclass ToHTML:\n \"\"\"Convert tabler tables to HTML.\"\"\"\n\n default_template = os.path.join(os.path.dirname(__file__), \"table_template.html\")\n\n def __init__(\n self,\n table: \"Table\",\n use_header: bool = True,\n template: Optional[Template] = None,\n escape: bool = True,\n ):\n \"\"\"Convert tabler tables to HTML.\"\"\"\n self.table = table\n self.escape = escape\n self.use_header = use_header\n self.template = self.get_template()\n self.context = self.get_context(table, self.use_header)\n\n @staticmethod\n def get_context(table: \"Table\", use_header: bool) -> Dict[str, Any]:\n \"\"\"Return template context.\"\"\"\n return {\n \"use_header\": use_header,\n \"header\": table.header,\n \"data\": [list(row) for row in table],\n }\n\n def get_template(self) -> Template:\n \"\"\"Return HTML template.\"\"\"\n with open(self.default_template, \"r\") as template_file:\n return Template(template_file.read())\n\n def render(self) -> str:\n \"\"\"Return rendered HTML.\"\"\"\n return self.template.render(self.context, autoescape=self.escape)\n", "id": "12272184", "language": "Python", "matching_score": 2.7227702140808105, "max_stars_count": 0, 
"path": "tabler/tohtml/tohtml.py" }, { "content": "\"\"\"This module a Table Type for writing tables as HTML.\"\"\"\nimport sys\nfrom typing import TYPE_CHECKING, Union\n\nfrom tabler.tohtml import ToHTML\n\nfrom .basetabletype import BaseTableType\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tabler.table import Table\n\n\nclass HTML(BaseTableType):\n \"\"\"Table Type for comma separated value (.csv) files.\n\n :param bool use_header: If True file will include column headers.\n Default(True)\n :param str encoding: Encoding of file. Default: utf8.\n :param str extension: Extension of file to save. Default .html.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n\n extensions = [\".html\"]\n empty_value = \"\"\n\n def __init__(\n self,\n use_header: bool = True,\n encoding: str = \"utf8\",\n extension: str = \".html\",\n verbose: bool = True,\n ):\n \"\"\"Consturct :class:`tabler.tabletypes.HTML`.\n\n :param bool use_header: If True file will include column headers.\n Default(True)\n :param str encoding: Encoding of file. Default: utf8.\n :param str extension: Extension of file to save. Default .html.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n self.encoding = encoding\n self.use_header = use_header\n super().__init__(extension, verbose=verbose)\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n html = ToHTML(table, self.use_header).render()\n html_file = open(str(path), \"w\", encoding=self.encoding)\n html_file.write(html)\n html_file.close()\n print(\n \"Written {} rows to file {}\".format(len(table.rows), path), file=sys.stderr\n )\n", "id": "9667982", "language": "Python", "matching_score": 4.08338737487793, "max_stars_count": 0, "path": "tabler/tabletypes/html.py" }, { "content": "\"\"\"This module provides Table Types for .csv files.\"\"\"\n\nimport csv\nimport sys\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union\n\nimport requests\n\nfrom .basetabletype import BaseTableType\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tabler.table import Table\n\n\nclass CSV(BaseTableType):\n \"\"\"Table Type for comma separated value (.csv) files.\n\n :param str encoding: Encoding of file. Default: utf8.\n :param str delimiter: Delimiter used by file. Default , (Comma).\n :param str extension: Extension of file to save. Default .csv.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n\n extensions = [\".csv\", \".txt\"]\n empty_value = \"\"\n\n def __init__(\n self,\n encoding: str = \"utf-8\",\n delimiter: str = \",\",\n extension: str = \".csv\",\n verbose: Optional[bool] = None,\n ):\n \"\"\"Consturct :class:`tabler.tabletypes.CSV`.\n\n :param str encoding: Encoding of file. Default: utf8.\n :param str delimiter: Delimiter used by file. Default , (Comma).\n :param str extension: Extension of file to save. Default .csv.\n :param verbose: If True print status messages. 
If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n self.encoding = encoding\n self.delimiter = delimiter\n super().__init__(extension, verbose=verbose)\n\n def open_path(self, path: str) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows from file.\n\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n with open(str(path), \"r\", encoding=self.encoding) as f:\n data = list(csv.reader(f, delimiter=self.delimiter))\n return self.parse_row_data(data)\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n with open(str(path), \"w\", newline=\"\", encoding=self.encoding) as f:\n writer = csv.writer(f, delimiter=self.delimiter)\n if table.header:\n writer.writerow(table.header)\n for row in table:\n writer.writerow(list(row))\n print(\n \"Written {} rows to file {}\".format(len(table.rows), path), file=sys.stderr\n )\n\n def parse_value(self, value: Any) -> Any:\n \"\"\"Return None if the value is empty, otherwise return str(value).\"\"\"\n value = super().parse_value(value)\n return str(value)\n\n\nclass CSVURL(CSV):\n \"\"\"Table type for opening .csv files over HTTP.\"\"\"\n\n def open_path(self, path: str) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows from file.\n\n :param str path: URL of file to be opened.\n \"\"\"\n request = requests.get(path)\n text = []\n for line in request.iter_lines():\n text.append(line.decode(self.encoding))\n data = list(csv.reader(text))\n return self.parse_row_data(data)\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n raise NotImplementedError\n", "id": "5201983", "language": "Python", "matching_score": 4.350436210632324, "max_stars_count": 0, "path": "tabler/tabletypes/csv.py" }, { "content": "\"\"\"\nThis module provides the :class:`tabler.tabletypes.BaseTableType`.\n\nThis can be subclassed to provide Table Types for :class:`tabler.Table`.\nThey provide methods for opening and saving tables in different formats.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union\n\nfrom tabler import exceptions\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tabler import Table\n\n\ndef all_subclasses(cls: Type) -> List[Type]:\n \"\"\"Return all subclasses of class recursivley.\n\n :param class cls: Class for which subclasses will be returned.\n :rtype: list(class)\n \"\"\"\n return cls.__subclasses__() + [\n g for s in cls.__subclasses__() for g in all_subclasses(s)\n ]\n\n\nclass BaseTableType:\n \"\"\"Base class for Table Types.\n\n Table types are used to provide methods to :class:`tabler.Table` to load\n data from files and write to files of different types.\n\n Subclasses should implement :func:`tabler.tabletypes.BaseTableType.write`\n and / or :func:`tabler.tabletypes.BaseTableType.open` methods.\n\n Subclasses can also implement an **extensions** property. This is a list\n of file extensions matching the Table Type. 
It will then be used\n automatically when opening or writing files with a path ending in that\n extension if no table type is specified.\n\n Example::\n\n extensions = ['.csv', '.txt']\n \"\"\"\n\n empty_value: Union[str, int, float, None] = None\n verbose: bool = True\n null_values: Tuple[Union[str, int, float, None], ...] = (\"\", None)\n\n def __init__(self, extension: str, verbose: Optional[bool] = None) -> None:\n \"\"\"Construct :class:`tabler.tabletypes.BaseTableType`.\n\n :param str extension: File extension.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n self.extension = extension\n if verbose is not None:\n verbose = self.verbose\n\n @classmethod\n def get_by_extension(cls, extension: str) -> Any:\n \"\"\"Get apropriate subclass of **BaseTableType** for file extension.\n\n Uses :func:`tabler.tabletypes.basetabletype.all_subclasses` to check\n all subclasses of 'tabler.tabletypes.BaseTableType' for a matching\n extension in it's **extensions** property.\n\n :param str extension: File extension for which to file TableType.\n :raises tabler.exceptions.ExtensionNotRecognised: If no\n **BaseTableType** subclass matching **extension** is found.\n \"\"\"\n for table_type in all_subclasses(cls):\n if extension.lower() in table_type.extensions:\n return table_type()\n raise exceptions.ExtensionNotRecognised(extension)\n\n def open_path(self, path: str) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows from file.\n\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n raise NotImplementedError\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n raise NotImplementedError\n\n def parse_row_data(\n self, rows: List[List[Any]]\n ) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows.\"\"\"\n try:\n header = [_ for _ in rows[0]]\n data = [self.parse_row(row) for row in rows[1:]]\n except IndexError:\n raise ValueError(\"Input has no header or data.\")\n return header, data\n\n def parse_value(self, value: Any) -> Any:\n \"\"\"Return None if the value is empty, otherwise return str(value).\"\"\"\n if value in self.null_values:\n return self.empty_value\n else:\n return value\n\n def parse_row(self, row: List[Any]) -> List[Any]:\n \"\"\"Return a row of parsed values.\"\"\"\n return [self.parse_value(value) for value in row]\n\n def prepare_value(self, value: Any) -> Any:\n \"\"\"Prepare a value for writing.\"\"\"\n if value in self.null_values:\n return self.empty_value\n else:\n return value\n\n def prepare_row(\n self, row: List[Union[str, int, float, None]]\n ) -> List[Union[str, int, float, None]]:\n \"\"\"Remove excess empty values.\"\"\"\n while row[-1] == self.empty_value:\n row = row[:-1]\n return [self.prepare_value(value) for value in row]\n\n def prepare_rows(\n self,\n header: List[Any],\n rows: List[List[Any]],\n ) -> List[List[Any]]:\n \"\"\"Prepare rows for writing.\"\"\"\n data: List[List[Any]] = [header]\n data.extend([self.prepare_row(row) for row in rows])\n return data\n", "id": "1413122", "language": "Python", "matching_score": 3.510622501373291, "max_stars_count": 0, "path": "tabler/tabletypes/basetabletype.py" }, { "content": "\"\"\"Exceptions 
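An illustrative sketch of the extension dispatch implemented by BaseTableType.get_by_extension above: a subclass only needs an `extensions` list to be discovered. The TSV class here is hypothetical, not part of the package.

from tabler.tabletypes import CSV, BaseTableType


class TSV(CSV):
    """Hypothetical tab-separated variant of the bundled CSV table type."""

    extensions = [".tsv"]

    def __init__(self, **kwargs):
        kwargs.setdefault("delimiter", "\t")
        kwargs.setdefault("extension", ".tsv")
        super().__init__(**kwargs)


table_type = BaseTableType.get_by_extension(".tsv")  # returns a TSV() instance
# Table("somefile.tsv") would now also resolve to TSV automatically.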
for tabler package.\"\"\"\n\n\nclass ExtensionNotRecognised(ValueError):\n \"\"\"Error finding TableType subclass by file extension.\"\"\"\n\n def __init__(self, extension: str) -> None:\n \"\"\"\n Initialise ExtensionNotRecognised exception.\n\n :param str extension: File extension for which no TableType was found.\n \"\"\"\n super().__init__(\"Extension '{}' not recognised.\".format(extension))\n\n\nclass TableInitialisationError(TypeError):\n \"\"\"Error initialising table due to incorrect arguments.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialise TableInitialisationError exception.\"\"\"\n super().__init__(\n \"Table cannot be initialised. \"\n \"Either filepath or header and data must be specified.\"\n )\n", "id": "5262660", "language": "Python", "matching_score": 0.8247770667076111, "max_stars_count": 0, "path": "tabler/exceptions.py" }, { "content": "\"\"\"Tests for the Enimga package.\"\"\"\n", "id": "6431661", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "\"\"\"This module provides a Table Type for Microsft Excel (.xlsx) files.\"\"\"\n\nimport sys\nfrom typing import TYPE_CHECKING, Any, List, Tuple, Union\n\nfrom openpyxl import Workbook, load_workbook\n\nfrom .basetabletype import BaseTableType\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tabler.table import Table\n\n\nclass XLSX(BaseTableType):\n \"\"\"Table Type for Microsft Excel (.xlsx) files.\n\n :param str extension: Extension of file to save. Default .xlsx.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n\n extensions: List[str] = [\".xlsx\"]\n empty_value: Any = None\n\n def __init__(self, extension: str = \".xlsx\", verbose: bool = True):\n \"\"\"Consturct :class:`tabler.tabletypes.XLSX`.\n\n :param str extension: Extension of file to save. Default .xlsx.\n :param verbose: If True print status messages. 
If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n super().__init__(extension, verbose=verbose)\n\n def open_path(self, path: Union[str, \"Path\"]) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows from file.\n\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n workbook = load_workbook(filename=str(path), read_only=True)\n worksheet = workbook.active\n data = [[cell.value for cell in row] for row in worksheet]\n workbook.close()\n return self.parse_row_data(data)\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n workbook = Workbook()\n worksheet = workbook.active\n worksheet.append(table.header)\n for row in table:\n worksheet.append(list(row))\n workbook.save(str(path))\n print(\n \"Written {} rows to file {}\".format(len(table.rows), path), file=sys.stderr\n )\n", "id": "3853797", "language": "Python", "matching_score": 5.051822185516357, "max_stars_count": 0, "path": "tabler/tabletypes/xlsx.py" }, { "content": "\"\"\"This module provides a Table Type for Open Document Format (.ods) files.\"\"\"\n\nimport sys\nfrom typing import TYPE_CHECKING, Any, List, Tuple, Union\n\nimport pyexcel_ods3 # type: ignore\n\nfrom .basetabletype import BaseTableType\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tabler.table import Table\n\n\nclass ODS(BaseTableType):\n \"\"\"Table Type for Open Document Format (.ods) files.\n\n :param str extension: Extension of file to save. Default .ods.\n :param verbose: If True print status messages. If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n\n extensions: List[str] = [\".ods\"]\n empty_value: Any = \"\"\n\n def __init__(self, sheet: int = 0, extension: str = \".ods\", verbose: bool = True):\n \"\"\"Consturct :class:`tabler.tabletypes.ODS`.\n\n :param str extension: Extension of file to save. Default .ods.\n :param verbose: If True print status messages. 
If None use\n :class:`tabler.tabletype.BaseTableType`.verbose.\n :type verbose: bool or None.\n \"\"\"\n self.sheet = sheet\n super().__init__(extension, verbose=verbose)\n\n def open_path(self, path: Union[str, \"Path\"]) -> Tuple[List[str], List[List[Any]]]:\n \"\"\"Return header and rows from file.\n\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n data = pyexcel_ods3.get_data(str(path))\n sheet = data[list(data.keys())[self.sheet]]\n return self.parse_row_data(sheet)\n\n def write(self, table: \"Table\", path: Union[str, \"Path\"]) -> None:\n \"\"\"Save data from :class:`tabler.Table` to file.\n\n :param table:\"Table\" to save.\n :type table: :class:`tabler.Table`\n :param path: Path to file to be opened.\n :type path: str, pathlib.Path or compatible.\n \"\"\"\n rows = self.prepare_rows(list(table.header), [list(_) for _ in table.rows])\n sheets = {\"Sheet {}\".format(self.sheet): rows}\n pyexcel_ods3.save_data(str(path), sheets)\n print(\n \"Written {} rows to file {}\".format(len(table.rows), path), file=sys.stderr\n )\n", "id": "737973", "language": "Python", "matching_score": 2.8613569736480713, "max_stars_count": 0, "path": "tabler/tabletypes/ods.py" }, { "content": "\"\"\"\nTable class.\n\nThis module provides the :class:`tabler.Table` class to read, write and edit\ntabulated data.\n\n\"\"\"\n\nimport os\nimport pathlib\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Collection, Iterable, Iterator, List, Optional, Tuple, Union\n\nfrom . import exceptions\nfrom .tablerow import TableRow\nfrom .tabletypes import BaseTableType\n\n\nclass Table:\n \"\"\"A wrapper object for tabulated data.\n\n Allows access to and manipulation of tablulated data. This data can be\n input directly or loaded from a file. Data can also be writen data to a\n file. Table rows are encapsulated with the\n :class:`tabler.tablerow.TableRow` class.\n\n Different filetypes can be read and written by providing a subclass of\n :class:`tabler.tabletypes.BaseTableType` which implements the open and\n write methods.\n\n A `filename` can be provided to open an existing file. An apropriate\n :class:`tabler.tabletypes.BaseTableType` object can be provided to specify\n how the file will be opened. If this is not specified one will be selected\n based on the file extension in the `filename` using default parameters.\n\n Alternatively **header** and **data** can be specified to populate the\n table directly.\n\n :param table_type: Table Type to use to open a file referenced\n by `filetype`.\n :type table_type: :class:`tabler.tabletypes.BaseTableType`\n\n :param str filepath: Path to file to be opened.\n\n :param list header: List of column headers to be used if not loaded\n from file.\n\n :param data: Two dimensional list. Each list will form a row of cell\n data.\n :type data: list(list(str, int or float))\n\n :raises ValueError: If filepath is None or both header and data are\n None.\n \"\"\"\n\n _EMPTY_HEADER = \"Unlabeled Column {}\"\n\n def __init__(\n self,\n filepath: Optional[str] = None,\n table_type: Optional[BaseTableType] = None,\n header: Optional[Collection[str]] = None,\n data: Optional[Iterable] = None,\n ) -> None:\n \"\"\"Construct a :class:`tabler.Table`.\n\n A `filename` can be provided to open an existing file. An apropriate\n :class:`tabler.tabletypes.BaseTableType` object can be provided to\n specify how the file will be opened. 
If this is not specified one will\n be selected based on the file extension in the `filename` using\n default parameters.\n\n Alternatively **header** and **data** can be specified to populate the\n table directly.\n\n :param table_type: Table Type to use to open a file referenced\n by `filetype`.\n :type table_type: :class:`tabler.tabletypes.BaseTableType`\n\n :param str filepath: Path to file to be opened.\n\n :param list header: List of column headers to be used if not loaded\n from file.\n\n :param data: Two dimensional list. Each list will form a row of cell\n data.\n :type data: list(list(str, int or float))\n\n :raises TypeError: If filepath is None or both header and data are\n None.\n \"\"\"\n self.table_type = table_type\n if filepath is not None:\n if self.table_type is None:\n extension = os.path.splitext(filepath)[-1]\n try:\n self.table_type = BaseTableType.get_by_extension(extension)\n except exceptions.ExtensionNotRecognised:\n raise ValueError(\n \"Table Type not specified and extension {} \"\n \"not recognised.\".format(extension)\n )\n self.load(*self.table_type.open_path(filepath))\n elif header is not None and data is not None:\n self.load(header, data)\n else:\n raise exceptions.TableInitialisationError()\n\n def __len__(self) -> int:\n return len(self.rows)\n\n def __iter__(self) -> Iterator[TableRow]:\n for row in self.rows:\n yield row\n\n def __getitem__(self, index: int) -> TableRow:\n return self.rows[index]\n\n def __str__(self) -> str:\n columns = str(len(self.header))\n rows = str(len(self.rows))\n lines = [\n \"Table Object containing {} colomuns and {} rows\".format(columns, rows),\n \"Column Headings: {}\".format(\", \".join(self.header)),\n ]\n return \"\\n\".join(lines)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def load(\n self, header: Collection, data: Iterable[Union[Iterable, TableRow]]\n ) -> None:\n \"\"\"\n Populate table with header and data.\n\n :param list header: Names of column headers.\n\n :param data: Rows of data. 
Each row must be a list of cell\n values\n :type data: list(list(str, int or float))\n \"\"\"\n self.empty()\n self.row_length: int = max([len(header)] + [len(_) for _ in data])\n self.header: tuple = self._prepare_header(header)\n self.rows: List[TableRow] = [\n TableRow(row, self.header) for row in self._prepare_data(data)\n ]\n\n def write(\n self, filepath: Union[str, Path], table_type: Optional[BaseTableType] = None\n ) -> None:\n \"\"\"Create file from table.\n\n :param table_type: Table Type to use to save the file.\n :type table_type: :class:`tabler.BaseTableType`\n\n :param str filepath: Path at which the file will be saved.\n \"\"\"\n path = pathlib.Path(filepath)\n if table_type is None:\n if self.table_type is not None:\n table_type = self.table_type\n else:\n table_type = BaseTableType.get_by_extension(path.suffix)\n if path.suffix != table_type.extension:\n path = path.with_suffix(table_type.extension)\n table_type.write(self, path)\n\n def empty(self) -> None:\n \"\"\"Clear all data.\"\"\"\n self.rows = []\n self.header = ()\n\n def is_empty(self) -> bool:\n \"\"\"Return True if the table conatins no data, otherwise return False.\n\n :rtype: bool\n \"\"\"\n if len(self.rows) == 0 and len(self.header) == 0:\n return True\n return False\n\n def append(self, row: Union[Iterable, TableRow]) -> None:\n \"\"\"Add new row to table.\n\n :param row: Data for new row.\n :type row: list or :class:`tabler.tablerow.TableRow`.\n \"\"\"\n self.rows.append(TableRow(list(row), self.header))\n\n def get_column(self, column: Union[int, str]) -> List:\n \"\"\"Return all values in a column.\n\n :param column: Name or index of to be returned.\n :type column: str or int.\n :rtype: list\n \"\"\"\n return [row[column] for row in self.rows]\n\n def remove_column(self, column: str) -> None:\n \"\"\"\n Remove a specified column from the Table.\n\n :param column: Name or index of to be removed.\n :type column: str or int.\n \"\"\"\n header = list(self.header)\n header.pop(header.index(column))\n self.header = tuple(header)\n for row in self.rows:\n row.remove_column(column)\n\n def print_r(self) -> None:\n \"\"\"Print table data in a readable format.\"\"\"\n for row in self.rows:\n print(list(row), file=sys.stderr)\n\n def copy(self) -> \"Table\":\n \"\"\"Return duplicate Table object.\"\"\"\n return self.__class__(\n header=self.header, data=[row.copy() for row in self.rows]\n )\n\n def sort(self, sort_key: str, asc: bool = True) -> None:\n \"\"\"Sort table by column.\n\n :param sort_key: Column header or index of column to sort by.\n :type sort_key: str or int\n\n :param bool asc: If True Table will be sorted in ascending order.\n Otherwise order will be descending. (Default: True)\n \"\"\"\n if isinstance(sort_key, str):\n column = self.header.index(sort_key)\n else:\n column = sort_key\n try:\n self.rows.sort(key=lambda x: float(list(x)[column]), reverse=not asc)\n except ValueError:\n # https://github.com/python/mypy/issues/9656\n self.rows.sort(key=lambda x: list(x)[column], reverse=not asc) # type: ignore\n\n def sorted(self, sort_key: str, asc: bool = True) -> \"Table\":\n \"\"\"Return a sorted duplicate of the Table.\n\n :param sort_key: Column header or index of column to sort by.\n :type sort_key: str or int\n\n :param bool asc: If True Table will be sorted in ascending order.\n Otherwise order will be descending. 
(Default: True)\n\n :rtype: :class:`tabler.Table`.\n \"\"\"\n temp_table = self.copy()\n temp_table.sort(sort_key, asc)\n return temp_table\n\n def split_by_row_count(self, row_count: int) -> List[\"Table\"]:\n \"\"\"Split table by row count.\n\n Create multiple :class:`tabler.Table` instances each with a subset of\n this one's data.\n\n :param int row_count: Number of rows in each Table.\n :rtype: list(:class:`tabler.Table`).\n \"\"\"\n split_tables = []\n for i in range(0, len(self.rows), row_count):\n new_table = Table(header=self.header, data=self.rows[i : i + row_count])\n split_tables.append(new_table)\n return split_tables\n\n def _prepare_header(self, header_row: Iterable[str]) -> Tuple[str, ...]:\n unlabled = 0\n header = []\n for item in header_row:\n if not item:\n unlabled += 1\n header.append(self._EMPTY_HEADER.format(unlabled))\n else:\n header.append(item)\n while len(header) < self.row_length:\n unlabled += 1\n header.append(self._EMPTY_HEADER.format(unlabled))\n return tuple(header)\n\n def _prepare_data(\n self, data: Iterable, empty_value: Optional[Any] = None\n ) -> List[List[Any]]:\n return [self._prepare_row(row, empty_value=empty_value) for row in data]\n\n def _prepare_row(\n self, row: Iterable, empty_value: Optional[Any] = None\n ) -> List[Union[str, int, float, None]]:\n if empty_value is None and self.table_type is not None:\n empty_value = self.table_type.empty_value\n prepared_row = []\n for value in row:\n if value is None or value == \"\":\n prepared_row.append(empty_value)\n else:\n prepared_row.append(value)\n while len(prepared_row) < self.row_length:\n prepared_row.append(empty_value)\n return prepared_row\n", "id": "9483089", "language": "Python", "matching_score": 3.3782646656036377, "max_stars_count": 0, "path": "tabler/table.py" }, { "content": "\"\"\"Provides the TableRow class.\n\nTableRow provides methods for working with rows in :class:`tabler.Table`\ninstances.\n\"\"\"\n\nfrom typing import Any, Iterable, Iterator, List, Union\n\n\nclass TableRow:\n \"\"\"Provide methods for rows in :class:`tabler.Table` instances.\"\"\"\n\n def __init__(self, row: List[List[Any]], header: Iterable[str]):\n \"\"\"Instansiate :class:`TableRow`.\n\n :param list row: Data stored in this row.\n :param list header: Column headers from table.\n \"\"\"\n self.row = row\n self.header = tuple(header)\n self.headers = {}\n\n for column in self.header:\n self.headers[column] = self.header.index(column)\n\n def __iter__(self) -> Iterator:\n for item in self.row:\n yield item\n\n def __getitem__(self, index: Union[int, str]) -> Any:\n if isinstance(index, int):\n return self.row[index]\n elif isinstance(index, str):\n return self.row[self.headers[index]]\n else:\n raise ValueError(f\"Index must be int or str, not {type(index)}.\")\n\n def __setitem__(self, index: Union[str, int], item: Any) -> None:\n if isinstance(index, int):\n self.row[index] = item\n elif isinstance(index, str):\n self.row[self.headers[index]] = item\n else:\n raise ValueError(f\"Index is must be int or str, not {type(index)}.\")\n\n def __str__(self) -> str:\n return \", \".join((str(cell) for cell in self.row))\n\n def __len__(self) -> int:\n return len(self.row)\n\n def remove_column(self, column: str) -> None:\n \"\"\"Remove the passed column.\n\n :param str column: Header for column to be removed.\n :raises: ValueError: If column is not a valid column header.\n \"\"\"\n column_index = self.header.index(column)\n self.row.pop(column_index)\n header = list(self.header)\n 
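An illustrative sketch of the Table API documented above: build a table in memory, sort it, split it, and write it to disk. The column names, values, and output path are placeholders.

from tabler import CSV, Table

table = Table(header=("SKU", "Price"), data=[("A1", 5), ("B2", 3), ("C3", 4)])
table.sort("Price")                   # in-place; table.sorted() returns a copy
chunks = table.split_by_row_count(2)  # list of Table objects, two rows each
table.write("prices.csv", table_type=CSV())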
header.pop(header.index(column))\n self.header = tuple(header)\n\n def copy(self) -> \"TableRow\":\n \"\"\"Return duplicate tabler.tablerow.TableRow object.\n\n :rtype: :class:`tabler.tablerow.TableRow`.\n \"\"\"\n return TableRow(self.row, self.header)\n", "id": "10357483", "language": "Python", "matching_score": 1.146247148513794, "max_stars_count": 0, "path": "tabler/tablerow.py" }, { "content": "\"\"\"Tests for tabler.Table class.\"\"\"\n\n\nfrom pathlib import Path\n\nimport pytest\n\nfrom tabler import CSV, Table\nfrom tabler.tablerow import TableRow\n\nfrom .test_tools import TablerTestTools\n\n\nclass TestTable:\n def test_create_table_with_header_and_data(self):\n table = TablerTestTools.basic_table()\n TablerTestTools.table_valid(table)\n\n def test_create_table_no_args_raises(self):\n with pytest.raises(TypeError):\n Table()\n\n def test_access_cell_by_header_column_index(self):\n table = TablerTestTools.basic_table()\n assert table[0][0] == \"Red\"\n\n def test_access_cell_by_header_name_column_index(self):\n table = TablerTestTools.basic_table()\n assert table[0][\"Col1\"] == \"Red\"\n\n def test_access_cell_with_invalid_key(self):\n table = TablerTestTools.basic_table()\n with pytest.raises(ValueError) as e:\n table[0][5.9]\n assert \"Index\" in str(e)\n assert \"float\" in str(e)\n\n def test_change_cell_value(self):\n table = TablerTestTools.basic_table()\n assert table[0][\"Col1\"] == \"Red\"\n table[0][\"Col1\"] = \"Green\"\n assert table[0][\"Col1\"] == \"Green\"\n\n def test_change_cell_value_by_integer_index(self):\n table = TablerTestTools.basic_table()\n assert table[0][0] == \"Red\"\n table[0][0] = \"Green\"\n assert table[0][0] == \"Green\"\n\n def test_change_cell_to_float(self):\n table = TablerTestTools.basic_table()\n assert table[0][\"Col1\"] == \"Red\"\n table[0][\"Col1\"] = 0.975\n assert table[0][\"Col1\"] == 0.975\n\n def test_change_cell_to_int(self):\n table = TablerTestTools.basic_table()\n assert table[0][\"Col1\"] == \"Red\"\n table[0][\"Col1\"] = 567\n assert table[0][\"Col1\"] == 567\n\n def test_change_cell_to_None(self):\n table = TablerTestTools.basic_table()\n assert table[0][\"Col1\"] == \"Red\"\n table[0][\"Col1\"] = None\n assert table[0][\"Col1\"] is None\n\n def test_change_cell_with_invalid_index(self):\n table = TablerTestTools.basic_table()\n with pytest.raises(ValueError) as e:\n table[0][5.9] = 4\n assert \"Index\" in str(e)\n assert \"float\" in str(e)\n\n def test_get_table_column(self):\n table = TablerTestTools.basic_table()\n assert table.get_column(\"Col1\") == [\"Red\", \"Orange\"]\n\n def test_remove_column(self):\n table = TablerTestTools.basic_table()\n table.remove_column(\"Col2\")\n assert table.header == (\"Col1\", \"Col3\")\n assert list(table.rows[0]) == [\"Red\", \"Blue\"]\n assert list(table.rows[1]) == [\"Orange\", \"Magenta\"]\n\n def test_open_unknown_filetype_without_tabletype_raises(self, tmpdir):\n with pytest.raises(ValueError):\n Table(\"testfile.unk\")\n\n def test_write_unknown_filetype_without_tabletype_raises(self, tmpdir):\n table = TablerTestTools.basic_table()\n filepath = Path(str(tmpdir)) / \"testfile.unk\"\n with pytest.raises(ValueError):\n table.write(filepath)\n\n def test_table__str___and__repr__methods(self):\n table = TablerTestTools.basic_table()\n expected = (\n \"Table Object containing 3 colomuns and 2 rows\\n\"\n \"Column Headings: Col1, Col2, Col3\"\n )\n assert str(table) == expected\n assert repr(table) == expected\n\n def test_set_table_type(self, tmpdir):\n header = 
TablerTestTools.TEST_HEADER\n data = TablerTestTools.TEST_DATA\n table_type = CSV()\n table = Table(header=header, data=data, table_type=table_type)\n assert table.table_type == table_type\n path = Path(str(tmpdir)) / \"testfile\"\n table.write(path)\n\n def test_table_is_empty_method(self):\n t = Table(header=[], data=[])\n assert t.is_empty() is True\n t = TablerTestTools.basic_table()\n assert t.is_empty() is False\n\n def test_table_empty_method(self):\n t = TablerTestTools.basic_table()\n t.empty()\n assert t.is_empty() is True\n\n def test_table_copy_method(self):\n t1 = TablerTestTools.basic_table()\n t2 = t1.copy()\n assert t1 is not t2\n assert t1[0] is not t2[0]\n\n def test_table_sort_method_with_string_key(self):\n table = Table(header=(\"A\", \"B\", \"C\"), data=((8, 5, 6), (9, 3, 4), (6, 4, 7)))\n table.sort(\"B\")\n assert list(table[0]) == [9, 3, 4]\n assert list(table[1]) == [6, 4, 7]\n assert list(table[2]) == [8, 5, 6]\n\n def test_table_sort_method_with_integer_key(self):\n table = Table(header=(\"A\", \"B\", \"C\"), data=((8, 5, 6), (9, 3, 4), (6, 4, 7)))\n table.sort(1)\n assert list(table[0]) == [9, 3, 4]\n assert list(table[1]) == [6, 4, 7]\n assert list(table[2]) == [8, 5, 6]\n\n def test_table_sort_method_with_text(self):\n table = Table(\n header=(\"A\", \"B\", \"C\"),\n data=((\"i\", \"e\", \"g\"), (\"x\", \"c\", \"d\"), (\"f\", \"d\", \"g\")),\n )\n table.sort(\"B\")\n assert list(table[0]) == [\"x\", \"c\", \"d\"]\n assert list(table[1]) == [\"f\", \"d\", \"g\"]\n assert list(table[2]) == [\"i\", \"e\", \"g\"]\n\n def test_table_sort_method_descending(self):\n table = Table(\n header=(\"A\", \"B\", \"C\"),\n data=((\"i\", \"e\", \"g\"), (\"x\", \"c\", \"d\"), (\"f\", \"d\", \"g\")),\n )\n table.sort(\"B\", asc=False)\n assert list(table[0]) == [\"i\", \"e\", \"g\"]\n assert list(table[1]) == [\"f\", \"d\", \"g\"]\n assert list(table[2]) == [\"x\", \"c\", \"d\"]\n\n def test_table_sorted_method(self):\n table = Table(header=(\"A\", \"B\", \"C\"), data=((8, 5, 6), (9, 3, 4), (6, 4, 7)))\n sorted_table = table.sorted(\"B\")\n assert list(sorted_table[0]) == [9, 3, 4]\n assert list(sorted_table[1]) == [6, 4, 7]\n assert list(sorted_table[2]) == [8, 5, 6]\n assert sorted_table is not table\n\n def test_table_split_by_row_count_method(self):\n table = Table(\n header=[\"A\", \"B\", \"C\"],\n data=(\n (1, 2, 3),\n (4, 5, 6),\n (7, 8, 9),\n (10, 11, 12),\n (13, 14, 15),\n (15, 16, 17),\n ),\n )\n split_tables = table.split_by_row_count(2)\n assert len(split_tables) == 3\n for table in split_tables:\n assert len(table) == 2\n assert tuple(split_tables[0][0]) == (1, 2, 3)\n assert tuple(split_tables[0][1]) == (4, 5, 6)\n assert tuple(split_tables[1][0]) == (7, 8, 9)\n assert tuple(split_tables[1][1]) == (10, 11, 12)\n assert tuple(split_tables[2][0]) == (13, 14, 15)\n assert tuple(split_tables[2][1]) == (15, 16, 17)\n\n def test_table_split_by_row_count_method_with_odd_number_of_rows(self):\n table = Table(\n header=[\"A\", \"B\", \"C\"],\n data=(\n (1, 2, 3),\n (4, 5, 6),\n (7, 8, 9),\n (10, 11, 12),\n (13, 14, 15),\n (15, 16, 17),\n (18, 19, 20),\n ),\n )\n split_tables = table.split_by_row_count(2)\n assert len(split_tables) == 4\n assert len(split_tables[0]) == 2\n assert tuple(split_tables[0][0]) == (1, 2, 3)\n assert tuple(split_tables[0][1]) == (4, 5, 6)\n assert len(split_tables[1]) == 2\n assert tuple(split_tables[1][0]) == (7, 8, 9)\n assert tuple(split_tables[1][1]) == (10, 11, 12)\n assert len(split_tables[2]) == 2\n assert tuple(split_tables[2][0]) == (13, 
14, 15)\n assert tuple(split_tables[2][1]) == (15, 16, 17)\n assert len(split_tables[3]) == 1\n assert tuple(split_tables[3][0]) == (18, 19, 20)\n\n def test_table_append_method_with_iterable(self):\n table = TablerTestTools.basic_table()\n new_row = (\"Pink\", \"Purple\", \"Brown\")\n table.append(new_row)\n assert tuple(table[2]) == new_row\n\n def test_table_append_method_with_TableRow(self):\n table = TablerTestTools.basic_table()\n data = (\"Pink\", \"Purple\", \"Brown\")\n new_row = TableRow(data, TablerTestTools.TEST_HEADER)\n table.append(new_row)\n assert tuple(table[2]) == data\n\n def test_table_row__str__method(self):\n table = TablerTestTools.basic_table()\n row = table.rows[0]\n expected = \"Red, Green, Blue\"\n assert str(row) == expected\n\n def test_print_r_method(self, capsys):\n table = TablerTestTools.basic_table()\n table.print_r()\n captured = capsys.readouterr()\n assert (\n captured.err\n == \"['Red', 'Green', 'Blue']\\n['Orange', 'Yellow', 'Magenta']\\n\"\n )\n", "id": "6908674", "language": "Python", "matching_score": 3.7249972820281982, "max_stars_count": 0, "path": "tests/test_tabler.py" }, { "content": "from pathlib import Path\n\nfrom tabler import Table\n\n\nclass TablerTestTools:\n\n TEST_HEADER = (\"Col1\", \"Col2\", \"Col3\")\n TEST_ROW_1 = [\"Red\", \"Green\", \"Blue\"]\n TEST_ROW_2 = [\"Orange\", \"Yellow\", \"Magenta\"]\n TEST_DATA = [TEST_ROW_1, TEST_ROW_2]\n\n @classmethod\n def table_valid(cls, table):\n assert len(table.header) == 3\n assert len(table) == 2\n assert all([len(row) == 3 for row in table])\n assert table.header == cls.TEST_HEADER\n assert list(table.rows[0]) == cls.TEST_ROW_1\n assert list(table.rows[1]) == cls.TEST_ROW_2\n assert table[0][\"Col1\"] == \"Red\"\n\n @classmethod\n def basic_table(cls):\n header = cls.TEST_HEADER\n data = cls.TEST_DATA\n return Table(header=header, data=data)\n\n\nclass TableTypeTestTools:\n tabletype = None\n BASIC_FILE_PATH = None\n WITH_NULLS_PATH = None\n WITH_INCOMPLETE_ROW = None\n WITH_LONG_ROW = None\n\n @classmethod\n def open_with_table_type(cls, table_type, path):\n table = Table(path, table_type=table_type)\n TablerTestTools.table_valid(table)\n\n @classmethod\n def write_with_table_type(cls, table_type, tmpdir):\n out_table = TablerTestTools.basic_table()\n filepath = Path(str(tmpdir)) / \"testfile{}\".format(table_type.extension)\n out_table.write(filepath, table_type=table_type)\n in_table = Table(filepath, table_type=table_type)\n assert [list(_) for _ in in_table] == [list(_) for _ in out_table]\n\n @classmethod\n def read_null_values_with_tabletype(cls, table_type, path):\n table = Table(path, table_type=table_type)\n assert list(table[0]) == [\"Red\", table_type.empty_value, \"Blue\"]\n\n @classmethod\n def format_with_table_type(cls, table_type, path, expected_formatting):\n table = Table(path, table_type=table_type)\n assert list(table.get_column(\"Values\")) == expected_formatting\n\n @classmethod\n def write_null_values_with_table_type(cls, table_type, tmpdir):\n table = Table(\n header=[\"Col1\", \"Col2\", \"Col3\"],\n data=[\n [\"Red\", table_type.empty_value, \"Blue\"],\n [\"Orange\", \"Yellow\", \"Magenta\"],\n ],\n )\n path = Path(str(tmpdir.join(\"empty_test\")))\n table.write(filepath=str(path), table_type=table_type)\n\n @classmethod\n def read_incomplete_rows_with_table_type(cls, table_type, path):\n table = Table(path, table_type)\n assert list(table[0]) == [\"Red\", \"Green\", table_type.empty_value]\n\n @classmethod\n def write_incomplete_rows_with_table_type(cls, 
table_type, tmpdir):\n table = Table(\n header=[\"Col1\", \"Col2\", \"Col3\"],\n data=[[\"Red\"], [\"Orange\", \"Yellow\", \"Magenta\"]],\n )\n path = Path(str(tmpdir.join(\"empty_test\")))\n table.write(filepath=str(path), table_type=table_type)\n\n @classmethod\n def read_long_rows_with_table_type(cls, table_type, path):\n table = Table(path, table_type)\n assert table.header == (\"Col1\", \"Col2\", \"Col3\", table._EMPTY_HEADER.format(1))\n assert list(table[0]) == [\"Red\", \"Green\", \"Blue\", \"Purple\"]\n\n @classmethod\n def write_long_rows_with_table_type(cls, table_type, tmpdir):\n table = Table(\n header=[\"Col1\", \"Col2\", \"Col3\"],\n data=[[\"Red\", \"Green\", \"Blue\", \"Purple\"], [\"Orange\", \"Yellow\", \"Magenta\"]],\n )\n path = Path(str(tmpdir.join(\"empty_test\")))\n table.write(filepath=str(path), table_type=table_type)\n", "id": "6807172", "language": "Python", "matching_score": 5.891040802001953, "max_stars_count": 0, "path": "tests/test_tools.py" }, { "content": "from pathlib import Path\n\nimport pytest\nimport requests_mock\n\nfrom tabler import CSV, CSVURL, Table\n\nfrom ...test_tools import TablerTestTools, TableTypeTestTools\n\n\nclass TestCSV:\n BASIC_FILE_PATH = Path(__file__).parent / \"testfile.csv\"\n WITH_NULLS_PATH = Path(__file__).parent / \"testfile_empties.csv\"\n WITH_INCOMPLETE_ROW = Path(__file__).parent / \"testfile_incomplete_rows.csv\"\n WITH_LONG_ROW = Path(__file__).parent / \"testfile_long_rows.csv\"\n TEST_FORMATTING = Path(__file__).parent / \"test_format.csv\"\n expected_formatting = [\"0\", \"0\", \"None\", \"893275023572039\"]\n\n def test_open(self):\n table = Table(self.BASIC_FILE_PATH, table_type=CSV())\n TablerTestTools.table_valid(table)\n\n def test_write(self, tmpdir):\n TableTypeTestTools.write_with_table_type(CSV(), tmpdir)\n\n def test_read_null_values(self):\n TableTypeTestTools.read_null_values_with_tabletype(CSV(), self.WITH_NULLS_PATH)\n\n def test_formatting(self):\n TableTypeTestTools.format_with_table_type(\n CSV(), self.TEST_FORMATTING, self.expected_formatting\n )\n\n def test_write_null_values(self, tmpdir):\n table = Table(\n header=[\"Col1\", \"Col2\", \"Col3\"],\n data=[[\"Red\", \"\", \"Blue\"], [\"Orange\", \"Yellow\", \"Magenta\"]],\n )\n path = Path(str(tmpdir.join(\"empty_test.csv\")))\n expected = \"Col1,Col2,Col3\\nRed,,Blue\\nOrange,Yellow,Magenta\\n\"\n table.write(filepath=str(path))\n with open(str(path), \"r\") as f:\n assert f.read() == expected\n\n def test_read_incomplete_rows(self):\n TableTypeTestTools.read_incomplete_rows_with_table_type(\n CSV(), self.WITH_INCOMPLETE_ROW\n )\n\n def test_write_incomplete_rows(self, tmpdir):\n TableTypeTestTools.write_incomplete_rows_with_table_type(CSV(), tmpdir)\n\n def test_read_long_rows(self):\n TableTypeTestTools.read_long_rows_with_table_type(CSV(), self.WITH_LONG_ROW)\n\n def test_open_file_without_table_type(self):\n TablerTestTools.table_valid(Table(str(Path(__file__).parent / \"testfile.csv\")))\n\n def test_write_long_rows_with(self, tmpdir):\n TableTypeTestTools.write_long_rows_with_table_type(CSV(), tmpdir)\n\n def test_save_file_without_extension(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile\"\n path = str(tmpdir.join(filename))\n table.write(filepath=path, table_type=CSV())\n assert Path(path + \".csv\").exists()\n\n def test_save_file_without_table_type(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile.csv\"\n path = Path(str(tmpdir)) / filename\n table.write(filepath=str(path))\n 
assert path.exists()\n\n def test_open_tab_delimited_csv(self):\n path = Path(__file__).parent / \"testfile_tab.csv\"\n TablerTestTools.table_valid(Table(path, CSV(delimiter=\"\\t\")))\n\n def test_open_csv_with_txt_extension(self):\n path = Path(__file__).parent / \"testfile.txt\"\n TablerTestTools.table_valid(Table(path, CSV()))\n\n def test_write_csv_with_no_header(self, tmpdir):\n table = TablerTestTools.basic_table()\n path = Path(str(tmpdir.join(\"table_with_no_header.csv\")))\n table.header = {}\n table.write(filepath=path, table_type=CSV())\n with open(path) as f:\n file_text = f.read()\n expected = \"Red,Green,Blue\\nOrange,Yellow,Magenta\\n\"\n assert file_text == expected\n\n\nclass TestCSVURL:\n tabletype = CSVURL()\n\n def test_open(self):\n with requests_mock.Mocker() as m:\n with open(str(Path(__file__).parent / \"testfile.csv\"), \"rb\") as f:\n m.get(\"http://test.com/testfile.csv\", content=f.read())\n table = Table(\"http://test.com/testfile.csv\", table_type=CSVURL())\n TablerTestTools.table_valid(table)\n\n def test_empty_content(self):\n with requests_mock.Mocker() as m:\n m.get(\"http://test.com/testfile.csv\", content=b\"\")\n with pytest.raises(ValueError):\n Table(\"http://test.com/testfile.csv\", table_type=CSVURL())\n\n def test_write(self, tmpdir):\n table = TablerTestTools.basic_table()\n with pytest.raises(NotImplementedError):\n table.write(\"path\", table_type=self.tabletype)\n", "id": "4245316", "language": "Python", "matching_score": 6.69224739074707, "max_stars_count": 0, "path": "tests/test_tabletypes/csv/test_csv.py" }, { "content": "from pathlib import Path\n\nfrom tabler import XLSX, Table\n\nfrom ...test_tools import TablerTestTools, TableTypeTestTools\n\n\nclass TestXLSX:\n tabletype = XLSX()\n\n BASIC_FILE_PATH = Path(__file__).parent / \"testfile.xlsx\"\n WITH_NULLS_PATH = Path(__file__).parent / \"testfile_empties.xlsx\"\n WITH_INCOMPLETE_ROW = Path(__file__).parent / \"testfile_incomplete_rows.xlsx\"\n WITH_LONG_ROW = Path(__file__).parent / \"testfile_long_rows.xlsx\"\n TEST_FORMATTING = Path(__file__).parent / \"test_format.xlsx\"\n expected_formatting = [0, 0, \"None\", 893275023572039]\n\n def test_open(self):\n table = Table(self.BASIC_FILE_PATH, table_type=XLSX())\n TablerTestTools.table_valid(table)\n\n def test_write(self, tmpdir):\n TableTypeTestTools.write_with_table_type(XLSX(), tmpdir)\n\n def test_read_null_values(self):\n TableTypeTestTools.read_null_values_with_tabletype(XLSX(), self.WITH_NULLS_PATH)\n\n def test_formatting(self):\n TableTypeTestTools.format_with_table_type(\n XLSX(), self.TEST_FORMATTING, self.expected_formatting\n )\n\n def test_write_null_values(self, tmpdir):\n TableTypeTestTools.write_null_values_with_table_type(XLSX(), tmpdir)\n\n def test_read_incomplete_rows(self):\n TableTypeTestTools.read_incomplete_rows_with_table_type(\n XLSX(), self.WITH_INCOMPLETE_ROW\n )\n\n def test_write_incomplete_rows(self, tmpdir):\n TableTypeTestTools.write_incomplete_rows_with_table_type(XLSX(), tmpdir)\n\n def test_read_long_rows(self):\n TableTypeTestTools.read_long_rows_with_table_type(XLSX(), self.WITH_LONG_ROW)\n\n def test_open_file_without_table_type(self):\n TablerTestTools.table_valid(Table(str(Path(__file__).parent / \"testfile.xlsx\")))\n\n def test_write_long_rows_with(self, tmpdir):\n TableTypeTestTools.write_long_rows_with_table_type(XLSX(), tmpdir)\n\n def test_save_file_without_extension(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile\"\n path = str(tmpdir.join(filename))\n 
table.write(filepath=path, table_type=XLSX())\n assert Path(path + \".xlsx\").exists()\n\n def test_save_file_without_table_type(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile.csv\"\n path = Path(str(tmpdir)) / filename\n table.write(filepath=str(path))\n assert path.exists()\n", "id": "1543519", "language": "Python", "matching_score": 6.967251777648926, "max_stars_count": 0, "path": "tests/test_tabletypes/xlsx/test_xlsx.py" }, { "content": "from pathlib import Path\n\nfrom tabler import ODS, Table\n\nfrom ...test_tools import TablerTestTools, TableTypeTestTools\n\n\nclass TestODS:\n tabletype = ODS()\n\n BASIC_FILE_PATH = Path(__file__).parent / \"testfile.ods\"\n WITH_NULLS_PATH = Path(__file__).parent / \"testfile_empties.ods\"\n WITH_INCOMPLETE_ROW = Path(__file__).parent / \"testfile_incomplete_rows.ods\"\n WITH_LONG_ROW = Path(__file__).parent / \"testfile_long_rows.ods\"\n TEST_FORMATTING = Path(__file__).parent / \"test_format.ods\"\n expected_formatting = [0, 0, \"None\", 893275023572039]\n\n def test_open(self):\n table = Table(self.BASIC_FILE_PATH, table_type=ODS())\n TablerTestTools.table_valid(table)\n\n def test_write(self, tmpdir):\n TableTypeTestTools.write_with_table_type(ODS(), tmpdir)\n\n def test_read_null_values(self):\n TableTypeTestTools.read_null_values_with_tabletype(ODS(), self.WITH_NULLS_PATH)\n\n def test_formatting(self):\n TableTypeTestTools.format_with_table_type(\n ODS(), self.TEST_FORMATTING, self.expected_formatting\n )\n\n def test_write_null_values(self, tmpdir):\n TableTypeTestTools.write_null_values_with_table_type(ODS(), tmpdir)\n\n def test_read_incomplete_rows(self):\n TableTypeTestTools.read_incomplete_rows_with_table_type(\n ODS(), self.WITH_INCOMPLETE_ROW\n )\n\n def test_write_incomplete_rows(self, tmpdir):\n TableTypeTestTools.write_incomplete_rows_with_table_type(ODS(), tmpdir)\n\n def test_read_long_rows(self):\n TableTypeTestTools.read_long_rows_with_table_type(ODS(), self.WITH_LONG_ROW)\n\n def test_open_file_without_table_type(self):\n TablerTestTools.table_valid(Table(str(Path(__file__).parent / \"testfile.ods\")))\n\n def test_write_long_rows_with(self, tmpdir):\n TableTypeTestTools.write_long_rows_with_table_type(ODS(), tmpdir)\n\n def test_save_file_without_extension(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile\"\n path = str(tmpdir.join(filename))\n table.write(filepath=path, table_type=ODS())\n assert Path(path + \".ods\").exists()\n\n def test_save_file_without_table_type(self, tmpdir):\n table = TablerTestTools.basic_table()\n filename = \"testfile.csv\"\n path = Path(str(tmpdir)) / filename\n table.write(filepath=str(path))\n assert path.exists()\n", "id": "2994936", "language": "Python", "matching_score": 3.1616644859313965, "max_stars_count": 0, "path": "tests/test_tabletypes/ods/test_ods.py" }, { "content": "import pytest\n\nfrom tabler.tabletypes.basetabletype import BaseTableType\n\nfrom ..test_tools import TablerTestTools\n\n\nclass TestBaseTableType:\n def test_prepare_row(self):\n table_type = BaseTableType(\".csv\")\n table_type.empty_value = None\n row = [1, 3, \"A\", None, 5, \"C\", None, None, None]\n prepared_row = table_type.prepare_row(row)\n assert list(prepared_row) == [1, 3, \"A\", None, 5, \"C\"]\n\n def test_open(self):\n table_type = BaseTableType(\".csv\")\n with pytest.raises(NotImplementedError):\n table_type.open_path(\"path\")\n\n def test_write(self):\n table_type = BaseTableType(\".csv\")\n with pytest.raises(NotImplementedError):\n 
table_type.write(TablerTestTools.basic_table(), \"path\")\n", "id": "11638494", "language": "Python", "matching_score": 3.6382110118865967, "max_stars_count": 0, "path": "tests/test_tabletypes/test_tabletype.py" }, { "content": "from pathlib import Path\n\nimport pytest\n\nfrom tabler import HTML, Table\n\nfrom ...test_tools import TablerTestTools\n\n\nclass TestHTML:\n def test_open(self):\n with pytest.raises(NotImplementedError):\n Table(\"\", table_type=HTML())\n\n def test_write(self, tmpdir):\n table = TablerTestTools.basic_table()\n filepath = str(Path(str(tmpdir)) / \"testfile.html\")\n table.write(filepath, table_type=HTML())\n with open(str(Path(__file__).parent / \"expected.html\"), \"r\") as f:\n expected = f.read()\n with open(filepath, \"r\") as f:\n assert f.read() == expected\n", "id": "5028470", "language": "Python", "matching_score": 0.3831789791584015, "max_stars_count": 0, "path": "tests/test_tabletypes/html/test_html.py" }, { "content": "#!/usr/bin/python3\n\n# DatabaseTable by <NAME> (<EMAIL>)\n\n\nclass Query():\n \"\"\" Container used to provide usefull functions for accessing and\n manipulating a MySQL Table.\n \"\"\"\n\n def __init__(self, **kwargs):\n pass\n\n def test_list_match(self, item_list):\n if not isinstance(item_list, list):\n raise TypeError(\"Can only test list object\")\n list_type = type(item_list[0])\n for item in item_list:\n if not isinstance(item, list_type):\n return False\n return True\n\n def enforce_list_match(self, item_list):\n if not isinstance(item_list, list):\n raise TypeError(\"Can only test list object\")\n list_type = type(item_list[0])\n for item in item_list:\n if not isinstance(item, list_type):\n raise TypeError(\"List must contain only on type\")\n return False\n return True\n\n def get_where_clause(self, where):\n if isinstance(where, str):\n where_clause = where\n elif isinstance(where, dict):\n where_clause = \"WHERE \" + ' AND '.join(\n \"`\" + field + \"`\" + \"=\" + where[field] for field in where\n )\n else:\n where_clause = ''\n return where_clause\n", "id": "8266252", "language": "Python", "matching_score": 1.8650423288345337, "max_stars_count": 0, "path": "pydatabase/query.py" }, { "content": "#!/usr/bin/python3\n\n# SelectQuery by <NAME> (<EMAIL>)\n\nfrom . 
query import Query\n\n\nclass SelectQuery(Query):\n \"\"\" Container used to provide usefull functions for accessing and\n manipulating a MySQL Table.\n \"\"\"\n\n def __init__(\n self, database, table, fields, where=None, sort=None, limit=None):\n self.database = database\n self.table = table\n self.fields = fields\n self.where_clause = where\n self.sort_clause = sort\n self.limit = limit\n\n def __str__(self):\n query = \"SELECT \"\n query += ', '.join('`' + field + '`' for field in self.fields)\n query += \" FROM \" + self.database + \".\" + self.table\n query += \" \" + str(self.get_where_clause(self.where_clause))\n query += \" \" + str(self.get_sort_clause(self.sort_clause))\n if self.limit is not None:\n query += \" LIMIT \" + str(self.limit)\n query += \";\"\n return query\n\n def get_sort_clause(self, sort):\n if sort is None:\n return ''\n sort_clause = \"ORDER BY \"\n if isinstance(sort, str):\n sort_clause += \"`\" + sort + \"`\"\n elif isinstance(sort, list):\n if isinstance(sort[0], str):\n sort_clause += ', '.join(\n \"`\" + field + \"`\" for field in sort\n )\n elif isinstance(sort[0], list):\n clauses = []\n for clause in sort:\n string = \"`\" + clause[0] + \"`\"\n if len(clause) > 1:\n string += \" \" + clause[1]\n clauses.append(string)\n sort_clause += ', '.join(clauses)\n else:\n raise TypeError(\"sort must be string or list\")\n return sort_clause\n", "id": "6927059", "language": "Python", "matching_score": 1.28323495388031, "max_stars_count": 0, "path": "pydatabase/selectquery.py" }, { "content": "#!/usr/bin/python3\n\n# DatabaseTable by <NAME> (<EMAIL>)\n\nfrom . databaseconnection import DatabaseConnection\nfrom . databasecolumn import DatabaseColumn\n\n\nclass DatabaseTable(DatabaseConnection):\n \"\"\" Container used to provide usefull functions for accessing and\n manipulating a MySQL Table.\n \"\"\"\n\n update_key = None\n no_update_columns = ()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.table = kwargs['table']\n self.description = self.query(\"DESCRIBE \" + self.table)\n self.columns = []\n for column in self.description:\n self.columns.append(DatabaseColumn(column))\n\n def get_all(self):\n \"\"\" Returns all data in table as a two dimensional list. \"\"\"\n return self.result_to_list(self.query(\"SELECT * FROM \" + self.table))\n\n def result_to_list(self, set_):\n \"\"\" Converts a two dimensional set to a two dimensional list. \"\"\"\n newArray = []\n for row in set_:\n newArray.append(list(row))\n return newArray\n\n def get_columns(self):\n \"\"\" Returns a list containing the table's column names. \"\"\"\n columnArray = []\n for column in self.columns:\n columnArray.append(column.name)\n return columnArray\n\n def truncate(self):\n self.query(\"TRUNCATE \" + self.table)\n\n def replace_from_file(self, update_file):\n self.truncate()\n self.update_from_file(update_file)\n\n def get_update_from_file_query(self, update_file):\n \"\"\" Updates MySQL Table with data from a .csv or a Table object.\n If self.update_key is set any record with a matching value will be\n updated. 
file.header or the first row of the .csv must match the table\n column names (order is not important).\n Prints a running percentage complete.\n Returns the number of records updated, then the number new records\n inserted.\n \"\"\"\n\n query = \"INSERT INTO \" + self.table + \" \"\n query += self.get_query_columns_string() + \" VALUES \"\n update_rows = []\n for row in update_file.rows:\n update_rows.append(self.get_insert_query_line(row))\n query += ', '.join(update_rows)\n if self.update_key is not None:\n query += \" ON DUPLICATE KEY UPDATE \"\n duplicate_updates = []\n for column in self.get_columns():\n if column != self.update_key:\n duplicate_updates.append(\n \"`\" + column + \"`=VALUES(`\" + column + \"`)\")\n query += ', '.join(duplicate_updates)\n query = query + \";\"\n\n return query\n\n def update_from_file(self, update_file, split_rows=0):\n print(\"Updating \" + self.database + \".\" + self.table + \" from file\")\n if split_rows > 0:\n tables = update_file.split_by_row_count(split_rows)\n queries = []\n for table in tables:\n queries.append(self.get_update_from_file_query(table))\n for query in queries:\n self.query(query)\n else:\n self.query(self.get_update_from_file_query(update_file))\n\n def get_column(self, column):\n \"\"\" Uses DatabaseConnection.get_column() to return a set\n containing all values in the specified column.\n \"\"\"\n\n return super().get_column(self.table, column)\n\n def get_column_as_strings(self, column):\n return super().get_column_as_strings(self.table, column)\n\n def get_insert_query(self, row):\n query = \"INSERT INTO \" + self.table + \" \"\n query += self.get_query_columns_string()\n query += \" VALUES \"\n query += self.get_insert_query_line(row)\n return query\n\n def get_insert_query_line(self, row):\n \"\"\" Creates an apropriate insert query to insert data from a\n TableRow into the table.\n \"\"\"\n\n values = self.get_query_values(row)\n query = \"(\"\n query += ', '.join(values)\n query = query + \")\"\n return query\n\n def get_update_query_line(self, row):\n columns = self.get_query_columns()\n values = self.get_query_values(row)\n query = \" SET (\"\n i = 0\n while i < len(columns):\n value_string = str(columns[i]) + \"=\" + str(values[i])\n query += ', '.join((query, value_string))\n i += 1\n query += \") WHERE \" + self.update_key + \"=\"\n query += self.escape_string(row.get_column(self.update_key))\n return query\n\n def get_update_query(self, row):\n \"\"\" Creates an apropriate update query to update a record with\n data from a TableRow.\n \"\"\"\n\n columns = self.get_query_columns()\n values = self.get_query_values(row)\n query = \"UPDATE \" + self.table\n i = 0\n query += self.get_update_query_line(row) + \";\"\n return query\n\n def get_query_columns(self):\n \"\"\" Returns a list of columns required for update and insert\n statements quoted with backticks.\n \"\"\"\n\n backtick = '`'\n columns = []\n for column in self.columns:\n if column.name not in self.no_update_columns:\n columns.append(backtick + str(column.name) + backtick)\n return columns\n\n def get_query_columns_string(self):\n string = '('\n columns = self.get_query_columns()\n string += ', '.join(columns)\n string += \")\"\n return string\n\n def get_query_values(self, row):\n \"\"\" Properly formats data from a TableRow for use in a MySQL\n insert or update query.\n \"\"\"\n\n values = []\n for column in self.columns:\n if column.name not in self.no_update_columns:\n record = row.get_column(column.name)\n if record == '' or record is None:\n record = 
'NULL'\n elif column.data_type in ('varchar', 'text'):\n record = \"'\" + self.escape_string(record) + \"'\"\n values.append(record)\n return values\n\n def truncate(self):\n \"\"\" This will empty the table. Data may not be recoverable. \"\"\"\n self.query(\"TRUNCATE \" + self.table)\n\n def to_table(self):\n \"\"\" Returns Table containg this table's data. \"\"\"\n try:\n from .. table . table import Table as Table\n except ImportError:\n print('Table module not found')\n return None\n table = Table()\n table.load_from_database_table(self)\n return table\n\n def write(self, filepath):\n \"\"\" Creates a .csv of the table's data with the specified filepath.\"\"\"\n self.to_table().write(filepath)\n", "id": "10194220", "language": "Python", "matching_score": 3.786015510559082, "max_stars_count": 0, "path": "pydatabase/databasetable.py" }, { "content": "#!/usr/bin/python3\n\n# DatabaseColumn by <NAME> (<EMAIL>)\n\n\nclass DatabaseColumn():\n \"\"\" Container for columns of data imported from a mysql Database.\n Used by DatabaseTable\n \"\"\"\n\n def __init__(self, column_description):\n self.name = None\n self.data_type = None\n self.null = None\n self.name = column_description[0]\n self.data_type = self.get_data_type(column_description[1])\n\n if column_description[2] == 'NO':\n self.null = False\n elif column_description[2] == 'YES':\n self.null = True\n\n def get_data_type(self, dtype):\n \"\"\" Converts data type from mysql table description to a usable format.\n Used to properly format query statements in DatabaseTable\n \"\"\"\n\n datatypes = ['varchar', 'int', 'text', 'float']\n for datatype in datatypes:\n if dtype[0:len(datatype)] == datatype:\n return datatype\n", "id": "3253665", "language": "Python", "matching_score": 1.5433201789855957, "max_stars_count": 0, "path": "pydatabase/databasecolumn.py" }, { "content": "#!/usr/bin/python3\n\n# DatabaseConnection by <NAME> (<EMAIL>)\n\nimport pymysql\n\n\nclass DatabaseConnection(object):\n \"\"\" Used as an interface with a local MySQL instance\n Requires pymysql (https://pypi.python.org/pypi/PyMySQL#downloads)\n \"\"\"\n\n host = 'localhost'\n user = 'axevalley'\n password = '<PASSWORD>'\n charset = 'utf8'\n\n def __init__(self, **kwargs):\n self.database = kwargs['database']\n if 'host' in kwargs:\n self.host = kwargs['host']\n if 'user' in kwargs:\n self.user = kwargs['user']\n if 'password' in kwargs:\n self.password = kwargs['password']\n if 'charset' in kwargs:\n self.charset = kwargs['charsest']\n\n def query(self, query):\n \"\"\" Sends query to MySQL database and returns query results. \"\"\"\n\n conn = pymysql.connect(host=self.host, user=self.user,\n password=<PASSWORD>, db=self.database,\n charset=self.charset)\n cur = conn.cursor()\n try:\n cur.execute(str(query))\n except:\n print('Query Error: ')\n print(str(query))\n conn.close()\n return None\n conn.commit()\n results = cur.fetchall()\n cur.close()\n conn.close()\n return results\n\n def get_column(self, table, column):\n \"\"\"Queries MySQL database for specified column from specified table and\n returns the data therein as a set. \"\"\"\n\n results = self.query(\"SELECT \" + column + \" FROM \" + table)\n column = []\n for record in results:\n column.append(record[0])\n return set(column)\n\n def get_column_as_strings(self, table, column):\n \"\"\"Queries MySQL database for specified column from specified table and\n returns the data therein as a set of strings. 
\"\"\"\n\n results = self.query(\"SELECT \" + column + \" FROM \" + table)\n column = []\n for record in results:\n column.append(str(record[0]))\n return set(column)\n\n def escape_string(self, string):\n \"\"\"Provides basic string escapes for single quote characters. \"\"\"\n\n newstring = str(string)\n newstring = newstring.replace(\"'\", \"\\\\'\")\n return newstring\n", "id": "10097835", "language": "Python", "matching_score": 2.640942335128784, "max_stars_count": 0, "path": "pydatabase/databaseconnection.py" }, { "content": "from . databaseconnection import DatabaseConnection\nfrom . databasetable import DatabaseTable\n\n\ndef connect(**kwargs):\n database_connection = DatabaseConnection(**kwargs)\n return database_connection\n\n\ndef get_table(**kwargs):\n database_table = DatabaseTable(**kwargs)\n return database_table\n", "id": "10022224", "language": "Python", "matching_score": 0.142740398645401, "max_stars_count": 0, "path": "pydatabase/__init__.py" }, { "content": "import ftplib\nimport os\n\n\nclass FTP_Connection:\n\n def __init__(self, host='',\n username='anonymous',\n password='',\n folder='',\n port=21):\n self.host = host\n self.username = username\n self.port = port\n self.ftp = ftplib.FTP()\n self.ftp.connect(self.host, self.port)\n self.ftp.login(self.username, password)\n if folder != '':\n self.ftp.cwd(folder)\n\n def __repr__(self):\n return '{self.user}@{self.host}:{self.port}'\n\n def cd(self, folder):\n return self.ftp.cwd(folder)\n\n def ls(self):\n return self.ftp.dir()\n\n def mkdir(self, folder):\n return self.ftp.mkd(folder)\n\n def upload(self, file):\n filepath = os.path.abspath(file)\n filename = os.path.split(filepath)[1]\n self.ftp.storlines(\"STOR \" + filename, open(filepath, \"rb\"))\n\n def download(self, filename, savename=''):\n if savename == '':\n outfile = open(filename, 'wb')\n else:\n outfile = open(savename, 'wb')\n self.ftp.retrbinary(\"RETR \" + filename, outfile.write)\n", "id": "9071016", "language": "Python", "matching_score": 2.339122772216797, "max_stars_count": 0, "path": "lsftp/ftpconnection.py" }, { "content": "from . ftpconnection import FTP_Connection\n", "id": "6808125", "language": "Python", "matching_score": 0.15246669948101044, "max_stars_count": 0, "path": "lsftp/__init__.py" }, { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='lsftp',\n version='1.0',\n description='Wrapper for ftplib',\n author='<NAME>',\n author_email='<EMAIL>',\n url='http://tabler.lukeshiner.com',\n keywords=['ftp', 'ftplib', 'file transfer protocol'],\n install_requires=[],\n packages=find_packages(),\n )\n", "id": "9204880", "language": "Python", "matching_score": 2.6542317867279053, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='pydatabase',\n version='1.0.02',\n description='Simple interface for MySQL databases',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/axevalley/pydatabase.git',\n keywords=['database', 'mysql', 'simple'],\n install_requires=['tabler', 'pymysql'],\n packages=find_packages())\n", "id": "2695338", "language": "Python", "matching_score": 2.574326515197754, "max_stars_count": 0, "path": "setup.py" } ]
2.22585
GustavoHenriqueMuller
[ { "content": "#====-------------------------------------------------====#\r\n# Global variables (constants).\r\n#====-------------------------------------------------====#\r\nimport cv2 as cv\r\n\r\nTITLE = \"AutoDrawer\"\r\nPROGRAM_THEME = \"Reddit\"\r\nSTD_FONT = (\"Helvetica\", 12)\r\nSTD_HEADER_FONT = (\"Helvetica\", 17, \"bold\")\r\nINFO_BUTTON_COLOR = \"#d6af4d\"\r\nPROGRAM_WIDTH = 52\r\n\r\nTHRESHOLD_TYPES = {\"Binary\": cv.THRESH_BINARY,\r\n \"Binary Inverted\": cv.THRESH_BINARY_INV,\r\n \"Truncated\": cv.THRESH_TRUNC,\r\n \"To Zero\": cv.THRESH_TOZERO,\r\n \"To Zero Inverted\": cv.THRESH_TOZERO_INV}\r\nTHRESHOLD_CONTOUR_APPROX_METHODS = {\"None\": cv.CHAIN_APPROX_NONE,\r\n \"Simple\": cv.CHAIN_APPROX_SIMPLE,\r\n \"TC89_L1\": cv.CHAIN_APPROX_TC89_L1,\r\n \"TC89_KCOS\": cv.CHAIN_APPROX_TC89_KCOS}\r\nADAPTIVE_THRESHOLD_METHODS = {\"Mean C\": cv.ADAPTIVE_THRESH_MEAN_C,\r\n \"Gaussian C\": cv.ADAPTIVE_THRESH_GAUSSIAN_C}\r\n\r\nINFO_WINDOW_NAME = {\"infoSimpleThreshold\": \"Simple Threshold\",\r\n \"infoDelay\": \"Delay\",\r\n \"infoSimpleThresholdMaxValue\": \"Simple max value\",\r\n \"infoScale\": \"Scale\",\r\n \"infoBlocksize\": \"Blocksize\",\r\n \"infoC\": \"C\",\r\n \"infoAdaptiveThreshold\": \"Adaptative Threshold\",\r\n \"infoAdaptiveThresholdMaxValue\": \"Adaptive max value\"}\r\n\r\nINFO_DESCRIPTION = {\"infoSimpleThreshold\": \"For every pixel, the Normal Threshold value is applied.\\nIf the pixel value is smaller than the threshold,\\nit is set to 0, otherwise it is set to [Simple Max Value]\",\r\n \"infoDelay\": \"Delay is the time waited between drawing each point.\\nThe lower the Delay, the quicker the program will run,\\nbut with less details.\",\r\n \"infoSimpleThresholdMaxValue\": \"Simple Max Value is the value which is assigned to pixel values\\nexceeding the simple threshold.\",\r\n \"infoScale\": \"The dimensions of the image will get multiplied by this value.\\nIf the scale is 0.5 and the image is 1000x1000 pixels,\\nthe drawing will need 500x500 pixels to draw completely.\",\r\n \"infoBlocksize\": \"Size of a pixel neighborhood that is used to calculate\\na threshold value for the pixel: 3, 5, 7, and so on\",\r\n \"infoC\": \"Constant subtracted from the mean or weighted mean.\\nNormally, it is positive but may be zero or negative as well.\",\r\n \"infoAdaptiveThreshold\": \"In simple thresholding, the threshold value is global, i.e., it is same for all the pixels in the image.\\nAdaptive thresholding is the method where the threshold value is calculated for smaller regions\\nand therefore, there will be different threshold values for different regions.\",\r\n \"infoAdaptiveThresholdMaxValue\": \"Adaptive Max Value is the value which is assigned to pixel values\\nexceeding the adaptive threshold determined by the pixel neighborhood.\"}", "id": "10498924", "language": "Python", "matching_score": 3.7246828079223633, "max_stars_count": 5, "path": "globals.py" }, { "content": "#====-------------------------------------------------------------------------====#\r\n# MIT License\r\n\r\n# Copyright (c) 2020 GustavoHenriqueMuller\r\n\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the 
following conditions:\r\n\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n#====-------------------------------------------------------------------------====#\r\n\r\nimport drawer\r\nimport functions\r\nfrom globals import *\r\n\r\nimport PySimpleGUI as sg\r\n\r\npreview = False\r\npreviewType = \"\" # Image, Threshold or Contours\r\nusesAdaptiveThreshold = False\r\nimagePath = \"\"\r\nstartupTime = 3 # Seconds\r\ndelay = 0 # Seconds\r\nscale = 1.0\r\n\r\nsimpleThreshold = 0\r\nsimpleThresholdMaxValue = 0\r\nsimpleThresholdType = \"\"\r\nsimpleThresholdContourApproximationMethod = \"\"\r\n\r\nblockSize = 0\r\nc = 0\r\nadaptiveThresholdMaxValue = 0\r\nadaptiveThresholdType = \"\"\r\nadaptiveThresholdMethod = \"\"\r\nadaptiveThresholdContourApproximationMethod = \"\"\r\n\r\nsg.theme(PROGRAM_THEME)\r\nlayout = [ # Row 1\r\n [sg.Text(TITLE, font=STD_HEADER_FONT, size=(PROGRAM_WIDTH,1), justification=\"center\")],\r\n # Row 2\r\n [sg.Text(\"Image URL/Path:\", font=STD_FONT), sg.InputText(key=\"-imagePath-\", font=STD_FONT, text_color=\"blue\"), sg.FileBrowse(key=\"-fileBrowser-\", font=STD_FONT, size=(10,1), file_types=((\"All Files\", \"*\"), (\"PNG Files\", \"*.png\"), (\"JPG Files\", \"*.jpg\"), (\"JPEG Files\", \"*.jpeg\")))],\r\n # Row 3\r\n [sg.Text(\"_\" * PROGRAM_WIDTH * 2)],\r\n # Row 4\r\n [sg.Text(\"Simple Threshold Type:\", font=STD_FONT), sg.InputOptionMenu(key=\"-simpleThresholdType-\", values=list(THRESHOLD_TYPES.keys()), default_value=list(THRESHOLD_TYPES.keys())[0]),\r\n sg.Text(\"Simple Threshold:\", font=STD_FONT), sg.InputText(\"127\", key=\"-simpleThreshold-\", font=STD_FONT, size=(4,1)), sg.Button(\"?\", key=\"-infoSimpleThreshold-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR)),\r\n sg.Text(\"Delay:\", font=STD_FONT), sg.InputText(\"0.00025\", key=\"-delay-\", font=STD_FONT, size=(7,1)), sg.Button(\"?\", key=\"-infoDelay-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR))],\r\n # Row 5\r\n [sg.Text(\"Simple Contour Approximation Method:\", font=STD_FONT), sg.InputOptionMenu(key=\"-simpleThresholdContourApproximationMethod-\", values=list(THRESHOLD_CONTOUR_APPROX_METHODS.keys()), default_value=list(THRESHOLD_CONTOUR_APPROX_METHODS.keys())[0]),\r\n sg.Text(\"Simple Max Value:\", font=STD_FONT), sg.InputText(\"255\", key=\"-simpleThresholdMaxValue-\", font=STD_FONT, size=(4,1)),\r\n sg.Button(\"?\", key=\"-infoSimpleThresholdMaxValue-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR)),\r\n sg.Text(\"Scale:\", font=STD_FONT), sg.InputText(\"1.0\", key=\"-scale-\", font=STD_FONT, size=(4,1)),\r\n sg.Button(\"?\", key=\"-infoScale-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR))],\r\n # Row 6\r\n [sg.Text(\"_\" * PROGRAM_WIDTH * 2)],\r\n # Row 7\r\n [sg.Checkbox(\"Use adaptive threshold instead of simple\", key=\"-adaptiveThreshold-\", font=STD_FONT),\r\n sg.Button(\"?\", key=\"-infoAdaptiveThreshold-\", size=(2,1), button_color=(\"white\", 
INFO_BUTTON_COLOR)),\r\n sg.Text(\"Blocksize:\", font=STD_FONT), sg.InputText(\"11\", key=\"-blockSize-\", font=STD_FONT, size=(3,1)), sg.Button(\"?\", key=\"-infoBlocksize-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR)),\r\n sg.Text(\"C:\", font=STD_FONT), sg.InputText(\"2\", key=\"-c-\", font=STD_FONT, size=(3,1)), sg.Button(\"?\", key=\"-infoC-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR))],\r\n # Row 8\r\n [sg.Text(\"Adaptive Threshold Type:\", font=STD_FONT), sg.InputOptionMenu(key=\"-adaptiveThresholdType-\", values=list(THRESHOLD_TYPES.keys())[0:2], default_value=list(THRESHOLD_TYPES.keys())[0]),\r\n sg.Text(\"Adaptive Max Value:\", font=STD_FONT), sg.InputText(\"255\", key=\"-adaptiveThresholdMaxValue-\", font=STD_FONT, size=(4,1)), sg.Button(\"?\", key=\"-infoAdaptiveThresholdMaxValue-\", size=(2,1), button_color=(\"white\", INFO_BUTTON_COLOR))],\r\n # Row 9\r\n [sg.Text(\"Adaptive Contour Approximation Method:\", font=STD_FONT), sg.InputOptionMenu(key=\"-adaptiveThresholdContourApproximationMethod-\", values=list(THRESHOLD_CONTOUR_APPROX_METHODS.keys()), default_value=list(THRESHOLD_CONTOUR_APPROX_METHODS.keys())[0]),\r\n sg.Text(\"Adaptive Threshold Method:\", font=STD_FONT), sg.InputOptionMenu(key=\"-adaptiveThresholdMethod-\", values=list(ADAPTIVE_THRESHOLD_METHODS.keys()), default_value=list(ADAPTIVE_THRESHOLD_METHODS.keys())[0])],\r\n # Row 10\r\n [sg.Text(\"_\" * PROGRAM_WIDTH * 2)],\r\n # Row 11\r\n [sg.Button(\"Draw\", key=\"-draw-\", font=STD_FONT, size=(10,1)),\r\n sg.Button(\"Preview (Image)\", key=\"-previewImage-\", font=STD_FONT, size=(21,1)),\r\n sg.Button(\"Preview (Threshold)\", key=\"-previewThreshold-\", font=STD_FONT, size=(21,1)),\r\n sg.Button(\"Preview (Contours)\", key=\"-previewContours-\", font=STD_FONT, size=(21,1))],\r\n # Row 12\r\n [sg.Text(\"\", key=\"-error-\", font=STD_FONT, size=(PROGRAM_WIDTH,1), text_color=\"red\", pad=((0, 0),(15,0)))],\r\n [sg.Text(\"_\" * PROGRAM_WIDTH * 2)]]\r\n\r\n# Loop and Window\r\nwindow = sg.Window(TITLE, layout)\r\nwhile True:\r\n event, values = window.read()\r\n\r\n # Quit\r\n if event is None:\r\n exit()\r\n\r\n # Info\r\n elif event[:5] == \"-info\":\r\n event = event[1:]; event = event[:-1]\r\n sg.popup(INFO_DESCRIPTION[event], title=INFO_WINDOW_NAME[event])\r\n\r\n # Draw or Preview\r\n elif event == \"-draw-\" or event[:8] == \"-preview\":\r\n imagePath = values[\"-imagePath-\"]\r\n hasErrors, error = functions.checkErrors(values, imagePath)\r\n\r\n if not hasErrors:\r\n if event == \"-draw-\":\r\n preview = False\r\n else:\r\n preview = True\r\n previewType = event.replace(\"-\", \"\")\r\n previewType = previewType.replace(\"preview\", \"\")\r\n\r\n delay = float(values[\"-delay-\"])\r\n scale = float(values[\"-scale-\"])\r\n \r\n # Simple Threshold \r\n simpleThreshold = int(values[\"-simpleThreshold-\"]) \r\n simpleThresholdMaxValue = int(values[\"-simpleThresholdMaxValue-\"])\r\n simpleThresholdType = values[\"-simpleThresholdType-\"]\r\n simpleThresholdContourApproximationMethod = values[\"-simpleThresholdContourApproximationMethod-\"]\r\n\r\n # Adaptive Threshold\r\n usesAdaptiveThreshold = bool(values[\"-adaptiveThreshold-\"])\r\n if usesAdaptiveThreshold:\r\n blockSize = int(values[\"-blockSize-\"])\r\n c = int(values[\"-c-\"])\r\n adaptiveThresholdMaxValue = int(values[\"-adaptiveThresholdMaxValue-\"])\r\n adaptiveThresholdType = values[\"-adaptiveThresholdType-\"]\r\n adaptiveThresholdMethod = values[\"-adaptiveThresholdMethod-\"]\r\n 
adaptiveThresholdContourApproximationMethod = values[\"-adaptiveThresholdContourApproximationMethod-\"]\r\n\r\n # Minimize\r\n if not preview:\r\n window[\"-error-\"].update(\"Running...\")\r\n window.minimize()\r\n else:\r\n window[\"-error-\"].update(\"Getting image...\")\r\n\r\n window.refresh()\r\n exec(open(\"drawer.py\").read())\r\n window[\"-error-\"].update(\"\")\r\n else:\r\n window[\"-error-\"].update(error)\r\n", "id": "1607122", "language": "Python", "matching_score": 4.484452247619629, "max_stars_count": 5, "path": "main.py" }, { "content": "#====-------------------------------------------------====#\r\n# Drawer.\r\n# This file is responsible for generating the contours\r\n# and actually moving the mouse along their points.\r\n#====-------------------------------------------------====#\r\n\r\nimport main\r\nimport functions\r\n\r\nimport time\r\nimport winsound\r\nimport cv2 as cv\r\nimport numpy as np\r\nfrom urllib.request import urlopen\r\nimport keyboard\r\nfrom pynput.mouse import Button, Controller\r\nimport PySimpleGUI as sg\r\n\r\ndelay = main.delay\r\nscale = main.scale\r\n\r\nmouse = Controller()\r\nimage = functions.getImage(main.imagePath)\r\nimageGray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\r\nimageThresholded = []\r\ncontours = []\r\n\r\n# Getting contours\r\nif main.usesAdaptiveThreshold:\r\n contours, imageThresholded = functions.generateAdaptiveContours(imageGray, main.adaptiveThresholdMaxValue, main.adaptiveThresholdMethod,\r\n main.adaptiveThresholdType, main.blockSize, main.c, main.adaptiveThresholdContourApproximationMethod) \r\nelse:\r\n contours, imageThresholded = functions.generateSimpleContours(imageGray, main.simpleThreshold, main.simpleThresholdMaxValue,\r\n main.simpleThresholdType, main.simpleThresholdContourApproximationMethod) \r\n\r\n# Draw\r\nif not main.preview:\r\n # Startup\r\n time.sleep(main.startupTime)\r\n main.window.minimize()\r\n\r\n # InitX and InitY are the top-left corner of the image\r\n initX = mouse.position[0]\r\n initY = mouse.position[1]\r\n isDrawing = True\r\n\r\n for contour in contours:\r\n if not isDrawing:\r\n break\r\n\r\n mouse.release(Button.left)\r\n time.sleep(delay)\r\n\r\n for index, point in enumerate(contour):\r\n # Break\r\n if keyboard.is_pressed(\"esc\"):\r\n mouse.release(Button.left)\r\n isDrawing = False\r\n break\r\n \r\n # Next point\r\n mouse.position = (initX + (point[0][0] * scale), initY + (point[0][1] * scale))\r\n time.sleep(delay)\r\n \r\n # New contour\r\n if(index == 1):\r\n mouse.press(Button.left)\r\n time.sleep(delay)\r\n\r\n mouse.release(Button.left)\r\n winsound.Beep(440, 1000)\r\n\r\nelse:\r\n # Preview\r\n if main.previewType == \"Image\":\r\n cv.drawContours(image, contours, -1, (0,255,0), 2)\r\n cv.imshow(\"Image Preview\", image) # Shows image + contours\r\n\r\n elif main.previewType == \"Threshold\": \r\n cv.drawContours(imageThresholded, contours, -1, (0,255,0), 2)\r\n cv.imshow(\"Threshold Preview\", imageThresholded) # Shows thresholded image\r\n\r\n elif main.previewType == \"Contours\":\r\n blackimg = np.zeros(image.shape)\r\n\r\n cv.drawContours(blackimg, contours, -1, (0,255,0), 2)\r\n cv.imshow(\"Contours Preview\", blackimg) # Shows only contours\r\n\r\n cv.waitKey(0)", "id": "3104961", "language": "Python", "matching_score": 1.6786967515945435, "max_stars_count": 5, "path": "drawer.py" } ]
3.724683
davidxia
[ { "content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport dask.dataframe as dd\nimport numpy as np\n\nfrom nvtabular.dispatch import DataFrameType, annotate\n\nfrom .moments import _custom_moments\nfrom .operator import ColumnSelector, Operator\nfrom .stat_operator import StatOperator\n\n\nclass DataStats(StatOperator):\n def __init__(self):\n super().__init__()\n self.col_names = []\n self.col_types = []\n self.col_dtypes = []\n self.output = {}\n\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n return df\n\n @annotate(\"DataStats_fit\", color=\"green\", domain=\"nvt_python\")\n def fit(self, col_selector: ColumnSelector, ddf: dd.DataFrame):\n dask_stats = {}\n\n ddf_dtypes = ddf.head(1)\n\n # For each column, calculate the stats\n for col in col_selector.names:\n dask_stats[col] = {}\n self.col_names.append(col)\n # Get dtype for all\n dtype = ddf_dtypes[col].dtype\n self.col_dtypes.append(dtype)\n\n # Identify column type\n if np.issubdtype(dtype, np.floating):\n col_type = \"conts\"\n else:\n col_type = \"cats\"\n self.col_types.append(col_type)\n\n # Get cardinality for cats\n if col_type == \"cats\":\n dask_stats[col][\"cardinality\"] = ddf[col].nunique()\n\n # if string, replace string for their lengths for the rest of the computations\n if dtype == \"object\":\n ddf[col] = ddf[col].map_partitions(lambda x: x.str.len(), meta=(\"x\", int))\n # Add list support when cudf supports it:\n # https://github.com/rapidsai/cudf/issues/7157\n # elif col_type == \"cat_mh\":\n # ddf[col] = ddf[col].map_partitions(lambda x: x.list.len())\n\n # Get min,max, and mean\n dask_stats[col][\"min\"] = ddf[col].min()\n dask_stats[col][\"max\"] = ddf[col].max()\n\n # Get Percentage of NaNs for all\n dask_stats[col][\"per_nan\"] = 100 * (1 - ddf[col].count() / len(ddf[col]))\n\n return dask_stats, _custom_moments(ddf[col_selector.names])\n\n def fit_finalize(self, stats):\n dask_stats, moments = stats\n\n # merge in mean/std from the custom_moments code\n for col in moments.index:\n dask_stats[col][\"mean\"] = moments[\"mean\"].loc[col].item()\n dask_stats[col][\"std\"] = moments[\"std\"].loc[col].item()\n\n for i, col in enumerate(self.col_names):\n # Add dtype\n dask_stats[col][\"dtype\"] = str(self.col_dtypes[i])\n # Cast types for yaml\n if isinstance(dask_stats[col][\"per_nan\"], np.floating):\n dask_stats[col][\"per_nan\"] = dask_stats[col][\"per_nan\"].item()\n if self.col_types[i] == \"conts\":\n if isinstance(dask_stats[col][\"std\"], np.floating):\n dask_stats[col][\"std\"] = dask_stats[col][\"std\"].item()\n else:\n if isinstance(dask_stats[col][\"cardinality\"], np.integer):\n dask_stats[col][\"cardinality\"] = dask_stats[col][\"cardinality\"].item()\n self.output = dask_stats\n\n def clear(self):\n self.output = {}\n\n transform.__doc__ = Operator.transform.__doc__\n fit.__doc__ = StatOperator.fit.__doc__\n fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__\n", "id": "2514417", 
"language": "Python", "matching_score": 2.789377450942993, "max_stars_count": 543, "path": "nvtabular/ops/data_stats.py" }, { "content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import Any\n\nimport dask.dataframe as dd\nimport pandas as pd\n\nfrom nvtabular.dispatch import DataFrameType, _is_list_dtype, _pull_apart_list\n\nfrom .operator import ColumnSelector\nfrom .stat_operator import StatOperator\n\n\nclass ValueCount(StatOperator):\n \"\"\"\n The operator calculates the min and max lengths of multihot columns.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.stats = {}\n\n def fit(self, col_selector: ColumnSelector, ddf: dd.DataFrame) -> Any:\n stats = {}\n for col in col_selector.names:\n series = ddf[col]\n if _is_list_dtype(series.compute()):\n stats[col] = stats[col] if col in stats else {}\n stats[col][\"value_count\"] = (\n {} if \"value_count\" not in stats[col] else stats[col][\"value_count\"]\n )\n offs = _pull_apart_list(series.compute())[1]\n lh, rh = offs[1:], offs[:-1]\n if isinstance(offs, pd.Series):\n rh = rh.reset_index(drop=True)\n deltas = lh - rh\n stats[col][\"value_count\"][\"min\"] = deltas.min()\n stats[col][\"value_count\"][\"max\"] = deltas.max()\n return stats\n\n def fit_finalize(self, dask_stats):\n self.stats = dask_stats\n\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n return df\n\n def output_properties(self):\n return self.stats\n", "id": "8243020", "language": "Python", "matching_score": 3.3425486087799072, "max_stars_count": 0, "path": "nvtabular/ops/value_counts.py" }, { "content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom typing import Any\n\nimport dask.dataframe as dd\n\nfrom .operator import ColumnSelector, Operator\n\n\nclass StatOperator(Operator):\n \"\"\"\n Base class for statistical operator classes. 
This adds a 'fit' and 'finalize' method\n on top of the Operator class.\n \"\"\"\n\n def fit(self, col_selector: ColumnSelector, ddf: dd.DataFrame) -> Any:\n \"\"\"Calculate statistics for this operator, and return a dask future\n to these statistics, which will be computed by the workflow.\"\"\"\n\n raise NotImplementedError(\n \"\"\"The dask operations needed to return a dictionary of uncomputed statistics.\"\"\"\n )\n\n def fit_finalize(self, dask_stats):\n \"\"\"Finalize statistics calculation - the workflow calls this function with\n the computed statistics from the 'fit' object'\"\"\"\n\n raise NotImplementedError(\n \"\"\"Follow-up operations to convert dask statistics in to member variables\"\"\"\n )\n\n def clear(self):\n \"\"\"zero and reinitialize all relevant statistical properties\"\"\"\n raise NotImplementedError(\"clear isn't implemented for this op!\")\n\n def set_storage_path(self, new_path, copy=False):\n \"\"\"Certain stat operators need external storage - for instance Categorify writes out\n parquet files containing the categorical mapping. When we save the operator, we\n also want to save these files as part of the bundle. Implementing this method\n lets statoperators bundle their dependant files into the new path that we're writing\n out (note that this could happen after the operator is created)\n \"\"\"\n", "id": "1808521", "language": "Python", "matching_score": 0.31670942902565, "max_stars_count": 0, "path": "nvtabular/ops/stat_operator.py" }, { "content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom pathlib import Path\n\nimport numpy\nimport pytest\n\nfrom nvtabular.columns.schema import ColumnSchema, Schema\nfrom nvtabular.columns.selector import ColumnSelector\nfrom nvtabular.tags import Tags\n\n\n@pytest.mark.parametrize(\"d_types\", [numpy.float32, numpy.float64, numpy.uint32, numpy.uint64])\ndef test_dtype_column_schema(d_types):\n column = ColumnSchema(\"name\", tags=[], properties=[], dtype=d_types)\n assert column.dtype == d_types\n\n\ndef test_column_schema_meta():\n column = ColumnSchema(\"name\", tags=[\"tag-1\"], properties={\"p1\": \"prop-1\"})\n\n assert column.name == \"name\"\n assert column.tags[0] == \"tag-1\"\n assert column.with_name(\"a\").name == \"a\"\n assert set(column.with_tags(\"tag-2\").tags) == set([\"tag-1\", \"tag-2\"])\n assert column.with_properties({\"p2\": \"prop-2\"}).properties == {\"p1\": \"prop-1\", \"p2\": \"prop-2\"}\n assert column.with_tags(\"tag-2\").properties == {\"p1\": \"prop-1\"}\n assert set(column.with_properties({\"p2\": \"prop-2\"}).tags) == set([\"tag-1\"])\n\n assert column == ColumnSchema(\"name\", tags=[\"tag-1\"], properties={\"p1\": \"prop-1\"})\n # should not be the same no properties\n assert column != ColumnSchema(\"name\", tags=[\"tag-1\"])\n # should not be the same no tags\n assert column != ColumnSchema(\"name\", properties={\"p1\": \"prop-1\"})\n\n\n@pytest.mark.parametrize(\"props1\", [{}, {\"p1\": \"p1\", \"p2\": 
\"p2\"}])\n@pytest.mark.parametrize(\"props2\", [{}, {\"p3\": \"p3\", \"p4\": \"p4\"}])\n@pytest.mark.parametrize(\"tags1\", [[], [\"a\", \"b\", \"c\"]])\n@pytest.mark.parametrize(\"tags2\", [[], [\"c\", \"d\", \"e\"]])\n@pytest.mark.parametrize(\"d_type\", [numpy.float, numpy.int])\n@pytest.mark.parametrize(\"list_type\", [True, False])\ndef test_column_schema_set_protobuf(tmpdir, props1, props2, tags1, tags2, d_type, list_type):\n # create a schema\n schema1 = ColumnSchema(\"col1\", tags=tags1, properties=props1, dtype=d_type, _is_list=list_type)\n schema2 = ColumnSchema(\"col2\", tags=tags2, properties=props2, dtype=d_type, _is_list=list_type)\n column_schema_set = Schema([schema1, schema2])\n # write schema out\n schema_path = Path(tmpdir)\n column_schema_set = column_schema_set.save_protobuf(schema_path)\n # read schema back in\n target = Schema.load_protobuf(schema_path)\n # compare read to origin\n assert column_schema_set == target\n\n\ndef test_column_schema_protobuf_domain_check(tmpdir):\n # create a schema\n schema1 = ColumnSchema(\n \"col1\",\n tags=[],\n properties={\"domain\": {\"min\": 0, \"max\": 10}},\n dtype=numpy.int,\n _is_list=False,\n )\n schema2 = ColumnSchema(\n \"col2\",\n tags=[],\n properties={\"domain\": {\"min\": 0.0, \"max\": 10.0}},\n dtype=numpy.float,\n _is_list=False,\n )\n column_schema_set = Schema([schema1, schema2])\n # write schema out\n schema_path = Path(tmpdir)\n saved_schema = column_schema_set.save_protobuf(schema_path)\n # read schema back in\n loaded_schema = Schema.load_protobuf(schema_path)\n # compare read to origin\n assert saved_schema == loaded_schema\n\n # load in protobuf file to tensorflow schema representation\n proto_schema = Schema.read_protobuf(schema_path / \"schema.pbtxt\")\n\n assert \"\"\"name: \"col1\"\\n min: 0\\n max: 10\\n\"\"\" in str(proto_schema)\n assert \"\"\"name: \"col2\"\\n min: 0.0\\n max: 10.0\\n\"\"\" in str(proto_schema)\n\n\ndef test_column_schema_tags_normalize():\n schema1 = ColumnSchema(\"col1\", tags=[\"categorical\", \"continuous\", \"item_id\"])\n assert schema1.tags == [Tags.CATEGORICAL, Tags.CONTINUOUS, Tags.ITEM_ID]\n\n\ndef test_dataset_schema_constructor():\n schema1 = ColumnSchema(\"col1\", tags=[\"a\", \"b\", \"c\"])\n schema2 = ColumnSchema(\"col2\", tags=[\"c\", \"d\", \"e\"])\n\n expected = {schema1.name: schema1, schema2.name: schema2}\n\n ds_schema_dict = Schema(expected)\n ds_schema_list = Schema([schema1, schema2])\n\n assert ds_schema_dict.column_schemas == expected\n assert ds_schema_list.column_schemas == expected\n\n with pytest.raises(TypeError) as exception_info:\n Schema(12345)\n\n assert \"column_schemas\" in str(exception_info.value)\n\n\ndef test_dataset_schema_select_by_tag():\n schema1 = ColumnSchema(\"col1\", tags=[\"a\", \"b\", \"c\"])\n schema2 = ColumnSchema(\"col2\", tags=[\"b\", \"c\", \"d\"])\n\n ds_schema = Schema([schema1, schema2])\n\n selected_schema1 = ds_schema.select_by_tag(\"a\")\n selected_schema2 = ds_schema.select_by_tag(\"d\")\n\n assert selected_schema1.column_schemas == {\"col1\": schema1}\n assert selected_schema2.column_schemas == {\"col2\": schema2}\n\n selected_schema_both = ds_schema.select_by_tag(\"c\")\n selected_schema_neither = ds_schema.select_by_tag(\"e\")\n selected_schema_multi = ds_schema.select_by_tag([\"b\", \"c\"])\n\n assert selected_schema_both.column_schemas == {\"col1\": schema1, \"col2\": schema2}\n assert selected_schema_neither.column_schemas == {}\n assert selected_schema_multi.column_schemas == {\"col1\": schema1, \"col2\": 
schema2}\n\n\ndef test_dataset_schema_select_by_name():\n schema1 = ColumnSchema(\"col1\", tags=[\"a\", \"b\", \"c\"])\n schema2 = ColumnSchema(\"col2\", tags=[\"b\", \"c\", \"d\"])\n\n ds_schema = Schema([schema1, schema2])\n\n selected_schema1 = ds_schema.select_by_name(\"col1\")\n selected_schema2 = ds_schema.select_by_name(\"col2\")\n\n assert selected_schema1.column_schemas == {\"col1\": schema1}\n assert selected_schema2.column_schemas == {\"col2\": schema2}\n\n selected_schema_multi = ds_schema.select_by_name([\"col1\", \"col2\"])\n\n assert selected_schema_multi.column_schemas == {\"col1\": schema1, \"col2\": schema2}\n\n with pytest.raises(KeyError) as exception_info:\n ds_schema.select_by_name(\"col3\")\n\n assert \"col3\" in str(exception_info.value)\n\n\ndef test_dataset_schemas_can_be_added():\n ds1_schema = Schema([ColumnSchema(\"col1\"), ColumnSchema(\"col2\")])\n ds2_schema = Schema([ColumnSchema(\"col3\"), ColumnSchema(\"col4\")])\n\n result = ds1_schema + ds2_schema\n\n expected = Schema(\n [ColumnSchema(\"col1\"), ColumnSchema(\"col2\"), ColumnSchema(\"col3\"), ColumnSchema(\"col4\")]\n )\n\n assert result == expected\n\n\ndef test_schema_can_be_added_to_none():\n schema_set = Schema([\"a\", \"b\", \"c\"])\n\n assert (schema_set + None) == schema_set\n assert (None + schema_set) == schema_set\n\n\ndef test_construct_schema_with_column_names():\n schema = Schema([\"x\", \"y\", \"z\"])\n expected = Schema([ColumnSchema(\"x\"), ColumnSchema(\"y\"), ColumnSchema(\"z\")])\n\n assert schema == expected\n\n\ndef test_dataset_schema_column_names():\n ds_schema = Schema([\"x\", \"y\", \"z\"])\n\n assert ds_schema.column_names == [\"x\", \"y\", \"z\"]\n\n\ndef test_applying_selector_to_schema_selects_by_name():\n schema = Schema([\"a\", \"b\", \"c\", \"d\", \"e\"])\n selector = ColumnSelector([\"a\", \"b\"])\n result = schema.apply(selector)\n\n assert result == Schema([\"a\", \"b\"])\n\n selector = None\n result = schema.apply(selector)\n\n assert result == schema\n\n\ndef test_applying_selector_to_schema_selects_by_tags():\n schema1 = ColumnSchema(\"col1\", tags=[\"a\", \"b\", \"c\"])\n schema2 = ColumnSchema(\"col2\", tags=[\"b\", \"c\", \"d\"])\n\n schema = Schema([schema1, schema2])\n selector = ColumnSelector(tags=[\"a\", \"b\"])\n result = schema.apply(selector)\n\n assert result.column_names == schema.column_names\n\n\ndef test_applying_selector_to_schema_selects_by_name_or_tags():\n schema1 = ColumnSchema(\"col1\")\n schema2 = ColumnSchema(\"col2\", tags=[\"b\", \"c\", \"d\"])\n\n schema = Schema([schema1, schema2])\n selector = ColumnSelector([\"col1\"], tags=[\"a\", \"b\"])\n result = schema.apply(selector)\n\n assert result.column_names == schema.column_names\n\n\ndef test_applying_inverse_selector_to_schema_selects_relevant_columns():\n schema = Schema([\"a\", \"b\", \"c\", \"d\", \"e\"])\n selector = ColumnSelector([\"a\", \"b\"])\n result = schema.apply_inverse(selector)\n\n assert result == Schema([\"c\", \"d\", \"e\"])\n\n selector = None\n result = schema.apply_inverse(selector)\n\n assert result == schema\n", "id": "8112612", "language": "Python", "matching_score": 2.9901678562164307, "max_stars_count": 543, "path": "tests/unit/columns/test_column_schemas.py" }, { "content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Text\n\nimport numpy\n\n# this needs to be before any modules that import protobuf\n\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\nfrom google.protobuf import json_format, text_format # noqa\nfrom google.protobuf.any_pb2 import Any # noqa\nfrom google.protobuf.struct_pb2 import Struct # noqa\nfrom tensorflow_metadata.proto.v0 import schema_pb2 # noqa\n\nfrom nvtabular.tags import Tags # noqa\n\n\ndef register_extra_metadata(column_schema, feature):\n filtered_properties = {k: v for k, v in column_schema.properties.items() if k != \"domain\"}\n msg_struct = Struct()\n # must pack message into \"Any\" type\n any_pack = Any()\n any_pack.Pack(json_format.ParseDict(filtered_properties, msg_struct))\n # extra_metadata only takes type \"Any\" messages\n feature.annotation.extra_metadata.add().CopyFrom(any_pack)\n return feature\n\n\ndef register_list(column_schema, feature):\n if str(column_schema._is_list):\n min_length, max_length = None, None\n if \"value_count\" in column_schema.properties:\n min_length = column_schema.properties[\"value_count\"][\"min\"]\n max_length = column_schema.properties[\"value_count\"][\"max\"]\n if min_length and max_length and min_length == max_length:\n shape = schema_pb2.FixedShape()\n dim = shape.dim.add()\n dim.size = min_length\n feature.shape.CopyFrom(shape)\n elif min_length and max_length and min_length < max_length:\n feature.value_count.CopyFrom(schema_pb2.ValueCount(min=min_length, max=max_length))\n else:\n # if no min max available set dummy value, to signal this is list\n feature.value_count.CopyFrom(schema_pb2.ValueCount(min=0, max=0))\n return feature\n\n\ndef set_protobuf_float(column_schema, feature):\n domain = column_schema.properties.get(\"domain\", {})\n feature.float_domain.CopyFrom(\n schema_pb2.FloatDomain(\n name=column_schema.name,\n min=domain.get(\"min\", None),\n max=domain.get(\"max\", None),\n )\n )\n feature.type = schema_pb2.FeatureType.FLOAT\n return feature\n\n\ndef set_protobuf_int(column_schema, feature):\n domain = column_schema.properties.get(\"domain\", {})\n feature.int_domain.CopyFrom(\n schema_pb2.IntDomain(\n name=column_schema.name,\n min=domain.get(\"min\", None),\n max=domain.get(\"max\", None),\n is_categorical=(\n Tags.CATEGORICAL in column_schema.tags\n or Tags.CATEGORICAL.value in column_schema.tags\n ),\n )\n )\n feature.type = schema_pb2.FeatureType.INT\n return feature\n\n\ndef register_dtype(column_schema, feature):\n # column_schema is a dict, changes are held\n # TODO: this double check can be refactored\n if column_schema.dtype:\n if column_schema._is_list:\n feature = proto_dict[\"list\"](column_schema, feature)\n if hasattr(column_schema.dtype, \"kind\"):\n string_name = numpy.core._dtype._kind_name(column_schema.dtype)\n elif hasattr(column_schema.dtype, \"item\"):\n string_name = type(column_schema.dtype(1).item()).__name__\n elif isinstance(column_schema.dtype, str):\n string_name = column_schema.dtype\n elif hasattr(column_schema.dtype, \"__name__\"):\n string_name = column_schema.dtype.__name__\n else:\n raise TypeError(f\"unsupported dtype for 
column schema: {column_schema.dtype}\")\n\n if string_name in proto_dict:\n feature = proto_dict[string_name](column_schema, feature)\n return feature\n\n\nproto_dict = {\n \"list\": register_list,\n \"float\": set_protobuf_float,\n \"int\": set_protobuf_int,\n \"uint\": set_protobuf_int,\n}\n\n\ndef create_protobuf_feature(column_schema):\n feature = schema_pb2.Feature()\n feature.name = column_schema.name\n feature = register_dtype(column_schema, feature)\n annotation = feature.annotation\n annotation.tag.extend(\n [tag.value if hasattr(tag, \"value\") else tag for tag in column_schema.tags]\n )\n # can be instantiated with no values\n # if so, unnecessary to dump\n # import pdb; pdb.set_trace()\n if len(column_schema.properties) > 0:\n feature = register_extra_metadata(column_schema, feature)\n return feature\n\n\n@dataclass(frozen=True)\nclass ColumnSchema:\n \"\"\"A schema containing metadata of a dataframe column.\"\"\"\n\n name: Text\n tags: Optional[List[Text]] = field(default_factory=list)\n properties: Optional[Dict[str, any]] = field(default_factory=dict)\n dtype: Optional[object] = None\n _is_list: bool = False\n\n def __post_init__(self):\n tags = _normalize_tags(self.tags or [])\n object.__setattr__(self, \"tags\", tags)\n\n def __str__(self) -> str:\n return self.name\n\n def with_name(self, name) -> \"ColumnSchema\":\n return ColumnSchema(\n name,\n tags=self.tags,\n properties=self.properties,\n dtype=self.dtype,\n _is_list=self._is_list,\n )\n\n def with_tags(self, tags) -> \"ColumnSchema\":\n if not isinstance(tags, list):\n tags = [tags]\n\n tags = list(set(list(self.tags) + tags))\n\n return ColumnSchema(\n self.name,\n tags=tags,\n properties=self.properties,\n dtype=self.dtype,\n _is_list=self._is_list,\n )\n\n def with_properties(self, properties):\n if not isinstance(properties, dict):\n raise TypeError(\"properties must be in dict format, key: value\")\n\n # Using new dictionary to avoid passing old ref to new schema\n properties.update(self.properties)\n\n return ColumnSchema(\n self.name,\n tags=self.tags,\n properties=properties,\n dtype=self.dtype,\n _is_list=self._is_list,\n )\n\n def with_dtype(self, dtype, is_list=None):\n is_list = is_list or self._is_list\n return ColumnSchema(\n self.name, tags=self.tags, properties=self.properties, dtype=dtype, _is_list=is_list\n )\n\n\nclass Schema:\n \"\"\"A collection of column schemas for a dataset.\"\"\"\n\n def __init__(self, column_schemas=None):\n column_schemas = column_schemas or {}\n\n if isinstance(column_schemas, dict):\n self.column_schemas = column_schemas\n elif isinstance(column_schemas, list):\n self.column_schemas = {}\n for column_schema in column_schemas:\n if isinstance(column_schema, str):\n column_schema = ColumnSchema(column_schema)\n self.column_schemas[column_schema.name] = column_schema\n else:\n raise TypeError(\"The `column_schemas` parameter must be a list or dict.\")\n\n @property\n def column_names(self):\n return list(self.column_schemas.keys())\n\n def apply(self, selector):\n if selector:\n schema = Schema()\n if selector.names:\n schema += self.select_by_name(selector.names)\n if selector.tags:\n schema += self.select_by_tag(selector.tags)\n return schema\n return self\n\n def apply_inverse(self, selector):\n if selector:\n return self - self.select_by_name(selector.names)\n return self\n\n def select_by_tag(self, tags):\n if not isinstance(tags, list):\n tags = [tags]\n\n selected_schemas = {}\n\n for _, column_schema in self.column_schemas.items():\n if any(x in column_schema.tags 
for x in tags):\n selected_schemas[column_schema.name] = column_schema\n\n return Schema(selected_schemas)\n\n def select_by_name(self, names):\n if isinstance(names, str):\n names = [names]\n\n selected_schemas = {key: self.column_schemas[key] for key in names}\n return Schema(selected_schemas)\n\n @staticmethod\n def read_protobuf(schema_path):\n with open(schema_path, \"r\") as f:\n schema = schema_pb2.Schema()\n text_format.Parse(f.read(), schema)\n\n return schema\n\n @classmethod\n def load_protobuf(cls, schema_path) -> \"Schema\":\n columns = []\n if isinstance(schema_path, (str, Path)):\n if isinstance(schema_path, str):\n schema_path = Path(schema_path)\n if schema_path.is_dir():\n schema_path = schema_path / \"schema.pbtxt\"\n schema = cls.read_protobuf(schema_path)\n\n for feat in schema.feature:\n _is_list = False\n dtype = None\n properties = {}\n tags = list(feat.annotation.tag) or []\n # only one item should ever be in extra_metadata\n if len(feat.annotation.extra_metadata) > 1:\n raise ValueError(\n f\"{feat.name}: extra_metadata should have 1 item, has \\\n {len(feat.annotation.extra_metadata)}\"\n )\n if feat.annotation.extra_metadata:\n properties = json_format.MessageToDict(feat.annotation.extra_metadata[0])[\"value\"]\n # what domain\n # load the domain values\n shape_name = feat.WhichOneof(\"shape_type\")\n if shape_name:\n _is_list = True\n field_name = feat.WhichOneof(\"domain_info\")\n if field_name:\n domain_values = getattr(feat, field_name)\n # if zero no values were passed\n if domain_values.max > 0:\n properties[\"domain\"] = {\"min\": domain_values.min, \"max\": domain_values.max}\n if feat.type:\n if feat.type == 2:\n dtype = numpy.int\n elif feat.type == 3:\n dtype = numpy.float\n columns.append(\n ColumnSchema(\n feat.name, tags=tags, properties=properties, dtype=dtype, _is_list=_is_list\n )\n )\n\n return Schema(columns)\n\n def save_protobuf(self, schema_path):\n schema_path = Path(schema_path)\n if not schema_path.is_dir():\n raise ValueError(f\"The path provided is not a valid directory: {schema_path}\")\n\n # traverse list of column schema\n schema = schema_pb2.Schema()\n features = []\n for col_name, col_schema in self.column_schemas.items():\n features.append(create_protobuf_feature(col_schema))\n schema.feature.extend(features)\n\n with open(schema_path / \"schema.pbtxt\", \"w\") as f:\n f.write(text_format.MessageToString(schema))\n return self\n\n def __iter__(self):\n return iter(self.column_schemas.values())\n\n def __len__(self):\n return len(self.column_schemas)\n\n def __repr__(self):\n return str([col_schema.__dict__ for col_schema in self.column_schemas.values()])\n\n def __eq__(self, other):\n if not isinstance(other, Schema) or len(self.column_schemas) != len(other.column_schemas):\n return False\n return self.column_schemas == other.column_schemas\n\n def __add__(self, other):\n if other is None:\n return self\n if not isinstance(other, Schema):\n raise TypeError(f\"unsupported operand type(s) for +: 'Schema' and {type(other)}\")\n\n return Schema({**self.column_schemas, **other.column_schemas})\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n if other is None:\n return self\n\n if not isinstance(other, Schema):\n raise TypeError(f\"unsupported operand type(s) for -: 'Schema' and {type(other)}\")\n\n result = Schema({**self.column_schemas})\n\n for key in other.column_schemas.keys():\n if key in self.column_schemas.keys():\n result.column_schemas.pop(key, None)\n\n return result\n\n\ndef 
_normalize_tags(tags):\n return [Tags[tag.upper()] if tag in Tags._value2member_map_ else tag for tag in tags]\n", "id": "596601", "language": "Python", "matching_score": 1.611548662185669, "max_stars_count": 543, "path": "nvtabular/columns/schema.py" }, { "content": "import contextlib\nimport os\nimport signal\nimport subprocess\nimport time\nfrom distutils.spawn import find_executable\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport nvtabular as nvt\nimport nvtabular.ops as ops\nfrom nvtabular import ColumnSelector, Dataset\nfrom nvtabular.dispatch import HAS_GPU, _hash_series, _make_df\nfrom nvtabular.ops.operator import Supports\nfrom tests.conftest import assert_eq\n\ntriton = pytest.importorskip(\"nvtabular.inference.triton\")\ndata_conversions = pytest.importorskip(\"nvtabular.inference.triton.data_conversions\")\n\ngrpcclient = pytest.importorskip(\"tritonclient.grpc\")\ntritonclient = pytest.importorskip(\"tritonclient\")\n\nTRITON_SERVER_PATH = find_executable(\"tritonserver\")\n\n\nBACKEND = \"python\"\nif os.path.exists(\"/opt/tritonserver/backends/nvtabular/libtriton_nvtabular.so\"):\n BACKEND = \"nvtabular\"\n\n\n@contextlib.contextmanager\ndef run_triton_server(modelpath):\n cmdline = [\n TRITON_SERVER_PATH,\n \"--model-repository\",\n modelpath,\n \"--backend-config=tensorflow,version=2\",\n ]\n env = os.environ.copy()\n env[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n with subprocess.Popen(cmdline, env=env) as process:\n try:\n with grpcclient.InferenceServerClient(\"localhost:8001\") as client:\n # wait until server is ready\n for _ in range(60):\n if process.poll() is not None:\n retcode = process.returncode\n raise RuntimeError(f\"Tritonserver failed to start (ret={retcode})\")\n\n try:\n ready = client.is_server_ready()\n except tritonclient.utils.InferenceServerException:\n ready = False\n\n if ready:\n yield client\n return\n\n time.sleep(1)\n\n raise RuntimeError(\"Timed out waiting for tritonserver to become ready\")\n finally:\n # signal triton to shutdown\n process.send_signal(signal.SIGINT)\n\n\ndef _verify_workflow_on_tritonserver(\n tmpdir, workflow, df, model_name, output_model=\"tensorflow\", model_info=None\n):\n \"\"\"tests that the nvtabular workflow produces the same results when run locally in the\n process, and when run in tritonserver\"\"\"\n # fit the workflow and test on the input\n dataset = nvt.Dataset(df)\n workflow.fit(dataset)\n\n local_df = workflow.transform(dataset).to_ddf().compute(scheduler=\"synchronous\")\n triton.generate_nvtabular_model(\n workflow=workflow,\n name=model_name,\n output_path=tmpdir + f\"/{model_name}\",\n version=1,\n output_model=output_model,\n output_info=model_info,\n backend=BACKEND,\n )\n\n inputs = triton.convert_df_to_triton_input(df.columns, df)\n outputs = [grpcclient.InferRequestedOutput(col) for col in workflow.output_dtypes.keys()]\n with run_triton_server(tmpdir) as client:\n response = client.infer(model_name, inputs, outputs=outputs)\n\n for col in workflow.output_dtypes.keys():\n features = response.as_numpy(col)\n triton_df = _make_df({col: features.reshape(features.shape[0])})\n assert_eq(triton_df, local_df[[col]])\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\ndef test_error_handling(tmpdir):\n df = _make_df({\"x\": np.arange(10), \"y\": np.arange(10)})\n\n def custom_transform(col):\n if len(col) == 2:\n raise ValueError(\"Lets cause some problems\")\n return col\n\n features = [\"x\", \"y\"] >> ops.FillMissing() >> ops.Normalize() >> 
custom_transform\n workflow = nvt.Workflow(features)\n workflow.fit(nvt.Dataset(df))\n\n model_name = \"test_error_handling\"\n triton.generate_nvtabular_model(\n workflow, model_name, tmpdir + f\"/{model_name}\", backend=BACKEND\n )\n\n with run_triton_server(tmpdir) as client:\n inputs = triton.convert_df_to_triton_input([\"x\", \"y\"], df[:2])\n with pytest.raises(tritonclient.utils.InferenceServerException) as exception_info:\n client.infer(model_name, inputs)\n\n assert \"ValueError: Lets cause some problems\" in str(exception_info.value)\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_tritonserver_inference_string(tmpdir, output_model):\n df = _make_df({\"user\": [\"aaaa\", \"bbbb\", \"cccc\", \"aaaa\", \"bbbb\", \"aaaa\"]})\n features = [\"user\"] >> ops.Categorify()\n workflow = nvt.Workflow(features)\n\n if output_model == \"pytorch\":\n model_info = {\"user\": {\"columns\": [\"user\"], \"dtype\": \"int64\"}}\n else:\n model_info = None\n _verify_workflow_on_tritonserver(\n tmpdir, workflow, df, \"test_inference_string\", output_model, model_info\n )\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_large_strings(tmpdir, output_model):\n strings = [\"a\" * (2 ** exp) for exp in range(1, 17)]\n df = _make_df({\"description\": strings})\n features = [\"description\"] >> ops.Categorify()\n workflow = nvt.Workflow(features)\n\n if output_model == \"pytorch\":\n model_info = {\"description\": {\"columns\": [\"description\"], \"dtype\": \"int64\"}}\n else:\n model_info = None\n _verify_workflow_on_tritonserver(\n tmpdir, workflow, df, \"test_large_string\", output_model, model_info\n )\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_concatenate_dataframe(tmpdir, output_model):\n # we were seeing an issue in the rossmann workflow where we dropped certain columns,\n # https://github.com/NVIDIA/NVTabular/issues/961\n df = _make_df(\n {\n \"cat\": [\"aaaa\", \"bbbb\", \"cccc\", \"aaaa\", \"bbbb\", \"aaaa\"],\n \"cont\": [0.0, 1.0, 2.0, 3.0, 4.0, 5],\n }\n )\n # this bug only happened with a dataframe representation: force this by using a lambda\n cats = [\"cat\"] >> ops.LambdaOp(lambda col: _hash_series(col) % 1000)\n conts = [\"cont\"] >> ops.Normalize() >> ops.FillMissing() >> ops.LogOp()\n\n dataset = Dataset(df)\n workflow = nvt.Workflow(cats + conts).fit_schema(dataset.infer_schema())\n\n if output_model == \"pytorch\":\n model_info = {\n \"cat\": {\"columns\": [\"cat\"], \"dtype\": \"int32\"},\n \"cont\": {\"columns\": [\"cont\"], \"dtype\": \"float32\"},\n }\n else:\n model_info = None\n\n _verify_workflow_on_tritonserver(\n tmpdir, workflow, df, \"test_concatenate_dataframe\", output_model, model_info\n )\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_numeric_dtypes(tmpdir, output_model):\n if output_model == \"pytorch\":\n model_info = dict()\n else:\n model_info = None\n\n dtypes = []\n for width in [8, 16, 32, 64]:\n dtype = f\"int{width}\"\n dtypes.append((dtype, np.iinfo(dtype)))\n if output_model == \"pytorch\":\n model_info[dtype] = 
{\"columns\": [dtype], \"dtype\": dtype}\n\n dtype = f\"uint{width}\"\n dtypes.append((dtype, np.iinfo(dtype)))\n if output_model == \"pytorch\":\n model_info[dtype] = {\"columns\": [dtype], \"dtype\": dtype}\n\n for width in [32, 64]:\n dtype = f\"float{width}\"\n dtypes.append((dtype, np.finfo(dtype)))\n if output_model == \"pytorch\":\n model_info[dtype] = {\"columns\": [dtype], \"dtype\": dtype}\n\n def check_dtypes(col):\n assert str(col.dtype) == col.name\n return col\n\n # simple transform to make sure we can round-trip the min/max values for each dtype,\n # through triton, with the 'transform' here just checking that the dtypes are correct\n df = _make_df(\n {dtype: np.array([limits.max, 0, limits.min], dtype=dtype) for dtype, limits in dtypes}\n )\n features = nvt.ColumnSelector(df.columns) >> check_dtypes\n workflow = nvt.Workflow(features)\n _verify_workflow_on_tritonserver(\n tmpdir, workflow, df, \"test_numeric_dtypes\", output_model, model_info\n )\n\n\ndef test_generate_triton_multihot(tmpdir):\n df = _make_df(\n {\n \"userId\": [\"a\", \"a\", \"b\"],\n \"movieId\": [\"1\", \"2\", \"2\"],\n \"genres\": [[\"action\", \"adventure\"], [\"action\", \"comedy\"], [\"comedy\"]],\n }\n )\n\n cats = [\"userId\", \"movieId\", \"genres\"] >> nvt.ops.Categorify()\n workflow = nvt.Workflow(cats)\n workflow.fit(nvt.Dataset(df))\n expected = workflow.transform(nvt.Dataset(df)).to_ddf().compute()\n\n # save workflow to triton / verify we see some expected output\n repo = os.path.join(tmpdir, \"models\")\n triton.generate_nvtabular_model(workflow, \"model\", repo)\n workflow = None\n\n assert os.path.exists(os.path.join(repo, \"config.pbtxt\"))\n\n workflow = nvt.Workflow.load(os.path.join(repo, \"1\", \"workflow\"))\n transformed = workflow.transform(nvt.Dataset(df)).to_ddf().compute()\n\n assert_eq(expected, transformed)\n\n\n@pytest.mark.parametrize(\"engine\", [\"parquet\"])\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_generate_triton_model(tmpdir, engine, output_model, df):\n tmpdir = \"./tmp\"\n conts = [\"x\", \"y\", \"id\"] >> ops.FillMissing() >> ops.Normalize()\n cats = [\"name-cat\", \"name-string\"] >> ops.Categorify(cat_cache=\"host\")\n workflow = nvt.Workflow(conts + cats)\n workflow.fit(nvt.Dataset(df))\n expected = workflow.transform(nvt.Dataset(df)).to_ddf().compute()\n\n # save workflow to triton / verify we see some expected output\n if output_model == \"pytorch\":\n model_info = {\n \"name-cat\": {\"columns\": [\"name-cat\"], \"dtype\": \"int64\"},\n \"name-string\": {\"columns\": [\"name-string\"], \"dtype\": \"int64\"},\n \"id\": {\"columns\": [\"id\"], \"dtype\": \"float32\"},\n \"x\": {\"columns\": [\"x\"], \"dtype\": \"float32\"},\n \"y\": {\"columns\": [\"y\"], \"dtype\": \"float32\"},\n }\n else:\n model_info = None\n\n repo = os.path.join(tmpdir, \"models\")\n triton.generate_nvtabular_model(\n workflow=workflow,\n name=\"model\",\n output_path=repo,\n version=1,\n output_model=output_model,\n output_info=model_info,\n )\n workflow = None\n\n assert os.path.exists(os.path.join(repo, \"config.pbtxt\"))\n\n workflow = nvt.Workflow.load(os.path.join(repo, \"1\", \"workflow\"))\n transformed = workflow.transform(nvt.Dataset(df)).to_ddf().compute()\n assert_eq(expected, transformed)\n\n\n# lets test the data format conversion function on the full cartesian product\n# of the Support flags\n_SUPPORTS = list(Supports)\nif not HAS_GPU:\n _SUPPORTS = [s for s in _SUPPORTS if \"GPU\" not in 
str(s)]\n\n\n@pytest.mark.parametrize(\"_from\", _SUPPORTS)\n@pytest.mark.parametrize(\"_to\", _SUPPORTS)\ndef test_convert_format(_from, _to):\n convert_format = data_conversions.convert_format\n\n # we want to test conversion from '_from' to '_to' but this requires us roundtripping\n # from a known format. I'm picking pd -> _from -> _to -> pandas somewhat arbitrarily\n df = pd.DataFrame(\n {\"float\": [0.0, 1.0, 2.0], \"int\": [10, 11, 12], \"multihot\": [[0, 1, 2, 3], [3, 4], [5]]}\n )\n\n if _from != Supports.GPU_DICT_ARRAY and _to != Supports.GPU_DICT_ARRAY:\n df[\"string\"] = [\"aa\", \"bb\", \"cc\"]\n df[\"multihot_string\"] = [[\"aaaa\", \"bb\", \"cc\"], [\"dd\", \"ee\"], [\"fffffff\"]]\n\n start, kind = convert_format(df, Supports.CPU_DATAFRAME, _from)\n assert kind == _from\n mid, kind = convert_format(start, kind, _to)\n assert kind == _to\n final, kind = convert_format(mid, kind, Supports.CPU_DATAFRAME)\n assert kind == Supports.CPU_DATAFRAME\n assert_eq(df, final)\n\n\n@pytest.mark.skipif(TRITON_SERVER_PATH is None, reason=\"Requires tritonserver on the path\")\n@pytest.mark.parametrize(\"output_model\", [\"tensorflow\", \"pytorch\"])\ndef test_groupby_model(tmpdir, output_model):\n size = 20\n df = _make_df(\n {\n \"id\": np.random.choice([0, 1], size=size),\n \"ts\": np.linspace(0.0, 10.0, num=size),\n \"x\": np.arange(size),\n \"y\": np.linspace(0.0, 10.0, num=size),\n }\n )\n\n groupby_features = ColumnSelector([\"id\", \"ts\", \"x\", \"y\"]) >> ops.Groupby(\n groupby_cols=[\"id\"],\n sort_cols=[\"ts\"],\n aggs={\n \"x\": [\"sum\"],\n \"y\": [\"first\"],\n },\n name_sep=\"-\",\n )\n workflow = nvt.Workflow(groupby_features)\n\n if output_model == \"pytorch\":\n model_info = {\n \"x-sum\": {\"columns\": [\"x-sum\"], \"dtype\": \"int64\"},\n \"y-first\": {\"columns\": [\"y-first\"], \"dtype\": \"float64\"},\n \"id\": {\"columns\": [\"id\"], \"dtype\": \"int64\"},\n }\n else:\n model_info = None\n\n _verify_workflow_on_tritonserver(tmpdir, workflow, df, \"groupby\", output_model, model_info)\n", "id": "7024007", "language": "Python", "matching_score": 3.2687442302703857, "max_stars_count": 543, "path": "tests/unit/test_triton_inference.py" }, { "content": "#\r\n# Copyright (c) 2021, NVIDIA CORPORATION.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n\r\nimport datetime as dt\r\nimport itertools\r\nimport json\r\nimport os\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\n\r\nimport cudf\r\nimport cupy as cp\r\nimport tritonclient.grpc as grpcclient\r\nfrom tritonclient.utils import np_to_triton_dtype\r\n\r\nimport nvtabular as nvt\r\n\r\n\r\ndef _run_notebook(\r\n tmpdir,\r\n notebook_path,\r\n input_path,\r\n output_path,\r\n batch_size=None,\r\n gpu_id=0,\r\n clean_up=True,\r\n transform=None,\r\n params=None,\r\n main_block=-1,\r\n):\r\n params = params or []\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = os.environ.get(\"GPU_TARGET_ID\", gpu_id)\r\n\r\n if not os.path.exists(input_path):\r\n os.makedirs(input_path)\r\n if not 
os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n if batch_size:\r\n os.environ[\"BATCH_SIZE\"] = os.environ.get(\"BATCH_SIZE\", batch_size)\r\n\r\n os.environ[\"INPUT_DATA_DIR\"] = input_path\r\n os.environ[\"OUTPUT_DATA_DIR\"] = output_path\r\n # read in the notebook as JSON, and extract a python script from it\r\n notebook = json.load(open(notebook_path))\r\n source_cells = [cell[\"source\"] for cell in notebook[\"cells\"] if cell[\"cell_type\"] == \"code\"]\r\n\r\n lines = [\r\n transform(line.rstrip()) if transform else line\r\n for line in itertools.chain(*source_cells)\r\n if not (line.startswith(\"%\") or line.startswith(\"!\"))\r\n ]\r\n\r\n # Replace config parms\r\n if params:\r\n\r\n def transform_fracs(line):\r\n line = line.replace(\"device_limit_frac = 0.7\", \"device_limit_frac = \" + str(params[0]))\r\n line = line.replace(\"device_pool_frac = 0.8\", \"device_pool_frac = \" + str(params[1]))\r\n return line.replace(\"part_mem_frac = 0.15\", \"part_mem_frac = \" + str(params[2]))\r\n\r\n lines = [transform_fracs(line) for line in lines]\r\n\r\n # Add guarding block and indentation\r\n if main_block >= 0:\r\n lines.insert(main_block, 'if __name__ == \"__main__\":')\r\n for i in range(main_block + 1, len(lines)):\r\n lines[i] = \" \" + lines[i]\r\n\r\n # save the script to a file, and run with the current python executable\r\n # we're doing this in a subprocess to avoid some issues using 'exec'\r\n # that were causing a segfault with globals of the exec'ed function going\r\n # out of scope\r\n script_path = os.path.join(tmpdir, \"notebook.py\")\r\n with open(script_path, \"w\") as script:\r\n script.write(\"\\n\".join(lines))\r\n output = subprocess.check_output([sys.executable, script_path])\r\n # save location will default to run location\r\n output = output.decode(\"utf-8\")\r\n _, note_name = os.path.split(notebook_path)\r\n note_name = note_name.split(\".\")[0]\r\n if output:\r\n with open(f\"test_res_{note_name}\", \"w+\") as w_file:\r\n w_file.write(output)\r\n # clear out products\r\n if clean_up:\r\n shutil.rmtree(output_path)\r\n return output\r\n\r\n\r\ndef _run_query(\r\n client,\r\n n_rows,\r\n model_name,\r\n workflow_path,\r\n data_path,\r\n actual_output_filename,\r\n output_name,\r\n input_cols_name=None,\r\n backend=\"tensorflow\",\r\n):\r\n\r\n workflow = nvt.Workflow.load(workflow_path)\r\n\r\n if input_cols_name is None:\r\n batch = cudf.read_csv(data_path, nrows=n_rows)[workflow.output_node.input_columns.names]\r\n else:\r\n batch = cudf.read_csv(data_path, nrows=n_rows)[input_cols_name]\r\n\r\n input_dtypes = workflow.input_dtypes\r\n columns = [(col, batch[col]) for col in batch.columns]\r\n\r\n inputs = []\r\n for i, (name, col) in enumerate(columns):\r\n d = col.values_host.astype(input_dtypes[name])\r\n d = d.reshape(len(d), 1)\r\n inputs.append(grpcclient.InferInput(name, d.shape, np_to_triton_dtype(input_dtypes[name])))\r\n inputs[i].set_data_from_numpy(d)\r\n\r\n outputs = [grpcclient.InferRequestedOutput(output_name)]\r\n time_start = dt.datetime.now()\r\n response = client.infer(model_name, inputs, request_id=\"1\", outputs=outputs)\r\n run_time = dt.datetime.now() - time_start\r\n\r\n output_key = \"output\" if backend == \"hugectr\" else \"0\"\r\n\r\n output_actual = cudf.read_csv(os.path.expanduser(actual_output_filename), nrows=n_rows)\r\n output_actual = cp.asnumpy(output_actual[output_key].values)\r\n output_predict = response.as_numpy(output_name)\r\n\r\n if backend == \"tensorflow\":\r\n output_predict = output_predict[:, 
0]\r\n\r\n diff = abs(output_actual - output_predict)\r\n return diff, run_time\r\n", "id": "7794434", "language": "Python", "matching_score": 1.2576297521591187, "max_stars_count": 0, "path": "tests/integration/common/utils.py" }, { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nExample constants used in tests\n\"\"\"\nimport random\nimport string\nfrom datetime import datetime\nfrom Crypto.PublicKey import RSA\n\n\ndef generate_rsa_key():\n \"\"\"Generate priv,pub key pair for test\"\"\"\n key = RSA.generate(2048)\n private_key = key.export_key()\n public_key = key.publickey().export_key()\n return private_key, public_key\n\ndef get_random_string(length):\n \"\"\"Generate random string of given length\"\"\"\n return ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\nLOG_LEVEL=\"info\"\n\nEXAMPLE_PRIVATE_KEY, EXAMPLE_PUBLIC_KEY = generate_rsa_key()\n\nTIMESTAMP_FORMAT = '%Y-%m-%d'\nEXAMPLE_USER_REQUEST = {\n 'username': 'jdoe',\n 'password': '<PASSWORD>',\n 'password-repeat': '<PASSWORD>',\n 'firstname': 'John',\n 'lastname': 'Doe',\n 'birthday': '2000-01-01',\n 'timezone': 'GMT+1',\n 'address': '1600 Amphitheatre Parkway',\n 'state': 'CA',\n 'zip': '94043',\n 'ssn': '123',\n}\nEXAMPLE_USER = {\n 'accountid': '123',\n 'username': 'jdoe',\n 'passhash': b'<PASSWORD>',\n 'firstname': 'John',\n 'lastname': 'Doe',\n 'birthday': datetime.strptime('2000-01-01', TIMESTAMP_FORMAT).date(),\n 'timezone': 'GMT+1',\n 'address': '1600 Amphitheatre Parkway',\n 'state': 'CA',\n 'zip': '94043',\n 'ssn': '123',\n}\nEXPECTED_FIELDS = [\n 'username',\n 'password',\n '<PASSWORD>',\n 'firstname',\n 'lastname',\n 'birthday',\n 'timezone',\n 'address',\n 'state',\n 'zip',\n 'ssn',\n]\n\n# Usernames must be >1 and <=15 chars, alphanumeric and underscores\nINVALID_USERNAMES = [\n None, # null\n \"\", # empty string\n \" \", # only space\n \"b\", # single character\n \" user\", # starting with space\n \"*$&%($\", # non alphanumeric characters\n \"user*new\", # alphanumeric with non alphanumeric characters\n \"🏦💸\", # emojis\n \"user1💸\", # alphanumeric with emojis\n get_random_string(16), # 16 characters\n \" {}\".format(get_random_string(15)), # 15 characters + leading space\n \"{} \".format(get_random_string(15)), # 15 characters + trailing space\n \"{}\".format(get_random_string(100)), # 100 characters\n ]\n", "id": "5173915", "language": "Python", "matching_score": 0.4460531175136566, "max_stars_count": 3, "path": "bank-of-anthos/src/userservice/tests/constants.py" }, { "content": "from enum import Enum\n\n\nclass Tags(Enum):\n # Feature types\n CATEGORICAL = \"categorical\"\n CONTINUOUS = \"continuous\"\n LIST = \"list\"\n TEXT = \"text\"\n TEXT_TOKENIZED = \"text_tokenized\"\n TIME = \"time\"\n\n # Feature context\n USER = \"user\"\n ITEM = \"item\"\n ITEM_ID = \"item_id\"\n CONTEXT = \"context\"\n\n # Target related\n TARGETS = \"target\"\n BINARY = \"binary\"\n REGRESSION = \"regression\"\n MULTI_CLASS = \"multi_class\"\n", "id": 
"2026084", "language": "Python", "matching_score": 0.3148929178714752, "max_stars_count": 543, "path": "nvtabular/tags.py" } ]
1.611549
MullaAhmed
[ { "content": "def truth_question(t_q):\r\n from random import randint\r\n x=randint(0,(len(t_q)-1))\r\n z=str(t_q[x])\r\n t_q.pop(x)\r\n return(z)\r\n\r\ndef dare_question(d_q):\r\n from random import randint\r\n x=randint(0,(len(d_q)-1))\r\n z=str(d_q[x])\r\n d_q.pop(x)\r\n return(z)\r\n\r\n", "id": "10779698", "language": "Python", "matching_score": 2.2263433933258057, "max_stars_count": 0, "path": "Questions.py" }, { "content": "#importing questions\r\nfrom tkinter.font import BOLD\r\nimport pandas as pd\r\nt_data=pd.read_csv(\"Truth.csv\")\r\nt_q=list(t_data.iloc[:,1].values)\r\nd_data=pd.read_csv(\"Dare.csv\")\r\nd_q=list(d_data.iloc[:,1].values)\r\n\r\n\r\nimport tkinter , Questions\r\nwin1=tkinter.Tk() #Handle\r\nwin1.geometry(\"1193x671\") #Size of the window\r\n\r\n\r\nclass Truth():\r\n def __init__(self):\r\n self.win2 = tkinter.Tk()\r\n self.win2.geometry('1900x540')\r\n label = tkinter.Label(self.win2, text=(Questions.truth_question(t_q)), pady=250, padx=100, bg='#faeadd',fg='brown', font=(\"Helvetica\", \"30\", \"bold\"))\r\n label.pack(expand=True,fill=tkinter.X)\r\n back = tkinter.Button(self.win2, text='Back',padx=100, pady=10, bg='white', command=self.quit)\r\n back.place(x=620,y=490)\r\n self.win2.mainloop()\r\n\r\n def quit(self):\r\n self.win2.destroy()\r\n\r\n\r\nclass Dare():\r\n def __init__(self):\r\n self.win2 = tkinter.Tk()\r\n self.win2.geometry('1900x540')\r\n label = tkinter.Label(self.win2, text=(Questions.dare_question(d_q)), pady=250, padx=100, bg='#faeadd', fg='brown', font=(\"Helvetica\", \"30\", \"bold\")) \r\n label.pack(expand=True,fill=tkinter.X)\r\n back = tkinter.Button(self.win2, text='Click and Quit',padx=100,pady=10,bg='white',command=self.quit)\r\n back.place(x=620,y=490)\r\n self.win2.mainloop()\r\n\r\n def quit(self):\r\n self.win2.destroy()\r\n\r\n\r\n#Main Screen Backgrounds\r\nbg = tkinter.PhotoImage(file = \"Back.png\")\r\n \r\n\r\n# Show image using label\r\nbackground = tkinter.Label( win1, image = bg)\r\nbackground.place(x =0, y = 0)\r\n\r\n#Middle line\r\nlabel1 = tkinter.Label(win1,pady=1000,bg=\"black\")\r\nlabel1.place(x=600)\r\n\r\nlabel2 = tkinter.Label(win1, text=\"What Do You Pick?\",bg=\"#faeadd\", font=(\"Helvetica\", \"55\", \"bold\"))\r\nlabel2.place(x=210,y=160)\r\n\r\n#Buttons\r\nTruth = tkinter.Button(win1, text=\"Truth\", command=Truth,pady=10,padx=70,bg=\"brown\",font=(\"Helvetica\", \"20\",\"bold\")) \r\nTruth.config(fg=\"white\")\r\nTruth.place(x=250,y=400)\r\n\r\nDare = tkinter.Button(win1, text=\"Dare\" , command=Dare,pady=10,padx=70,bg=\"pink\",font=(\"Helvetica\", \"20\",\"bold\")) \r\nDare.place(x=700,y=400)\r\n\r\nwin1.mainloop()\r\n", "id": "729724", "language": "Python", "matching_score": 0.6838759183883667, "max_stars_count": 0, "path": "Main.py" }, { "content": "# Importing libraries\r\n#general stuff\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#apna time ayega\r\nfrom preprocessing import *\r\nfrom util import *\r\n\r\n#sk learn\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.model_selection import train_test_split\r\ndatasets=[\"B0005.mat\",\"B0006.mat\",\"B0007.mat\",\"B0018.mat\",'B0025.mat', 'B0026.mat', 'B0027.mat', 'B0028.mat', 'B0029.mat', 'B0030.mat', 'B0031.mat', 'B0032.mat', 'B0033.mat', 'B0034.mat', 'B0036.mat', 'B0038.mat', 'B0039.mat', 'B0040.mat', 'B0041.mat', 'B0045.mat', 'B0046.mat', 'B0047.mat', 'B0048.mat', 'B0049.mat', 'B0051.mat', 'B0053.mat', 'B0054.mat', 'B0055.mat', 'B0056.mat']\r\n\r\n#dataset not working 42,43,44,50,52\r\n#datasets that 
suck \r\ngood=[]\r\nbad=[]\r\n\r\nfor i in datasets:\r\n print(i)\r\n # Importing dataset (temp 24)\r\n battery = loadMat(i)\r\n #Creating dataframe\r\n dfbattery = getDataframe(battery)\r\n\r\n l=[]#temperory storage for r 2 scores\r\n d={}#temp dict to get random state values from r2_score\r\n for j in range (10):\r\n x_train_0, x_test_0, y_train_0, y_test_0 = train_test_split(dfbattery['cycle'], dfbattery['capacity'], test_size=0.1,random_state=j)\r\n lst_x, lst_y =(x_train_0, y_train_0)\r\n x_train_0=np.array(x_train_0)\r\n y_train_0=np.array(y_train_0)\r\n\r\n #training model\r\n from sklearn.svm import SVR\r\n x_train_0 = x_train_0.reshape(-1, 1)\r\n y_train_0 = y_train_0.reshape(-1, 1)\r\n regressor = SVR(C=2000, epsilon=0.0001,kernel='rbf')\r\n regressor.fit(x_train_0,y_train_0)\r\n y_pred = regressor.predict(x_test_0.values.reshape(-1, 1))\r\n\r\n # Evaluating the Model Performance\r\n from sklearn.metrics import r2_score\r\n x=float(r2_score(y_test_0,y_pred))\r\n l.append(x)\r\n d[x]=j\r\n \r\n\r\n\r\n z=(d[(max(l))])\r\n if max(l)>0.80:\r\n good.append(\"for {0} value of i {1} and r2_score {2} \".format(i,z,max(l)))\r\n else:\r\n bad.append(\"for {0} value of i {1} and r2_score {2} \".format(i,z,max(l)))\r\n \r\n x_train, x_test, y_train, y_test = train_test_split(dfbattery['cycle'], dfbattery['capacity'], test_size=0.1,random_state=z)\r\n x_train=np.array(x_train)\r\n y_train=np.array(y_train)\r\n\r\n x_train = x_train.reshape(-1, 1) #changes from 1 d array to 2 d array\r\n y_train = y_train.reshape(-1, 1)\r\n\r\n #Fitting model\r\n regressor = SVR(C=2000, epsilon=0.0001,kernel='rbf') #epsilon defines the tube inside which error is allowed(must be small)\r\n regressor.fit(x_train,y_train)\r\n\r\n #Predicting data\r\n y_pred = regressor.predict(x_test.values.reshape(-1, 1))\r\n\r\n #Plotting curve\r\n plt.plot(dfbattery['cycle'], dfbattery['capacity'],color='black')\r\n plt.plot(dfbattery['cycle'],regressor.predict(dfbattery[\"cycle\"].values.reshape(-1, 1)))\r\n plt.xlabel('Cycles')\r\n plt.ylabel('Battery Capacity')\r\n temp='Model performance for Battery '+ str((i.split(\".\"))[0])\r\n plt.title(temp)\r\n plt.show()\r\n\r\n\r\n \r\nfor j in good:\r\n print(\"GOOD\")\r\n print(j)\r\nfor k in bad:\r\n print(\"Bad\")\r\n print(k)", "id": "12832446", "language": "Python", "matching_score": 2.731546401977539, "max_stars_count": 0, "path": "capacity.py" }, { "content": "import pandas as pd\nimport numpy as np\n\n#gets value of capacity vale from every discharging cycle\ndef getBatteryCapcity(Battery):\n cycle = []\n capacity = []\n i = 1\n for Bat in Battery:\n if Bat['cycle'] == 'discharge':\n cycle.append(i)\n capacity.append(Bat['data']['Capacity'][0])\n i += 1\n return [cycle, capacity]\n\n#calculates battery retention percentage\ndef getBatteryCapacityRetention(capacity):\n retention=[]\n cycle=[]\n\n for i in range (len(capacity)):\n temp=(capacity[i]/max(capacity))*100\n retention.append(temp)\n cycle.append(i+1) \n return[cycle,retention]\n\n#getting all values from charging cycle (2d array)\ndef getChargingValues(Battery, Index):\n Battery = Battery[Index]['data']\n index = []\n i = 1\n for iterator in Battery['Voltage_measured']:\n index.append(i)\n i += 1\n return [index, Battery['Voltage_measured'], Battery['Current_measured'], Battery['Temperature_measured'], Battery['Voltage_charge'], Battery['Time']]\n\n\n#getting all values from discharging cycle (2d array)\ndef getDischargingValues(Battery, Index):\n Battery = Battery[Index]['data']\n index = []\n i = 1\n for 
iterator in Battery['Voltage_measured']:\n        index.append(i)\n        i += 1\n    return (pd.DataFrame({\"Voltage measured\": Battery['Voltage_measured'],\"Current measured\" : Battery['Current_measured'], \"Temperature measured\": Battery['Temperature_measured'],\"Voltage load\": Battery['Voltage_load'],\"Time\" : Battery['Time']}))\n\n#gets maximum discharging temperature from each discharging\ndef getMaxDischargeTemp(Battery):\n    cycle = []\n    temp = []\n    i = 1\n    for Bat in Battery:\n        if Bat['cycle'] == 'discharge':\n            cycle.append(i)\n            temp.append(max(Bat['data']['Temperature_measured']))\n            i += 1\n    return [cycle, temp]\n\ndef getMaxChargeTemp(Battery):\n    cycle = []\n    temp = []\n    i = 1\n    x=len(getMaxDischargeTemp(Battery)[0])+1\n    for Bat in Battery :\n        if Bat['cycle'] == 'charge' and i<x :\n            cycle.append(i)\n            temp.append(max(Bat['data']['Temperature_measured']))\n            i += 1\n    return [cycle, temp]\n\n#makes a dataframe\ndef getDataframe(Battery):\n    l = getBatteryCapcity(Battery)\n    data = {'cycle':l[0],'capacity':l[1]}\n    return pd.DataFrame(data)\n\n\n\t#same as the one used for stocks(try with exp / weighted moving avgs)\ndef moving_average(data, window_size):\n    window = np.ones(int(window_size))/float(window_size)\n    #makes a [1,1,1...] (len=window_size)\n    #divides those 1s by window_size and returns that array\n\n    return np.convolve(data, window, 'same')\n    #convolve function same mode(there are 2 other modes too)(returns same length as input)\n    #check notes\n\n#select a time frame say 5 days values are 2,3,4,5,6 on respective days and 11 on 6th day\n#moving avg= (2+3+4+5+6)/5 on 5th day\n#moving avg=(3+4+5+6+11)/5 on the 6th day\n\ndef rollingAverage(x_stuff, y_stuff):\n    window_size = 10\n\n    avg = moving_average(y_stuff, window_size) #this will give moving averages of capacity with window of 10\n    avg_list=list(avg)\n    residual = y_stuff - avg\n    testing_std = residual.rolling(window_size).std()\n    testing_std_as_df = pd.DataFrame(testing_std)\n    rolling_std = testing_std_as_df.replace(np.nan,testing_std_as_df.iloc[window_size - 1]).round(3).iloc[:,0].tolist()\n\n    std = np.std(residual)\n    lst=[]\n    lst_index = 0\n    lst_count = 0\n    for i in y_stuff.index:\n        if (y_stuff[i] > avg_list[lst_index] + (1.5 * rolling_std[lst_index])) | (y_stuff[i] < avg_list[lst_index] - (1.5 * rolling_std[lst_index])):\n            lt=[i,x_stuff[i], y_stuff[i],avg_list[lst_index],rolling_std[lst_index]]\n            lst.append(lt)\n            lst_count+=1\n        lst_index+=1\n\n    lst_x = []\n    lst_y = []\n\n    for i in range (0,len(lst)):\n        lst_x.append(lst[i][1])\n        lst_y.append(lst[i][2])\n\n    return lst_x, lst_y", "id": "4725521", "language": "Python", "matching_score": 2.0340585708618164, "max_stars_count": 0, "path": "util.py" }, { "content": "import scipy.io\nimport numpy as np\nfrom datetime import datetime\nimport pandas as pd\n\n\ndef convert_to_time(hmm):\n\treturn datetime(year=int(hmm[0]),month=int(hmm[1]),day=int(hmm[2]), hour=int(hmm[3]),minute=int(hmm[4]),second=int(hmm[5]))\n\ndef loadMat(matfile):\n\tdata = scipy.io.loadmat(matfile)\n\tfilename = matfile.split(\".\")[0] #B0005.mat --> B0005(str)\n\tcol = data[filename]#data(var) is a dict(len 4- but the 1st 3 are not useful ) and pure data(value) is extracted here \n\tcol = col[0][0][0][0]\n\tsize = col.shape[0] #shape returns length of multidimensional array as an array\n\n\tda = []\n\n\tfor i in range(size):\n\t\tk=list(col[i][3][0].dtype.fields.keys())#k will return list of properties (headings) from each cycle\n\t\td1 = {}\n\t\td2 = {}\n\t\tif str(col[i][0][0]) != 'impedance':#this will look for charging 
discharging impedance part\n\t\t\tfor j in range(len(k)):\n\t\t\t\tt=col[i][3][0][0][j][0] # j here gives fields and t returns value of each field\n\t\t\t\tl=list(t)\n\t\t\t\td2[k[j]]=l #after running through the j loop d2 will have all the info about 1 particular cycle\n\t\t\n\t\td1['cycle']=str(col[i][0][0])\n\t\td1['temp']=int(col[i][1][0])\n\t\td1['time']=str(convert_to_time(col[i][2][0]))\n\t\td1['data']=d2\n\t\tda.append(d1)\n\n\treturn da\n\n\n\n\n\n\n", "id": "1119574", "language": "Python", "matching_score": 0.30727025866508484, "max_stars_count": 1, "path": "preprocessing.py" } ]
2.034059
wsojka00
[ { "content": "# reproducible-workflow-template", "id": "9992458", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "utils/plotting.py" }, { "content": "\"\"\"\npreproc.py\n==========\n\npreprocessing functions (blinks, smoothing, missing data...)\n\"\"\"\nimport numpy as np\nimport scipy.optimize\nimport pylab as plt\n\nfrom .convenience import *\n\n\ndef smooth_window(x,window_len=11,window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n \n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal \n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n \n adapted from SciPy Cookbook: `<https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html>`_.\n \n Parameters\n ---------- \n x: the input signal \n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n Returns\n -------\n np.array: the smoothed signal \n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n\n if window_len<3:\n return x\n\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window should be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n\n y=np.convolve(w/w.sum(),s,mode='same')\n return y[(window_len-1):(-window_len+1)]\n\n\ndef detect_blinks_velocity(sy, smooth_winsize, vel_onset, vel_offset, min_onset_len=5, min_offset_len=5):\n \"\"\"\n Detect blinks as everything between a fast downward and a fast upward-trending PD-changes.\n \n This works similarly to :py:func:`blink_onsets_mahot()`.\n \n Parameters\n ----------\n sy: np.array\n pupil data\n smooth_winsize: int (odd)\n size of the Hanning-window in sampling points\n vel_onset: float\n velocity-threshold to detect the onset of the blink\n vel_offset: float\n velocity-threshold to detect the offset of the blink\n min_onset_len: int\n minimum number of consecutive samples that cross the threshold to detect onset\n min_offset_len: int\n minimum number of consecutive samples that cross the threshold to detect offset\n \"\"\"\n # generate smoothed signal and velocity-profile\n sym=smooth_window(sy, smooth_winsize, \"hanning\")\n vel=np.r_[0,np.diff(sym)] \n n=sym.size\n\n # find first negative vel-crossing \n onsets=np.where(vel<=vel_onset)[0]\n onsets_ixx=np.r_[np.diff(onsets),10]>1\n onsets_len=np.diff(np.r_[0,np.where(onsets_ixx)[0]])\n onsets=onsets[onsets_ixx]\n onsets=onsets[onsets_len>min_onset_len]\n\n ## offset finding\n offsets=np.where(vel>=vel_offset)[0]\n offsets_ixx=np.r_[10,np.diff(offsets)]>1\n offsets_len=np.diff(np.r_[np.where(offsets_ixx)[0],offsets.size])\n offsets=offsets[offsets_ixx]\n offsets=offsets[offsets_len>min_offset_len]\n \n \n ## find corresponding on- and off-sets\n blinks=[]\n on=onsets[0]\n while on is not None:\n offs=offsets[offsets>on]\n off=offs[0] if offs.size>0 else n\n blinks.append([on,off])\n ons=onsets[onsets>off]\n on=ons[0] if ons.size>0 else None\n \n ## if on- off-sets 
fall in a zero-region, grow until first non-zero sample\n    blinks2=[]\n    for (on,off) in blinks:\n        while(on>0 and sy[on]==0):\n            on-=1\n        while(off<n-1 and sy[off]==0):\n            off+=1\n        blinks2.append([on,off])\n    return np.array(blinks2)\n\ndef detect_blinks_zero(sy, min_duration, blink_val=0):\n    \"\"\"\n    Detect blinks as a consecutive sequence of `blink_val` (e.g., 0 or NaN) of at least\n    `min_duration` successive values in the signal `sy`.\n    Detected blinks are put into a matrix `blinks` (nblinks x 2) where start and end\n    are stored as indexes.\n    \n    Parameters\n    ----------\n    sy: np.array (float)\n        signal\n    min_duration: int\n        minimum number of consecutive samples for a sequence of missing numbers to be treated as blink\n    blink_val: \n        \"missing value\" code\n    \n    Returns\n    -------\n    np.array (nblinks x 2) containing the indices of the start/end of the blinks\n    \"\"\"\n    x=np.r_[0, np.diff((sy==blink_val).astype(int))]\n    starts=np.where(x==1)[0]\n    ends=np.where(x==-1)[0]-1\n    if sy[0]==blink_val: ## first value missing?\n        starts=np.r_[0,starts] \n    if ends.size!=starts.size: \n        ## is the first start earlier than the first end?\n        if starts[0]>ends[0]:\n            ends=ends[1:] # drop first end\n        else:\n            starts=starts[:-1] # drop last start\n    if ends[-1]==x.size:\n        ends[-1]-=1\n    blinks=[ [start,end] for start,end in zip(starts,ends) if end-start>=min_duration]\n    return np.array(blinks)\n \n \ndef blink_onsets_mahot(sy, blinks, smooth_winsize, vel_onset, vel_offset, margin, blinkwindow):\n    \"\"\"\n    Method for finding the on- and offset for each blink (excluding transient).\n    See https://figshare.com/articles/A_simple_way_to_reconstruct_pupil_size_during_eye_blinks/688001.\n    \n    Parameters\n    ----------\n    sy: np.array\n        pupil data\n    blinks: np.array (nblinks x 2) \n        blink onset/offset matrix (contiguous zeros)\n    smooth_winsize: int (odd)\n        size of the Hanning-window in sampling points\n    vel_onset: float\n        velocity-threshold to detect the onset of the blink\n    vel_offset: float\n        velocity-threshold to detect the offset of the blink\n    margin: tuple (int,int)\n        margin that is subtracted/added to onset and offset (in sampling points)\n    blinkwindow: int\n        how much time before and after each blink to include (in sampling points) \n    \"\"\"\n    # generate smoothed signal and velocity-profile\n    sym=smooth_window(sy, smooth_winsize, \"hanning\")\n    vel=np.r_[0,np.diff(sym)] \n    blinkwindow_ix=blinkwindow\n    n=sym.size\n    \n    newblinks=[]\n    for ix,(start,end) in enumerate(blinks): \n        winstart,winend=max(0,start-blinkwindow_ix), min(end+blinkwindow_ix, n)\n        slic=slice(winstart, winend) #start-blinkwindow_ix, end+blinkwindow_ix)\n        winlength=vel[slic].size\n\n        onsets=np.where(vel[slic]<=vel_onset)[0]\n        offsets=np.where(vel[slic]>=vel_offset)[0]\n        if onsets.size==0 or offsets.size==0:\n            continue\n\n        ## onsets are in \"local\" indices of the windows, start-end of blink global\n        startl,endl=blinkwindow_ix if winstart>0 else start,end-start+blinkwindow_ix\n\n        # find vel-crossing next to start of blink and move back to start of that crossing\n        onset_ix=np.argmin(np.abs((onsets-startl<=0)*(onsets-startl)))\n        while(onsets[onset_ix-1]+1==onsets[onset_ix]):\n            onset_ix-=1\n        onset=onsets[onset_ix]\n        onset=max(0, onset-margin[0]) # avoid overflow to the left\n\n        # find start of \"reversal period\" and move forward until it drops back\n        offset_ix=np.argmin(np.abs(((offsets-endl<0)*np.iinfo(int).max)+(offsets-endl)))\n        while(offset_ix<(len(offsets)-1) and offsets[offset_ix+1]-1==offsets[offset_ix]):\n            offset_ix+=1 \n        offset=offsets[offset_ix]\n        
offset=min(winlength-1, offset+margin[1]) # avoid overflow to the right\n newblinks.append( [onset+winstart,offset+winstart] )\n \n return np.array(newblinks) ", "id": "1417676", "language": "Python", "matching_score": 3.246192216873169, "max_stars_count": 13, "path": "pypillometry/preproc.py" }, { "content": "\"\"\"\npupildata.py\n============\n\nMain object-oriented entry point\n\"\"\"\n\nfrom .convenience import *\nfrom .baseline import *\nfrom .fakedata import *\nfrom .preproc import *\nfrom .io import *\nfrom .erpd import *\n\nimport pylab as plt\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport scipy.signal as signal\nfrom scipy.interpolate import interp1d\nfrom scipy import interpolate\nimport scipy\nfrom random import choice\nimport pickle\n\nimport collections.abc\n\nimport copy\nimport math\n\n#from pytypes import typechecked\nfrom typing import Sequence, Union, List, TypeVar, Optional, Tuple, Callable\nPupilArray=Union[np.ndarray, List[float]]\n\n\n_inplace=False ## default for whether or not inplace-operations should be used\n\n\nimport inspect\nimport functools\n\n## decoratory to keep a history of actions performed on dataset\n# can only be used with functions that return \"self\" \ndef keephistory(func):\n @functools.wraps(func)\n def wrapper(*args,**kwargs):\n obj=func(*args,**kwargs)\n funcname=func.__name__\n argstr=\",\".join([\"%s\"%(v) for v in args[1:]])\n kwargstr=\",\".join([\"%s=%s\"%(k,v) for k,v in kwargs.items()])\n allargs=argstr\n if len(allargs)>0 and len(kwargstr)>0:\n allargs+=\",\"+kwargstr\n elif len(kwargstr)>0:\n allargs+=kwargstr\n fstr=\"{func}({allargs})\".format(func=funcname, allargs=allargs)\n #fstr=\"{func}({argstr},{kwargstr})\".format(func=funcname,argstr=argstr,kwargstr=kwargstr)\n obj.add_to_history({\"funcstring\":fstr, \"funcname\":funcname, \"args\":args[1:], \"kwargs\":kwargs})\n return obj\n return wrapper\n \n \n\n#@typechecked\nclass PupilData:\n \"\"\"\n Class representing pupillometric data. \n \"\"\"\n \n def add_to_history(self, event):\n \"\"\"Add event to history\"\"\"\n try:\n self.history.append(event)\n except:\n self.history=[event]\n \n def print_history(self):\n \"\"\"\n Pretty-print the history of the current dataset (which manipulations where done on it).\n \"\"\"\n print(\"* \"+self.name)\n try:\n for i,ev in enumerate(self.history):\n print(\" \"*(i)+\"└ \" + ev[\"funcstring\"])\n except:\n print(\"no history\")\n \n def apply_history(self, obj):\n \"\"\"\n Apply history of operations done on `self` to `obj`.\n \n Parameters:\n -----------\n \n obj: :class:`.PupilData`\n object of class :class:`.PupilData` to which the operations are to be transferred\n \n Returns:\n --------\n \n copy of the :class:`.PupilData`-object to which the operations in `self` were applied\n \"\"\"\n for ev in self.history:\n obj=getattr(obj, ev[\"funcname\"])(*ev[\"args\"], **ev[\"kwargs\"])\n return obj\n\n def __len__(self) -> int:\n \"\"\"Return number of sampling points in the pupil data.\"\"\"\n return self.sy.size\n \n def nevents(self) -> int:\n \"\"\"Return number of events in pupillometric data.\"\"\"\n return self.event_onsets.size\n\n def nblinks(self) -> int:\n \"\"\"\n Return number of detected blinks. 
Should be run after `detect_blinks()`.\n \"\"\"\n return self.blinks.shape[0]\n \n def get_duration(self, units=\"min\"):\n fac=self._unit_fac(units)\n return (len(self)/self.fs*1000)*fac\n \n def _random_id(self, n:int=8) -> str:\n \"\"\"\n Create a random ID string that is easy to recognise.\n Based on <http://code.activestate.com/recipes/526619-friendly-readable-id-strings/>.\n \"\"\"\n v = 'aeiou'\n c = 'bdfghklmnprstvw'\n\n return ''.join([choice(v if i%2 else c) for i in range(n)])\n \n def __init__(self,\n pupil: PupilArray, \n sampling_rate: Optional[float]=None,\n time: Optional[PupilArray]=None,\n event_onsets: Optional[PupilArray]=None,\n event_labels: Optional[PupilArray]=None,\n name: Optional[str]=None,\n keep_orig: bool=True,\n fill_time_discontinuities: bool=True):\n \"\"\"\n Parameters\n ----------\n \n name: \n name of the dataset or `None` (in which case a random string is selected)\n time: \n timing array or `None`, in which case the time-array goes from [0,maxT]\n using `sampling_rate` (in ms)\n pupil:\n pupillary data at times `time` assumed to be in ms\n event_onsets: \n time-onsets of any events that are to be modelled in the pupil\n event_labels:\n for each event in `event_onsets`, provide a label\n sampling_rate: float\n sampling-rate of the pupillary signal in Hz\n keep_orig: bool\n keep a copy of the original dataset? If `True`, a copy of the :class:`.PupilData` object\n as initiated in the constructor is stored in member `PupilData.original`\n fill_time_discontinuities: bool\n sometimes, when the eyetracker loses signal, no entry in the EDF is made; \n when this option is True, such entries will be made and the signal set to 0 there\n \"\"\"\n self.sy=np.array(pupil, dtype=np.float)\n if sampling_rate is None and time is None:\n raise ValueError(\"you have to specify either sampling_rate or time-vector (or both)\")\n \n if time is None:\n maxT=len(self)/sampling_rate*1000.\n self.tx=np.linspace(0,maxT, num=len(self))\n else:\n self.tx=np.array(time, dtype=np.float)\n \n if sampling_rate is None:\n self.fs=np.round(1000./np.median(np.diff(self.tx)))\n else:\n self.fs=sampling_rate\n \n if fill_time_discontinuities:\n ## find gaps in the time-vector\n tx=self.tx\n sy=self.sy\n stepsize=np.median(np.diff(tx))\n n=tx.size\n gaps_end_ix=np.where(np.r_[stepsize,np.diff(tx)]>2*stepsize)[0]\n ngaps=gaps_end_ix.size\n if ngaps!=0:\n ## at least one gap here\n print(\"> Filling in %i gaps\"%ngaps)\n gaps_start_ix=gaps_end_ix-1\n print( ((tx[gaps_end_ix]-tx[gaps_start_ix])/1000), \"seconds\" )\n \n ntx=[tx[0:gaps_start_ix[0]]] # initial\n nsy=[sy[0:gaps_start_ix[0]]]\n for i in range(ngaps):\n start,end=gaps_start_ix[i], gaps_end_ix[i]\n # fill in the gap\n ntx.append( np.linspace(tx[start],tx[end], int((tx[end]-tx[start])/stepsize), endpoint=False) )\n nsy.append( np.zeros(ntx[-1].size) )\n\n # append valid signal\n if i==ngaps-1:\n nstart=n\n else:\n nstart=gaps_start_ix[i+1]\n ntx.append( tx[end:nstart] )\n nsy.append( sy[end:nstart] )\n\n ntx=np.concatenate(ntx)\n nsy=np.concatenate(nsy) \n self.tx=ntx\n self.sy=nsy\n \n if event_onsets is None:\n self.event_onsets=np.array([], dtype=np.float)\n else:\n self.event_onsets=np.array(event_onsets, dtype=np.float)\n \n # check whether onsets are in range\n for onset in self.event_onsets:\n if onset<self.tx.min() or onset>self.tx.max():\n raise ValueError(\"some event-onsets outside data range according to time-vector\")\n \n \n if event_labels is None:\n self.event_labels=np.zeros_like(self.event_onsets)\n else:\n if 
self.event_onsets.size!=np.array(event_labels).size:\n raise ValueError(\"event_labels must have same length as event_onsets\")\n self.event_labels=np.array(event_labels)\n \n if self.tx.size != self.sy.size:\n raise ValueError(\"time and pupil-array must have same length, found {} vs {}\".format(\n self.tx.size,self.sy.size))\n \n if name is None:\n self.name = self._random_id()\n else:\n self.name=name\n \n ## initialize baseline signal\n self.scale_params={\"mean\":0, \"sd\":1}\n self.baseline=np.zeros(len(self))\n self.baseline_estimated=False\n \n ## initialize response-signal\n self.response=np.zeros(len(self))\n self.response_pars=None\n self.response_estimated=False\n \n ## initialize blinks\n self.blinks=np.empty((0,2), dtype=np.int)\n self.blink_mask=np.zeros(len(self), dtype=np.int)\n \n ## interpolated mask\n self.interpolated_mask=np.zeros(len(self), dtype=np.int)\n self.missing=np.zeros(len(self), dtype=np.int)\n self.missing[self.sy==0]=1\n \n self.original=None\n if keep_orig: \n self.original=self.copy()\n \n ## start with empty history \n self.history=[]\n \n @keephistory\n def drop_original(self, inplace=_inplace):\n \"\"\"\n Drop original dataset from record (e.g., to save space).\n \"\"\"\n obj=self if inplace else self.copy()\n obj.original=None\n return obj\n \n @keephistory\n def reset_time(self, t0: float=0, inplace=_inplace):\n \"\"\"\n Resets time so that the time-array starts at time zero (t0).\n Resets onsets etc.\n \n Parameters\n ----------\n t0: float\n time at which the :class:`.PupilData`'s time-vector starts\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes\n \"\"\"\n tmin=self.tx.min()\n obj=self if inplace else self.copy() \n obj.tx=(self.tx-tmin)+t0\n obj.event_onsets=(self.event_onsets-tmin)+t0\n return obj\n \n def write_file(self, fname:str):\n \"\"\"\n Save to file (using :mod:`pickle`).\n \n Parameters\n ----------\n \n fname: str\n filename\n \"\"\"\n pd_write_pickle(self, fname)\n \n @classmethod\n def from_file(cls, fname:str):\n \"\"\"\n Reads a :class:`.PupilData` object from a pickle-file.\n Use as ``pypillometry.PupilData.from_file(\"yourfile.pd\")``.\n \n Parameters\n ----------\n \n fname: str\n filename\n \"\"\"\n r=pd_read_pickle(fname)\n return r\n \n def _unit_fac(self, units):\n if units==\"sec\":\n fac=1./1000.\n elif units==\"min\":\n fac=1./1000./60.\n elif units==\"h\":\n fac=1./1000./60./60.\n else:\n fac=1.\n return fac\n\n @keephistory\n def sub_slice(self, start: float=-np.inf, end: float=np.inf, units: str=\"sec\"):\n \"\"\"\n Return a new `PupilData` object that is a shortened version\n of the current one (contains all data between `start` and\n `end` in units given by `units` (one of \"ms\", \"sec\", \"min\", \"h\").\n\n Parameters\n ----------\n \n start: float\n start for new dataset\n end: float\n end of new dataset\n units: str\n time units in which `start` and `end` are provided\n \"\"\"\n slic=self.copy()\n fac=self._unit_fac(units)\n tx=self.tx*fac\n keepix=np.where(np.logical_and(tx>=start, tx<=end))\n for k, v in slic.__dict__.items():\n if isinstance(v,np.ndarray) and v.size==self.sy.size:\n slic.__dict__[k]=slic.__dict__[k][keepix]\n evon=slic.event_onsets*slic._unit_fac(units)\n keepev=np.logical_and(evon>=start, evon<=end)\n slic.event_onsets=slic.event_onsets[keepev]\n slic.event_labels=slic.event_labels[keepev]\n ## just remove all detected blinks (need to rerun `detect_blinks()`)\n slic.blinks=np.empty((0,2), dtype=np.int)\n 
slic.blink_mask=np.zeros(len(slic), dtype=np.int)\n return slic\n\n def summary(self) -> dict:\n \"\"\"Return a summary of the :class:`.PupilData`-object.\"\"\"\n summary=dict(\n name=self.name,\n n=len(self),\n nmiss=np.sum(self.missing),#np.sum(np.isnan(self.sy))+np.sum(self.sy==0),\n perc_miss=np.sum(self.missing)/len(self)*100.,#(np.sum(np.isnan(self.sy))+np.sum(self.sy==0))/len(self)*100.,\n nevents=self.nevents(), \n nblinks=self.nblinks(),\n ninterpolated=self.interpolated_mask.sum(),\n blinks_per_min=self.nblinks()/(len(self)/self.fs/60.),\n fs=self.fs, \n duration_minutes=self.get_duration(\"min\"),\n start_min=self.tx.min()/1000./60.,\n end_min=self.tx.max()/1000./60.,\n baseline_estimated=self.baseline_estimated,\n response_estimated=self.response_estimated) \n return summary\n \n def size_bytes(self):\n \"\"\"\n Return size of current dataset in bytes.\n \"\"\"\n nbytes=len(pickle.dumps(self, -1))\n return nbytes\n \n def __repr__(self) -> str:\n \"\"\"Return a string-representation of the dataset.\"\"\"\n pars=self.summary()\n del pars[\"name\"]\n s=\"PupilData({name}, {size}):\\n\".format(name=self.name, size=sizeof_fmt(self.size_bytes()))\n flen=max([len(k) for k in pars.keys()])\n for k,v in pars.items():\n s+=(\" {k:<\"+str(flen)+\"}: {v}\\n\").format(k=k,v=v)\n s+=\" History:\\n *\\n\"\n try:\n for i,ev in enumerate(self.history):\n s+=\" \"*(i+1)+\"└ \" + ev[\"funcstring\"] +\"\\n\"\n except:\n s+=\" └no history\\n\"\n return s\n \n @keephistory \n def unscale(self, mean: Optional[float]=None, sd: Optional[float]=None, inplace=_inplace):\n \"\"\"\n Scale back to original values using either values provided as arguments\n or the values stored in `scale_params`.\n \n Parameters\n ----------\n mean: float\n mean to add from signal\n sd: float\n sd to scale with \n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes\n \n \"\"\"\n if mean is None:\n mean=self.scale_params[\"mean\"]\n if sd is None:\n sd=self.scale_params[\"sd\"]\n \n obj=self if inplace else self.copy()\n obj.scale_params={\"mean\":0, \"sd\":1}\n obj.sy=(self.sy*sd)+mean\n obj.baseline=(self.baseline*sd)+mean\n obj.response=(self.response*sd)\n return obj\n \n @keephistory\n def scale(self, mean: Optional[float]=None, sd: Optional[float]=None, inplace=_inplace):\n \"\"\"\n Scale the pupillary signal by subtracting `mean` and dividing by `sd`.\n If these variables are not provided, use the signal's mean and std.\n \n Parameters\n ----------\n \n mean: float\n mean to subtract from signal\n sd: float\n sd to scale with\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \n Note\n ----\n Scaling-parameters are being saved in the `scale_params` argument. 
\n \"\"\"\n if mean is None:\n mean=np.nanmean(self.sy)\n if sd is None:\n sd=np.nanstd(self.sy)\n\n obj=self if inplace else self.copy() \n obj.scale_params={\"mean\":mean, \"sd\":sd}\n obj.sy=(self.sy-mean)/sd\n obj.baseline=(self.baseline-mean)/sd\n obj.response=(self.response)/sd\n return obj\n \n @keephistory\n def lowpass_filter(self, cutoff: float, order: int=2, inplace=_inplace):\n \"\"\"\n Lowpass-filter signal using a Butterworth-filter, \n see :py:func:`pypillometry.baseline.butter_lowpass_filter()`.\n \n Parameters\n -----------\n\n cutoff: float\n lowpass-filter cutoff\n order: int\n filter order\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n obj=self if inplace else self.copy() \n obj.sy=butter_lowpass_filter(self.sy, cutoff, self.fs, order)\n return obj\n\n @keephistory\n def smooth_window(self, window: str=\"hanning\", winsize: float=11, inplace=_inplace):\n \"\"\"\n Apply smoothing of the signal using a moving window. See :func:`.smooth_window()`.\n \n Parameters\n ----------\n window: str\n (the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'); \n flat window will produce a moving average smoothing.\n winsize: float\n the length of the window in ms \n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n winsize_ix=int(np.ceil(winsize/1000.*self.fs)) \n obj=self if inplace else self.copy() \n obj.sy=smooth_window(self.sy, winsize_ix, window )\n return obj\n \n @keephistory\n def downsample(self, fsd: float, dsfac: bool=False, inplace=_inplace):\n \"\"\"\n Simple downsampling scheme using mean within the downsampling window.\n See :py:func:`pypillometry.baseline.downsample()`.\n\n Parameters\n -----------\n\n fsd: \n new sampling-rate or decimate-factor\n dsfac:\n if False, `fsd` is the new sampling rate;\n if True, `fsd` is the decimate factor\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n if dsfac:\n dsfac=fsd\n fsd=float(self.fs/dsfac)\n else:\n dsfac=int(self.fs/fsd) # calculate downsampling factor\n \n obj=self if inplace else self.copy()\n \n ## downsample all arrays that have the original sy-length\n # (this is so that the function is general for subclasses, as well)\n nd=self.sy.size\n for k, v in obj.__dict__.items():\n if isinstance(v,np.ndarray) and v.size==nd:\n obj.__dict__[k]=downsample(self.__dict__[k], dsfac)\n \n obj.fs=fsd\n return obj\n\n def copy(self, new_name: Optional[str]=None):\n \"\"\"\n Make and return a deep-copy of the pupil data.\n \"\"\"\n cls = self.__class__\n result = cls.__new__(cls)\n for k, v in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v))\n if new_name is None:\n result.name=self.name+\"_\"+self._random_id(n=2)\n else:\n result.name=new_name\n return result \n\n def _plot(self, plot_range, overlays, overlay_labels, units, interactive, highlight_blinks, highlight_interpolated):\n fac=self._unit_fac(units)\n if units==\"sec\":\n xlab=\"seconds\"\n elif units==\"min\":\n xlab=\"minutes\"\n elif units==\"h\":\n xlab=\"hours\"\n else:\n xlab=\"ms\"\n tx=self.tx*fac\n evon=self.event_onsets*fac\n \n start,end=plot_range\n if start==-np.infty:\n startix=0\n else:\n startix=np.argmin(np.abs(tx-start))\n \n if end==np.infty:\n endix=tx.size\n else:\n endix=np.argmin(np.abs(tx-end))\n \n tx=tx[startix:endix]\n \n 
ixx=np.logical_and(evon>=start, evon<end)\n evlab=self.event_labels[ixx]\n evon=evon[ixx]\n overlays=(ov[startix:endix] for ov in overlays)\n \n if interactive:\n blinks=np.empty((0,2), dtype=np.int)\n interpolated=np.empty((0,2), dtype=np.int)\n if highlight_blinks:\n blinks=[]\n for sblink,eblink in self.blinks:\n if eblink<startix or sblink>endix:\n continue\n else:\n sblink=max(0,sblink-startix)\n eblink=min(endix,eblink-startix)\n blinks.append([sblink,eblink])\n blinks=np.array(blinks)\n if highlight_interpolated:\n a=np.diff(np.r_[0, self.interpolated_mask[startix:endix], 0])[:-1]\n istarts=np.where(a>0)[0]\n iends=np.where(a<0)[0]\n interpolated=[]\n for istart,iend in zip(istarts,iends):\n interpolated.append([istart,iend])\n plot_pupil_ipy(tx, self.sy[startix:endix], evon,\n overlays=overlays, overlay_labels=overlay_labels,\n blinks=blinks, interpolated=interpolated,\n xlab=xlab)\n else:\n plt.plot(tx, self.sy[startix:endix], label=\"signal\")\n for i,ov in enumerate(overlays):\n plt.plot(tx, ov, label=overlay_labels[i])\n plt.vlines(evon, *plt.ylim(), color=\"grey\", alpha=0.5)\n ll,ul=plt.ylim()\n for ev,lab in zip(evon,evlab):\n plt.text(ev, ll+(ul-ll)/2., \"%s\"%lab, fontsize=8, rotation=90)\n if highlight_interpolated:\n a=np.diff(np.r_[0, self.interpolated_mask[startix:endix], 0])[:-1]\n istarts=np.where(a>0)[0]\n iends=np.where(a<0)[0]\n for istart,iend in zip(istarts,iends):\n plt.gca().axvspan(tx[istart],tx[iend],color=\"green\", alpha=0.1)\n if highlight_blinks:\n for sblink,eblink in self.blinks:\n if eblink<startix or sblink>endix:\n continue\n else:\n sblink=min(tx.size-1, max(0,sblink-startix))\n eblink=min(endix-startix-1,eblink-startix)\n \n plt.gca().axvspan(tx[sblink],tx[eblink],color=\"red\", alpha=0.2)\n \n \n plt.legend()\n plt.xlabel(xlab) \n \n def plot(self, plot_range: Tuple[float,float]=(-np.infty, +np.infty),\n interactive: bool=False, \n baseline: bool=True, \n response: bool=False,\n model: bool=True,\n highlight_blinks: bool=True,\n highlight_interpolated: bool=True,\n units: str=\"sec\"\n ) -> None:\n \"\"\"\n Make a plot of the pupil data using `matplotlib` or :py:func:`pypillometry.convenience.plot_pupil_ipy()`\n if `interactive=True`.\n\n Parameters\n ----------\n plot_range: tuple (start,end)\n plot from start to end (in units of `units`)\n baseline: bool\n plot baseline if estimated\n response: bool\n plot response if estimated\n model: bool\n plot full model if baseline and response have been estimated\n interactive: bool\n if True, plot with sliders to adjust range\n units: str\n one of \"sec\"=seconds, \"ms\"=millisec, \"min\"=minutes, \"h\"=hours\n \"\"\"\n\n overlays=tuple()\n overlay_labels=tuple()\n if baseline and self.baseline_estimated:\n overlays+=(self.baseline,) \n overlay_labels+=(\"baseline\",)\n if response and self.response_estimated:\n overlays+=(self.response,)\n overlay_labels+=(\"response\",) \n if model and self.baseline_estimated and self.response_estimated:\n overlays+=(self.baseline+self.response,)\n overlay_labels+=(\"model\",)\n self._plot(plot_range, overlays, overlay_labels, units, interactive, highlight_blinks, highlight_interpolated)\n\n def plot_segments(self, overlay=None, pdffile: Optional[str]=None, interv: float=1, figsize=(15,5), ylim=None, **kwargs):\n \"\"\"\n Plot the whole dataset chunked up into segments (usually to a PDF file).\n\n Parameters\n ----------\n\n pdffile: str or None\n file name to store the PDF; if None, no PDF is written \n interv: float\n duration of each of the segments to be plotted 
(in minutes)\n figsize: Tuple[int,int]\n dimensions of the figures\n kwargs: \n arguments passed to :func:`.PupilData.plot()`\n\n Returns\n -------\n\n figs: list of :class:`matplotlib.Figure` objects\n \"\"\"\n\n # start and end in minutes\n smins,emins=self.tx.min()/1000./60., self.tx.max()/1000./60.\n segments=[]\n cstart=smins\n cend=smins\n while cend<emins:\n cend=min(emins, cstart+interv)\n segments.append( (cstart,cend) )\n cstart=cend\n\n figs=[]\n _backend=mpl.get_backend()\n mpl.use(\"pdf\")\n plt.ioff() ## avoid showing plots when saving to PDF \n\n for start,end in segments:\n plt.figure(figsize=figsize)\n self.plot( (start,end), units=\"min\", **kwargs)\n if overlay is not None:\n overlay.plot( (start, end), units=\"min\", **kwargs) \n if ylim is not None:\n plt.ylim(*ylim)\n figs.append(plt.gcf())\n\n\n if isinstance(pdffile, str):\n print(\"> Writing PDF file '%s'\"%pdffile)\n with PdfPages(pdffile) as pdf:\n for fig in figs:\n pdf.savefig(fig) \n\n ## switch back to original backend and interactive mode \n mpl.use(_backend) \n plt.ion()\n\n return figs \n \n @keephistory\n def estimate_baseline(self, method: str=\"envelope_iter_bspline_2\", inplace=_inplace, **kwargs):\n \"\"\"\n Apply one of the baseline-estimation methods.\n \n Parameters\n ----------\n \n method: \n \"envelope_iter_bspline_1\": :py:func:`pypillometry.baseline.baseline_envelope_iter_bspline()` \n with one iteration\n \"envelope_iter_bspline_2\": :py:func:`pypillometry.baseline.baseline_envelope_iter_bspline()` \n with two iterations\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \n kwargs:\n named arguments passed to the low-level function in :py:mod:`pypillometry.baseline`.\n \n Note\n -----\n the results of the estimation is stored in member `baseline`\n \n \"\"\"\n obj=self if inplace else self.copy()\n if method==\"envelope_iter_bspline_2\":\n txd,syd,base2,base1=baseline_envelope_iter_bspline(self.tx, self.sy,self.event_onsets,self.fs,**kwargs)\n f=interpolate.interp1d(txd, base2, kind=\"cubic\", bounds_error=False, fill_value=\"extrapolate\")\n obj.baseline=f(self.tx)\n elif method==\"envelope_iter_bspline_1\": \n txd,syd,base2,base1=baseline_envelope_iter_bspline(self.tx, self.sy,self.event_onsets,self.fs,**kwargs)\n f=interpolate.interp1d(txd, base1, kind=\"cubic\", bounds_error=False, fill_value=\"extrapolate\")\n obj.baseline=f(self.tx) \n else:\n raise ValueError(\"Undefined method for baseline estimation: %s\"%method) \n obj.baseline_estimated=True\n return obj\n\n def stat_per_event(self, interval: Tuple[float,float], event_select=None, statfct: Callable=np.mean, return_missing: Optional[str]=None):\n \"\"\"\n Return result of applying a statistical function to pupillometric data in a\n given interval relative to event-onsets. 
For example, extract mean \n pupil-size in interval before trial onset.\n\n Parameters\n -----------\n event_select: str or function\n variable describing which events to select and align to\n - if str: use all events whose label contains the string\n - if function: apply function to all labels, use those where the function returns True\n \n interval : tuple (min,max)\n time-window in ms relative to event-onset (0 is event-onset)\n\n statfct : function\n function mapping np.array to a single number\n\n return_missing: None, \"nmiss\", \"prop\"\n if None, only an array with the stats per event is return\n if \"nmiss\", returns a tuple (stat, nmiss) where `nmiss` is the number of missing vales in the timewin\n if \"prop\", return a tuple (stat, prop_miss) where `prop_miss` is the proportion missing vales in the timewin\n \n Returns\n --------\n\n result: np.array\n number of event-onsets long result array\n \"\"\"\n if callable(event_select):\n event_ix=np.array([bool(event_select(evlab)) for evlab in self.event_labels])\n elif isinstance(event_select, str):\n event_ix=np.array([event_select in evlab for evlab in self.event_labels])\n elif event_select is None:\n event_ix=np.arange(self.nevents())\n else:\n raise ValueError(\"event_select must be string or function\")\n \n stat =stat_event_interval(self.tx, self.sy, self.event_onsets[event_ix], interval, statfct)\n if return_missing==\"nmiss\":\n nmiss=stat_event_interval(self.tx, np.logical_or(self.missing, self.interpolated_mask), \n self.event_onsets[event_ix], interval, np.sum)\n ret=(stat,nmiss)\n elif return_missing==\"prop\":\n prop_miss=stat_event_interval(self.tx, np.logical_or(self.missing, self.interpolated_mask), \n self.event_onsets[event_ix], interval, np.mean)\n ret=(stat,prop_miss) \n else: \n ret=stat\n return ret\n \n @keephistory\n def estimate_response(self, npar: Union[str,float]=\"free\", tmax: Union[str,float]=\"free\", \n verbose: int=50,\n bounds: dict={\"npar\":(1,20), \"tmax\":(100,2000)},\n inplace=_inplace):\n \"\"\"\n Estimate pupil-response based on event-onsets, see\n :py:func:`pypillometry.pupil.pupil_response()`.\n \n\n npar: float\n npar-parameter for the canonical response-function or \"free\";\n in case of \"free\", the function optimizes for this parameter\n tmax: float\n tmax-parameter for the canonical response-function or \"free\";\n in case of \"free\", the function optimizes for this parameter\n bounds: dict\n in case that one or both parameters are estimated, give the lower\n and upper bounds for the parameters \n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \n Note\n ----\n the results of the estimation is stored in members `response`, `response_x` (design matrix) \n and `response_pars`\n\n \"\"\"\n if not self.baseline_estimated:\n print(\"WARNING: no baseline estimated yet, using zero as baseline\")\n \n pred, coef, npar_est, tmax_est, x1=pupil_response(self.tx, self.sy-self.baseline, \n self.event_onsets, self.fs, \n npar=npar, tmax=tmax, verbose=verbose,\n bounds=bounds)\n obj=self if inplace else self.copy()\n obj.response_pars={\"npar\":npar_est,\n \"npar_free\":True if npar==\"free\" else False,\n \"tmax\":tmax_est,\n \"tmax_free\":True if tmax==\"free\" else False,\n \"coef\":coef,\n \"bounds\":bounds\n }\n \n obj.response=pred\n obj.response_x=x1\n obj.response_estimated=True\n return obj\n \n\n @keephistory\n def blinks_detect(self, min_duration:float=20, blink_val:float=0,\n winsize: float=11, 
vel_onset: float=-5, vel_offset: float=5, \n                      min_onset_len: int=5, min_offset_len: int=5,\n                      strategies: List[str]=[\"zero\",\"velocity\"],\n                      units=\"ms\", inplace=_inplace):\n        \"\"\"\n        Detect blinks in the pupillary signal using several strategies.\n        First, blinks are detected as a consecutive sequence of `blink_val` \n        (e.g., 0 or NaN). Second, blinks are defined as everything between\n        two crossings of the velocity profile (from negative to positive).\n        \n        Detected blinks are put into member `blinks` (matrix 2 x nblinks where start and end\n        are stored as indexes) and member `blink_mask` which codes for each sampling point\n        whether there is a blink (1) or not (0).\n\n        Finally, detected blinks have to last at least `min_duration` (in `units`).\n        \n        Parameters\n        ----------\n        min_duration: float\n            minimum duration for a sequence of missing numbers to be treated as a blink\n        blink_val: float\n            \"missing value\" code\n        winsize:\n            window-size for smoothing for velocity profile (in units)\n        vel_onset:\n            negative velocity that needs to be crossed; arbitrary units that depend on\n            sampling rate etc\n        vel_offset:\n            positive velocity that needs to be exceeded; arbitrary units that depend on\n            sampling rate etc\n        min_onset_len: int\n            minimum number of consecutive samples that crossed threshold in the velocity\n            profile to detect as onset (to avoid noise-induced changes)\n        min_offset_len: int\n            minimum number of consecutive samples that crossed threshold in the velocity\n            profile to detect as offset (to avoid noise-induced changes) \n        strategies: list of strategies to use\n            so far, use a list containing any combination of \"zero\" and \"velocity\"\n        units: str\n            one of \"ms\", \"sec\", \"min\", \"h\"\n        inplace: bool\n            if `True`, make change in-place and return the object\n            if `False`, make and return copy before making changes \n        \"\"\"\n        fac=self._unit_fac(units)\n        winsize_ms=winsize*fac\n        winsize_ix=int(winsize_ms/1000.*self.fs)\n        if winsize_ix % 2==0: ## ensure smoothing window is odd\n            winsize_ix += 1\n        min_duration_ms=min_duration*fac\n        min_duration_ix=int(min_duration_ms/1000.*self.fs) \n\n        obj=self if inplace else self.copy()\n        \n        # check for unknown strategies\n        for strat in strategies:\n            if strat not in [\"zero\", \"velocity\"]:\n                print(\"WARN: strategy '%s' unknown\"%strat)\n        \n        ## detect blinks with the different strategies\n        if \"velocity\" in strategies:\n            blinks_vel=detect_blinks_velocity(self.sy, winsize_ix, vel_onset, vel_offset, min_onset_len, min_offset_len)\n        else: \n            blinks_vel=np.array([])\n        \n        if \"zero\" in strategies:\n            blinks_zero=detect_blinks_zero(self.sy, 1, blink_val)\n        else:\n            blinks_zero=np.array([])\n\n        ## merge the blink candidates found by the two strategies\n        blinks=helper_merge_blinks(blinks_vel, blinks_zero)\n        obj.blinks=np.array([[on,off] for (on,off) in blinks if off-on>=min_duration_ix])\n        \n        obj.blink_mask=np.zeros(self.sy.size, dtype=int)\n        \n        for start,end in obj.blinks:\n            obj.blink_mask[start:end]=1\n        return obj \n    \n    def blinks_plot(self, pdf_file: Optional[str]=None, nrow: int=5, ncol: int=3, \n                    figsize: Tuple[int,int]=(10,10), \n                    pre_blink: float=500, post_blink: float=500, units: str=\"ms\", \n                    plot_index: bool=True):\n        \"\"\"\n        Plot the detected blinks into separate figures each with nrow x ncol subplots. 
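\n\n        A minimal usage sketch (illustrative only, not part of the original documentation;\n        `d` is assumed to be a :class:`.PupilData` object and the parameter values are arbitrary):\n        detect blinks first, then plot them::\n\n            d = d.blinks_detect(min_duration=20, strategies=[\"zero\", \"velocity\"])\n            figs = d.blinks_plot(nrow=4, ncol=4, pre_blink=300, post_blink=300)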
\n\n Parameters\n ----------\n pdf_file: str or None\n if the name of a file is given, the figures are saved into a multi-page PDF file\n ncol: int\n number of columns for the blink-plots\n pre_blink: float\n extend plot a certain time before each blink (in ms)\n post_blink: float\n extend plot a certain time after each blink (in ms)\n units: str\n units in which the signal is plotted\n plot_index: bool\n plot a number with the blinks' index (e.g., for identifying abnormal blinks)\n\n Returns\n -------\n\n list of plt.Figure objects each with nrow*ncol subplots\n in Jupyter Notebook, those are displayed inline one after the other\n \"\"\"\n fac=self._unit_fac(units)\n pre_blink_ix=int((pre_blink/1000.)*self.fs)\n post_blink_ix=int((post_blink/1000.)*self.fs)\n\n nblinks=self.blinks.shape[0]\n nsubplots=nrow*ncol # number of subplots per figure\n nfig=int(np.ceil(nblinks/nsubplots))\n\n figs=[]\n if isinstance(pdf_file,str):\n _backend=mpl.get_backend()\n mpl.use(\"pdf\")\n plt.ioff() ## avoid showing plots when saving to PDF \n \n iblink=0\n for i in range(nfig):\n fig=plt.figure(figsize=figsize)\n axs = fig.subplots(nrow, ncol).flatten()\n\n for ix,(start,end) in enumerate(self.blinks[(i*nsubplots):(i+1)*nsubplots]):\n iblink+=1\n slic=slice(start-pre_blink_ix,end+post_blink_ix)\n ax=axs[ix]\n ax.plot(self.tx[slic]*fac,self.sy[slic])\n\n ## highlight interpolated data\n a=np.diff(np.r_[0,self.interpolated_mask[slic],0])[:-1]\n istarts=start-pre_blink_ix+np.where(a>0)[0]\n iends=start-pre_blink_ix+np.where(a<0)[0]\n for istart,iend in zip(istarts,iends):\n ax.axvspan(self.tx[istart]*fac,self.tx[iend]*fac,color=\"green\", alpha=0.1)\n\n ## highlight blink\n ax.axvspan(self.tx[start]*fac,self.tx[end]*fac,color=\"red\", alpha=0.2)\n\n if plot_index: \n ax.text(0.5, 0.5, '%i'%(iblink), fontsize=12, horizontalalignment='center', \n verticalalignment='center', transform=ax.transAxes)\n figs.append(fig)\n\n if pdf_file is not None:\n print(\"> Saving file '%s'\"%pdf_file)\n with PdfPages(pdf_file) as pdf:\n for fig in figs:\n pdf.savefig(fig)\n ## switch back to original backend and interactive mode \n mpl.use(_backend) \n plt.ion()\n \n return figs \n\n @keephistory\n def blinks_merge(self, distance: float=100, remove_signal: bool=False, inplace=_inplace):\n \"\"\"\n Merge together blinks that are close together. 
\n Some subjects blink repeatedly and standard detection/interpolation can result in weird results.\n This function simply treats repeated blinks as one long blink.\n\n Parameters\n ----------\n\n distance: float\n merge together blinks that are closer together than `distance` in ms\n remove_signal: bool\n if True, set all signal values during the \"new blinks\" to zero so \n that :func:`.detect_blinks()` will pick them up; interpolation will work\n either way\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n distance_ix=distance/self.fs*1000.\n\n newblinks=[] \n i=1\n cblink=self.blinks[0,:] ## start with first blink\n while(i<self.nblinks()):\n if (self.blinks[i,0]-cblink[1])<=distance_ix:\n # merge\n cblink[1]=self.blinks[i,1]\n else:\n newblinks.append(cblink)\n cblink=self.blinks[i,:]\n i+=1 \n newblinks.append(cblink)\n newblinks=np.array(newblinks) \n\n obj=self if inplace else self.copy()\n obj.blinks=newblinks\n\n ## set signal to zero within the new blinks\n if remove_signal:\n for start,end in obj.blinks:\n obj.sy[start:end]=0\n\n return obj \n \n @keephistory\n def blinks_interpolate(self, winsize: float=11, \n vel_onset: float=-5, vel_offset: float=5, \n margin: Tuple[float,float]=(10,30), \n interp_type: str=\"cubic\", inplace=_inplace):\n \"\"\"\n Interpolation of missing data \"in one go\".\n Detection of blinks happens using Mahot (2013), see :func:`.blink_onsets_mahot()`.\n \n Parameters\n ----------\n winsize: float\n size of the Hanning-window in ms\n vel_onset: float\n velocity-threshold to detect the onset of the blink\n vel_offset: float\n velocity-threshold to detect the offset of the blink\n margin: Tuple[float,float]\n margin that is subtracted/added to onset and offset (in ms)\n interp_type: str\n type of interpolation accepted by :func:`scipy.interpolate.interp1d()` \n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n # parameters in sampling units (from ms)\n winsize_ix=int(np.ceil(winsize/1000.*self.fs)) \n margin_ix=tuple(int(np.ceil(m/1000.*self.fs)) for m in margin)\n if winsize_ix % 2==0: ## ensure smoothing window is odd\n winsize_ix+=1 \n\n # generate smoothed signal and velocity-profile\n sym=smooth_window(self.sy, winsize_ix, \"hanning\")\n vel=np.r_[0,np.diff(sym)] \n\n blink_onsets=blink_onsets_mahot(self.sy, self.blinks, winsize_ix, vel_onset, vel_offset,\n margin_ix, int(np.ceil(500/1000*self.fs)))\n obj=self if inplace else self.copy()\n obj.interpolated_mask=np.zeros(self.sy.size)\n for on,off in blink_onsets:\n obj.interpolated_mask[on:off]=1\n f=scipy.interpolate.interp1d(self.tx[obj.interpolated_mask==0], sym[obj.interpolated_mask==0], \n kind=interp_type, bounds_error=False, fill_value=0)\n syr=f(self.tx)\n obj.sy=syr\n \n \n return obj\n \n @keephistory\n def blinks_interp_mahot(self, winsize: float=11, \n vel_onset: float=-5, vel_offset: float=5, \n margin: Tuple[float,float]=(10,30), \n blinkwindow: float=500,\n interp_type: str=\"cubic\",\n plot: Optional[str]=None, \n plot_dim: Tuple[int,int]=(5,3),\n plot_figsize: Tuple[int,int]=(10,8),\n inplace=_inplace):\n \"\"\"\n Implements the blink-interpolation method by Mahot (2013).\n \n Mahot, 2013:\n https://figshare.com/articles/A_simple_way_to_reconstruct_pupil_size_during_eye_blinks/688001.\n\n This procedure relies heavily on eye-balling (reconstructing visually convincing signal),\n so a \"plot\" option is 
provided that will plot many diagnostics (see paper linked above) that\n can help to set good parameter values for `winsize`, `vel_onset`, `vel_offset` and `margin`.\n\n Parameters\n ----------\n winsize: float\n size of the Hanning-window in ms\n vel_onset: float\n velocity-threshold to detect the onset of the blink\n vel_offset: float\n velocity-threshold to detect the offset of the blink\n margin: Tuple[float,float]\n margin that is subtracted/added to onset and offset (in ms)\n blinkwindow: float\n how much time before and after each blink to include (in ms)\n interp_type: str\n type of interpolation accepted by :func:`scipy.interpolate.interp1d()`\n plot: True, str or None\n if a string, the plot is going to be saved to a multipage PDF file; \n if None, no plotting is done\n if True, plot is not saved but produced\n plot_dim: tuple nrow x ncol \n number of subplots\n plot_figsize: tuple (width, height)\n dimensions for each figure\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n # parameters in sampling units (from ms)\n winsize_ix=int(np.ceil(winsize/1000.*self.fs)) \n margin_ix=tuple(int(np.ceil(m/1000.*self.fs)) for m in margin)\n blinkwindow_ix=int(blinkwindow/1000.*self.fs)\n if winsize_ix % 2==0: ## ensure smoothing window is odd\n winsize_ix+=1 \n\n # generate smoothed signal and velocity-profile\n sym=smooth_window(self.sy, winsize_ix, \"hanning\")\n vel=np.r_[0,np.diff(sym)] \n syr=self.sy.copy() ## reconstructed signal\n\n nrow,ncol=plot_dim\n nsubplots=nrow*ncol \n nfig=int(np.ceil(self.nblinks()/nsubplots))\n figs=[]\n if isinstance(plot,str):\n _backend=mpl.get_backend()\n mpl.use(\"pdf\")\n plt.ioff() ## avoid showing plots when saving to PDF \n\n blink_onsets=blink_onsets_mahot(self.sy, self.blinks, winsize_ix, vel_onset, vel_offset,\n margin_ix, blinkwindow_ix)\n \n obj=self if inplace else self.copy() \n # loop through blinks\n for ix,(onset,offset) in enumerate(blink_onsets): \n if plot is not None: \n if ix % nsubplots==0:\n fig,axs=plt.subplots(nrow,ncol,figsize=plot_figsize)\n axs=axs.flatten()\n figs.append(fig)\n\n # calc the 4 time points\n t2,t3=onset,offset\n t1=max(0,t2-t3+t2)\n t4=min(t3-t2+t3, len(self)-1)\n if t1==t2:\n t2+=1\n if t3==t4:\n t3-=1\n \n txpts=[self.tx[pt] for pt in [t1,t2,t3,t4]]\n sypts=[self.sy[pt] for pt in [t1,t2,t3,t4]]\n intfct=interp1d(txpts,sypts, kind=interp_type)\n islic=slice(t2, t3)\n syr[islic]=intfct(self.tx[islic])\n\n ## record the interpolated datapoints\n obj.interpolated_mask[islic]=1\n\n slic=slice(max(0,onset-blinkwindow_ix), min(offset+blinkwindow_ix, len(self)))\n \n ## plotting for diagnostics\n #--------------------------\n if plot is not None: \n #fig,ax1=plt.subplots()\n ax1=axs[ix % nsubplots]\n ax1.plot(self.tx[slic]/1000., self.sy[slic], color=\"blue\", label=\"raw\")\n ax1.plot(self.tx[slic]/1000., sym[slic], color=\"green\", label=\"smoothed\")\n ax1.plot(self.tx[slic]/1000., syr[slic], color=\"red\", label=\"interpolated\")\n ax2=ax1.twinx()\n ax2.plot(self.tx[slic]/1000., vel[slic], color=\"orange\", label=\"velocity\")\n\n for pt in (t1,t2,t3,t4):\n ax1.plot(self.tx[pt]/1000., sym[pt], \"o\", color=\"red\")\n ax1.text(0.5, 0.5, '%i'%(ix+1), fontsize=12, horizontalalignment='center', \n verticalalignment='center', transform=ax1.transAxes)\n if ix % nsubplots==0:\n handles1, labels1 = ax1.get_legend_handles_labels()\n handles2, labels2 = ax2.get_legend_handles_labels()\n handles=handles1+handles2\n 
labels=labels1+labels2\n fig.legend(handles, labels, loc='upper right') \n if isinstance(plot, str):\n print(\"> Writing PDF file '%s'\"%plot)\n with PdfPages(plot) as pdf:\n for fig in figs:\n pdf.savefig(fig) \n ## switch back to original backend and interactive mode \n mpl.use(_backend) \n plt.ion()\n elif plot is not None:\n for fig in figs:\n pass\n #fig.show()\n\n # replace signal with the reconstructed one\n obj.sy=syr\n\n return obj\n \n def get_erpd(self, erpd_name: str, event_select, \n baseline_win: Optional[Tuple[float,float]]=None, \n time_win: Tuple[float,float]=(-500, 2000)):\n \"\"\"\n Extract event-related pupil dilation (ERPD).\n No attempt is being made to exclude overlaps of the time-windows.\n\n Parameters\n ----------\n erpd_name: str\n identifier for the result (e.g., \"cue-locked\" or \"conflict-trials\")\n\n baseline_win: tuple (float,float) or None\n if None, no baseline-correction is applied\n if tuple, the mean value in the window in milliseconds (relative to `time_win`) is \n subtracted from the single-trial ERPDs (baseline-correction)\n\n event_select: str or function\n variable describing which events to select and align to\n - if str: use all events whose label contains the string\n - if function: apply function to all labels, use those where the function returns True\n\n time_win: Tuple[float, float]\n time before and after event to include (in ms)\n\n\n \"\"\"\n if callable(event_select):\n event_ix=np.array([bool(event_select(evlab)) for evlab in self.event_labels])\n elif isinstance(event_select, str):\n event_ix=np.array([event_select in evlab for evlab in self.event_labels])\n else:\n raise ValueError(\"event_select must be string or function\")\n\n\n nev=event_ix.sum()\n time_win_ix=tuple(( int(np.ceil(tw/1000.*self.fs)) for tw in time_win ))\n duration_ix=time_win_ix[1]-time_win_ix[0]\n txw=np.linspace(time_win[0], time_win[1], num=duration_ix)\n\n ## resulting matrix and missing (interpolated/blinks/...) indicator for each datapoint\n erpd=np.zeros((nev,duration_ix))\n missing=np.ones((nev,duration_ix))\n\n # event-onsets as indices of the tx array\n evon=self.event_onsets[event_ix]\n # vectorized version (seems to be worse than naive one)\n #evon_ix=np.argmin(np.abs(np.tile(evon, (self.tx.size,1)).T-self.tx), axis=1)\n # naive version\n evon_ix=np.array([np.argmin(np.abs(ev-self.tx)) for ev in evon])\n\n for i,ev in enumerate(evon_ix):\n on,off=ev+time_win_ix[0], ev+time_win_ix[1]\n onl,offl=0,duration_ix # \"local\" window indices\n if on<0: ## pad with zeros in case timewindow starts before data\n onl=np.abs(on)\n on=0\n if off>=self.tx.size:\n offl=offl-(off-self.tx.size)\n off=self.tx.size\n\n erpd[i,onl:offl]=self.sy[on:off]\n missing[i,onl:offl]=np.logical_or(self.interpolated_mask[on:off], self.missing[on:off])\n\n baselines=[None for _ in range(nev)]\n if baseline_win is not None:\n if baseline_win[0]<time_win[0] or baseline_win[0]>time_win[1] or baseline_win[1]<time_win[0] or baseline_win[1]>time_win[1]:\n print(\"WARNING: baseline-window misspecified %s vs. 
%s; NOT doing baseline correction\"%(baseline_win, time_win))\n else:\n blwin_ix=tuple(( np.argmin(np.abs(bw-txw)) for bw in baseline_win ))\n\n for i in range(nev):\n baselines[i]=np.mean(erpd[i,blwin_ix[0]:blwin_ix[1]])\n erpd[i,:]-=baselines[i]\n\n return ERPD(erpd_name, txw, erpd, missing, baselines)\n \n\n \n#@typechecked \nclass FakePupilData(PupilData):\n \"\"\"\n Simulated pupil data for validation purposes.\n \"\"\"\n def __init__(self,\n pupil: PupilArray, \n sampling_rate: Optional[float]=None,\n time: Optional[PupilArray]=None,\n event_onsets: Optional[PupilArray]=None,\n event_labels: Optional[PupilArray]=None,\n name: Optional[str]=None,\n sim_params: dict={},\n real_baseline: Optional[PupilArray]=None,\n real_response_coef: Optional[PupilArray]=None):\n \"\"\"\n Constructor for artifical pupil data.\n \"\"\"\n super().__init__(pupil,sampling_rate,time,event_onsets,event_labels,name)\n self.name=\"fake_\"+self.name\n self.sim_params=sim_params\n self.sim_baseline=real_baseline\n \n ## OBS: not the real model but a simplification (npar/tmax may be different per event)\n x1=pupil_build_design_matrix(self.tx, self.event_onsets, self.fs, \n sim_params[\"prf_npar\"][0], sim_params[\"prf_tmax\"][0], 6000)\n amp=np.mean(real_baseline)*sim_params[\"evoked_response_perc\"]\n real_response=amp*np.dot(x1.T, real_response_coef) ## predicted signal\n \n self.sim_response=real_response\n self.sim_response_coef=real_response_coef\n\n @keephistory \n def unscale(self, mean: Optional[float]=None, sd: Optional[float]=None, inplace=_inplace):\n \"\"\"\n Scale back to original values using either values provided as arguments\n or the values stored in `scale_params`.\n \n Parameters\n ----------\n mean: mean to add from signal\n sd: sd to scale with \n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \"\"\"\n mmean,ssd=self.scale_params[\"mean\"],self.scale_params[\"sd\"]\n obj=super().unscale(mean,sd,inplace)\n obj.sim_baseline=(self.sim_baseline*ssd)+mmean\n obj.sim_response=(self.sim_response*ssd)\n return obj\n \n @keephistory\n def scale(self, mean: Optional[float]=None, sd: Optional[float]=None, inplace=_inplace) -> None:\n \"\"\"\n Scale the pupillary signal by subtracting `mean` and dividing by `sd`.\n If these variables are not provided, use the signal's mean and std.\n \n Parameters\n ----------\n \n mean: mean to subtract from signal\n sd: sd to scale with\n inplace: bool\n if `True`, make change in-place and return the object\n if `False`, make and return copy before making changes \n \n Note\n ----\n Scaling-parameters are being saved in the `scale_params` argument. 
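\n\n        A brief usage sketch (illustrative only, not part of the original documentation;\n        it builds the object with :func:`.create_fake_pupildata` from this module)::\n\n            d  = create_fake_pupildata(ntrials=20)\n            dz = d.scale()     # z-score the signal together with the simulated baseline/response\n            d2 = dz.unscale()  # undo the scaling using the stored `scale_params`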
\n \"\"\"\n obj=super().scale(mean,sd)\n mean,sd=obj.scale_params[\"mean\"],obj.scale_params[\"sd\"]\n obj.sim_baseline=(self.sim_baseline-mean)/sd\n obj.sim_response=(self.sim_response)/sd\n return obj\n\n @keephistory\n def sub_slice(self, start: float=-np.inf, end: float=np.inf, units: str=\"sec\"):\n \"\"\"\n Return a new `PupilData` object that is a shortened version\n of the current one (contains all data between `start` and\n `end` in units given by `units` (one of \"ms\", \"sec\", \"min\", \"h\").\n \"\"\"\n slic=super().sub_slice(start,end,units)\n evon=self.event_onsets*self._unit_fac(units)\n keepev=np.logical_and(evon>=start, evon<=end)\n slic.sim_response_coef=slic.sim_response_coef[keepev]\n return slic\n \n \n def plot(self,\n plot_range: Tuple[float,float]=(-np.infty, +np.infty), \n interactive: bool=False, \n baseline: bool=True, \n response: bool=False,\n model: bool=True,\n simulated: bool=True,\n units: str=\"sec\"\n ) -> None:\n \"\"\"\n Make a plot of the pupil data using `matplotlib` or :py:func:`pypillometry.convenience.plot_pupil_ipy()`\n if `interactive=True`.\n\n Parameters\n ----------\n plot_range: tuple (start,end): plot from start to end (in units of `units`)\n baseline: plot baseline if estimated\n response: plot response if estimated\n model: plot full model if baseline and response have been estimated\n simulated: plot also the \"ground-truth\" baseline and response (i.e., the simulated one)?\n interactive: if True, plot with sliders to adjust range\n units: one of \"sec\"=seconds, \"ms\"=millisec, \"min\"=minutes, \"h\"=hours\n \"\"\"\n overlays=tuple()\n overlay_labels=tuple()\n if baseline and self.baseline_estimated:\n overlays+=(self.baseline,)\n overlay_labels+=(\"baseline\",)\n if baseline and simulated:\n overlays+=(self.sim_baseline,)\n overlay_labels+=(\"sim_baseline\",)\n if response and self.response_estimated:\n overlays+=(self.response,)\n overlay_labels+=(\"response\",)\n if response and simulated:\n overlays+=(self.sim_response,)\n overlay_labels+=(\"sim_response\",)\n if model and self.baseline_estimated and self.response_estimated:\n overlays+=(self.baseline+self.response,)\n overlay_labels+=(\"model\",)\n if model and simulated:\n overlays+=(self.sim_baseline+self.sim_response,)\n overlay_labels+=(\"real model\",)\n self._plot(plot_range, overlays, overlay_labels, units, interactive, False, False)\n\n \n \ndef plotpd_ia(*args: PupilData, figsize: Tuple[int]=(16,8), baseline: bool=True, events: Optional[int]=0):\n \"\"\"\n Interactive plotting for multiple `PupilData` objects.\n \n Parameters\n ----------\n args: `PupilData` datasets to plot\n figsize: dimensions of the plot\n baseline: plot baselines, too?\n events: plot event-markers? 
if None, no events are plotted, otherwise `events` \n is the index of the `PupilData` object to take the events from\n \"\"\"\n\n import pylab as plt\n from ipywidgets import interact, interactive, fixed, interact_manual, Layout\n import ipywidgets as widgets\n\n def draw_plot(plotxrange):\n xmin,xmax=plotxrange\n plt.figure(figsize=figsize)\n\n for i,pd in enumerate(args):\n ixmin=np.argmin(np.abs(pd.tx-xmin))\n ixmax=np.argmin(np.abs(pd.tx-xmax))\n\n\n plt.plot(pd.tx[ixmin:ixmax],pd.sy[ixmin:ixmax],label=pd.name)\n if baseline and pd.baseline_estimated:\n plt.plot(pd.tx[ixmin:ixmax], pd.baseline[ixmin:ixmax], label=\"BL: \"+pd.name)\n \n if not events is None: \n plt.vlines(args[events].event_onsets, *plt.ylim(), color=\"grey\", alpha=0.5)\n plt.xlim(xmin,xmax)\n plt.legend()\n\n xmin=np.min([pd.tx.min() for pd in args])\n xmax=np.max([pd.tx.max() for pd in args])\n wid_range=widgets.FloatRangeSlider(\n value=[xmin,xmax],\n min=xmin,\n max=xmax,\n step=1,\n description=' ',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n layout=Layout(width='100%', height='80px')\n )\n\n interact(draw_plot, plotxrange=wid_range)\n \n \n\n \ndef plotpd(*args: PupilData, subplots: bool=False, baseline: bool=False):\n \"\"\"\n Plotting for `PupilData` objects.\n \n Parameters\n ----------\n \n subplots: plot the different `PupilData`-objects in the same plot or subplots\n \"\"\"\n if len(args)<3:\n ncol=len(args)\n nrow=1\n else:\n ncol=3\n nrow=np.ceil(len(args)/3.0)\n for i,pd in enumerate(args):\n if subplots:\n plt.subplot(nrow,ncol,i+1)\n plt.title(pd.name)\n plt.plot(pd.tx/1000./60., pd.sy, label=pd.name)\n if baseline and pd.baseline_estimated:\n plt.plot(pd.tx/1000./60., pd.baseline, label=\"BL: \"+pd.name)\n if i==0:\n plt.xlabel(\"time (min)\")\n plt.ylabel(\"PD\")\n if not subplots:\n plt.legend()\n \n \ndef create_fake_pupildata(**kwargs):\n \"\"\"\n Return a :py:class:`pyillometry.pupildata.FakePupilData` object by buildling it with\n :py:func:`pypillometry.fakedata.get_dataset()`.\n \n Parameters\n -----------\n \n ntrials:int\n number of trials\n isi: float\n inter-stimulus interval in seconds\n rtdist: tuple (float,float)\n mean and std of a (truncated at zero) normal distribution to generate response times\n pad: float\n padding before the first and after the last event in seconds \n fs: float\n sampling rate in Hz\n baseline_lowpass: float\n cutoff for the lowpass-filter that defines the baseline\n (highest allowed frequency in the baseline fluctuations) \n evoked_response_perc: float\n amplitude of the pupil-response as proportion of the baseline \n response_fluct_sd: float\n How much do the amplitudes of the individual events fluctuate?\n This is determined by drawing each individual pupil-response to \n a single event from a (positive) normal distribution with mean as determined\n by `evoked_response_perc` and sd `response_fluct_sd` (in units of \n `evoked_response_perc`).\n prf_npar: tuple (float,float)\n (mean,std) of the npar parameter from :py:func:`pypillometry.pupil.pupil_kernel()`. \n If the std is exactly zero, then the mean is used for all pupil-responses.\n If the std is positive, npar is taken i.i.d. from ~ normal(mean,std) for each event.\n prf_tmax: tuple (float,float)\n (mean,std) of the tmax parameter from :py:func:`pypillometry.pupil.pupil_kernel()`. \n If the std is exactly zero, then the mean is used for all pupil-responses.\n If the std is positive, tmax is taken i.i.d. 
from ~ normal(mean,std) for each event.\n prop_spurious_events: float\n Add random events to the pupil signal. `prop_spurious_events` is expressed\n as proportion of the number of real events. \n noise_amp: float\n Amplitude of random gaussian noise that sits on top of the simulated signal.\n Expressed in units of mean baseline pupil diameter.\n \"\"\"\n sim_params={\n \"ntrials\":100,\n \"isi\":1000.0,\n \"rtdist\":(1000.0,500.0),\n \"pad\":5000.0,\n \"fs\":1000.0,\n \"baseline_lowpass\":0.1,\n \"evoked_response_perc\":0.001,\n \"response_fluct_sd\":1,\n \"prf_npar\":(10.35,0),\n \"prf_tmax\":(917.0,0),\n \"prop_spurious_events\":0.1,\n \"noise_amp\":0.0001\n }\n sim_params.update(kwargs)\n #print(sim_params)\n tx,sy,baseline,event_onsets,response_coef=get_dataset(**sim_params)\n event_labels=[\"event\" for _ in range(event_onsets.size)]\n ds=FakePupilData(sy,sim_params[\"fs\"],tx, event_onsets,event_labels=event_labels,\n sim_params=sim_params, \n real_baseline=baseline, real_response_coef=response_coef)\n return ds\n \n ", "id": "11570300", "language": "Python", "matching_score": 7.645806312561035, "max_stars_count": 13, "path": "pypillometry/pupildata.py" }, { "content": "\"\"\"\nerpd.py\n============\n\nEvent-related pupil dilation.\n\n\"\"\"\nfrom .io import *\n\nimport pylab as plt\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nimport scipy.signal as signal\nfrom scipy.interpolate import interp1d\nfrom scipy import interpolate\nimport scipy\n\n\nfrom typing import Sequence, Union, List, TypeVar, Optional, Tuple, Callable\nPupilArray=Union[np.ndarray, List[float]]\nimport collections.abc\n\n\nclass ERPD:\n \"\"\"\n Class representing a event-related pupillary dilation (ERPD) for one subject.\n \"\"\"\n def __init__(self, name, tx, erpd, missing, baselines):\n self.name=name\n self.baselines=baselines\n self.tx=tx\n self.erpd=erpd\n self.missing=missing\n\n def summary(self) -> dict:\n \"\"\"Return a summary of the :class:`.PupilData`-object.\"\"\"\n summary=dict(\n name=self.name,\n nevents=self.erpd.shape[0],\n n=self.erpd.shape[1],\n window=(self.tx.min(), self.tx.max())\n )\n return summary\n\n def write_file(self, fname:str):\n \"\"\"\n Save to file (using :mod:`pickle`).\n \n Parameters\n ----------\n \n fname: str\n filename\n \"\"\"\n pd_write_pickle(self, fname)\n \n @classmethod\n def from_file(cls, fname:str):\n \"\"\"\n Reads a :class:`.ERPD` object from a pickle-file.\n Use as ``pypillometry.ERPD.from_file(\"yourfile.pd\")``.\n \n Parameters\n ----------\n \n fname: str\n filename\n \"\"\"\n r=pd_read_pickle(fname)\n return r\n \n def __repr__(self) -> str:\n \"\"\"Return a string-representation of the dataset.\"\"\"\n pars=self.summary()\n del pars[\"name\"]\n s=\"ERPD({name}):\\n\".format(name=self.name)\n flen=max([len(k) for k in pars.keys()])\n for k,v in pars.items():\n s+=(\" {k:<\"+str(flen)+\"}: {v}\\n\").format(k=k,v=v)\n return s\n \n def plot(self, overlays=None, meanfct=np.mean, varfct=scipy.stats.sem, plot_missing: bool=True): \n \"\"\"\n Plot mean and error-ribbons using `varct`.\n \n Parameters\n ----------\n \n overlays: single or sequence of :class:`.ERPDSingleSubject`-objects \n the overlays will be added to the same plot\n \n meanfct: callable\n mean-function to apply to the single-trial ERPDs for plotting\n varfct: callable or None\n function to calculate error-bands (e.g., :func:`numpy.std` for standard-deviation \n or :func:`scipy.stats.sem` for standard-error)\n if None, no error bands are 
plotted\n \n plot_missing: bool\n plot percentage interpolated/missing data per time-point?\n \"\"\"\n merpd=meanfct(self.erpd, axis=0)\n sderpd=varfct(self.erpd, axis=0) if callable(varfct) else None\n percmiss=np.mean(self.missing, axis=0)*100.\n ax1=plt.gca() \n if sderpd is not None:\n ax1.fill_between(self.tx, merpd-sderpd, merpd+sderpd, color=\"grey\", alpha=0.3)\n ax1.plot(self.tx, merpd, label=self.name) \n ax1.axvline(x=0, color=\"red\") \n ax1.set_ylabel(\"mean PD\")\n ax1.set_xlabel(\"time (ms)\")\n ax1.set_title(self.name)\n if plot_missing:\n ax2=ax1.twinx()\n ax2.plot(self.tx, percmiss, alpha=0.3)\n ax2.set_ylim(0,100)\n ax2.set_ylabel(\"% missing\")\n \n if overlays is not None:\n if not isinstance(overlays, collections.abc.Sequence):\n overlays=[overlays]\n for ov in overlays:\n merpd=meanfct(ov.erpd, axis=0)\n sderpd=varfct(ov.erpd, axis=0) if callable(varfct) else None\n percmiss=np.mean(ov.missing, axis=0)*100.\n if sderpd is not None:\n ax1.fill_between(self.tx, merpd-sderpd, merpd+sderpd, color=\"grey\", alpha=0.3)\n ax1.plot(self.tx, merpd, label=ov.name) \n if plot_missing:\n ax2.plot(ov.tx, percmiss, alpha=0.3)\n ax1.legend()\n \n\ndef plot_erpds(erpds):\n \"\"\"\n Plot a list of ERPD objects.\n \"\"\"\n erpds[0].plot(erpds[1:len(erpds)])\n \n\ndef group_erpd(datasets: List, erpd_name: str, event_select, \n baseline_win: Optional[Tuple[float,float]]=None, \n time_win: Tuple[float,float]=(-500, 2000),\n subj_meanfct=np.mean):\n \"\"\"\n Calculate group-level ERPDs by applying `subj_meanfct` to each subj-level ERPD.\n \n\n Parameters\n ----------\n datasets: list of PupilData objects\n one PupilData object for each subject that should go into the group-level ERPD.\n \n erpd_name: str\n identifier for the result (e.g., \"cue-locked\" or \"conflict-trials\")\n\n baseline_win: tuple (float,float) or None\n if None, no baseline-correction is applied\n if tuple, the mean value in the window in milliseconds (relative to `time_win`) is \n subtracted from the single-trial ERPDs (baseline-correction)\n\n event_select: str or function\n variable describing which events to select and align to\n - if str: use all events whose label contains the string\n - if function: apply function to all labels, use those where the function returns True\n\n time_win: Tuple[float, float]\n time before and after event to include (in ms)\n \n subj_meanfct: fct\n function to summarize each individual ERPD\n \n Returns\n -------\n\n an :py:class:`ERPD` object for the group\n \"\"\" \n erpds=[d.get_erpd(erpd_name, event_select, baseline_win, time_win) for d in datasets]\n merpd=np.array([subj_meanfct(e.erpd, axis=0) for e in erpds])\n mmissing=np.array([subj_meanfct(e.missing, axis=0) for e in erpds])\n tx=erpds[0].tx\n erpd=ERPD(erpd_name, tx, merpd, mmissing, None) \n return erpd\n", "id": "5943671", "language": "Python", "matching_score": 3.192922592163086, "max_stars_count": 13, "path": "pypillometry/erpd.py" }, { "content": "\"\"\"\nio.py\n=====\n\nRead/Write data from/to disk.\n\"\"\"\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport requests\nfrom .pupildata import *\n\ndef pd_write_pickle(pdobj, fname):\n \"\"\"\n Store the :class:`.PupilData`-object `pdobj` in file using :mod:`pickle`.\n \n Parameters\n ----------\n \n pdobj: :class:`.PupilData`\n dataset to save\n fname: str\n filename to save to\n \"\"\"\n with open(fname, \"wb\") as f:\n pickle.dump(pdobj,f)\n \ndef pd_read_pickle(fname):\n \"\"\"\n Read the :class:`.PupilData`-object `pdobj` from file using 
:mod:`pickle`.\n    \n    Parameters\n    ----------\n    \n    fname: str\n        filename or URL to load data from\n    \n    Returns\n    -------\n    \n    pdobj: :class:`.PupilData`\n        loaded dataset \n    \"\"\"\n    if fname.startswith(\"http\"):\n        # try loading from URL\n        res=requests.get(fname)\n        if res.status_code==200:\n            pdobj=pickle.loads(res.content)\n        else:\n            raise IOError(\"cannot download '%s' (HTTP status %i)\"%(fname, res.status_code))\n    else:\n        with open(fname, 'rb') as f:\n            pdobj=pickle.load(f)\n    return pdobj\n", "id": "7830146", "language": "Python", "matching_score": 1.023502230644226, "max_stars_count": 13, "path": "pypillometry/io.py" }, { "content": "import unittest\nimport tempfile\nimport os, pickle, hashlib\nimport sys\n#sys.path.insert(0,\"..\")\n#import pypillometry as pp\nfrom .. import *\n\nclass TestIO(unittest.TestCase):\n    def test_pd_read_pickle_file(self):\n        d=pd_read_pickle(\"data/test.pd\")\n        self.assertEqual(d.fs, 500.0)\n        self.assertEqual(len(d), 60001)\n    def test_pd_read_pickle_http(self):\n        d=pd_read_pickle(\"https://github.com/ihrke/pypillometry/blob/master/data/test.pd?raw=true\")\n        self.assertEqual(d.fs, 500.0)\n        self.assertEqual(len(d), 60001)\n    \n    def test_pd_write_pickle(self):\n        d=pd_read_pickle(\"data/test.pd\")#create_fake_pupildata(ntrials=10)\n        fpath=tempfile.mkdtemp()\n        fname=os.path.join(fpath, \"test2.pd\")\n        pd_write_pickle(d, fname)\n        x=pd_read_pickle(fname)\n        self.assertEqual(x.size_bytes(), d.size_bytes())\n        self.assertEqual(x.name, d.name)\n        dmd5=hashlib.md5(pickle.dumps(d,-1)).hexdigest()\n        xmd5=hashlib.md5(pickle.dumps(x,-1)).hexdigest()\n        self.assertEqual(dmd5,xmd5)\n\nif __name__ == '__main__':\n    unittest.main()", "id": "1143105", "language": "Python", "matching_score": 1.67917001247406, "max_stars_count": 13, "path": "pypillometry/tests/test_io.py" }, { "content": "import unittest\nimport sys\nimport numpy as np\n#sys.path.insert(0,\"..\")\n#import pypillometry as pp\nfrom .. 
import *\n\nclass TestPupilData(unittest.TestCase):\n    def setUp(self):\n        self.dfake=create_fake_pupildata(ntrials=100, fs=500)\n        self.d=PupilData.from_file(\"data/test.pd\")\n    def test_from_file(self):\n        d=PupilData.from_file(\"data/test.pd\")\n        self.assertEqual(d.__class__, PupilData)\n    def test_create_fakedata(self):\n        d=create_fake_pupildata(ntrials=100)\n        self.assertEqual(d.__class__, FakePupilData)\n    def test_history(self):\n        d=create_fake_pupildata(ntrials=100)\n        self.assertEqual(len(d.history), 0)\n        d=d.drop_original()\n        self.assertEqual(len(d.history), 1)\n    def test_drop_original(self):\n        d2=self.dfake.drop_original()\n        self.assertIsNone(d2.original)\n        self.assertLess(d2.size_bytes(), self.dfake.size_bytes())\n    def test_reset_time(self):\n        d=self.dfake.reset_time(t0=500)\n        self.assertEqual(d.tx[0], 500)\n        d=d.reset_time()\n        self.assertEqual(d.tx[0], 0)\n    def test_summary(self):\n        self.dfake.summary()\n        self.d.summary()\n    def test_len(self):\n        self.assertEqual(self.d.sy.size, len(self.d))\n    def test_nevents(self):\n        self.d.nevents()\n    def test_nblinks(self):\n        self.assertEqual(self.d.nblinks(), 0)\n        d=self.d.blinks_detect()\n        self.assertGreater(d.nblinks(), 0)\n    def test_get_duration(self):\n        d1=self.d.get_duration(units=\"ms\")\n        d2=self.d.get_duration(units=\"sec\")\n        d3=self.d.get_duration(units=\"min\")\n        d4=self.d.get_duration(units=\"h\")\n        self.assertLess(d2,d1)\n        self.assertLess(d3,d2)\n        self.assertLess(d4,d3)\n        self.assertAlmostEqual(d2, d1/1000.)\n        self.assertAlmostEqual(d3, d2/60.)\n        self.assertAlmostEqual(d4, d3/60.)\n    def test_sub_slice(self):\n        d=self.dfake.sub_slice(1, 2, units=\"min\")\n        diff=d.tx[1]-d.tx[0]\n        self.assertLess(abs(d.tx[0]-1*60*1000.), diff)\n        self.assertLess(abs(d.get_duration(units=\"min\")-1), diff)\n    def test_scale(self):\n        d=self.dfake.scale()\n        self.assertAlmostEqual(np.mean(d.sy), 0)\n        self.assertAlmostEqual(np.std(d.sy), 1)\n        d2=d.unscale()\n        self.assertAlmostEqual(np.mean(d2.sy), np.mean(self.dfake.sy))\n        self.assertAlmostEqual(np.std(d2.sy), np.std(self.dfake.sy))\n    def test_lowpass_filter(self):\n        d=self.dfake.lowpass_filter(2)\n    def test_smooth_window(self):\n        d=self.dfake.smooth_window()\n        d2=d.smooth_window(window=\"bartlett\")\n        d3=d.smooth_window(winsize=20)\n    def test_downsample(self):\n        d=self.dfake.downsample(100)\n        self.assertEqual(d.fs, 100)\n        with self.assertRaises(ZeroDivisionError):\n            d.downsample(101)\n        d2=self.dfake.downsample(5, dsfac=True)\n        self.assertAlmostEqual(d.fs, d2.fs)\n    def test_copy(self):\n        d=self.dfake.copy()\n        d.sy[0]=self.dfake.sy[0]+1.0\n        self.assertNotEqual(d.sy[0], self.dfake.sy[0])\n    def test_estimate_baseline(self):\n        d=self.dfake.estimate_baseline()\n        self.assertLess(np.sum(d.sy<d.baseline), 0.1*len(d))\n    def test_stat_per_event(self):\n        a1=self.dfake.stat_per_event([-100,0], return_missing=None)\n        a2=self.dfake.stat_per_event([-100,0], return_missing=\"nmiss\")\n        a3=self.dfake.stat_per_event([-100,0], return_missing=\"prop\")\n        self.assertEqual(a1.__class__, np.ndarray)\n        self.assertEqual(a2.__class__, tuple)\n        self.assertEqual(a3.__class__, tuple)\n    def test_estimate_response(self):\n        d=self.dfake.estimate_response()\n    def test_blinks_detect(self):\n        d=self.d.blinks_detect()\n    def test_blinks_merge(self):\n        d=self.d.blinks_detect()\n        d2=d.blinks_merge()\n    def test_blinks_interpolate(self):\n        d=self.d.blinks_detect().blinks_merge().blinks_interpolate()\n    def test_blinks_interp_mahot(self):\n        d=self.d.blinks_detect().blinks_merge().blinks_interp_mahot()\n    def test_get_erpd(self):\n        a=self.d.get_erpd(\"test\", lambda 
x: True)\n self.assertEqual(a.__class__, ERPD)\n \n \n \n \n \n \n \n\nif __name__ == '__main__':\n unittest.main()", "id": "82305", "language": "Python", "matching_score": 3.1244211196899414, "max_stars_count": 13, "path": "pypillometry/tests/test_pdata.py" }, { "content": "import unittest\nimport sys\n#sys.path.insert(0,\"..\")\n#import pypillometry as pp\nfrom .. import *\n\nclass TestPupil(unittest.TestCase):\n def setUp(self):\n self.d=create_fake_pupildata(ntrials=100)\n def test_pupil_kernel_t(self):\n pupil_kernel_t([1,2], 10, 900)\n\nif __name__ == '__main__':\n unittest.main()", "id": "9008745", "language": "Python", "matching_score": 0.32304421067237854, "max_stars_count": 13, "path": "pypillometry/tests/test_pupil.py" }, { "content": "\"\"\"\nPypillometry\n============\n\nThis is a python-package to help with processing of pupillometric data.\n\"\"\"\nfrom .baseline import *\nfrom .convenience import *\nfrom .fakedata import *\nfrom .pupil import *\nfrom .pupildata import *\nfrom .erpd import *\n\nimport os.path\n__package_path__ = os.path.abspath(os.path.dirname(__file__))\n\n", "id": "12663625", "language": "Python", "matching_score": 0.4383642375469208, "max_stars_count": 13, "path": "pypillometry/__init__.py" }, { "content": "\"\"\"\nfakedata.py\n====================================\nGenerate artificial pupil-data.\n\"\"\"\nimport numpy as np\nimport scipy.stats as stats\n\nfrom .baseline import *\nfrom .pupil import *\n\ndef generate_pupil_data(event_onsets, fs=1000, pad=5000, baseline_lowpass=0.2, \n evoked_response_perc=0.02, response_fluct_sd=1,\n prf_npar=(10.35,0), prf_tmax=(917.0,0),\n prop_spurious_events=0.2, noise_amp=0.0005):\n \"\"\"\n Generate artificial pupil data as a sum of slow baseline-fluctuations\n on which event-evoked responses are \"riding\". \n \n Parameters\n -----------\n \n event_onsets: list\n list of all events that evoke a response (in seconds)\n \n fs: float\n sampling rate in Hz\n pad: float\n append `pad` milliseconds of signal after the last event is decayed \n baseline_lowpass: float\n cutoff for the lowpass-filter that defines the baseline\n (highest allowed frequency in the baseline fluctuations)\n \n evoked_response_perc: float\n amplitude of the pupil-response as proportion of the baseline \n \n response_fluct_sd: float\n How much do the amplitudes of the individual events fluctuate?\n This is determined by drawing each individual pupil-response to \n a single event from a (positive) normal distribution with mean as determined\n by `evoked_response_perc` and sd `response_fluct_sd` (in units of \n `evoked_response_perc`).\n prf_npar: tuple (float,float)\n (mean,std) of the npar parameter from :py:func:`pypillometry.pupil.pupil_kernel()`. \n If the std is exactly zero, then the mean is used for all pupil-responses.\n If the std is positive, npar is taken i.i.d. from ~ normal(mean,std) for each event.\n prf_tmax: tuple (float,float)\n (mean,std) of the tmax parameter from :py:func:`pypillometry.pupil.pupil_kernel()`. \n If the std is exactly zero, then the mean is used for all pupil-responses.\n If the std is positive, tmax is taken i.i.d. from ~ normal(mean,std) for each event.\n prop_spurious_events: float\n Add random events to the pupil signal. `prop_spurious_events` is expressed\n as proportion of the number of real events. 
\n \n noise_amp: float\n Amplitude of random gaussian noise that sits on top of the simulated signal.\n Expressed in units of mean baseline pupil diameter.\n \n \n Returns\n --------\n \n tx, sy: np.array\n time and simulated pupil-dilation (n)\n x0: np.array\n baseline (n)\n delta_weights: np.array\n pupil-response strengths (len(event_onsets))\n \"\"\"\n nevents=len(event_onsets)\n ## npar\n if prf_npar[1]==0: # deterministic parameter\n npars=np.ones(nevents)*prf_npar[0]\n else:\n npars=np.random.randn(nevents)*prf_npar[1]+prf_npar[0]\n\n ## tmax\n if prf_tmax[1]==0: # deterministic parameter\n tmaxs=np.ones(nevents)*prf_tmax[0]\n else:\n tmaxs=np.random.randn(nevents)*prf_tmax[1]+prf_tmax[0]\n\n if np.any(npars<=0):\n raise ValueError(\"npar must be >0\")\n if np.any(tmaxs<=0):\n raise ValueError(\"tmax must be >0\")\n\n # get maximum duration of one of the PRFs\n maxdur=pupil_get_max_duration(npars.min(), tmaxs.max())\n\n T=np.array(event_onsets).max()+maxdur+pad # stop pad millisec after last event\n n=int(np.ceil(T/1000.*fs)) # number of sampling points\n sy=np.zeros(n) # pupil diameter \n tx=np.linspace(0,T,n) # time-vector in milliseconds\n\n # create baseline-signal \n slack=int(0.50*n) # add slack to avoid edge effects of the filter\n x0=butter_lowpass_filter(np.random.rand(n+slack), baseline_lowpass, fs, 2)[slack:(n+slack)]\n x0=x0*1000+5000 # scale it up to a scale as usually obtained from eyetracker\n\n\n ### real events regressor\n ## scaling\n event_ix=(np.array(event_onsets)/1000.*fs).astype(np.int)\n #a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std\n delta_weights=stats.truncnorm.rvs(-1/response_fluct_sd,np.inf, loc=1, scale=response_fluct_sd, size=event_ix.size)\n x1=np.zeros_like(sy)\n\n for i,ev in enumerate(event_onsets):\n # create kernel and delta-functions for events\n kernel=pupil_kernel(duration=maxdur,fs=fs,npar=npars[i], tmax=tmaxs[i])\n x1[event_ix[i]:(event_ix[i]+kernel.size)]=x1[event_ix[i]:(event_ix[i]+kernel.size)]+kernel*delta_weights[i]\n\n ## spurious events regressor\n\n sp_event_ix=np.random.randint(low=0,high=np.ceil((T-maxdur-pad)/1000.*fs),size=int( nevents*prop_spurious_events ))\n sp_events=tx[ sp_event_ix ]\n n_sp_events=sp_events.size\n\n ## npar\n if prf_npar[1]==0: # deterministic parameter\n npars=np.ones(n_sp_events)*prf_npar[0]\n else:\n npars=np.random.randn(n_sp_events)*prf_npar[1]+prf_npar[0]\n\n ## tmax\n if prf_tmax[1]==0: # deterministic parameter\n tmaxs=np.ones(n_sp_events)*prf_tmax[0]\n else:\n tmaxs=np.random.randn(n_sp_events)*prf_tmax[1]+prf_tmax[0]\n\n\n ## scaling\n sp_delta_weights=stats.truncnorm.rvs(-1/response_fluct_sd,np.inf, loc=1, scale=response_fluct_sd, size=sp_event_ix.size)\n x2=np.zeros_like(sy)\n\n for i,ev in enumerate(sp_events):\n # create kernel and delta-functions for events\n kernel=pupil_kernel(duration=maxdur,fs=fs,npar=npars[i], tmax=tmaxs[i])\n x2[sp_event_ix[i]:(sp_event_ix[i]+kernel.size)]=x2[sp_event_ix[i]:(sp_event_ix[i]+kernel.size)]+kernel*sp_delta_weights[i]\n\n amp=np.mean(x0)*evoked_response_perc # mean amplitude for the evoked response\n noise=noise_amp*np.mean(x0)*np.random.randn(n)\n\n sy = x0 + amp*x1 + amp*x2 + noise\n\n return (tx,sy,x0,delta_weights)\n\n\ndef get_dataset(ntrials=100, isi=2000, rtdist=(1000,500),fs=1000,pad=5000, **kwargs):\n \"\"\"\n Convenience function to run :py:func:`generate_pupil_data()` with standard parameters.\n Parameters\n -----------\n \n ntrials:int\n number of trials\n isi: float\n inter-stimulus interval in milliseconds\n rtdist: tuple 
(float,float)\n mean and std of a (truncated at zero) normal distribution to generate response times\n fs: float\n sampling rate\n pad: float\n padding before the first and after the last event in seconds\n kwargs: dict\n arguments for :py:func:`pypillometry.fakedata.generate_pupil_data()`\n\n \n Returns\n --------\n \n tx, sy: np.array\n time and simulated pupil-dilation (n)\n baseline: np.array\n baseline (n)\n event_onsets: np.array\n timing of the simulated event-onsets (stimuli and responses not separated)\n response_coef: np.array\n pupil-response strengths (len(event_onsets))\n \"\"\"\n stim_onsets=np.arange(ntrials)*isi+pad\n rts=stats.truncnorm.rvs( (0-rtdist[0])/rtdist[1], np.inf, loc=rtdist[0], scale=rtdist[1], size=ntrials)\n resp_onsets=stim_onsets+rts\n event_onsets=np.concatenate( (stim_onsets, resp_onsets) )\n\n kwargs.update({\"fs\":fs})\n tx,sy,baseline,response_coef=generate_pupil_data(event_onsets, **kwargs)\n return tx,sy,baseline,event_onsets, response_coef\n", "id": "47082", "language": "Python", "matching_score": 3.462527275085449, "max_stars_count": 13, "path": "pypillometry/fakedata.py" }, { "content": "\"\"\"\npupil.py\n========\n\nfunctions related to pupillary responses.\n\"\"\"\nimport numpy as np\nimport scipy.optimize\nimport pylab as plt\nfrom .convenience import *\n\ndef pupil_kernel_t(t,npar,tmax):\n \"\"\"\n According to Hoeks and Levelt (1993, \n https://link.springer.com/content/pdf/10.3758%2FBF03204445.pdf), \n the PRF can be described by the so-called Erlang gamma function\n $$h_{HL}(t) = t^n e^{-nt/t_{max}}$$\n which we normalize to\n $$h(t)=\\\\frac{1}{h_{max}} h_{HL}(t)$$\n where $$h_{max} = \\max_t{\\\\left(h_{HL}(t)\\\\right)} = e^{-n}t_{max}^{n}$$\n which yields a maximum value of 1 for this function. \n The function $h(t)$ is implemented in :py:func:`pupil_kernel()`.\n \n This version of the function evaluates the PRF at inputs `t`.\n \n Parameters\n -----------\n t: float/np.array\n in ms\n npar: float\n n in the equation above\n tmax: float\n t_{max} in the equation above\n\n Returns\n --------\n h: np.array\n PRF evaluated at `t`\n \"\"\"\n t=np.array(t)\n npar=float(npar)\n tmax=float(tmax)\n hmax=np.exp(-npar)*tmax**npar ## theoretical maximum\n h = t**(npar) * np.exp(-npar*t / tmax) #Erlang gamma function Hoek & Levelt (1993)\n h=h/hmax\n return h\n\ndef pupil_get_max_duration(npar,tmax,thr=1e-8,stepsize=1.):\n \"\"\"\n Get the time when the PRF with parameters $n$ and $t_{max}$ is decayed to\n `thr`. 
This gives an indication of how long the `duration` parameter\n in :py:func:`pupil_kernel()` should be chosen.\n \n Parameters\n -----------\n npar,tmax: float\n PRF parameters, see :py:func:`pupil_kernel()`\n thr: float\n desired value to which the PRF is decayed (the lower `thr`, the longer the duration)\n stepsize: float\n precision of the maximum duration in ms\n \n Returns\n --------\n tdur: float\n first value of t so that PRF(t)<`thr`\n \"\"\"\n # start looking from `tmax` (which is time of the peak)\n tdur=tmax\n while pupil_kernel_t(tdur,npar,tmax)>thr:\n tdur=tdur+stepsize # in steps of `stepsize` ms\n return tdur\n\ndef pupil_kernel(duration=4000, fs=1000, npar=10.1, tmax=930.0):\n \"\"\"\n According to Hoeks and Levelt (1993, \n https://link.springer.com/content/pdf/10.3758%2FBF03204445.pdf), \n the PRF can be described by the so-called Erlang gamma function\n $$h_{HL}(t) = t^n e^{-nt/t_{max}}$$\n which we normalize to\n $$h(t)=\\\\frac{1}{h_{max}} h_{HL}(t)$$\n where $$h_{max} = \\max_t{\\\\left(h_{HL}(t)\\\\right)} = e^{-n}t_{max}^{n}$$\n which yields a maximum value of 1 for this function. \n The function $h(t)$ is implemented in `pp.pupil_kernel()`.\n \n Parameters\n -----------\n \n duration: float\n in ms; maximum of the time window for which to calculate the PRF [0,duration]\n fs: float\n sampling rate for resolving the PRF\n npar: float\n n in the equation above\n tmax: float\n t_{max} in the equation above\n \n Returns\n --------\n \n h: np.array\n sampled version of h(t) over the interval [0,`duration`] with sampling rate `fs`\n \"\"\"\n n=int(duration/1000.*fs)\n t = np.linspace(0,duration, n, dtype = np.float)\n h=pupil_kernel_t(t,npar,tmax)\n #h = t**(npar) * np.exp(-npar*t / tmax) #Erlang gamma function Hoek & Levelt (1993)\n #hmax=np.exp(-npar)*tmax**npar ## theoretical maximum\n return h#/h.max() # rescale to height=1\n\ndef plot_prf(npar=10.1,tmax=930,max_duration=\"estimate\",fs=500,**kwargs):\n \"\"\"\n Plot profile of the pupil-response function (PRF) with \n parameters `npar` and `tmax`.\n \"\"\"\n if max_duration==\"estimate\":\n max_duration=pupil_get_max_duration(npar,tmax)\n n=int(fs*(max_duration/1000.))\n tx=np.linspace(0,max_duration,n)\n prf=pupil_kernel_t(tx,npar,tmax)\n plt.plot(tx,prf,**kwargs)\n plt.xlabel(\"time [ms]\")\n plt.ylabel(\"AU\")\n \n\ndef pupil_build_design_matrix(tx,event_onsets,fs,npar,tmax,max_duration=\"estimate\"):\n \"\"\"\n Construct design matrix (nevents x n).\n Each column has a single pupil-kernel with parameters `npar`, `tmax` starting at \n each `event_onset`.\n \n Parameters\n ----------\n \n tx: np.array\n in ms\n event_onsets: np.array\n timing of the events\n fs: float\n sampling rate (Hz)\n npar: float\n n in the equation of :py:func:`pypillometry.pupil.pupil_kernel()`\n tmax: float\n t_{max} in :py:func:`pypillometry.pupil.pupil_kernel()`\n max_duration: float or \"estimate\"\n either maximum duration in milliseconds or string \"estimate\" which\n causes the function to determine an appropriate duration based on the\n kernel parameters `npar` and `tmax`\n \n Returns\n -------\n x1: np.array (nevents x n) \n design matrix\n \"\"\"\n if max_duration==\"estimate\":\n max_duration=pupil_get_max_duration(npar,tmax)\n\n \n h=pupil_kernel(duration=max_duration, fs=fs, npar=npar, tmax=tmax) ## pupil kernel\n\n # event-onsets for each event\n x1 = np.zeros((event_onsets.size, tx.size), dtype=np.float) # onsets\n\n # event-onsets as indices of the txd array\n evon_ix=np.argmin(np.abs(np.tile(event_onsets, 
(tx.size,1)).T-tx), axis=1)\n\n for i in range(evon_ix.size):\n slic_add=h.size if (evon_ix[i]+h.size)<x1.shape[1] else x1.shape[1]-evon_ix[i] \n x1[i,evon_ix[i]:evon_ix[i]+slic_add]=h[0:slic_add]\n \n \n ## old, vectorized version (I thought it would be faster but it is, in fact, a lot slower :-(\n # # prepare stimulus and response-regressors\n # h=pupil_kernel(duration=max_duration, fs=fs, npar=npar, tmax=tmax) ## pupil kernel\n # \n # # event-onsets for each event\n # x1 = np.zeros((event_onsets.size, tx.size), dtype=np.float) # onsets\n # \n # # event-onsets as indices of the txd array\n # evon_ix=np.argmin(np.abs(np.tile(event_onsets, (tx.size,1)).T-tx), axis=1)\n # \n # X=np.meshgrid(np.arange(x1.shape[1]), np.arange(x1.shape[0]))[0]\n # evon_ix_M1=np.tile(evon_ix, (x1.shape[1],1)).T\n # evon_ix_M2=np.tile(evon_ix+h.size, (x1.shape[1],1)).T\n # \n # x1[ np.arange(event_onsets.size), evon_ix ]=1\n # x1[ np.logical_and(X>=evon_ix_M1, X<evon_ix_M2) ]=np.tile(h, evon_ix.size)\n return x1\n\ndef pupil_response(tx, sy, event_onsets, fs, npar=\"free\", tmax=\"free\", verbose=10, \n bounds={\"npar\":(1,20), \"tmax\":(100,2000)}, display_progress=True):\n \"\"\"\n Estimate pupil-response based on event-onsets.\n \n tx : np.ndarray\n time-vector in milliseconds \n sy : np.ndarray\n (baseline-corrected) pupil signal\n event_onsets : list\n onsets of events (stimuli/responses) in milliseconds \n fs : float\n sampling rate in Hz\n npar: float\n npar-parameter for the canonical response-function or \"free\";\n in case of \"free\", the function optimizes for this parameter\n tmax: float\n tmax-parameter for the canonical response-function or \"free\";\n in case of \"free\", the function optimizes for this parameter\n bounds: dict\n in case that one or both parameters are estimated, give the lower\n and upper bounds for the parameters\n \"\"\"\n def vprint(v, s):\n if v<=verbose:\n print(s,end=\"\")\n \n if npar==\"free\" and tmax==\"free\":\n print(\"MSG: optimizing both npar and tmax, might take a while...\")\n def objective(x, event_onsets, tx,sy,fs):\n vprint(50,\".\")\n npar_t,tmax_t=x\n npar,tmax=x\n #npar=trans_logistic_vec(npar_t, a=bounds[\"npar\"][0], b=bounds[\"npar\"][1], inverse=True)\n #tmax=trans_logistic_vec(tmax_t, a=bounds[\"tmax\"][0], b=bounds[\"tmax\"][1], inverse=True)\n maxdur=pupil_get_max_duration(npar,tmax)\n vprint(100, \"\\nnpar,tmax,maxdur=(%.2f,%.2f,%i)\"%(npar,tmax,maxdur)) \n x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, maxdur)\n coef=scipy.optimize.nnls(x1.T, sy)[0] \n pred=np.dot(x1.T, coef) ## predicted signal\n resid=sy-pred ## residuals \n return np.sum(resid**2)\n \n #npar_start_trans=trans_logistic_vec(10,a=bounds[\"npar\"][0], b=bounds[\"npar\"][1],inverse=False)\n #tmax_start_trans=trans_logistic_vec(900,a=bounds[\"tmax\"][0], b=bounds[\"tmax\"][1],inverse=False)\n #r=scipy.optimize.minimize(objective, (npar_start_trans, tmax_start_trans), \n # args=(event_onsets,tx,sy,fs), \n # method=\"Nelder-Mead\") \n r=scipy.optimize.minimize(objective, (10,900), #(npar_start_trans, tmax_start_trans), \n args=(event_onsets,tx,sy,fs), bounds=[bounds[\"npar\"],bounds[\"tmax\"]],\n options={\"disp\":display_progress})\n #method=\"Nelder-Mead\") \n \n #npar=trans_logistic_vec(r.x[0], a=bounds[\"npar\"][0], b=bounds[\"npar\"][1], inverse=False)\n #tmax=trans_logistic_vec(r.x[1], a=bounds[\"tmax\"][0], b=bounds[\"tmax\"][1], inverse=False)\n npar,tmax=r.x[0],r.x[1]\n elif npar==\"free\":\n print(\"MSG: optimizing npar only, might take a while...\")\n def 
objective(x, tmax, event_onsets, tx,sy,fs):\n vprint(50,\".\")\n npar=x\n maxdur=pupil_get_max_duration(npar,tmax)\n vprint(100, \"\\nnpar,maxdur=(%.2f,%i)\"%(npar,maxdur)) \n x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, maxdur) \n coef=scipy.optimize.nnls(x1.T, sy)[0] \n pred=np.dot(x1.T, coef) ## predicted signal\n resid=sy-pred ## residuals \n return np.sum(resid**2)\n r=scipy.optimize.minimize_scalar(objective, bounds=bounds[\"npar\"],\n args=(tmax,event_onsets,tx,sy,fs), \n method=\"bounded\", options={\"disp\":display_progress,\"xatol\":.1}) \n npar=r.x\n elif tmax==\"free\":\n print(\"MSG: optimizing tmax only, might take a while...\")\n def objective(x, npar, event_onsets, tx,sy,fs):\n vprint(50,\".\") \n tmax=x\n maxdur=pupil_get_max_duration(npar,tmax) \n vprint(100, \"\\ntmax,maxdur=(%.2f,%i)\"%(tmax,maxdur)) \n x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, maxdur)\n coef=scipy.optimize.nnls(x1.T, sy)[0] \n pred=np.dot(x1.T, coef) ## predicted signal\n resid=sy-pred ## residuals \n return np.sum(resid**2)\n r=scipy.optimize.minimize_scalar(objective, bounds=bounds[\"tmax\"],\n args=(npar,event_onsets,tx,sy,fs), \n method=\"bounded\",options={\"disp\":display_progress,\"xatol\":1}) \n tmax=r.x\n \n maxdur=pupil_get_max_duration(npar,tmax)\n x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, maxdur)\n coef=scipy.optimize.nnls(x1.T, sy)[0] \n pred=np.dot(x1.T, coef) ## predicted signal\n \n return pred, coef, npar, tmax, x1\n\n \n \ndef pupilresponse_nnls(tx, sy, event_onsets, fs, npar=10.1, tmax=930):\n \"\"\"\n Estimate single-event pupil responses based on canonical PRF (`pupil_kernel()`)\n using non-negative least-squares (NNLS).\n \n Parameters\n -----------\n \n tx : np.ndarray\n time-vector in milliseconds\n \n sy : np.ndarray\n (baseline-corrected) pupil signal\n \n event_onsets : list\n onsets of events (stimuli/responses) in seconds\n \n fs : float\n sampling rate in Hz\n \n npar,tmax: float\n parameters for :py:func:`pypillometry.pupil.pupil_kernel()`\n \n Returns\n --------\n \n (coef,pred,resid): tuple\n coef: purely-positive regression coefficients\n pred: predicted signal\n resid: residuals (sy-pred)\n \"\"\"\n x1=pupil_build_design_matrix(tx, event_onsets, fs, npar, tmax, \"estimate\")\n \n ## we use a non-negative least squares solver to force the PRF-coefficients to be positive\n coef=scipy.optimize.nnls(x1.T, sy)[0] \n pred=np.dot(x1.T, coef) ## predicted signal\n resid=sy-pred ## residual\n\n return coef,pred,resid\n \n\ndef stat_event_interval(tx,sy,event_onsets,interval,statfct=np.mean):\n \"\"\"\n Return result of applying a statistical function to pupillometric data in a\n given interval relative to event-onsets. 
For example, extract mean \n pupil-size in interval before trial onset.\n \n Parameters\n -----------\n \n tx : np.ndarray\n time-vector in milliseconds\n \n sy : np.ndarray\n (baseline-corrected) pupil signal\n \n event_onsets : list\n onsets of events (stimuli/responses) in seconds\n \n interval : tuple (min,max)\n time-window in ms relative to event-onset (0 is event-onset)\n \n statfct : function\n function mapping np.array to a single number\n \n Returns\n --------\n \n result: np.array\n number of event-onsets long result array\n \"\"\"\n event_onsets=np.array(event_onsets)\n starts=event_onsets+interval[0]\n ends =event_onsets+interval[1]\n\n res=np.zeros(event_onsets.size)\n\n for i,interv in enumerate(zip(starts,ends)):\n start_ix=np.argmin(np.abs(interv[0]-tx))\n end_ix=np.argmin(np.abs(interv[1]-tx))\n if start_ix==end_ix:\n end_ix+=1\n res[i]=statfct(sy[start_ix:end_ix])\n return res\n \n \n ", "id": "1004404", "language": "Python", "matching_score": 3.067080497741699, "max_stars_count": 13, "path": "pypillometry/pupil.py" }, { "content": "\"\"\"\nbaseline.py\n===========\n\nFunctions for estimating tonic pupillary fluctuations (baseline).\n\"\"\"\nimport numpy as np\nimport scipy.signal as signal\nimport scipy\nimport math\n\nimport scipy.interpolate\nfrom scipy.interpolate import interp1d, splrep, splev\n\nfrom .pupil import *\nfrom .convenience import *\n\nstan_code_baseline_model_asym_laplac=\"\"\"\n// Stan model for pupil-baseline estimation\n//\nfunctions{\n // asymmetric laplace function with the mu, sigma, tau parametrization\n real skew_double_exponential_lpdf(real y, real mu, real sigma, real tau) {\n return log(tau) + log1m(tau)\n - log(sigma)\n - 2 * ((y < mu) ? (1 - tau) * (mu - y) : tau * (y - mu)) / sigma;\n }\n \n // zero-centered asymmetric laplace function with the mu, lambda, kappa parametrization\n real skew_double_exponential2_lpdf(real y, real lam, real kappa) {\n return log(lam) - log(kappa+1/kappa)\n + ((y<0) ? 
(lam/kappa) : (-lam*kappa))*(y);\n }\n}\ndata{ \n int<lower=1> n; // number of timepoints in the signal\n vector[n] sy; // the pupil signal\n \n int<lower=1> ncol; // number of basis functions (columns in B)\n matrix[n,ncol] B; // spline basis functions\n \n int<lower=1> npeaks; // number of lower peaks in the signal\n int<lower=1> peakix[npeaks]; // index of the lower peaks in sy\n vector<lower=0>[npeaks] lam_prominences; // lambda-converted prominence values\n \n real<lower=0> lam_sig; // lambda for the signal where there is no peak\n real<lower=0,upper=1> pa; // proportion of allowed distribution below 0\n}\n\ntransformed data{\n vector[n] lam; // lambda at each timepoint\n real<lower=0> kappa; // calculated kappa from pa\n kappa=sqrt(pa)/sqrt(1-pa);\n \n lam=rep_vector(lam_sig, n); \n for(i in 1:npeaks){\n lam[peakix[i]]=lam_prominences[i];\n }\n}\nparameters {\n vector[ncol] coef; // coefficients for the basis-functions\n}\n\ntransformed parameters{\n \n}\n\nmodel {\n {\n vector[n] d;\n \n coef ~ normal(0,5);\n d=sy-(B*coef); // center at estimated baseline\n for( i in 1:n ){\n d[i] ~ skew_double_exponential2(lam[i], kappa);\n }\n }\n}\n\"\"\"\n\n\ndef bspline(txd, knots, spline_degree=3):\n \"\"\"\n Re-implementation from https://mc-stan.org/users/documentation/case-studies/splines_in_stan.html.\n Similar behaviour as R's bs() function from the splines-library.\n \n Parameters\n -----------\n \n txd: np.array\n time-vector\n knots: np.array\n location of the knots\n spline_degree: int\n degree of the spline\n \n Returns\n --------\n \n B: np.array\n matrix of basis functions\n \"\"\"\n n=txd.shape[0]\n num_knots=knots.shape[0]\n\n def build_b_spline(t, ext_knots, ind, order):\n n=t.shape[0]\n b_spline=np.zeros(n)\n w1=np.zeros(n)\n w2=np.zeros(n)\n if order==1:\n b_spline=np.zeros(n)\n b_spline[np.logical_and(t>=ext_knots[ind], t<ext_knots[ind+1]) ]=1\n else:\n if ext_knots[ind] != ext_knots[ind+order-1]:\n w1=(t-np.array([ext_knots[ind]]*n))/(ext_knots[ind+order-1]-ext_knots[ind])\n if ext_knots[ind+1]!=ext_knots[ind+order]:\n w2=1-(t-np.array([ext_knots[ind+1]]*n))/(ext_knots[ind+order]-ext_knots[ind+1])\n b_spline=w1*build_b_spline(t,ext_knots,ind,order-1)+w2*build_b_spline(t,ext_knots,ind+1,order-1)\n return b_spline\n\n num_basis = num_knots + spline_degree - 1; # total number of B-splines\n B=np.zeros( (num_basis, n) )\n ext_knots=np.concatenate( ([knots[0]]*spline_degree, knots, \n [knots[num_knots-1]]*spline_degree) )\n for i in range(num_basis):\n B[i,:] = build_b_spline(txd, ext_knots, i, spline_degree+1)\n B[num_basis-1,n-1]=1\n return B.T\n\ndef butter_lowpass(cutoff, fs, order=5):\n \"\"\"\n Get lowpass-filter coefficients for Butterworth-filter.\n \n Parameters\n -----------\n \n cutoff: float\n lowpass-filter cutoff\n fs: float\n sampling rate\n order: int\n filter order\n \n Returns\n -------\n \n (b,a): tuple (float,float)\n filter coefficients\n \"\"\"\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n \"\"\"\n Lowpass-filter signal using a Butterworth-filter.\n \n Parameters\n -----------\n \n data: np.array\n data to lowpass-filter\n cutoff: float\n lowpass-filter cutoff\n fs: float\n sampling rate\n order: int\n filter order\n \n Returns\n -------\n \n y: np.array\n filtered data\n \"\"\"\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = signal.filtfilt(b, a, data)\n return y\n\ndef downsample(y,R):\n \"\"\"\n Simple 
downsampling scheme using mean within the downsampling window.\n \n Parameters\n -----------\n \n y: np.array\n signal to downsample\n \n R: int\n decimate-factor\n \n Returns\n -------\n \n y: np.array\n downsampled data\n \"\"\"\n pad_size = int(math.ceil(float(y.size)/R)*R - y.size)\n y_padded = np.append(y, np.zeros(pad_size)*np.NaN)\n y2=scipy.nanmean(y_padded.reshape(-1,R), axis=1)\n return y2\n\n\n \ndef baseline_envelope_iter_bspline(tx,sy,event_onsets,fs, fsd=10, lp=2, \n lam_sig=1, lam_min=1, lam_max=100,\n verbose=0):\n \"\"\"\n Extract baseline based (re-)estimating the lower envelope using B-splines.\n See notebook `baseline_interp.ipynb` for details.\n The signal is downsampled (to `fsd` Hz) for speed.\n \n Parameters\n -----------\n \n tx : np.ndarray\n time-vector in seconds\n \n sy : np.ndarray\n raw pupil signal\n \n event_onsets : list\n onsets of events (stimuli/responses) in milliseconds\n \n fs : float\n sampling rate in Hz\n \n fsd : float\n downsampled sampling rate (if too slow, decrease)\n \n lp : float\n lowpass-filter cutoff (Hz)\n \n lam_sig: float\n parameter steering how much the baseline is shaped by the non-peaks of the signal\n \n lam_min,lam_max: float\n parameters mapping how much low- and high-prominence peaks influence the baseline\n \n verbose: int [0, 100]\n how much information to print (0 nothing, 100 everything)\n \n Returns\n -------\n \n (txd,syd,base2,base1) : tuple\n txd: downsampled time-array\n syd: downsampled and lowpass-filtered pupil signal\n base1: is the estimated base after the first round\n base2: is the final baseline estimate\n \n \"\"\"\n def vprint(v, s):\n if v<=verbose:\n print(\">\",s)\n\n dsfac=int(fs/fsd) # calculate downsampling factor\n vprint(100, \"Downsampling factor is %i\"%dsfac)\n \n # downsampling\n syc=butter_lowpass_filter(sy, lp, fs, order=2)\n syd=downsample(syc, dsfac)\n\n # z-scale for easier model-fitting\n symean,sysd=np.mean(syd),np.std(syd)\n syd=(syd-symean)/sysd\n txd=downsample(tx, dsfac)\n vprint(100, \"Downsampling done\")\n\n # peak finding and spline-building\n peaks_ix=signal.find_peaks(-syd)[0]\n prominences=signal.peak_prominences(-syd, peaks_ix)[0]\n peaks=txd[peaks_ix]\n vprint(100, \"Peak-detection done, %i peaks detected\"%peaks.shape[0])\n knots=np.concatenate( ([txd.min()], peaks, [txd.max()]) ) ## peaks as knots\n B=bspline(txd, knots, 3)\n vprint(100, \"B-spline matrix built, dims=%s\"%str(B.shape))\n \n\n # convert\n def prominence_to_lambda(w, lam_min=1, lam_max=100):\n w2=lam_min+((w-np.min(w))/(np.max(w-np.min(w))))*(lam_max-lam_min)\n return w2\n \n w=prominence_to_lambda(prominences, lam_min=lam_min, lam_max=lam_max)\n \n # load or compile model\n vprint(10, \"Compiling Stan model\")\n\n sm = StanModel_cache(stan_code_baseline_model_asym_laplac)\n \n ## put the data for the model together\n data={\n 'n':syd.shape[0],\n 'sy':syd,\n 'ncol':B.shape[1],\n 'B':B,\n 'npeaks':peaks_ix.shape[0],\n 'peakix':peaks_ix,\n 'lam_sig':lam_sig,\n 'pa':0.05,\n 'lam_prominences':w\n }\n \n ## variational optimization\n vprint(10, \"Optimizing Stan model\")\n opt=sm.vb(data=data)\n vbc=opt[\"mean_pars\"]\n meansigvb=np.dot(B, vbc)\n vprint(10, \"Done optimizing Stan model\")\n \n ## PRF model\n # new \"signal\"\n syd2=syd-meansigvb\n\n vprint(10, \"Estimating PRF model (NNLS)\") \n #coef,pred,resid=pupilresponse_nnls(txd,syd2,event_onsets,fs=fsd)\n pred, coef, _, _, _=pupil_response(txd, syd2, event_onsets, fsd, npar=10, tmax=917)\n resid=syd-pred\n vprint(10, \"Done Estimating PRF model (NNLS)\")\n 
\n ### 2nd iteration\n ## get new peaks\n syd3=syd-pred\n peaks2_ix=signal.find_peaks(-syd3)[0]\n prominences2=signal.peak_prominences(-syd3, peaks2_ix)[0]\n peaks2=txd[peaks2_ix]\n vprint(100, \"2nd Peak-detection done, %i peaks detected\"%peaks2.shape[0])\n\n knots2=np.concatenate( ([txd.min()], peaks2, [txd.max()]) ) ## peaks as knots\n B2=bspline(txd, knots2, 3)\n vprint(100, \"2nd B-spline matrix built, dims=%s\"%str(B2.shape))\n\n w2=prominence_to_lambda(prominences2, lam_min=lam_min, lam_max=lam_max)\n\n data2={\n 'n':syd3.shape[0],\n 'sy':syd3,\n 'ncol':B2.shape[1],\n 'B':B2,\n 'npeaks':peaks2_ix.shape[0],\n 'peakix':peaks2_ix,\n 'lam_sig':lam_sig,\n 'pa':0.05,\n 'lam_prominences':w2\n }\n \n ## variational optimization\n vprint(10, \"2nd Optimizing Stan model\")\n opt=sm.vb(data=data2)\n vbc2=opt[\"mean_pars\"]\n meansigvb2=np.dot(B2, vbc2) \n vprint(10, \"Done 2nd Optimizing Stan model\")\n \n return txd,(syd*sysd)+symean,(meansigvb2*sysd)+symean, (meansigvb*sysd)+symean\n\ndef baseline_envelope(tx,sy,event_onsets, fs=1000, lp=2, prominence_thr=80, interp_method=\"cubic\"):\n \"\"\"\n Extract baseline based on the lower envelope of the (filtered) signal.\n\n Steps: \n \n - filter away noise\n - detect high-prominence throughs in the signal \n - calculate lower envelope based on these peaks\n \n Parameters\n -----------\n \n tx : np.ndarray\n time-vector in seconds\n \n sy : np.ndarray\n raw pupil signal\n \n event_onsets : list\n onsets of events (stimuli/responses) in seconds\n \n fs : float\n sampling rate in Hz\n \n lp : float\n low-pass filter cutoff for removing random noise\n \n prominence_thr : float in [0,100]\n percentile of the prominence distribution (of the peaks) to \n use for determining prominent peaks (see `scipy.stats.peak_prominences()`)\n \n interp_method : string, one of [\"linear\", \"cubic\", \"spline\"]\n \"linear\" - linear interpolation between the high-prominence peaks\n \"cubic\" - cubic interpolation through all high-prominence peaks\n \"spline\" - a smoothing spline that is guaranteed to go through all\n high-prominence peaks and smoothes through all the other\n (lower-prominence) peaks\n \n Returns\n --------\n \n base: np.array\n baseline estimate\n \"\"\"\n syc=butter_lowpass_filter(sy, fs=fs, order=2, cutoff=lp)\n peaks_ix=signal.find_peaks(-syc)[0]\n prominences=signal.peak_prominences(-syc, peaks_ix)[0]\n res=signal.peak_widths(-syc, peaks_ix)\n width_locs=(-res[1],res[2]/fs,res[3]/fs)\n widths=res[0]\n peaks=tx[peaks_ix]\n widths=widths/fs # in seconds\n prominence_cutoff=np.percentile(prominences,prominence_thr)\n real_peaks=peaks[prominences>prominence_cutoff]\n real_peaks_ix=peaks_ix[prominences>prominence_cutoff]\n \n if interp_method in [\"linear\",\"cubic\"]:\n ## interpolate only most prominent peaks\n xinterp=np.concatenate( ([tx.min()],real_peaks,[tx.max()]) )\n yinterp=np.concatenate( ([syc[0]], syc[real_peaks_ix], [syc[-1]]) )\n f=interp1d(xinterp,yinterp, kind=interp_method)\n elif interp_method==\"spline\":\n ## use all peaks for interpolation and use \"real\" peaks as inner knots\n xinterp=np.concatenate( ([tx.min()],peaks,[tx.max()]) )\n yinterp=np.concatenate( ([syc[0]], syc[peaks_ix], [syc[-1]]) )\n f=scipy.interpolate.LSQUnivariateSpline(xinterp,yinterp,real_peaks)\n else:\n raise ValueError(\"interp_method must be one of 'linear','cubic','spline'\")\n x0=f(tx)\n \n return x0\n\n\ndef baseline_pupil_model(tx,sy,event_onsets, fs=1000, lp1=2, lp2=0.2):\n \"\"\"\n Extract baseline based on filtering after removing stim-locked 
activity.\n \n Steps:\n \n - filter away noise\n - regress out event-locked activity from the filtered signal using NNLS\n - remove modeled signal from filtered data\n - run another lowpass-filter to get rid of spurious signals\n \n Parameters\n -----------\n \n tx : np.ndarray\n time-vector in seconds\n \n sy : np.ndarray\n raw pupil signal\n \n event_onsets : list\n onsets of events (stimuli/responses) in seconds\n \n fs : float\n sampling rate in Hz\n \n lp1 : float\n low-pass filter cutoff for removing random noise\n \n lp2 : float\n low-pass filter cutoff for removing spurious peaks from the baseline-signal \n \n Returns\n --------\n \n base: np.array\n baseline estimate \n \"\"\"\n syc=butter_lowpass_filter(sy, fs=fs, order=2, cutoff=lp1)\n \n # calculate indices for event-onsets\n event_onsets_ix=np.argmin(np.abs(np.tile(event_onsets, (sy.size,1)).T-tx), axis=1)\n\n # set up a single regressor\n x1=np.zeros(sy.size, dtype=np.float)\n x1[event_onsets_ix]=1\n kernel=pupil_kernel(4, fs=fs)\n x1=np.convolve(x1, kernel, mode=\"full\")[0:x1.size]\n\n # solve with non-negative least-squares\n X=np.stack( (x1, np.ones(x1.size)))\n coef=scipy.optimize.nnls(X.T, syc)[0]\n pred=coef[1]+coef[0]*x1\n resid=syc-pred+coef[1]\n\n resid_lp=butter_lowpass_filter(resid, fs=fs, order=2, cutoff=lp2)\n x0 = resid_lp\n \n return x0\n", "id": "3269300", "language": "Python", "matching_score": 2.5567712783813477, "max_stars_count": 13, "path": "pypillometry/baseline.py" }, { "content": "\"\"\"\nconvenience.py\n==============\n\nSome convenience functions.\n\"\"\"\n\n\nimport numpy as np\n\ndef nprange(ar):\n return (ar.min(),ar.max())\n\n\nimport pystan\nimport pickle\nfrom hashlib import md5\n\ndef zscale(y):\n return (y-np.nanmean(y))/np.nanstd(y)\n\n\ndef p_asym_laplac(y, mu, sigma, tau):\n \"\"\"\n Asymmetric laplace distribution https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution;\n Parametrization as here: https://github.com/stan-dev/stan/issues/2312\n \n tau in [0,1]\n \"\"\"\n I=np.array(y<=mu, dtype=np.int)\n return (2*tau*(1-tau))/sigma*np.exp(-2/sigma * ( (1-tau)*I*(mu-y) + tau*(1-I)*(y-mu) ) )\n\ndef p_asym_laplac_kappa(y, mu, lam, kappa):\n \"\"\"\n Asymmetric laplace distribution https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution;\n Wikipedia parametrization.\n \n kappa in [0, infty] where 1 means symmetry\n \"\"\"\n I=np.array(y<=mu, dtype=np.int)\n return (lam)/(kappa+1./kappa)*np.exp( ( (lam/kappa)*I*(y-mu) - lam*kappa*(1-I)*(y-mu) ) )\n\n\ndef trans_logistic_vec(x, a, b, inverse=False):\n \"\"\"\n vectorized version of trans_logistic()\n \n goes from [a,b] to [-inf,+inf] and back;\n inverse=False: [a,b] -> [-inf, +inf]\n inverse=True: [-inf,+inf] -> [a,b]\n if a or b is +/-infty, a logarithmic/exponential transform is used\n \"\"\"\n eps=1e-15\n if inverse==False:\n # variables from [a,inf]\n x=np.where( (a>-np.infty) & (b==np.infty), np.log(np.maximum(x-a, eps)), x)\n # variables from [-inf, b]\n x=np.where( (a==-np.infty) & (b<np.infty), np.log(np.maximum(b-x, eps)), x)\n # variables from [a, b]\n x=np.where( (a>-np.infty) & (b<np.infty), -np.log( (b-a)/(x-a)-1 ), x)\n elif inverse==True:\n # variables from [-inf,inf] -> [a,inf]\n x=np.where( (a>-np.infty) & (b==np.infty), np.exp(x)+a, x)\n # variables from [-inf, inf] -> [-inf, b]\n x=np.where( (a==-np.infty) & (b<np.infty), b-np.exp(x), x)\n # variables from [-inf,inf] -> [a, b]\n x=np.where( (a>-np.infty) & (b<np.infty), (1./(1.+np.exp(-x)))*(b-a)+a, x)\n \n return x\n\ndef StanModel_cache(model_code, 
model_name=None, **kwargs):\n \"\"\"Use just as you would `stan`\"\"\"\n code_hash = md5(model_code.encode('ascii')).hexdigest()\n if model_name is None:\n cache_fn = 'cached-model-{}.pkl'.format(code_hash)\n else:\n cache_fn = 'cached-{}-{}.pkl'.format(model_name, code_hash)\n try:\n sm = pickle.load(open(cache_fn, 'rb'))\n except:\n sm = pystan.StanModel(model_code=model_code)\n with open(cache_fn, 'wb') as f:\n pickle.dump(sm, f)\n else:\n print(\"Using cached StanModel\")\n return sm\n\n\n\ndef plot_pupil_ipy(tx, sy, event_onsets=None, overlays=None, overlay_labels=None, \n blinks=None, interpolated=None,\n figsize=(16,8), xlab=\"ms\", nsteps=100):\n \"\"\"\n Plotting with interactive adjustment of plotting window.\n To use this, do\n\n $ pip install ipywidgets\n $ jupyter nbextension enable --py widgetsnbextension\n $ jupyter labextension install @jupyter-widgets/jupyterlab-manager\n\n Parameters\n ----------\n \n tx : np.ndarray\n time-vector in seconds \n sy : np.ndarray\n raw pupil signal \n event_onsets : list\n onsets of events (stimuli/responses) in seconds\n overlays: tuple of np.array\n signals to overlay over the plot, given as tuple of arrays of same length as `tx`\n overlay_labels: tuple of strings\n labels for the overlays to be displayed in the legend\n figsize: tuple of int\n dimensions for the plot\n xlab: str\n label for x-axis\n nsteps: int\n number of steps for slider\n \"\"\"\n import pylab as plt\n from ipywidgets import interact, interactive, fixed, interact_manual, Layout\n import ipywidgets as widgets\n\n def draw_plot(plotxrange):\n xmin,xmax=plotxrange\n ixmin=np.argmin(np.abs(tx-xmin))\n ixmax=np.argmin(np.abs(tx-xmax))\n plt.figure(figsize=figsize)\n\n plt.plot(tx[ixmin:ixmax],sy[ixmin:ixmax], label=\"signal\")\n if overlays is not None:\n if type(overlays) is np.ndarray:\n plt.plot(tx[ixmin:ixmax],overlays[ixmin:ixmax],label=overlay_labels)\n else:\n for i,overlay in enumerate(overlays):\n lab=overlay_labels[i] if overlay_labels is not None else None\n plt.plot(tx[ixmin:ixmax],overlay[ixmin:ixmax], label=lab)\n for istart,iend in interpolated:\n plt.gca().axvspan(tx[istart],tx[iend],color=\"green\", alpha=0.1)\n for istart,iend in blinks:\n plt.gca().axvspan(tx[istart],tx[iend],color=\"red\", alpha=0.1)\n\n plt.vlines(event_onsets, *plt.ylim(), color=\"grey\", alpha=0.5)\n plt.xlim(xmin,xmax)\n plt.xlabel(xlab)\n if overlay_labels is not None:\n plt.legend()\n\n\n wid_range=widgets.FloatRangeSlider(\n value=[tx.min(), tx.max()],\n min=tx.min(),\n max=tx.max(),\n step=(tx.max()-tx.min())/nsteps,\n description=' ',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n layout=Layout(width='100%', height='80px')\n )\n\n interact(draw_plot, plotxrange=wid_range)\n \n\ndef helper_merge_blinks(b1,b2):\n if b1.size==0:\n return b2\n elif b2.size==0:\n return b1\n on=np.sort(np.concatenate( (b1[:,0], b2[:,0]) ))\n off=np.sort(np.concatenate( (b1[:,1], b2[:,1]) ))\n b=np.vstack((on,off)).T\n\n newb=[]\n on,off=b[0,:]\n for i in range(1,b.shape[0]):\n if b[i,0]<=off:\n # absorb onset from next \n off=max(off,b[i,1])\n else:\n newb.append([on,off])\n on,off=b[i,:]\n off=b[-1,1]\n newb.append([on,off])\n return np.array(newb)\n\n\ndef sizeof_fmt(num, suffix='B'):\n \"\"\"\n Convert number of bytes in `num` into human-readable string representation.\n Taken from https://stackoverflow.com/a/1094933\n \"\"\"\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % 
(num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)", "id": "4518535", "language": "Python", "matching_score": 2.439438581466675, "max_stars_count": 13, "path": "pypillometry/convenience.py" } ]
2.556771
yumiai
[ { "content": "import itertools\n\nKAPPA = [3]\n \nC = [300000000] #some amount greater than money raised, some ROI (10%) over money raised\nALPHA = [0.5] # computed using S1 * reserve / (S1 * reserve - S0 * reserve + S0*C[0])\nMONEY_RAISED = [1000000]\n\n# KAPPA = [3.0] # computed from I / I-(C*alpha)\n# C = [300000000] #some amount greater than money raised, some ROI (10%) over money raised\n# ALPHA = [0.5] # computed using S1 * reserve / (S1 * reserve - S0 * reserve + S0*C[0])\nPERIOD = [1200]\n\n### Monthly instalment from Impact Investor\nmonthly_instalment = [0]\n\n# New price singal : Determines signal shape for agent's behaviour heuristic on price\n# rules_price = [\"martin\"] #, \"step\"] # , \"ramp\", \"sin\"]\n\nrules_price = [\"martin\"] # , \"ramp\", \"sin\"]\n# rules_price = [\"martin\", \"step\", \"ramp\", \"sin\"]\n\n\n# reserve = 300 # MONEY_RAISED[0] - C[0]\n# supply = 600 #KAPPA[0]*(reserve/PRICE)\n# supply_free = supply\n# invariant_V = 1200 #(supply**KAPPA[0])/reserve\n# invariant_I = 650 #reserve + (C[0]*ALPHA[0])\n\n####### CONTINUOUS FUNDING #####################\nENABLE_CONTINUOUS = [True] #, False]\nTHETA = [0] # PORTION OF FUNDS FROM BONDING TO PROJECT, (1-theta) to reserve\n####### CONTINUOUS FUNDING #####################\n\n####### BURN ACTION #####################\nENABLE_BURN = [False]\n####### BURN ACTION #####################\n\n####### UNSIWAP STYLE TRADING #####################\nfee_numerator = [997]\nfee_denominator = [1000]\n\n####### UNSIWAP STYLE TRADING #####################\n\n#Alpha and price should be biased similarly\n# -1 indicates negative bias, signal linearly decreasing\n# 1 indicates positive bias, signal linearly increasing\nalpha_bias = [1]\nprice_bias = [1]\n\n####### KAPPA INTEGER ENFORCEMENT #####################\n# kappa_rule = [True, False] # TRUE means INTEGER enforcement, False allows decimal type\n\nkappa_rule = ['none'] \n# Round enforces Integer Rounding\n# None allows decimal type\n# Fixed kappa is fixed from initial value\n####### UNSIWAP STYLE TRADING #####################\n\n####### KAPPA INTEGER ENFORCEMENT #####################\n# kappa_rule = [True, False] # TRUE means INTEGER enforcement, False allows decimal type\n\nalpha_test = ['success'] \n# Round enforces Integer Rounding\n# None allows decimal type\n# Fixed kappa is fixed from initial value\n####### UNSIWAP STYLE TRADING #####################\n\n# print()\n\n# E = [0.1, 0.2, 0.3]\nE = [0.2]\n\nfactors = [rules_price, KAPPA, E, MONEY_RAISED, ALPHA, C, THETA, ENABLE_CONTINUOUS, ENABLE_BURN, alpha_bias, price_bias, alpha_test]\nproduct = list(itertools.product(*factors))\nrules_price, KAPPA, E, MONEY_RAISED, ALPHA, C, THETA, ENABLE_CONTINUOUS, ENABLE_BURN, alpha_bias, price_bias, alpha_test= zip(*product)\nrules_price = list(rules_price)\nKAPPA = list(KAPPA)\nE = list(E)\nMONEY_RAISED = list(MONEY_RAISED)\nALPHA = list(ALPHA)\nC = list(C)\nTHETA = list(THETA)\nENABLE_CONTINUOUS = list(ENABLE_CONTINUOUS)\nENABLE_BURN = list(ENABLE_BURN)\nalpha_bias = list(alpha_bias)\nprice_bias = list(price_bias)\nalpha_test = list(alpha_test)\n\n############ PARAMETRIC TESTS #########################################################################\n# factors = [rules_price, KAPPA, E, MONEY_RAISED, ALPHA, C, THETA, ENABLE_CONTINUOUS, ENABLE_BURN]\n# product = list(itertools.product(*factors))\n# rules_price, KAPPA, E, MONEY_RAISED, ALPHA, C, THETA, ENABLE_CONTINUOUS, ENABLE_BURN= zip(*product)\n# rules_price = list(rules_price)\n# KAPPA = list(KAPPA)\n# E = list(E)\n# MONEY_RAISED = 
list(MONEY_RAISED)\n# ALPHA = list(ALPHA)\n# C = list(C)\n# THETA = list(THETA)\n# ENABLE_CONTINUOUS = list(ENABLE_CONTINUOUS)\n# ENABLE_BURN = list(ENABLE_BURN)\n############ PARAMETRIC TESTS #########################################################################\n\n########## SYSTEM PARAMETERS ##########\nparams = {\n 'starting_kappa': KAPPA, # initial kappa\n 'starting_alpha': ALPHA, # initial alpha\n 'money_raised': MONEY_RAISED, # reserve + C\n 'monthly_instalment': monthly_instalment,\n 'C': C, # Commited outcome payout\n 'f': [0.03], # param to control certainty of alpha at extremes\n 'm': [0.15], # param to modulate curvature of alpha threshold band\n 'beta': [0.9], # param for Armijo rule\n 'dust': [10**(-8)], # param for Armijo rule\n 'period': PERIOD,\n 'rules_price': rules_price,\n 'E': E,\n 'ENABLE_CONTINUOUS' : ENABLE_CONTINUOUS,\n 'THETA' : THETA,\n 'ENABLE_BURN' : ENABLE_BURN,\n 'fee_numerator' : fee_numerator,\n 'fee_denominator' : fee_denominator,\n 'alpha_bias': alpha_bias,\n 'price_bias': price_bias,\n 'kappa_rule' : kappa_rule,\n 'alpha_test' : alpha_test,\n}\n", "id": "10960325", "language": "Python", "matching_score": 2.7503204345703125, "max_stars_count": 31, "path": "Code_With_Us/src/sim/model/sys_params.py" }, { "content": "# NOT USING THIS ATM\n# Everything below is currently in config.py\n# initial conditions are sometimes referred to as genesis state\n\nfrom .sys_params import params\n\nKAPPA = [2]\nPRICE = [1]\nC = [700]\nALPHA = [0.5, 1]\nMONEY_RAISED = [1000]\n\nreserve = MONEY_RAISED[0] - C[0]\nsupply = KAPPA[0]*(reserve/price)\nsupply_free = supply\ninvariant_V = (supply**KAPPA[0])/reserve\ninvariant_I = reserve + (C*ALPHA[0])\n\n# Put this in sys_params.py\nparams = {\n 'starting_kappa': KAPPA, # initial kappa\n 'starting_alpha': ALPHA, # initial alpha\n # 'starting_price': price,\n 'money_raised': MONEY_RAISED, # R+C\n 'C': C\n}\n\n# Put this in state_vars.py\ninitial_conditions = {\n 'reserve': reserve,\n 'pbar': price, # kappa*(reserve/supply), price is dR/dS = 1\n 'realized_price': 0,\n 'kappa': 0, # direct to initial kappa in params?\n 'supply': supply,\n # 'price': kappa*(reserve/supply), ### kappa*(reserve/supply)\n 'alpha': 0, # direct to initial alpha in params?\n 'supply_0': 0,\n 'supply_1': 0,\n 'supply_free': supply_free,\n 'attestations_0': 0,\n 'attestations_1': 0,\n 'invariant_V': invariant_V, # (supply**kappa)/reserve\n # (reserve + C*alpha) if alpha is directed to the initial alpha in params, this will change\n 'invariant_I': invariant_I\n}\n\n\n'''\ninitial_conditions = {\n 'reserve': 0,\n 'supply': 0, \n 'price': kappa*(reserve/supply), ### kappa*(reserve/supply)\n 'kappa': 0, ### direct to initial kappa in params?\n 'alpha': 0, ### direct to initial alpha in params?\n 'supply_0': 0,\n 'supply_1': 0,\n 'supply_free': supply,\n 'attestations_0': 0,\n 'attestations_1': 0,\n 'invariant_V': 0, ### (supply**kappa)/reserve\n 'invariant_I': 0 ### (reserve + C*alpha) if alpha is directed to the initial alpha in params, this will change\n} '''\n\n\n'''\ninitial_conditions = {'reserve': R0,\n 'supply': S0,\n 'price': P0,\n 'spot_price': P0,\n 'alpha': alpha0,\n 'spot_alpha': alpha0,\n 'rho': rho0\n }'''\n", "id": "11876435", "language": "Python", "matching_score": 3.9436490535736084, "max_stars_count": 31, "path": "src/sim/model/parts/old/state_variables.py" }, { "content": "# Everything below is currently in config.py\n\nparams = {\n 'starting_kappa': KAPPA, # initial kappa\n 'starting_alpha': ALPHA, # initial alpha\n # 'starting_price': 
price,\n 'money_raised': MONEY_RAISED, # R+C\n 'C': C,\n 'period': ['N/A', 2000, 2000, 2000]\n}\n\n''' params = {\n 'kappa': [kappa0],\n 'alpha': [alpha0],\n 'invariant_V0': [V0],\n 'invariant_I0': [I0],\n 'rule' : rules,\n 'dP' : ['N/A', P0/4, P0/1000, P0/2],\n 'period': ['N/A', 2000,2000,2000]\n} '''\n", "id": "7441739", "language": "Python", "matching_score": 0.38401293754577637, "max_stars_count": 31, "path": "src/sim/model/parts/old/sys_params.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import pickle\n\n\ndef alpha_plot(experiments,test_title):\n agent_private_alpha_signal = []\n agent_public_alpha_signal = []\n agent_private_alpha = []\n df = experiments.dataset[0]\n df = df[df['substep'] == df.substep.max()]\n for i in range (0,100): \n agent_public_alpha_signal_list = []\n agent_public_alpha_signal_list.append(df.chosen_agent.values[i]['agent_public_alpha_signal'])\n agent_public_alpha_signal.append(np.mean(agent_public_alpha_signal_list))\n agent_private_alpha_signal_list= []\n agent_private_alpha_signal_list.append(df.chosen_agent.values[i]['agent_private_alpha_signal'])\n agent_private_alpha_signal.append(np.mean(agent_private_alpha_signal_list))\n agent_private_alpha_list = []\n agent_private_alpha_list.append(df.chosen_agent.values[i]['agent_private_alpha'])\n agent_private_alpha.append(np.mean(agent_private_alpha_list))\n public_alpha = df.alpha\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,100),agent_public_alpha_signal,label='Agent Public Alpha Signal', marker='o')\n plt.plot(range(0,100),agent_private_alpha_signal,label='Agent Private Alpha Signal',marker='o')\n plt.plot(range(0,100),agent_private_alpha,label='Agent Private Alpha',marker='*')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Alpha')\n plt.show()\n \n return agent_public_alpha_signal,agent_private_alpha_signal, agent_private_alpha\n \ndef reserve_supply(experiments,test_title):\n \n df = experiments.dataset[0][experiments.dataset[0]['substep'] == experiments.dataset[0].substep.max()]\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,100),df.reserve,label='Reserve',marker='o')\n plt.plot(range(0,100),df.supply,label='Supply',marker='*')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n\n\ndef funds_from_bond(experiments,test_title):\n \n df = experiments.dataset[0][experiments.dataset[0]['substep'] == experiments.dataset[0].substep.max()]\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,100),df.funds_from_bond,label='Funds from Bonds',marker='+')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n\n\ndef price(experiments,test_title):\n \n df = experiments.dataset[0][experiments.dataset[0]['substep'] == experiments.dataset[0].substep.max()]\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,100),df.spot_price,label='Spot Price',marker='+')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n \n return df.spot_price\n \ndef agent_payout(experiments):\n t = 600\n S_free = experiments.dataset[0].supply_free[t]\n S_0 = experiments.dataset[0].supply_0[t]\n S_1 = experiments.dataset[0].supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]\n payout_list = []\n for a in agents_id:\n q1 = experiments.dataset[0].agents[t].agent_attestations_1[a]\n q0 = experiments.dataset[0].agents[t].agent_attestations_0[a]\n s_free = 
experiments.dataset[0].agents[t].agent_supply_free[a]\n s1 = experiments.dataset[0].agents[t].agent_supply_1[a]\n s0 = experiments.dataset[0].agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.dataset[0].agents[t].agent_private_alpha[a]\n Q0 = experiments.dataset[0].attestations_0[t]\n Q1 = 1 \n R = experiments.dataset[0].reserve[t]\n S = experiments.dataset[0].supply[t]\n C = 72000 \n alpha = experiments.dataset[0].alpha[t]\n if alpha < 0.5:\n alpha = 0\n elif alpha > 0.5:\n alpha = 1\n #print(\"s_free = \", s_free, \"| S = \", S)\n T1 = (s_free/S)*(C*alpha + R)\n T2 = (s1/(S-S_0))*alpha*(C+R)\n T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1+T2+T3\n payout_list.append(agent_payout)\n\n arr2d = np.array(payout_list)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n payouts = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, payouts, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Payout amount (tokens)\")\n plt.title(\"Agent and their Payouts\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n \n return x_pos,payouts\n\ndef agent_payout_2(experiments):\n t = 600\n S_free = experiments.dataset[0].supply_free[t]\n S_0 = experiments.dataset[0].supply_0[t]\n S_1 = experiments.dataset[0].supply_1[t]\n agents_id = [0,1]\n payout_list = []\n for a in agents_id:\n q1 = experiments.dataset[0].agents[t].agent_attestations_1[a]\n q0 = experiments.dataset[0].agents[t].agent_attestations_0[a]\n s_free = experiments.dataset[0].agents[t].agent_supply_free[a]\n s1 = experiments.dataset[0].agents[t].agent_supply_1[a]\n s0 = experiments.dataset[0].agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.dataset[0].agents[t].agent_private_alpha[a]\n Q0 = experiments.dataset[0].attestations_0[t]\n Q1 = 1 \n R = experiments.dataset[0].reserve[t]\n S = experiments.dataset[0].supply[t]\n C = 72000 \n alpha = experiments.dataset[0].alpha[t]\n if alpha < 0.5:\n alpha = 0\n elif alpha > 0.5:\n alpha = 1\n #print(\"s_free = \", s_free, \"| S = \", S)\n T1 = (s_free/S)*(C*alpha + R)\n T2 = (s1/(S-S_0))*alpha*(C+R)\n T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1+T2+T3\n payout_list.append(agent_payout)\n\n arr2d = np.array(payout_list)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n payouts = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, payouts, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Payout amount (tokens)\")\n plt.title(\"Agent and their Payouts\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n\ndef load_experiment_data(test):\n with open('chimple_data/'+test+'/Alpha/agent_public_alpha_signal.pickle', 'rb') as filehandle:\n # read the data as binary data stream\n agent_public_alpha_signal = pickle.load(filehandle)\n\n with open('chimple_data/'+test+'/Alpha/agent_private_alpha_signal.pickle', 'rb') as filehandle:\n # read the data as binary data stream\n agent_private_alpha_signal = pickle.load(filehandle)\n \n with open('chimple_data/'+test+'/Price/spot_price.pickle', 'rb') as filehandle:\n # read the data as binary data stream\n spot_price = pickle.load(filehandle)\n \n with open('chimple_data/'+test+'/Payout/agent_id.pickle', 'rb') as filehandle:\n # read the data as binary data stream\n agent_id = pickle.load(filehandle)\n \n with open('chimple_data/'+test+'/Payout/payouts.pickle', 'rb') as filehandle:\n # read the data as binary data stream\n payouts = pickle.load(filehandle)\n \n \n return 
agent_public_alpha_signal,agent_private_alpha_signal,spot_price,agent_id,payouts\n\ndef private_alpha_summary(agent_private_alpha_signal_A,agent_private_alpha_signal_B,agent_private_alpha_signal_C,\n agent_private_alpha_signal_D,agent_private_alpha_signal_E,agent_private_alpha_signal_F,\n agent_private_alpha_signal_G,agent_private_alpha_signal_H):\n\n\n fig, ((ax1, ax2,ax3,ax4), (ax5, ax6,ax7,ax8)) = plt.subplots(nrows=2, ncols=4, \n sharex=True, sharey=True,figsize=(15, 10))\n\n fig.suptitle('Agent Private Alpha Signal')\n\n ax1.plot(range(0,100),agent_private_alpha_signal_A,label='Test Case A')\n ax1.set_title('Test Case A')\n ax1.set(xlabel='Timestep', ylabel='Amount')\n\n ax2.plot(range(0,100),agent_private_alpha_signal_B,label='Test Case B')\n ax2.set_title('Test Case B')\n ax2.set(xlabel='Timestep', ylabel='Amount')\n\n ax3.plot(range(0,100),agent_private_alpha_signal_C,label='Test Case C')\n ax3.set_title('Test Case C')\n ax3.set(xlabel='Timestep', ylabel='Amount')\n\n ax4.plot(range(0,100),agent_private_alpha_signal_D,label='Test Case D')\n ax4.set_title('Test Case D')\n ax4.set(xlabel='Timestep', ylabel='Amount')\n\n ax5.plot(range(0,100),agent_private_alpha_signal_E,label='Test Case E')\n ax5.set_title('Test Case E')\n ax5.set(xlabel='Timestep', ylabel='Amount')\n\n ax6.plot(range(0,100),agent_private_alpha_signal_F,label='Test Case F')\n ax6.set_title('Test Case F')\n ax6.set(xlabel='Timestep', ylabel='Amount')\n\n ax7.plot(range(0,100),agent_private_alpha_signal_G,label='Test Case G')\n ax7.set_title('Test Case G')\n ax7.set(xlabel='Timestep', ylabel='Amount')\n\n ax8.plot(range(0,100),agent_private_alpha_signal_H,label='Test Case H')\n ax8.set_title('Test Case H')\n ax8.set(xlabel='Timestep', ylabel='Amount')\n plt.show()\n \ndef spot_price_summary(spot_price_A,spot_price_B,spot_price_C,spot_price_D,spot_price_E,spot_price_F,\n spot_price_G,spot_price_H):\n\n fig, ((ax1, ax2,ax3,ax4), (ax5, ax6,ax7,ax8)) = plt.subplots(nrows=2, ncols=4, \n sharex=True, sharey=True,figsize=(15, 10))\n\n fig.suptitle('Spot Price')\n\n ax1.plot(range(0,100),spot_price_A,label='Test Case A',color='r')\n ax1.set_title('Test Case A')\n ax1.set(xlabel='Timestep', ylabel='Amount')\n\n ax2.plot(range(0,100),spot_price_B,label='Test Case B',color='r')\n ax2.set_title('Test Case B')\n ax2.set(xlabel='Timestep', ylabel='Amount')\n\n ax3.plot(range(0,100),spot_price_C,label='Test Case C',color='r')\n ax3.set_title('Test Case C')\n ax3.set(xlabel='Timestep', ylabel='Amount')\n\n ax4.plot(range(0,100),spot_price_D,label='Test Case D',color='r')\n ax4.set_title('Test Case D')\n ax4.set(xlabel='Timestep', ylabel='Amount')\n\n ax5.plot(range(0,100),spot_price_E,label='Test Case E',color='r')\n ax5.set_title('Test Case E')\n ax5.set(xlabel='Timestep', ylabel='Amount')\n\n ax6.plot(range(0,100),spot_price_F,label='Test Case F',color='r')\n ax6.set_title('Test Case F')\n ax6.set(xlabel='Timestep', ylabel='Amount')\n\n ax7.plot(range(0,100),spot_price_G,label='Test Case G',color='r')\n ax7.set_title('Test Case G')\n ax7.set(xlabel='Timestep', ylabel='Amount')\n\n ax8.plot(range(0,100),spot_price_H,label='Test Case H',color='r')\n ax8.set_title('Test Case H')\n ax8.set(xlabel='Timestep', ylabel='Amount')\n plt.show()\n \ndef agent_payout_summary(agent_id_A,payouts_A,agent_id_B,payouts_B,agent_id_C,payouts_C,agent_id_D,payouts_D,\n agent_id_E,payouts_E,agent_id_F,payouts_F,agent_id_G,payouts_G,agent_id_H,payouts_H):\n \n fig, ((ax1, ax2,ax3,ax4), (ax5, ax6,ax7,ax8)) = plt.subplots(nrows=2, ncols=4, \n sharex=True, 
sharey=False,figsize=(15, 10))\n\n fig.suptitle('Agent Payouts')\n\n\n ax1.bar(agent_id_A, payouts_A,label='Test Case A',color='g')\n ax1.set_title('Test Case A')\n ax1.set(xlabel='Agent ID', ylabel='Amount')\n\n ax2.bar(agent_id_B, payouts_B,label='Test Case B',color='g')\n ax2.set_title('Test Case B')\n ax2.set(xlabel='Agent ID')\n\n ax3.bar(agent_id_C, payouts_C,label='Test Case C',color='g')\n ax3.set_title('Test Case C')\n ax3.set(xlabel='Agent ID')\n\n ax4.bar(agent_id_D, payouts_D,label='Test Case D',color='g')\n ax4.set_title('Test Case D')\n ax4.set(xlabel='Agent ID')\n\n ax5.bar(agent_id_E, payouts_E,label='Test Case E',color='g')\n ax5.set_title('Test Case E')\n ax5.set(xlabel='Agent ID', ylabel='Amount')\n\n ax6.bar(agent_id_F, payouts_F,label='Test Case F',color='g')\n ax6.set_title('Test Case F')\n ax6.set(xlabel='Agent ID')\n\n ax7.bar(agent_id_G, payouts_G,label='Test Case G',color='g')\n ax7.set_title('Test Case G')\n ax7.set(xlabel='Agent ID')\n\n ax8.bar(agent_id_H, payouts_H,label='Test Case H',color='g')\n ax8.set_title('Test Case H')\n ax8.set(xlabel='Agent ID')\n plt.show()", "id": "7092591", "language": "Python", "matching_score": 5.503306865692139, "max_stars_count": 31, "path": "src/sim/model/parts/utils.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef alpha_plot(experiments,test_title,T):\n agent_private_alpha_signal = []\n agent_public_alpha_signal = []\n agent_private_alpha = []\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n for i in range (0,T): \n agent_public_alpha_signal_list = []\n agent_public_alpha_signal_list.append(df.chosen_agent.values[i]['agent_public_alpha_signal'])\n agent_public_alpha_signal.append(np.mean(agent_public_alpha_signal_list))\n agent_private_alpha_signal_list= []\n agent_private_alpha_signal_list.append(df.chosen_agent.values[i]['agent_private_alpha_signal'])\n agent_private_alpha_signal.append(np.mean(agent_private_alpha_signal_list))\n agent_private_alpha_list = []\n agent_private_alpha_list.append(df.chosen_agent.values[i]['agent_private_alpha'])\n agent_private_alpha.append(np.mean(agent_private_alpha_list))\n public_alpha = df.alpha\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),agent_public_alpha_signal,label='Agent Public Alpha Signal', marker='o')\n plt.plot(range(0,T),agent_private_alpha_signal,label='Agent Private Alpha Signal',marker='o')\n plt.plot(range(0,T),agent_private_alpha,label='Agent Private Alpha',marker='*')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Alpha')\n plt.show()\n \ndef reserve_supply(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.reserve,label='Reserve',marker='o')\n plt.plot(range(0,T),df.supply,label='Supply',marker='*')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n\ndef alpha(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.alpha,label='Alpha',marker='o')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n \ndef supply_plot(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = 
plt.figure(figsize=(15, 10))\n # plt.plot(range(0,T),df.reserve,label='Reserve',marker='o')\n plt.plot(range(0,T),df.supply,label='Supply',marker='*')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n\n\ndef price(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.spot_price,label='Spot Price',marker='+')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n \n return \n\n\ndef agent_payout(experiments,t):\n \"\"\"\n For CWU Payout\n \"\"\"\n\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9]\n payout_list = []\n for a in agents_id:\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n S = experiments.supply[t] - 1000000 # subtract initial amount\n C = 300000000 \n alpha = experiments.alpha[t]\n if alpha < 0.4:\n alpha = 0\n elif alpha >= 0.4:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n # T2 = (s1/(S-S_0))*alpha*(C+R)\n # T3 = (s0/(S-S_1))*(1-alpha)*(R)\n\n agent_payout = T1\n payout_list.append(agent_payout)\n\n arr2d = np.array(payout_list)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n payouts = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, payouts, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Payout amount (uXCHF)\")\n plt.title(\"Agent and their Payouts\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n\n# def agent_payout(experiments,t):\n# S_free = experiments.supply_free[t]\n# S_0 = experiments.supply_0[t]\n# S_1 = experiments.supply_1[t]\n# agents_id = [0,1,2,3,4,5,6,7,9]\n# payout_list = []\n# for a in agents_id:\n# q1 = experiments.agents[t].agent_attestations_1[a]\n# q0 = experiments.agents[t].agent_attestations_0[a]\n# s_free = experiments.agents[t].agent_supply_free[a]\n# s1 = experiments.agents[t].agent_supply_1[a]\n# s0 = experiments.agents[t].agent_supply_0[a]\n# s = s_free + s1 + s0\n# agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n# Q0 = experiments.attestations_0[t]\n# Q1 = 1 \n# R = experiments.reserve[t]\n# S = experiments.supply[t]\n# C = 300000000 \n# alpha = experiments.alpha[t]\n# if alpha < 0.5:\n# alpha = 0\n# elif alpha > 0.5:\n# alpha = 1\n# T1 = (s_free/S)*(C*alpha + R)\n# T2 = (s1/(S-S_0))*alpha*(C+R)\n# T3 = (s0/(S-S_1))*(1-alpha)*(R)\n# agent_payout = T1+T2+T3\n# payout_list.append(agent_payout)\n\n# arr2d = np.array(payout_list)\n\n# arr1d = arr2d.flatten()\n\n# x = agents_id\n# payouts = arr1d\n\n# x_pos = [i for i, _ in enumerate(x)]\n\n# fig = plt.figure(figsize=(15, 10))\n# plt.bar(x_pos, payouts, color='green')\n# plt.xlabel(\"Agent ID\")\n# plt.ylabel(\"Payout amount (uXCHF)\")\n# plt.title(\"Agent and their Payouts\")\n\n# plt.xticks(x_pos, x)\n\n# plt.show()\n\n \n \ndef agent_ROI(experiments,t):\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9]\n payout_list = []\n roi = []\n for a in 
agents_id:\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n S = experiments.supply[t]\n C = 300000000 \n alpha = experiments.alpha[t]\n if alpha < 0.5:\n alpha = 0\n elif alpha > 0.5:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n T2 = (s1/(S-S_0))*alpha*(C+R)\n T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1+T2+T3\n payout_list.append(agent_payout)\n #roi.append(((s_free / S_free) * (C + R) - 14000000) / 14000000)\n roi_0 = (s_free * experiments.spot_price.values.mean()) - (14000000 * experiments.spot_price.values[0]) / 14000000 \n roi.append(roi_0 * 100)\n\n arr2d = np.array(roi)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n return_on_investments = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, return_on_investments, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"ROI\")\n plt.title(\"Agent and their ROI\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n\n \n# def agent_profit(experiments,t):\n# S_free = experiments.supply_free[t]\n# S_0 = experiments.supply_0[t]\n# S_1 = experiments.supply_1[t]\n# agents_id = [0,1,2,3,4,5,6,7,9]\n# payout_list = []\n# profits = []\n# for a in agents_id:\n# q1 = experiments.agents[t].agent_attestations_1[a]\n# q0 = experiments.agents[t].agent_attestations_0[a]\n# s_free = experiments.agents[t].agent_supply_free[a]\n# s1 = experiments.agents[t].agent_supply_1[a]\n# s0 = experiments.agents[t].agent_supply_0[a]\n# s = s_free + s1 + s0\n# agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n# Q0 = experiments.attestations_0[t]\n# Q1 = 1 \n# R = experiments.reserve[t]\n# S = experiments.supply[t]\n# C = 300000000 \n# alpha = experiments.alpha[t]\n# if alpha < 0.5:\n# alpha = 0\n# elif alpha > 0.5:\n# alpha = 1\n# T1 = (s_free/S)*(C*alpha + R)\n# T2 = (s1/(S-S_0))*alpha*(C+R)\n# T3 = (s0/(S-S_1))*(1-alpha)*(R)\n# agent_payout = T1+T2+T3\n# payout_list.append(agent_payout)\n# profits.append(agent_payout - 14000000)\n\n# arr2d = np.array(profits)\n\n# arr1d = arr2d.flatten()\n\n# x = agents_id\n# profit = arr1d\n\n# x_pos = [i for i, _ in enumerate(x)]\n\n# fig = plt.figure(figsize=(15, 10))\n# plt.bar(x_pos, profit, color='green')\n# plt.xlabel(\"Agent ID\")\n# plt.ylabel(\"Profit\")\n# plt.title(\"Agents and their Profits\")\n\n# plt.xticks(x_pos, x)\n\n# plt.show()\n\ndef agent_profit(experiments,t):\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9]\n payout_list = []\n profits = []\n for a in agents_id:\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n S = experiments.supply[t] - 1000000 # subtract initial amount\n C = 300000000 \n alpha = experiments.alpha[t]\n if alpha < 0.4:\n alpha = 0\n elif alpha > 0.4:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n # T2 
= (s1/(S-S_0))*alpha*(C+R)\n # T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1\n payout_list.append(agent_payout)\n profits.append(agent_payout - 14000000)\n\n arr2d = np.array(profits)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n profit = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, profit, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Profit\")\n plt.title(\"Agents and their Profits\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n", "id": "6639300", "language": "Python", "matching_score": 5.838711738586426, "max_stars_count": 31, "path": "Code_With_Us/src/sim/model/parts/utils.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef alpha_plot(experiments,test_title,T):\n agent_private_alpha_signal = []\n agent_public_alpha_signal = []\n agent_private_alpha = []\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n for i in range (0,T): \n agent_public_alpha_signal_list = []\n agent_public_alpha_signal_list.append(df.chosen_agent.values[i]['agent_public_alpha_signal'])\n agent_public_alpha_signal.append(np.mean(agent_public_alpha_signal_list))\n agent_private_alpha_signal_list= []\n agent_private_alpha_signal_list.append(df.chosen_agent.values[i]['agent_private_alpha_signal'])\n agent_private_alpha_signal.append(np.mean(agent_private_alpha_signal_list))\n agent_private_alpha_list = []\n agent_private_alpha_list.append(df.chosen_agent.values[i]['agent_private_alpha'])\n agent_private_alpha.append(np.mean(agent_private_alpha_list))\n public_alpha = df.alpha\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),agent_public_alpha_signal,label='Agent Public Alpha Signal', marker='o')\n plt.plot(range(0,T),agent_private_alpha_signal,label='Agent Private Alpha Signal',marker='o')\n plt.plot(range(0,T),agent_private_alpha,label='Agent Private Alpha',marker='*')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Alpha')\n plt.show()\n \ndef reserve_supply(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.reserve,label='Reserve',marker='o')\n plt.plot(range(0,T),df.supply,label='Supply',marker='*')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n\ndef public_alpha(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.public_alpha,label='Public Alpha',marker='o')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\ndef alpha(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.alpha,label='Alpha',marker='o')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n \ndef supply_plot(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n # plt.plot(range(0,T),df.reserve,label='Reserve',marker='o')\n plt.plot(range(0,T),df.supply,label='Supply',marker='*')\n \n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n 
plt.show()\n\n\ndef price(experiments,test_title,T):\n \n df = experiments\n df = df[df['substep'] == df.substep.max()]\n df.fillna(0,inplace=True)\n\n fig = plt.figure(figsize=(15, 10))\n plt.plot(range(0,T),df.spot_price,label='Spot Price',marker='+')\n plt.legend()\n plt.title(test_title)\n plt.xlabel('Timestep')\n plt.ylabel('Amount')\n\n plt.show()\n \n return \n\ndef agent_payout_calc(experiments,t, invest_list, initial_supply, C):\n \"\"\"\n For CWU Payout\n \"\"\"\n # print(experiments.agents[t])\n # C = 68100\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3]\n payout_list = []\n no_R_payout_list = []\n # for a in agents_id:\n # # print(experiments.agents[t])\n # q1 = experiments.agents[t].agent_attestations_1[a]\n # q0 = experiments.agents[t].agent_attestations_0[a]\n # s_free = experiments.agents[t].agent_supply_free[a]\n for a in agents_id:\n # print(experiments.agents[t])\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n # print(a)\n # print('s_free', s_free)\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n # print('s ', s)\n # print(\"s_free \", s_free)\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n\n S = experiments.supply[t] - initial_supply # subtract initial amount No longer applicable, but can enter 0 and is unaffected\n # print(S)\n alpha = experiments.alpha[t]\n # TEMP TO SHOW A POINT\n alpha = 1\n # TEMP TO SHOW A POINT\n if alpha < 0.4:\n alpha = 0\n elif alpha >= 0.4:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n # T2 = (s1/(S-S_0))*alpha*(C+R)\n # T3 = (s0/(S-S_1))*(1-alpha)*(R)\n\n agent_payout = T1\n payout_list.append(agent_payout)\n arr2d = np.array(payout_list)\n \n no_R_payout = (s_free/S)*(C*alpha)\n no_R_payout_list.append(no_R_payout)\n arr2d_no_R = np.array(no_R_payout_list)\n # print(no_R_payout)\n arr1d = arr2d.flatten()\n arr1d_no_R = arr2d_no_R.flatten()\n # print(arr1d_no_R, type(arr1d_no_R))\n S_zero = experiments.supply[0] #- initial_supply # subtract initial amount\n \n hatch_supply = S_zero #* (1 - experiments.alpha[0])\n hatch_payout_no_R = hatch_supply / experiments.supply[t] * C\n arr1d_no_R_with_hatch = np.insert(arr1d_no_R, 0, hatch_payout_no_R)\n hatch_payout = hatch_supply / experiments.supply[t] * (C + experiments.reserve[t])\n\n payouts = arr1d\n payouts_with_hatch = np.insert(arr1d, 0, hatch_payout)\n # print(payouts)\n investment = invest_list.copy()\n hatch = experiments.reserve[0]\n # investment[0] = investment[0] #+ hatch\n investment.insert(0,hatch)\n # print(invest_list)\n # print(np.sum(no_R_payout_list))\n # print(np.sum(no_R_payout_list)+hatch_payout_no_R)\n\n return investment, arr1d_no_R_with_hatch, payouts_with_hatch\n\ndef agent_payout_plot(experiments,t, invest_list, initial_supply, C):\n \"\"\"\n For Plotting CWU Payout\n \"\"\"\n investment, arr1d_no_R_with_hatch, payouts_with_hatch = agent_payout_calc(experiments,t, invest_list, initial_supply, C)\n # x = agents_id\n x = [0,1,2,3,4]\n x_pos = [i for i, _ in enumerate(x)]\n x_pos2 = [i+0.25 for i, _ in enumerate(x)]\n x_pos3 = [i-0.25 for i, _ in enumerate(x)]\n \n fig = plt.figure(figsize=(15, 10))\n \n plt.bar(x_pos3, investment, color='red', width=0.25)\n plt.bar(x_pos, arr1d_no_R_with_hatch, color='blue', 
width=0.25)\n plt.bar(x_pos2,payouts_with_hatch, color='green', width=0.25)\n\n plt.legend(['Invested', 'Outcome Share', 'Outcome + Reserve Share'])\n\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Payout amount\")\n plt.title(\"Agents Spend and Return\")\n plt.xticks(x_pos, ['Hatch', '0', '1', '2', '3'])\n\n # plt.xticks(x_pos, x)\n # plt.xlabel(['Hatch', '0', '1', '2', '3'])\n return plt.show()\n\ndef summary_table(experiments,t, invest_list, initial_supply, C):\n \n investment, arr1d_no_R_with_hatch, payouts_with_hatch = agent_payout_calc(experiments,t, invest_list, initial_supply, C)\n agent_ids = ['Hatch', '0', '1', '2', '3']\n # print(invest_list)\n results_table=pd.DataFrame(index = agent_ids)\n results_table.index.name = 'Agent ID'\n results_table['Investment'] = investment\n results_table['Return'] = arr1d_no_R_with_hatch\n results_table['ROI %'] = 100 * (results_table.Return - results_table.Investment) / results_table.Investment\n \n return results_table.round(1) \n \ndef agent_ROI(experiments,t):\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9]\n payout_list = []\n roi = []\n for a in agents_id:\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n S = experiments.supply[t]\n C = 300000000 \n alpha = experiments.alpha[t]\n if alpha < 0.5:\n alpha = 0\n elif alpha > 0.5:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n T2 = (s1/(S-S_0))*alpha*(C+R)\n T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1+T2+T3\n payout_list.append(agent_payout)\n #roi.append(((s_free / S_free) * (C + R) - 14000000) / 14000000)\n roi_0 = (s_free * experiments.spot_price.values.mean()) - (14000000 * experiments.spot_price.values[0]) / 14000000 \n roi.append(roi_0 * 100)\n\n arr2d = np.array(roi)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n return_on_investments = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, return_on_investments, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"ROI\")\n plt.title(\"Agent and their ROI\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n\n \n# def agent_profit(experiments,t):\n# S_free = experiments.supply_free[t]\n# S_0 = experiments.supply_0[t]\n# S_1 = experiments.supply_1[t]\n# agents_id = [0,1,2,3,4,5,6,7,9]\n# payout_list = []\n# profits = []\n# for a in agents_id:\n# q1 = experiments.agents[t].agent_attestations_1[a]\n# q0 = experiments.agents[t].agent_attestations_0[a]\n# s_free = experiments.agents[t].agent_supply_free[a]\n# s1 = experiments.agents[t].agent_supply_1[a]\n# s0 = experiments.agents[t].agent_supply_0[a]\n# s = s_free + s1 + s0\n# agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n# Q0 = experiments.attestations_0[t]\n# Q1 = 1 \n# R = experiments.reserve[t]\n# S = experiments.supply[t]\n# C = 300000000 \n# alpha = experiments.alpha[t]\n# if alpha < 0.5:\n# alpha = 0\n# elif alpha > 0.5:\n# alpha = 1\n# T1 = (s_free/S)*(C*alpha + R)\n# T2 = (s1/(S-S_0))*alpha*(C+R)\n# T3 = (s0/(S-S_1))*(1-alpha)*(R)\n# agent_payout = T1+T2+T3\n# payout_list.append(agent_payout)\n# profits.append(agent_payout - 14000000)\n\n# arr2d = 
np.array(profits)\n\n# arr1d = arr2d.flatten()\n\n# x = agents_id\n# profit = arr1d\n\n# x_pos = [i for i, _ in enumerate(x)]\n\n# fig = plt.figure(figsize=(15, 10))\n# plt.bar(x_pos, profit, color='green')\n# plt.xlabel(\"Agent ID\")\n# plt.ylabel(\"Profit\")\n# plt.title(\"Agents and their Profits\")\n\n# plt.xticks(x_pos, x)\n\n# plt.show()\n\ndef agent_profit(experiments,t):\n S_free = experiments.supply_free[t]\n S_0 = experiments.supply_0[t]\n S_1 = experiments.supply_1[t]\n agents_id = [0,1,2,3,4,5,6,7,8,9]\n payout_list = []\n profits = []\n for a in agents_id:\n q1 = experiments.agents[t].agent_attestations_1[a]\n q0 = experiments.agents[t].agent_attestations_0[a]\n s_free = experiments.agents[t].agent_supply_free[a]\n s1 = experiments.agents[t].agent_supply_1[a]\n s0 = experiments.agents[t].agent_supply_0[a]\n s = s_free + s1 + s0\n agent_private_alpha = experiments.agents[t].agent_private_alpha[a]\n Q0 = experiments.attestations_0[t]\n Q1 = 1 \n R = experiments.reserve[t]\n S = experiments.supply[t] - 1000000 # subtract initial amount\n C = 300000000 \n alpha = experiments.alpha[t]\n if alpha < 0.4:\n alpha = 0\n elif alpha > 0.4:\n alpha = 1\n T1 = (s_free/S)*(C*alpha + R)\n # T2 = (s1/(S-S_0))*alpha*(C+R)\n # T3 = (s0/(S-S_1))*(1-alpha)*(R)\n agent_payout = T1\n payout_list.append(agent_payout)\n profits.append(agent_payout - 14000000)\n\n arr2d = np.array(profits)\n\n arr1d = arr2d.flatten()\n\n x = agents_id\n profit = arr1d\n\n x_pos = [i for i, _ in enumerate(x)]\n\n fig = plt.figure(figsize=(15, 10))\n plt.bar(x_pos, profit, color='green')\n plt.xlabel(\"Agent ID\")\n plt.ylabel(\"Profit\")\n plt.title(\"Agents and their Profits\")\n\n plt.xticks(x_pos, x)\n\n plt.show()\n", "id": "3872472", "language": "Python", "matching_score": 3.118654251098633, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/utils.py" }, { "content": "import pandas as pd\n\nfrom src.sim.model.sys_params import *\n\n# Set initialization state variables for Attestations\n\nPRICE = 1\nQ = 30000\nQ1 = 100\nQ0 = 30000\nS1 = 100 #0 #100\nS0 = 2000 #0 #30000\nr = 0 # Agent reserve, the amount of fiat tokens an agent starts with\ns = 0\ns1 = 0\ns0 = 0\ns_free = s - (s1+s0)\n\nC = C[0]\n\nr1 = 30000 # reserve of agent 1; represents Tranche 1\nr2 = 10000 # reserve of agent 2; represents Tranche 2\nr3 = 10000 # reserve of agent 3; represents Tranche 3\nr4 = 10000 # reserve of agent 4; represents Tranche 4\n\n#### FIX ALPHA, KAPPA Dependent VERSION \n#### FIX KAPPA, ALPHA Dependent VERSION \n\nPUBLIC_ALPHA = PUBLIC_ALPHA[0] # Enter value in sys_params.py\n\n# ALPHA = ALPHA[0] #### FIX KAPPA, ALPHA Dependent VERSION \n# ALPHA = 0.5 #### FIX ALPHA, KAPPA Dependent VERSION \n\n\n# KAPPA = KAPPA[0] #### FIX KAPPA, ALPHA Dependent VERSION \n######## Just for initalization of variables ##########\n#### Overwritten in configs.py for parameter sweeps with values in sys_params ######\nreserve = 1 # (1-THETA[0])*MONEY_RAISED[0]\n# reserve = 5000 # (1-THETA[0])*MONEY_RAISED[0]\n\nALPHA = PUBLIC_ALPHA * S1 * reserve / (S1 * reserve - S0 * reserve + S0*C) #### FIX KAPPA, ALPHA Dependent VERSION \n\nKAPPA = 1 + (C * ALPHA / reserve) #### FIX ALPHA, KAPPA Dependent VERSION \n\nsupply = KAPPA*(reserve/PRICE)\n# IF P0 = 1 , then Supply should equal Reserve\n# supply = reserve\nsupply_free = supply - (S0 + S1)\ninvariant_V = (supply**KAPPA)/reserve\n# supply = supply * (1 - ALPHA) # maybe the right supply awarded the hatch investor?\n# ALPHA = S1 * reserve / (S1 * reserve - S0 * reserve + S0*C) #### FIX KAPPA, ALPHA 
Dependent VERSION \n\ninvariant_I = reserve + (C*ALPHA)\n\n# ALPHA = 0.5\n# print(\"ALPHA = \", ALPHA)\n#invariant_I / (invariant_I - (C[0]*ALPHA))\n# print(\"KAPPA = \", KAPPA)\n##### Overwritten in configs.py for parameter sweeps with values in sys_params ######\ninvariant_I = reserve + (C*ALPHA)\n\n# Apply Alpha Initialization Ratio\nS1 = ((C * ALPHA) - (reserve * ALPHA)) / (reserve * (1 - ALPHA)) * S0\nprint(S1)\nprint(S0)\n# invariant_I = (KAPPA * C[0]*ALPHA) / (KAPPA - 1)\n\n# invariant_I = KAPPA * reserve # equates as above\n\n########## AGENT INITIALIZATION ##########\nnumber_of_agents = 4\n\nPRIVATE_ALPHA = 0.5\nPRIVATE_PRICE = 0.5\n\n\n\n# Configure agents for agent-based model\nagents_df = pd.DataFrame({\n 'agent_attestations_1': 0,\n 'agent_attestations_0': 0,\n 'agent_reserve': r,\n 'agent_supply_1': s1,\n 'agent_supply_0': s0,\n 'agent_supply_free': s_free,\n 'agent_private_alpha': PRIVATE_ALPHA,\n 'agent_private_price': PRIVATE_PRICE, \n 'agent_private_alpha_signal': 0,\n 'agent_private_price_signal': 0,\n 'agent_public_alpha_signal': 0,\n 'agent_public_price_signal': 0}, index=[0])\nagents_df = pd.concat([agents_df]*number_of_agents, ignore_index=True)\n# Adding IDs to agents\nagents_df.insert(0, 'id', range(0, len(agents_df)))\n\nagents_df['agent_private_alpha'] = 0.5, 0.5, 0.5, 0.5\n# agents_df['agent_private_price'] = 0.5, 0.5, 0.5, 0.5\nagents_df['agent_private_price'] = 10000, 10000, 10000, 10000\nagents_df['agent_reserve'] = 30000, 10000, 10000, 10000\n\n\n########## SYSTEM INITIALIZATION ##########\ninitial_conditions = {\n # Overwritten in configs.py with sys_params value for future parameter sweeps\n 'reserve': reserve,\n 'pbar': PRICE, # kappa*(reserve/supply), price is dR/dS = 1\n 'realized_price': 0,\n 'spot_price': PRICE,\n # 'kappa': 0,\n 'kappa': KAPPA,\n 'supply': supply,\n 'alpha': ALPHA,\n # 'alpha': ALPHA[0],\n\n 'alpha_bar': ALPHA,\n # 'alpha_bar': ALPHA[0],\n \n 'supply_0': S0,\n 'supply_1': S1,\n 'supply_free': supply_free,\n 'attestations': Q,\n 'attestations_0': Q0,\n 'attestations_1': Q1,\n 'invariant_V': invariant_V, # (supply**kappa)/reserve\n 'invariant_I': invariant_I,\n 'agents': agents_df,\n 'chosen_agent': 0,\n 'public_alpha': PUBLIC_ALPHA,\n 'delta_public_alpha' : 0,\n}\n\n\n# print(\"Initial Conditions (config.py) : \", initial_conditions)\n", "id": "11444535", "language": "Python", "matching_score": 7.0544514656066895, "max_stars_count": 31, "path": "Pilot/src/sim/model/state_variables.py" }, { "content": "import pandas as pd\n\nfrom src.sim.model.sys_params import *\n\n# Set initialization state variables for Attestations\n\nPRICE = 1\nQ = 30000\nQ1 = 100\nQ0 = 30000\nS1 = 100\nS0 = 30000\nr = 100 # Agent reserve, the amount of fiat tokens an agent starts with\ns = 0\ns1 = 0\ns0 = 0\ns_free = s - (s1+s0)\n\n\n######## Just for initalization of variables ##########\n#### Overwritten in configs.py for parameter sweeps with values in sys_params ######\nreserve = (1-THETA[0])*MONEY_RAISED[0]\nsupply = KAPPA[0]*(reserve/PRICE)\nsupply_free = supply - (S0 + S1)\ninvariant_V = (supply**KAPPA[0])/reserve\ninvariant_I = reserve + (C[0]*ALPHA[0])\n\nALPHA = S1 * reserve / (S1 * reserve - S0 * reserve + S0*C[0])\n# print(\"ALPHA = \", ALPHA)\nKAPPA = invariant_I / (invariant_I - (C[0]*ALPHA))\n# print(\"KAPPA = \", KAPPA)\n##### Overwritten in configs.py for parameter sweeps with values in sys_params ######\n\n\n########## AGENT INITIALIZATION ##########\nnumber_of_agents = 2\n\nPRIVATE_ALPHA = 0.5\nPRIVATE_PRICE = 0.5\n\n# Configure agents for 
agent-based model\nagents_df = pd.DataFrame({\n 'agent_attestations_1': 0,\n 'agent_attestations_0': 0,\n 'agent_reserve': r,\n 'agent_supply_1': s1,\n 'agent_supply_0': s0,\n 'agent_supply_free': s_free,\n 'agent_private_alpha': PRIVATE_ALPHA,\n 'agent_private_price': PRIVATE_PRICE, \n 'agent_private_alpha_signal': 0,\n 'agent_private_price_signal': 0,\n 'agent_public_alpha_signal': 0,\n 'agent_public_price_signal': 0}, index=[0])\nagents_df = pd.concat([agents_df]*number_of_agents, ignore_index=True)\n# Adding IDs to agents\nagents_df.insert(0, 'id', range(0, len(agents_df)))\n\n# 0.6, 0.7, 0.8, 0.9\n# 0.3, 0.4, 0.5, 0.6, 0.7\n# 0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1\nagents_df['agent_private_alpha'] = 0.5, 0.5#, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5\n# 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5\n# 0.5, 0.9, 1.0, 1.1, 1.5 # 0.2, 2, 3, 4, 6\nagents_df['agent_private_price'] = 0.5, 0.5#, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5\n# 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5\n\n########## SYSTEM INITIALIZATION ##########\ninitial_conditions = {\n # Overwritten in configs.py with sys_params value for future parameter sweeps\n 'reserve': reserve,\n 'pbar': PRICE, # kappa*(reserve/supply), price is dR/dS = 1\n 'realized_price': 0,\n 'spot_price': PRICE,\n # 'kappa': 0,\n 'kappa': 2,\n 'supply': supply,\n 'alpha': ALPHA,\n # 'alpha': ALPHA[0],\n\n 'alpha_bar': ALPHA,\n # 'alpha_bar': ALPHA[0],\n \n 'supply_0': S0,\n 'supply_1': S1,\n 'supply_free': supply_free,\n 'attestations': Q,\n 'attestations_0': Q0,\n 'attestations_1': Q1,\n 'invariant_V': invariant_V, # (supply**kappa)/reserve\n 'invariant_I': invariant_I,\n 'agents': agents_df,\n 'chosen_agent': 0,\n 'public_alpha': 0\n}\n\n\n# print(\"Initial Conditions (config.py) : \", initial_conditions)\n", "id": "8527127", "language": "Python", "matching_score": 1.3635196685791016, "max_stars_count": 31, "path": "src/sim/model/state_variables.py" }, { "content": "# NOT USING THIS ATM\nimport numpy as np\n\nE = # epsilon for convex combination\nC = # payout commitment\n\n\ndef kappa(deltaR, R, S, V, I, alpha):\n # if bond/burn and attest\n if deltaR != 0 & & deltaAlpha != 0:\n kappa = # ???\n # if only bond/burn\n elif deltaR != 0:\n kappa = (numpy.log(V*R))/(numpy.log(S))\n # if only attest\n elif deltaAlpha != 0:\n kappa = I/(I-C*alpha)\n # if no action\n else:\n kappa = (numpy.log(V*R))/(numpy.log(S))\n # check if (numpy.log(V*R))/(numpy.log(S)) == I/(I-C*alpha)\n return kappa\n\n\ndef invariant_V(R, S, kappa):\n return (S**kappa)/R\n\n\ndef invariant_I(deltaR, R, alpha):\n # if bond/burn and attest\n if deltaR != 0 & & deltaAlpha != 0:\n I = R+(C*alpha)\n # if only bond/burn\n elif deltaR != 0:\n I = R+(C*alpha)\n # if only attest\n elif deltaAlpha != 0:\n I = (C*alpha)/(1-(1/kappa))\n # check if R+(C*alpha) == (C*alpha)/(1-(1/kappa))\n # if no action\n else:\n I = R+(C*alpha)\n return I\n\n\ndef reserve(deltaR, S, V, I, alpha, kappa):\n # if bond/burn and attest\n if deltaR != 0:\n R = (S**kappa)/V\n # if only attest or no action\n else:\n R = I - (C*alpha)\n # check if (I - C*alpha) == (S**kappa)/V\n return R\n\n\ndef supply(R, V, kappa):\n return (V*R)**(1/kappa)\n\n\ndef spot_price(deltaR, deltaS, deltaAlpha, R, S, kappa):\n # if bond/burn and attest\n if deltaR 
!= 0 & & deltaAlpha != 0:\n SP = kappa*(R/S)\n # if only bond/burn\n elif deltaR != 0:\n SP = deltaR/deltaS\n # if only attest\n elif deltaAlpha != 0:\n SP = kappa*(R/S)\n # if no action\n else:\n SP = kappa*(R/S)\n return SP\n\n\ndef spot_alpha(deltaS, R, S1, S0, alpha):\n # if attest\n if deltaS != 0:\n alpha_bar = alpha_bar(deltaS, R, C) # define alpha_bar\n SA = E*(alpha) + (1-E)*(alpha)*((S1+S0)/(S1+S0+deltaS)) + \\\n (alpha_bar)*(deltaS/(S1+S0+deltaS))\n # if no attest\n else:\n SA = alpha\n return SA\n\n\ndef alpha_bar(deltas, deltaq1, R, C, Q1, q1, s1, s): # what about Q0, S0, and attest_negative?\n A = (1/(Q1*(Q1+deltaq1))) * \\\n ((q1*(Q1*deltas) - (deltaq1*s)) + deltaq1*((Q1*s1) + (Q1*deltas)))\n alpha_bar = (deltas*R)/(A*(C+R) - (deltas*C))\n return alpha_bar\n\n\ndef bond(deltaR, R, S):\n kappa = # ???\n V = invariant_V(R, S, kappa)\n deltaAlpha = 0\n deltaS = (V*(R+deltaR))**(1/kappa)-S\n # or is it deltaR/deltaS?\n realized_price = spot_price(deltaR, deltaS, deltaAlpha, R, S, kappa)\n return deltaS, realized_price\n\n\ndef burn(deltaS, R, S):\n kappa = # ???\n V = (S**(kappa))/R\n deltaAlpha = 0\n deltaR = R-((S-deltaS)**kappa)/V\n # or is it deltaR/deltaS?\n realized_price = spot_price(deltaR, deltaS, deltaAlpha, R, S, kappa)\n return deltaR, realized_price\n", "id": "11169769", "language": "Python", "matching_score": 3.981783866882324, "max_stars_count": 31, "path": "src/sim/model/parts/old/bonding_curve_eq_2.py" }, { "content": "# NOT USING THIS ATM\nimport numpy as np\n\nE = # epsilon for convex combination\nC = # payout commitment\n\n\ndef kappa(R, S, V):\n kappa = (numpy.log(V*R))/(numpy.log(S))\n return kappa\n\n\ndef alpha():\n\n\ndef spot_price():\n\n\ndef spot_alpha():\n\n\ndef alpha_bar():\n\n\ndef invariant_V(R, S, kappa):\n return (S**kappa)/R\n\n\ndef invariant_I(deltaR, R, alpha):\n return I = R+(C*alpha)\n\n\ndef reserve():\n # if last action was bond/burn\n # if last action was attest\n\n\ndef supply(R, V, kappa):\n return (V*R)**(1/kappa)\n\n\ndef supply_0():\n\n\ndef supply_1():\n\n\ndef attestations_1():\n\n\ndef attestations_0():\n\n\ndef bond():\n\n\ndef burn():\n\n\ndef attest_pos():\n\n\ndef attest_neg():\n", "id": "10652146", "language": "Python", "matching_score": 2.063310384750366, "max_stars_count": 31, "path": "src/sim/model/parts/old/bonding_curve_eq_3.py" }, { "content": "# import numpy as np\n#\n# # default_kappa = kappa(R, S, V0)\n# default_exit_tax = .02\n#\n# #value function for a given state (R,S)\n# def invariant_V0(R,S,kappa=default_kappa):\n# return (S**kappa)/R\n#\n# ########### (replace alpha -> omega?) ##############\n# #value function for a given state (C, alpha)\n# def invariant_I0(C, alpha, kappa=default_kappa):\n# return (C*alpha*kappa)/(kappa-1)\n#\n# #given a value function (parameterized by kappa)\n# #and an invariant coeficient V0\n# #return Supply S as a function of reserve R\n# def reserve(S, V0, kappa=default_kappa):\n# return (S**kappa)/V0\n#\n# #given a value function (parameterized by kappa)\n# #and an invariant coeficient V0\n# #return Supply S as a function of reserve R\n# def supply(R, V0, kappa=default_kappa):\n# return (V0*R)**(1/kappa)\n#\n# ######### (variable name. kappa -> spot_kappa?) 
##########\n# #given a state (R,S)\n# #and an invariant constant V0\n# #return kappa as a function of R, S, and V0\n# def kappa(R, S, V0):\n# return (numpy.log(V0*R))/(numpy.log(S))\n#\n# #given a value function (parameterized by kappa)\n# #and an invariant coeficient V0\n# #return a spot price P as a function of reserve R\n# def spot_price(R, V0, kappa=default_kappa):\n# return kappa*R**((kappa-1)/kappa)/V0**(1/kappa)\n#\n# #####(since deltaS is not used to calculate alpha, alpha does not change when more tokens are bonded. #######\n# #given a value function (parameterized by kappa)\n# #and an invariant coeficient I0\n# #return a spot alpha as a function of reserve R\n# def spot_alpha(S, I0, kappa=default_kappa, C):\n# return (I0*(kappa-1))/(C*kappa)\n#\n# #for a given state (R,S)\n# #given a value function (parameterized by kappa)\n# #and an invariant coeficient V0\n# #deposit deltaR to Mint deltaS\n# #with realized price deltaR/deltaS\n# def mint(deltaR, R, S, V0, kappa=default_kappa):\n# deltaS = (V0*(R+deltaR))**(1/kappa)-S\n# if deltaS == 0:\n# realized_price = spot_price(R+deltaR, V0, kappa)\n# else:\n# realized_price = deltaR/deltaS\n# return deltaS, realized_price\n#\n# #for a given state (R,S)\n# #given a value function (parameterized by kappa)\n# #and an invariant coefficient V0\n# #burn deltaS to Withdraw deltaR\n# #with realized price deltaR/deltaS\n# def withdraw(deltaS, R, S, V0, kappa=default_kappa):\n# deltaR = R-((S-deltaS)**kappa)/V0\n# if deltaS == 0:\n# realized_price = spot_price(R+deltaR, V0, kappa)\n# else:\n# realized_price = deltaR/deltaS\n# return deltaR, realized_price\n#\n# #for a given state (kappa)\n# #and an invariant coefficient I0\n# #bond deltaS_1 to obtain deltaQ_1\n# #with realized alpha as a function of I0, C, and kappa\n# def attest_pos(deltaS1, S1, C, I0, kappa):\n# deltaQ1 = (I0*(S1 + deltaS1))**(1/alpha)-Q1\n# if deltaQ1 == 0:\n# realized_alpha = spot_alpha(S1 + deltaS1, I0, kappa, C)\n# else:\n# realized_alpha = deltaS1/deltaQ1\n# return deltaQ1, realized_alpha\n#\n# #for a given state (kappa)\n# #and an invariant coefficient I0\n# #bond deltaS_0 to obtain deltaQ_0\n# #with realized alpha as a function of I0, C, and kappa\n# def attest_neg(deltaS0, S0, C, I0, kappa):\n# deltaQ0 = (I0*(S0 + deltaS0))**(1/alpha)-Q0\n# if deltaQ0 == 0:\n# realized_alpha = spot_alpha(S0 + deltaS0, I0, kappa, C)\n# else:\n# realized_alpha = deltaS0/deltaQ0\n# return deltaQ0, realized_alpha\n#\n# def withdraw_with_tax(deltaS, R,S, V0, exit_tax = default_exit_tax, kappa=default_kappa):\n# deltaR = R-((S-deltaS)**kappa)/V0\n# #print(deltaR)\n# quantity_taxed = exit_tax*deltaR\n# quantity_recieved = (1-exit_tax)*deltaR\n#\n# realized_price = quantity_recieved/deltaS\n#\n# return quantity_recieved, quantity_taxed, realized_price", "id": "4843658", "language": "Python", "matching_score": 1.308564305305481, "max_stars_count": 31, "path": "src/sim/model/parts/old/bonding_curve_eq_5.py" }, { "content": "# TODO: imports\nimport random\nimport math\n# f = 0.03 # param to control certainty of alpha at extremes\n# m = 0.15 # param to modulate curvature of alpha threshold band\n\n\ndef set_action(params, substep, state_history, prev_state):\n # params = params[0]\n # pprint(params)\n # print('Choose Action')\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n I = prev_state['invariant_I']\n P = prev_state['spot_price']\n private_price = prev_state['chosen_agent']['agent_private_price']\n private_alpha = 
prev_state['chosen_agent']['agent_private_alpha']\n S1 = prev_state['supply_1']\n S0 = prev_state['supply_0']\n r = prev_state['chosen_agent']['agent_reserve']\n # print(\"AGENT RESERVE = \", r)\n # s = prev_state['chosen_agent']['agent_supply']\n # this model contains only the notion of s_free. Agent supply is implicit\n s_free = prev_state['chosen_agent']['agent_supply_free']\n Q1 = prev_state['attestations_1']\n Q0 = prev_state['attestations_0']\n start_kappa = params['starting_kappa']\n start_alpha = params['starting_alpha']\n alpha = prev_state['alpha']\n kappa = prev_state['kappa']\n f = params['f']\n m = params['m']\n dust = params['dust']\n beta = params['beta']\n period = params['period']\n tau = 0 # 1.2*private_price\n\n # print('r', r)\n\n # print('P', P, type(P))\n # print('R', R, type(R))\n # print('private_price', private_price, type(private_price))\n # print('s_free', s_free, type(s_free))\n # print('private_alpha', private_alpha, type(private_alpha))\n # print('alpha', alpha, type(alpha))\n # new_private_price is obtained from update_private_price() function in private_beliefs\n\n # USING ARMIJO RULE\n if P > (private_price + tau) and s_free > 0 and R > 0:\n mech_bc = 'burn'\n \n #amt_to_bond = 0\n #amt_to_burn = s_free*(random.randint(85, 90)/100)\n\n deltaS = s_free*(1-dust)\n deltaR = R-((S-deltaS)**kappa)/V\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n# \n while protoRP < private_price:\n deltaS = beta*deltaS\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n \n if protoRP < dust:\n break\n# \n RP = protoRP\n #print(\"PROTO RP (BURN) = \", protoRP)\n amt_to_burn = deltaS\n amt_to_bond = 0\n\n # amt_to_bond = 0\n # amt_to_burn = s_free*(random.randint(85, 90)/100)\n\n # max_burn = s_free*(1-dust)\n # print(\"s_free = \", s_free, \"| RAND = \", (random.randint(85, 90)/100))\n # amt_to_burn = amt*beta <-- send to iteration 2 of amt_to_burn calculation\n # print(\"Agent burns. Amt to burn = \", amt_to_burn)\n\n elif P < (private_price - tau) and r > 0 and S > 0:\n mech_bc = 'bond'\n\n #amt_to_bond = r*(random.randint(85, 90)/100)\n #amt_to_burn = 0\n\n deltaR = r*(1-dust)\n deltaS = (V*(R+deltaR))**(1/kappa)-S\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n# \n while protoRP > private_price:\n deltaR = beta*deltaR\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n \n if protoRP < dust:\n break\n# \n RP = protoRP\n #print(\"PROTO RP (BOND) = \", protoRP)\n amt_to_bond = deltaR\n amt_to_burn = 0\n\n # max_bond = r*(1-dust)\n # amt_to_burn = max_bond\n # amt_to_bond = r*(random.randint(85, 90)/100)\n # print(\"r =\", r, \"| RAND = \", (random.randint(85, 90)/100))\n # amt_to_burn = 0\n # print(\"Agent bonds. Amt to bond = \", amt_to_bond)\n\n else:\n # don't trade\n mech_bc = None\n amt_to_bond = 0\n amt_to_burn = 0\n # print(\"No trade. P = \", P, \"private_price = \", private_price)\n\n # print('alpha ', alpha)\n # print('alpha ', type(alpha), ' private_alpha ', type(private_alpha), ' s_free ', type(s_free))\n if alpha > private_alpha and s_free > 0:\n mech_pm = 'attest_neg'\n # print(\"Negative attestation. 
| alpha = \",\n # alpha, \"private_alpha = \", private_alpha)\n\n # Agent's choice of delta s\n amt_pos = 0\n\n # Heuristic 1: Random choice between 0-50% of agent supply\n # amt_neg = (random.randint(0, 50)/100)*s_free\n\n # Heuristic 2: Variable bandwidth threshold on alpha - private_alpha\n a = abs(alpha - private_alpha)\n d = 4*m*(1-a)*(a)\n g1 = d + (1-d-f)*a + f\n g0 = (1-d-f)*a\n amt_neg = random.uniform(g0, g1)*s_free\n # print(\"amt_neg = \", amt_neg)\n\n # Compute number of claims\n A = math.sqrt(1+((amt_pos+amt_neg)/S))\n amt_Q1 = 0\n amt_Q0 = Q0*(A-1)\n # print(\"amt_Q0 = \", amt_Q0)\n\n # amt_Q0 = alpha - private_alpha # units\n # amt_neg = amt_Q0 # delta_s to S0\n # amt_pos = 0 # delta_s to S1\n # S0 = S0 + amt_neg\n # Q0 = Q0 + amt_Q0\n\n elif alpha < private_alpha and s_free > 0:\n mech_pm = 'attest_pos'\n # print(\"Positive attestation. | alpha = \",\n # alpha, \"private_alpha = \", private_alpha)\n\n # Agent's choice of delta s\n # Heuristic 1: Random choice between 0-50% of agent supply\n # amt_pos = (random.randint(0, 50)/100)*s_free\n\n # Heuristic 2: Variable bandwidth threshold on alpha - private_alpha\n a = abs(alpha - private_alpha)\n d = 4*m*(1-a)*(a)\n g1 = d + (1-d-f)*a + f\n g0 = (1-d-f)*a\n amt_pos = random.uniform(g0, g1)*s_free\n\n amt_neg = 0\n # print(\"amt_pos = \", amt_pos)\n\n # Compute number of claims\n A = math.sqrt(1+((amt_pos+amt_neg)/S))\n amt_Q1 = Q1*(A-1)\n amt_Q0 = 0\n # print(\"amt_Q1 = \", amt_Q1)\n\n elif s_free <= 0:\n mech_pm = 'None'\n amt_pos = 0\n amt_neg = 0\n amt_Q1 = 0\n amt_Q0 = 0\n # print(\"Agent supply too low. Cannot attest\")\n\n else:\n # don't attest\n mech_pm = None\n amt_Q1 = 0\n amt_Q0 = 0\n amt_pos = 0\n amt_neg = 0\n # print(\"No attestation. alpha = \", alpha,\n # \"private_alpha = \", private_alpha, \"s_free = \", s_free)\n\n # action['posterior'] = {'S': S, 'R': R, 'P': P, 'S1': S0, 'S1': S1,\n # 'Q0': Q0, 'Q1': Q1, 'kappa': kappa, 'alpha': alpha, 'I': I, 'V': V}\n\n return {\n 'mech_bc': mech_bc,\n 'mech_pm': mech_pm,\n 'amt_to_bond': amt_to_bond,\n 'amt_to_burn': amt_to_burn,\n 'amt_Q1': amt_Q1,\n 'amt_Q0': amt_Q0,\n 'amt_pos': amt_pos,\n 'amt_neg': amt_neg\n }\n\n\n\"\"\" # Compute P BAR\n if deltaR == 0:\n Pbar = infty\n elif max_burn == 0 or max_bond == 0:\n Pbar = P\n else:\n if deltaS == 0:\n RP =\n else:\n RP = deltaR/deltaS\n\n Pbar = RP \"\"\"\n\n\n# new_private_price is obtained from update_private_price() function in private_beliefs\n\"\"\" if P > private_price and s_free > 0 and R > 0:\n mech_bc = 'burn' # burn deltaS to get deltaR.\n # print(\"Agent burns. P = \", P, \"| private_price = \", private_price)\n amt_to_bond = 0\n # amt reqd for next state P = current state price belief\n amt_to_burn = (P - private_price) * 0.5 * s_free\n\n elif P < private_price and r > 0 and S > 0:\n mech_bc = 'bond' # bond deltaR to get deltaS\n # print(\"Agent bonds. 
P = \", P, \"| private_price = \", private_price)\n amt_to_bond = (private_price - P) * 0.5 * r # units\n amt_to_burn = 0 \"\"\"\n", "id": "7833422", "language": "Python", "matching_score": 5.768587589263916, "max_stars_count": 31, "path": "src/sim/model/parts/choose_action.py" }, { "content": "import random\nimport math\n\ndef set_bond_action(params, substep, state_history, prev_state):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n I = prev_state['invariant_I']\n P = prev_state['spot_price']\n private_price = prev_state['chosen_agent']['agent_private_price']\n private_alpha = prev_state['chosen_agent']['agent_private_alpha']\n S1 = prev_state['supply_1']\n S0 = prev_state['supply_0']\n r = prev_state['chosen_agent']['agent_reserve']\n # print(\"AGENT RESERVE = \", r)\n # s = prev_state['chosen_agent']['agent_supply']\n # this model contains only the notion of s_free. Agent supply is implicit\n s_free = prev_state['chosen_agent']['agent_supply_free']\n Q1 = prev_state['attestations_1']\n Q0 = prev_state['attestations_0']\n start_kappa = params['starting_kappa']\n start_alpha = params['starting_alpha']\n alpha = prev_state['alpha']\n kappa = prev_state['kappa']\n f = params['f']\n m = params['m']\n dust = params['dust']\n beta = params['beta']\n period = params['period']\n tau = 0 # 1.2*private_price\n\n amt_to_burn = 0\n # amt_to_bond = 0\n\n mech_bc = 'bond'\n\n deltaR = r*(1-dust)\n\n amt_to_bond = deltaR\n amt_to_burn = 0\n\n mech_pm = None\n amt_Q1 = 0\n amt_Q0 = 0\n amt_pos = 0\n amt_neg = 0\n\n return {\n 'mech_bc': mech_bc,\n 'mech_pm': mech_pm,\n 'amt_to_bond': amt_to_bond,\n 'amt_to_burn': amt_to_burn,\n 'amt_Q1': amt_Q1,\n 'amt_Q0': amt_Q0,\n 'amt_pos': amt_pos,\n 'amt_neg': amt_neg\n }\n\n\ndef set_action(params, substep, state_history, prev_state):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n I = prev_state['invariant_I']\n P = prev_state['spot_price']\n private_price = prev_state['chosen_agent']['agent_private_price']\n private_alpha = prev_state['chosen_agent']['agent_private_alpha']\n S1 = prev_state['supply_1']\n S0 = prev_state['supply_0']\n r = prev_state['chosen_agent']['agent_reserve']\n # print(\"AGENT RESERVE = \", r)\n # s = prev_state['chosen_agent']['agent_supply']\n # this model contains only the notion of s_free. 
Agent supply is implicit\n s_free = prev_state['chosen_agent']['agent_supply_free']\n Q1 = prev_state['attestations_1']\n Q0 = prev_state['attestations_0']\n start_kappa = params['starting_kappa']\n start_alpha = params['starting_alpha']\n alpha = prev_state['alpha']\n kappa = prev_state['kappa']\n f = params['f']\n m = params['m']\n dust = params['dust']\n beta = params['beta']\n period = params['period']\n tau = 0 # 1.2*private_price\n\n # USING ARMIJO RULE\n if P > (private_price + tau) and s_free > 0 and R > 0:\n mech_bc = 'burn'\n\n\n deltaS = s_free*(1-dust)\n deltaR = R-((S-deltaS)**kappa)/V\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n# \n while protoRP < private_price:\n deltaS = beta*deltaS\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n \n if protoRP < dust:\n break\n# \n RP = protoRP\n amt_to_burn = deltaS\n amt_to_bond = 0\n\n\n elif P < (private_price - tau) and r > 0 and S > 0:\n mech_bc = 'bond'\n\n deltaR = r*(1-dust)\n deltaS = (V*(R+deltaR))**(1/kappa)-S\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n# \n while protoRP > private_price:\n deltaR = beta*deltaR\n# \n if deltaS == 0:\n protoRP = kappa*R**((kappa-1)/kappa)/V**(1/kappa)\n else:\n protoRP = deltaR/deltaS\n \n if protoRP < dust:\n break\n# \n RP = protoRP\n #print(\"PROTO RP (BOND) = \", protoRP)\n amt_to_bond = deltaR\n amt_to_burn = 0\n\n\n else:\n # don't trade\n mech_bc = None\n amt_to_bond = 0\n amt_to_burn = 0\n\n if alpha > private_alpha and s_free > 0:\n mech_pm = 'attest_neg'\n\n # Agent's choice of delta s\n amt_pos = 0\n\n\n # Heuristic 2: Variable bandwidth threshold on alpha - private_alpha\n a = abs(alpha - private_alpha)\n d = 4*m*(1-a)*(a)\n g1 = d + (1-d-f)*a + f\n g0 = (1-d-f)*a\n amt_neg = random.uniform(g0, g1)*s_free\n # print(\"amt_neg = \", amt_neg)\n\n # Compute number of claims\n A = math.sqrt(1+((amt_pos+amt_neg)/S))\n amt_Q1 = 0\n amt_Q0 = Q0*(A-1)\n\n\n elif alpha < private_alpha and s_free > 0:\n mech_pm = 'attest_pos'\n\n\n # Heuristic 2: Variable bandwidth threshold on alpha - private_alpha\n a = abs(alpha - private_alpha)\n d = 4*m*(1-a)*(a)\n g1 = d + (1-d-f)*a + f\n g0 = (1-d-f)*a\n amt_pos = random.uniform(g0, g1)*s_free\n\n amt_neg = 0\n\n # Compute number of claims\n A = math.sqrt(1+((amt_pos+amt_neg)/S))\n amt_Q1 = Q1*(A-1)\n amt_Q0 = 0\n\n elif s_free <= 0:\n mech_pm = 'None'\n amt_pos = 0\n amt_neg = 0\n amt_Q1 = 0\n amt_Q0 = 0\n\n else:\n # don't attest\n mech_pm = None\n amt_Q1 = 0\n amt_Q0 = 0\n amt_pos = 0\n amt_neg = 0\n\n\n return {\n 'mech_bc': mech_bc,\n 'mech_pm': mech_pm,\n 'amt_to_bond': amt_to_bond,\n 'amt_to_burn': amt_to_burn,\n 'amt_Q1': amt_Q1,\n 'amt_Q0': amt_Q0,\n 'amt_pos': amt_pos,\n 'amt_neg': amt_neg\n }\n\n\n", "id": "10231068", "language": "Python", "matching_score": 3.1733434200286865, "max_stars_count": 31, "path": "Code_With_Us/src/sim/model/parts/choose_action.py" }, { "content": "def getInputPrice(input_amount, input_reserve, output_reserve, params):\n fee_numerator = params['fee_numerator']\n fee_denominator = params['fee_denominator']\n input_amount_with_fee = input_amount * fee_numerator\n numerator = input_amount_with_fee * output_reserve\n denominator = (input_reserve * fee_denominator) + input_amount_with_fee\n return int(numerator // denominator)\n\n# For buying, not needed right now\ndef supply_tokens_added(params, substep, state_history, prev_state, 
policy_input):\n # params = params[0]\n timestep = prev_state['timestep']\n \n if params['ENABLE_BURN']:\n return ('UNI_supply', 0)\n else:\n tokens_sold = int(policy_input['amt_to_burn']) #amount of ETH being sold by the user\n token_supply = int(prev_state['UNI_supply'])\n return ('UNI_supply', token_supply + tokens_sold)\n\n\ndef reserve_redeemed(params, substep, state_history, prev_state, policy_input):\n # params = params[0]\n if params['ENABLE_BURN']:\n return ('UNI_reserve', 0)\n else:\n tokens_sold = int(policy_input['amt_to_burn']) #amount of tokens being sold by the user\n uni_reserve = int(prev_state['UNI_reserve'])\n if tokens_sold == 0:\n return ('UNI_reserve', uni_reserve)\n else:\n token_supply = int(prev_state['UNI_supply'])\n reserve_redeemed = int(getInputPrice(tokens_sold, token_supply, uni_reserve, params))\n return ('UNI_reserve', uni_reserve - reserve_redeemed)\n \n\n\ndef compute_r(R, S, V, kappa, r, deltaS, policy_input):\n if V == 0:\n print(\"V IS ZERO\")\n else:\n deltar = R-((S-deltaS)**kappa)/V\n\n r = r - policy_input['amt_to_bond'] + deltar\n return r\n\n\ndef compute_s_free(R, S, V, kappa, s_free, deltaR, policy_input, timestep):\n\n deltas = (V*(R+deltaR))**(1/kappa)-S\n\n s_free = s_free + deltas - policy_input['amt_to_burn']\n\n # TEST RANDOM DROP\n if timestep % 20 == 0:\n random_drop = 10\n else:\n random_drop = 0\n\n s_free = s_free + random_drop\n\n return s_free\n\n\ndef reserve_redeemed_to_agent(params, substep, state_history, prev_state, policy_input):\n \"\"\"\n If uniswap instance is permitted, update reserve redeemed to agent here from burning their tokens\n Burn (supply) is already computed in the bondburn, because the tokens to burn would be the same regardless of where they are burned (assumed price and fees equal)\n \"\"\"\n # params = params[0]\n agent = prev_state['chosen_agent']\n if params['ENABLE_BURN']:\n return 'chosen_agent', agent\n else:\n tokens_sold = int(policy_input['amt_to_burn']) #amount of tokens being sold by the user\n uni_reserve = int(prev_state['UNI_reserve'])\n if tokens_sold == 0:\n return 'chosen_agent', agent\n else:\n token_supply = int(prev_state['UNI_supply'])\n reserve_redeemed = int(getInputPrice(tokens_sold, token_supply, uni_reserve, params))\n r = agent['agent_reserve']\n agent['agent_reserve'] = r + reserve_redeemed\n return 'chosen_agent', agent\n\n", "id": "12293172", "language": "Python", "matching_score": 5.002259731292725, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/uniswap.py" }, { "content": "\n\ndef update_R(params, substep, state_history, prev_state, policy_input):\n # params = params[0]\n # access amt_to_burn using _input['action']['amt_to_burn'] because it's a dict of dicts\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n\n kappa = prev_state['kappa']\n deltaS = policy_input['amt_to_burn']\n\n # print('R check ', (((S)**kappa)/V))\n if V == 0:\n print(\"V IS ZERO\") # degenerate\n else:\n deltaR = R - (((S-deltaS)**kappa)/V)\n \n # print(\"::::delta R::::\", deltaR)\n # print(\"::::AMTBOND::::\", policy_input['amt_to_bond'])\n\n ## Continuous ##\n # Continuous Enabled, newly reserved funds split to bond reserve and project funding\n if params['ENABLE_CONTINUOUS']:\n R = R + policy_input['amt_to_bond']*(1-params['THETA']) # - deltaR all burned funds not tempered by theta\n if params['ENABLE_BURN']:\n R = R - deltaR # for burning allowed (=TRUE) subtract burned funds from reserve\n \n # Continuous Not Enabled, all new reserve funds go to reserve 
the bond\n else:\n if params['ENABLE_BURN']:\n R = R + policy_input['amt_to_bond'] - deltaR # for burning allowed (=TRUE) subtract burned funds from reserve\n else:\n R = R + policy_input['amt_to_bond'] # for burning on bodning curve not allowed, occurs in uniswap\n # print(\"RESERVE = \", R, \" | deltaR = \", deltaR, \" | deltaS = \", deltaS)\n\n return 'reserve', R\n\ndef update_funds(params, substep, state_history, prev_state, policy_input):\n # params = params[0]\n # access amt_to_burn using _input['action']['amt_to_burn'] because it's a dict of dicts\n F = prev_state['funds_from_bond']\n V = prev_state['invariant_V']\n monthly_instalment = policy_input['monthly_instalment']\n \n if V == 0:\n print(\"V IS ZERO\") # degenerate\n else:\n ## Continuous ##\n if params['ENABLE_CONTINUOUS']:\n deltaF = policy_input['amt_to_bond'] * (params['THETA']) \n\n # burn if else\n\n else:\n deltaF = 0\n\n F += deltaF\n F += monthly_instalment\n return 'funds_from_bond', F\n\ndef update_S(params, substep, state_history, prev_state, policy_input):\n # params = params[0]\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n kappa = prev_state['kappa']\n deltaR = policy_input['amt_to_bond']\n\n # print('S check ', ((V*R)**(1/kappa)))\n\n deltaS = (V*(R+deltaR))**(1/kappa) - S\n # S = S - deltaS + policy_input['amt_to_burn']\n # ?????????????????? Backwards ????????????????????\n\n S = S + deltaS - policy_input['amt_to_burn']\n # print(\"SUPPLY = \", S, \" | deltaR = \", deltaR, \" | deltaS = \", deltaS)\n\n return 'supply', S\n\n\ndef update_r(params, substep, state_history, prev_state, policy_input):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n kappa = prev_state['kappa']\n r = prev_state['chosen_agent']['agent_reserve']\n deltaS = policy_input['amt_to_burn']\n\n if V == 0:\n print(\"V IS ZERO\")\n else:\n deltar = R-((S-deltaS)**kappa)/V\n\n r = r - policy_input['amt_to_bond'] + deltar\n\n # print(\"AGENT RESERVE =\", r, \"deltar = \", deltar,\n # \"policy_input['amt_to_bond'] = \", policy_input['amt_to_bond'])\n return 'agent_reserve', r\n\n\ndef update_s_free_bondburn(params, substep, state_history, prev_state, policy_input):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n kappa = prev_state['kappa']\n s_free = prev_state['agent_supply_free']\n deltaR = policy_input['amt_to_bond']\n\n deltas = (V*(R+deltaR))**(1/kappa)-S\n\n s_free = s_free + deltas - policy_input['amt_to_burn']\n\n return 'agent_supply_free', s_free\n\n\ndef compute_r(R, S, V, kappa, r, deltaS, policy_input):\n if V == 0:\n r = policy_input['amt_to_bond']\n else:\n deltar = R-((S-deltaS)**kappa)/V\n r = r - policy_input['amt_to_bond'] + deltar\n return r\n\n\ndef compute_s_free(R, S, V, kappa, s_free, deltaR, policy_input, timestep):\n\n deltas = (V*(R+deltaR))**(1/kappa)-S\n # print(deltas)\n s_free = s_free + deltas - policy_input['amt_to_burn']\n\n # TEST RANDOM DROP\n if timestep % 20 == 0:\n random_drop = 0\n else:\n random_drop = 0\n\n s_free = s_free + random_drop\n\n return s_free\n\n\ndef update_agent_BC(params, substep, state_history, prev_state, policy_input):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n kappa = prev_state['kappa']\n\n agent = prev_state['chosen_agent']\n r = agent['agent_reserve']\n s_free = agent['agent_supply_free']\n\n deltaS = policy_input['amt_to_burn']\n deltaR = policy_input['amt_to_bond']\n\n timestep = prev_state['timestep']\n\n 
agent['agent_reserve'] = compute_r(R, S, V, kappa, r, deltaS, policy_input)\n\n agent['agent_supply_free'] = compute_s_free(\n R, S, V, kappa, s_free, deltaR, policy_input, timestep)\n\n # print(agent['agent_supply_free'])\n return 'chosen_agent', agent\n\n\ndef update_P_bondburn(params, substep, state_history, prev_state, policy_input):\n amt_to_bond = policy_input['amt_to_bond']\n amt_to_burn = policy_input['amt_to_burn']\n kappa = prev_state['kappa']\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n\n if amt_to_bond > 0: # bond\n deltaR = amt_to_bond\n deltaS = (V*(R+deltaR))**(1/kappa)-S\n\n if deltaS == 0:\n P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **\n (1.0/float(kappa)))) # Zero handling\n # return 'spot_price', P\n else:\n P = deltaR/deltaS # deltaR/deltaS\n # return 'spot_price', P\n\n elif amt_to_burn > 0: # burn\n deltaS = amt_to_burn\n deltaR = R - (((S-deltaS)**kappa)/V)\n\n if deltaS == 0:\n P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **\n (1.0/float(kappa)))) # Zero handling\n # return 'spot_price', P\n else:\n P = deltaR/deltaS # deltaR/deltaS\n # return 'spot_price', P\n\n elif amt_to_burn == 0:\n P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **\n (1.0/float(kappa)))) # Zero handling\n\n elif amt_to_bond == 0:\n P = prev_state['spot_price']\n\n else:\n P = amt_to_bond/amt_to_burn\n\n #print(\"PRICE (BOND/BURN): \", P)\n # print(\"SPOT PRICE P (from bondburn update) = \", P)\n return 'spot_price', P\n\n\ndef update_pbar(params, substep, state_history, prev_state, policy_input):\n R = prev_state['reserve']\n S = prev_state['supply']\n V = prev_state['invariant_V']\n kappa = prev_state['kappa']\n deltaS = policy_input['amt_to_burn']\n deltaR = policy_input['amt_to_bond']\n\n if deltaS != 0:\n deltaR = R-((S-deltaS)**kappa)/V\n if deltaR == 0:\n realized_price = prev_state['pbar']\n else:\n realized_price = deltaR/deltaS\n elif deltaR != 0:\n deltaS = (V*(R+deltaR))**(1/kappa)-S\n if deltaS == 0:\n realized_price = prev_state['pbar']\n else:\n realized_price = deltaR/deltaS\n else:\n realized_price = prev_state['pbar']\n\n # print(\"PRICE pbar (from bondburn update) =\", realized_price)\n return 'pbar', realized_price\n\n\ndef update_I_bondburn(params, substep, state_history, prev_state, policy_input):\n # params = params[0]\n R = prev_state['reserve']\n C = params['C']\n alpha = prev_state['alpha']\n deltaR = policy_input['amt_to_bond']\n\n I = (R + deltaR) + (C*alpha)\n #print(\"C =\", C, \"alpha = \", alpha, \"R = \", R, \"deltaR = \", deltaR)\n #print(\"I (from bondburn) =\", I)\n # print(\"--------------------------------------\")\n return 'invariant_I', I\n", "id": "9461594", "language": "Python", "matching_score": 1.7884308099746704, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/bondburn.py" }, { "content": "import pandas as pd\n\n\ndef put_agent_back_to_df(params, substep, state_history, prev_state, policy_input):\n chosen_agent = prev_state['chosen_agent']\n\n# print('the type of idx ' + str(type(chosen_agent_id)) +\n# ' the value is '+str(chosen_agent_id))\n chosen_agent_df = pd.DataFrame(\n chosen_agent, index=[int(chosen_agent['id'])])\n \n # print(\"UPDATED CHOSEN AGENT = \", chosen_agent_df, 'Time ', prev_state['timestep'])\n # print(\"---------END OF TIMESTEP\", prev_state['timestep'], \"-----------\")\n\n # print('Agent after a timestep:::' + chosen_agent_df.to_string())\n agents_df = prev_state['agents']\n agents_df.update(chosen_agent_df)\n\n # print('Agent combined:::' + agents_df.to_string())\n\n return 
\"agents\", agents_df\n", "id": "6129725", "language": "Python", "matching_score": 3.540968418121338, "max_stars_count": 31, "path": "src/sim/model/parts/put_agent_back_to_df.py" }, { "content": "import pandas as pd\n\n\ndef put_agent_back_to_df(params, substep, state_history, prev_state, policy_input):\n chosen_agent = prev_state['chosen_agent']\n\n\n chosen_agent_df = pd.DataFrame(\n chosen_agent, index=[int(chosen_agent['id'])])\n\n agents_df = prev_state['agents']\n agents_df.update(chosen_agent_df)\n\n\n return \"agents\", agents_df\n", "id": "4949428", "language": "Python", "matching_score": 2.4805006980895996, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/put_agent_back_to_df.py" }, { "content": "import pandas as pd\n\n\ndef get_value(value):\n # if value is array, then returns the first item, else returns value\n if isinstance(value, list):\n return value[0]\n\n return value\n\n\ndef choose_agent(params, substep, state_history, prev_state, policy_input):\n # print(prev_state['agents'].tail())\n\n # print(\"--------------TIMESTEP\", prev_state['timestep'], \"--------------\")\n # Randomly sample one agent from all agents\n chosen_agent_df = prev_state['agents'].sample(n=1)\n # print(\"CHOSEN AGENT = \", chosen_agent_df, 'Time ', prev_state['timestep'])\n\n chosen_agent = chosen_agent_df.to_dict('list')\n chosen_agent = {key: get_value(value)\n for key, value in chosen_agent.items()}\n\n timestep = prev_state['timestep']\n\n return ('chosen_agent', chosen_agent)\n", "id": "4848772", "language": "Python", "matching_score": 2.525864601135254, "max_stars_count": 31, "path": "src/sim/model/parts/choose_agent.py" }, { "content": "import pandas as pd\n\n\ndef get_value(value):\n if isinstance(value, list):\n return value[0]\n\n return value\n\n\ndef choose_agent(params, substep, state_history, prev_state, policy_input):\n\n timestep = prev_state['timestep']\n\n #agent = timestep % 10\n \n # print (\"TIMESTEP = \", timestep)\n if timestep >= 0 and timestep < 90:\n agent = 0\n # print(\"AGENT 0 = \", agent)\n elif timestep >= 90 and timestep < 180: \n agent = 1\n # print(\"AGENT 1 = \", agent)\n elif timestep >= 180 and timestep < 270: \n agent = 2\n # print(\"AGENT 2 = \", agent)\n elif timestep >= 270: \n agent = 3\n\n\n\n # # print(\"AGENT 3 = \", agent)\n \n # print (\"TIMESTEP = \", timestep)\n # if timestep < 30:\n # agent = 4\n # # print(\"AGENT 0 = \", agent)\n\n # elif timestep >= 30 and timestep < 180:\n # agent = 0\n # # print(\"AGENT 0 = \", agent)\n\n # elif timestep >= 180 and timestep < 270:\n # agent = 1\n # # print(\"AGENT 0 = \", agent)\n # elif timestep >= 270 and timestep < 360: \n # agent = 2\n # # print(\"AGENT 1 = \", agent)\n # elif timestep >= 360: # and timestep < 360: \n # agent = 3\n # print(\"AGENT 2 = \", agent)\n # elif timestep >= 3600: \n # agent = 3\n # print(\"AGENT 3 = \", agent)\n\n\n # print(\"PREV STATE AGENTS = \", prev_state['agents'])\n #print(\"I LOC AGENT = \", prev_state['agents'].iloc[agent].to_dict())\n\n chosen_agent = prev_state['agents'].iloc[agent].to_dict()\n chosen_agent = {key: get_value(value)\n for key, value in chosen_agent.items()}\n\n############# #####################################\n if agent == 0:\n chosen_agent['agent_reserve'] = chosen_agent['agent_reserve'] #+ 100\n\n return ('chosen_agent', chosen_agent)\n\n# 10 agents as we have it is fine\n# 14 days where each day each participant gets 1xCHF\n# each day the 10 participants all buy tokens on the boding curve with their 1xCHF\n# after 14 days the bond closes 
because the project is over (succeeds)\n# because the project wasn't actually spending any funds, the total amount of reward will in fact be the C + reserve", "id": "6991522", "language": "Python", "matching_score": 5.563309192657471, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/choose_agent.py" }, { "content": "import pandas as pd\n\n\ndef get_value(value):\n if isinstance(value, list):\n return value[0]\n\n return value\n\n\ndef choose_agent(params, substep, state_history, prev_state, policy_input):\n\n timestep = prev_state['timestep']\n\n agent = timestep % 10\n\n chosen_agent = prev_state['agents'].iloc[agent].to_dict()\n chosen_agent = {key: get_value(value)\n for key, value in chosen_agent.items()}\n\n\n\n return ('chosen_agent', chosen_agent)\n\n# 10 agents as we have it is fine\n# 14 days where each day each participant gets 1xCHF\n# each day the 10 participants all buy tokens on the boding curve with their 1xCHF\n# after 14 days the bond closes because the project is over (succeeds)\n# because the project wasn't actually spending any funds, the total amount of reward will in fact be the C + reserve", "id": "9091607", "language": "Python", "matching_score": 0.2015131562948227, "max_stars_count": 31, "path": "Code_With_Us/src/sim/model/parts/choose_agent.py" }, { "content": "# NOT USING THIS ATM\nfrom action import *\n\n\ndef update_kappa(params, step, sL, s, _input):\n\n # if action['mech'] == 'attest_pos' or action['mech'] == 'attest_neg'\n # elif if action['mech'] == 'bond' or action['mech'] == 'burn'\n\n action = _input['action']\n kappa = action['posterior']['kappa']\n\n key = 'spot_kappa'\n value = kappa\n\n return (key, value)\n", "id": "12489027", "language": "Python", "matching_score": 2.5833663940429688, "max_stars_count": 31, "path": "src/sim/model/parts/old/kappa.py" }, { "content": "# NOT USING THIS ATM\nfrom action import *\n\n\ndef update_R(params, step, sL, s, _input):\n\n action = _input['action']\n R = action['posterior']['R']\n\n key = 'reserve'\n value = R\n\n return (key, value)\n", "id": "10892351", "language": "Python", "matching_score": 2.579029083251953, "max_stars_count": 31, "path": "src/sim/model/parts/old/reserve.py" }, { "content": "# NOT USING THIS ATM\nfrom action import *\n\n\ndef update_S(params, step, sL, s, _input):\n\n action = _input['action']\n S = action['posterior']['S']\n\n key = 'supply'\n value = S\n\n return (key, value)\n", "id": "3553806", "language": "Python", "matching_score": 2.2740204334259033, "max_stars_count": 31, "path": "src/sim/model/parts/old/supply.py" }, { "content": "# NOT USING THIS ATM\nfrom action import *\n\n\ndef update_P(params, step, sL, s, _input):\n\n action = _input['action']\n P = action['posterior']['alpha']\n\n key = 'spot_alpha'\n value = alpha\n\n return (key, value)\n", "id": "1050708", "language": "Python", "matching_score": 2.8313379287719727, "max_stars_count": 31, "path": "src/sim/model/parts/old/alpha.py" }, { "content": "# NOT USING THIS ATM\nfrom action import *\n\n\ndef update_P(params, step, sL, s, _input):\n\n action = _input['action']\n P = action['posterior']['P']\n\n key = 'spot_price'\n value = P\n\n return (key, value)\n", "id": "7759766", "language": "Python", "matching_score": 0.3925507664680481, "max_stars_count": 31, "path": "src/sim/model/parts/old/price.py" }, { "content": "# from Model.src.sim.model.partial_state_update_block import partial_state_update_blocks\n# from src.sim import config as sys_model", "id": "9242488", "language": "Python", "matching_score": 
2.131831407546997, "max_stars_count": 31, "path": "Pilot/src/sim/__init__.py" }, { "content": "#import os\n#import sys\n#path = os.path.join(os.path.dirname(__file__), os.pardir)\n#sys.path.append(path)\n\n#from Model.src.sim import config as sys_model\n\n#import os\n#import sys\n#path = os.path.join(os.path.dirname(__file__), os.pardir)\n#sys.path.append(path)\n", "id": "10205109", "language": "Python", "matching_score": 0.9280141592025757, "max_stars_count": 31, "path": "Math_Specification/src_old/__init__.py" }, { "content": "# The following imports NEED to be in the exact order\nfrom cadCAD import configs\n# ADD FOR PRINTING CONFIG\nfrom cadCAD.configuration.utils import *\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n\nfrom src.sim import config\n\nexec_mode = ExecutionMode()\nexec_ctx = ExecutionContext(context=exec_mode.multi_proc)\nsimulation = Executor(exec_context=exec_ctx, configs=configs)\nraw_system_events, tensor_field, session = simulation.execute()\ndf = pd.DataFrame(raw_system_events)\n\n\ndef get_M(k, v):\n if k == 'sim_config':\n k, v = 'M', v['M']\n return k, v\n\n\nconfig_ids = [\n dict(\n get_M(k, v) for k, v in config.__dict__.items() if k in ['simulation_id', 'run_id', 'sim_config']\n ) for config in configs\n]\n\n\n # 4.18 Method MC\ndef run(drop_midsteps=False, df = df):\n # results = df\n # print('config_ids = ', config_ids)\n # sub_dfs = pd.DataFrame(columns= range(max(df.subset)+1))\n \n results = pd.DataFrame()\n sim_ids = list(set([_id['simulation_id'] for _id in config_ids]))\n\n # print(sim_ids)\n sim_dfs = {_id: [] for _id in sim_ids}\n for i, config_id in enumerate(config_ids):\n sim_id, run_id = config_id['simulation_id'], config_id['run_id']\n params = config_id['M']\n result_record = pd.DataFrame.from_records(\n [tuple([i for i in params.values()])], columns=list(params.keys()))\n\n mod_record = {'sim_id': sim_id, 'meta': result_record}\n if sim_id not in sim_id_records:\n sim_id_records.append(sim_id)\n result_records_list.append(mod_record)\n\n sim_id = config_id['simulation_id']\n # print('sim id first loop = ',sim_id)\n\n sub_df = df[df.simulation == config_id['simulation_id']\n ][df.run == config_id['run_id'] + 1]\n sim_dfs[sim_id].append(sub_df)\n # print(sub_df[['simulation', 'run', 'substep', 'timestep']].tail(5))\n # print(sub_df.tail(5))\n\n for sim_id in sim_ids:\n result_record = [\n d for d in result_records_list if d['sim_id'] == sim_id][0]['meta']\n sim_dfs[sim_id] = pd.concat(sim_dfs[sim_id])\n sub_df = sim_dfs[sim_id]\n\n # print('sim id second loop = ',sim_id)\n # keep only last substep of each timestep\n if drop_midsteps:\n max_substep = max(sub_df.substep)\n is_droppable = (sub_df.substep != max_substep) & (\n sub_df.substep != 0)\n sub_df.drop(sub_df[is_droppable].index, inplace=True)\n\n # print(sub_df.head(3))\n # print(sub_df.tail(3))\n result_record['dataset'] = [sub_df]\n results = results.append(result_record)\n # print(sub_df[['simulation', 'run', 'substep', 'timestep']].tail(5))\n\n return results.reset_index()\n", "id": "10143281", "language": "Python", "matching_score": 4.402558326721191, "max_stars_count": 31, "path": "Pilot/src/sim/run.py" }, { "content": "# The following imports NEED to be in the exact order\n\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\nfrom src.sim import config as sys_model\nfrom cadCAD import configs\nimport pandas as pd\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', 
None)\npd.set_option('display.max_colwidth', None)\n\nfrom src.sim import config \n\ndef run(drop_midsteps=True):\n print()\n exec_mode = ExecutionMode()\n local_mode_ctx = ExecutionContext(context=exec_mode.multi_proc)\n runner = Executor(exec_context=local_mode_ctx, configs=configs)\n #results = pd.DataFrame()\n\n # pprint(configs)\n\n raw_system_events, tensor_field, sessions = runner.execute()\n simulation_result = pd.DataFrame(raw_system_events)\n print(type(simulation_result))\n print(tensor_field)\n print()\n return simulation_result.reset_index()\n\n\n# exec_mode = ExecutionMode()\n# exec_ctx = ExecutionContext(context=exec_mode.multi_proc)\n# simulation = Executor(exec_context=exec_ctx, configs=configs)\n# raw_system_events, tensor_field, session = simulation.execute()\n# df = pd.DataFrame(raw_system_events)\n# print(simulation_result)\n# print(f\"Tensor Field: {type(tensor_field)}\")\n# print(tabulate(tensor_field, headers='keys', tablefmt='psql'))\n# print(f\"Output: {type(simulation_result)}\")\n# print(tabulate(simulation_result, headers='keys', tablefmt='psql'))\n# print()\n\n# i = 0\n# verbose = False\n# results = {}\n# for raw_result, tensor_field in run.execute():\n# result = pd.DataFrame(raw_result)\n# if verbose:\n# print()\n# print(f\"Tensor Field: {type(tensor_field)}\")\n# print(tabulate(tensor_field, headers='keys', tablefmt='psql'))\n# print(f\"Output: {type(result)}\")\n# print(tabulate(result, headers='keys', tablefmt='psql'))\n# print()\n# results[i] = {}\n# results[i]['result'] = result\n# results[i]['simulation_parameters'] = simulation_parameters[i]\n# i += 1\n", "id": "3700889", "language": "Python", "matching_score": 3.876984119415283, "max_stars_count": 31, "path": "Math_Specification/src_old/run2.py" }, { "content": "import pandas as pd\n\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\nfrom cadCAD.configuration import Experiment\nfrom cadCAD import configs\n\ndef run(drop_midsteps: bool=True) -> pd.DataFrame:\n\n def get_M(k, v):\n if k == 'sim_config':\n k, v = 'M', v['M']\n return k, v\n\n\n config_ids = [\n dict(\n get_M(k, v) for k, v in config.__dict__.items() if k in ['simulation_id', 'run_id', 'sim_config', 'subset_id']\n ) for config in configs\n ]\n\n exec_mode = ExecutionMode()\n exec_context = ExecutionContext(exec_mode.local_mode)\n run = Executor(exec_context=exec_context, configs=configs)\n # results = pd.DataFrame()\n\n (system_events, tensor_field, sessions) = run.execute()\n\n df = pd.DataFrame(system_events)\n for i, config_id in enumerate(config_ids):\n params = config_id['M']\n result_record = pd.DataFrame.from_records([tuple([i for i in params.values()])], columns=list(params.keys()))\n print('result_record', result_record)\n sub_df = df[df.subset == config_id['subset_id']]\n\n if drop_midsteps:\n max_substep = max(df.substep)\n is_droppable = (df.substep != max_substep)\n is_droppable &= (df.substep != 0)\n df = df.loc[~is_droppable]\n\n result_record['dataset'] = [sub_df]\n df = df.append(result_record)\n\n return (df.reset_index(), tensor_field, sessions)\n\n# if __name__ == '__main__':\n# import sys\n# # check\n# sys.path.append('./src')\n\n# from config_wrapper import ConfigWrapper\n# # import options as options\n\n# # change \n# import model as model\n \n# config = ConfigWrapper(market_model)\n# config.append()\n\n# results = run(drop_midsteps=True)\n# print(results)", "id": "4637903", "language": "Python", "matching_score": 5.685113430023193, "max_stars_count": 31, "path": "Pilot/src/run_wrapper2.py" }, { 
"content": "import pandas as pd\n\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\nfrom cadCAD.configuration import Experiment\nfrom cadCAD import configs\n\ndef run(drop_midsteps: bool=True) -> pd.DataFrame:\n exec_mode = ExecutionMode()\n exec_context = ExecutionContext(exec_mode.local_mode)\n run = Executor(exec_context=exec_context, configs=configs)\n # results = pd.DataFrame()\n\n (system_events, tensor_field, sessions) = run.execute()\n\n df = pd.DataFrame(system_events)\n\n if drop_midsteps:\n max_substep = max(df.substep)\n is_droppable = (df.substep != max_substep)\n is_droppable &= (df.substep != 0)\n df = df.loc[~is_droppable]\n\n return (df.reset_index(), tensor_field, sessions)\n\n# if __name__ == '__main__':\n# import sys\n# # check\n# sys.path.append('./src')\n\n# from config_wrapper import ConfigWrapper\n# # import options as options\n\n# # change \n# import model as model\n \n# config = ConfigWrapper(market_model)\n# config.append()\n\n# results = run(drop_midsteps=True)\n# print(results)", "id": "9944974", "language": "Python", "matching_score": 0.9557237029075623, "max_stars_count": 31, "path": "Pilot/src/run_wrapper.py" }, { "content": "import matplotlib.pyplot as plt\n\ndef param_test_plot(experiments, config_ids, swept_variable, y_variable, *args):\n \"\"\"\n experiments is the simulation result dataframe.\n config_ids is the list configs executed upon in the simulation.\n swept_variable is the key (string) in config_ids that was being tested against.\n y_variable is the state_variable (string) to be plotted against default timestep.\n *args for plotting more state_variables (string).\n \"\"\"\n experiments = experiments.sort_values(by =['subset']).reset_index(drop=True)\n cols = 1\n rows = 1\n cc_idx = 0\n while cc_idx<len(experiments):\n cc = experiments.iloc[cc_idx]['subset']\n\n cc_label = experiments.iloc[cc_idx]['subset']\n\n secondary_label = [item['M'][swept_variable] for item in config_ids if item[\"subset_id\"]== cc_label]\n sub_experiments = experiments[experiments['subset']==cc]\n cc_idx += len(sub_experiments)\n fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(15*cols,7*rows))\n\n df = sub_experiments.copy()\n colors = ['orange', 'g', 'magenta', 'r', 'k' ]\n\n ax = axs\n title = swept_variable + ' Effect on ' + y_variable + '\\n' + 'Scenario: ' + str(secondary_label[0]) + ' ' + swept_variable\n # + 'Scenario: ' + str(cc_label) + ' rules_price'\n ax.set_title(title)\n ax.set_ylabel('Funds')\n\n df.plot(x='timestep', y=y_variable, label=y_variable, ax=ax, legend=True, kind ='scatter')\n\n for count, arg in enumerate(args):\n df.plot(x='timestep', y=arg, label=arg, ax=ax, legend=True, color = colors[count], kind ='scatter')\n\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n ax.set_xlabel('Timesteps')\n ax.grid(color='0.9', linestyle='-', linewidth=1)\n\n plt.tight_layout()\n \n fig.tight_layout(rect=[0, 0, 1, .97])\n fig.patch.set_alpha(1)\n plt.close()\n return display(fig)", "id": "1577124", "language": "Python", "matching_score": 1.2103725671768188, "max_stars_count": 31, "path": "Pilot/src/utils.py" }, { "content": "# from Model.src.sim.model.parts.choose_action import *\n\n# prev_state (called s in documentation) is a dict. 
Captures the current state of the system\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n# REMOVE and put in update_private_price function\n# Should be part of parameter if will be varied\n\n# P0 = [1]\n\n# signal = {\n# # 'dP': ['N/A', P0[0]/4, P0[0]/1000, P0[0]/2],\n# # 'period': ['N/A', 2000, 2000, 2000]\n# 'dP': P0[0]/4,\n# 'period': 2000,\n# 'sigma': [.005, 'N/A', 'N/A', 'N/A']\n# }\n\n\ndef update_private_price(params, substep, state_history, prev_state, policy_input):\n\n P0 = 1\n\n signal = {\n # 'dP': ['N/A', P0[0]/4, P0[0]/1000, P0[0]/2],\n # 'period': ['N/A', 2000, 2000, 2000]\n 'P0': P0,\n 'dP': P0/4,\n 'period': 2000,\n 'sigma': .005 # , 'N/A', 'N/A', 'N/A']\n }\n\n print(\"UPDATE PRIVATE PRICE\")\n ## params = params[0]\n rules_price = params['rules_price']\n period = params['period']\n timestep = prev_state['timestep']\n price = prev_state['spot_price']\n\n \"\"\" if rules_price == 'step':\n bump = int((timestep % int(period/2) == 0))*int(timestep > 0)\n sign = -(-1)**int((2*timestep/period))\n new_private_price = price + signal['dP']*bump*sign\n elif rules_price == 'ramp':\n sign = (-1)**int((2*timestep/period))\n new_private_price = price + signal['dP']*sign\n elif rules_price == 'sin':\n new_private_price = P0 + signal['dP'] * \\\n np.sin(2*np.pi*timestep/period)\n elif rules_price == 'martin':\n rv = np.random.normal(0, signal['sigma'])\n new_private_price = price+price*rv\n else:\n new_private_price = price \"\"\"\n\n # Private price belief signal is a sine wave\n # print(\"signal['dP'] = \", signal['dP'])\n # print(\"prev_state['timestep'] = \", prev_state['timestep'])\n\n #new_private_price = (random.randint(0, 100))/100\n\n # new_private_price = P0[0] + signal['dP'] * \\\n # np.sin(2*np.pi*prev_state['timestep']/signal['period'])\n\n # use martingale\n\n # Private price is a noisy reserve/supply <-- not using\n #r = prev_state['reserve']\n #s = prev_state['supply']\n #noise_r = (random.randint(-50, 50)/100)\n #noise_s = (random.randint(-50, 50)/100)\n\n #print(\"noise r = \", noise_r)\n #print(\"noise s = \", noise_s)\n\n #new_private_price = (r + (noise_r * r)) / (s + (noise_s * s))\n # print(\"--------------------------------------\")\n\n new_private_price = prev_state['chosen_agent']['agent_private_price']\n\n return 'private_price', new_private_price\n\n\ndef update_private_alpha(params, substep, state_history, prev_state, policy_input):\n # Private alpha belief signal is a ramp\n #sign = (-1)**int((2*prev_state['timestep']/signal['period']))\n #new_private_alpha = prev_state['alpha'] + signal['dP']*sign\n\n # new_private_alpha = P0[0] + signal['dP'] * \\\n # np.sin(2*np.pi*prev_state['timestep']/signal['period'])\n\n # new_private_alpha = (random.randint(50, 100))/100\n\n # e is private alpha's bias towards public alpha\n\n b = 0.8 # high bias\n\n public_alpha_signal = 0.9\n private_alpha_signal = random.randint(0,50)/100\n new_public_alpha = random.randint(0,50)/100\n new_private_alpha = (b)*new_public_alpha + (1-b)*private_alpha_signal\n\n #new_private_alpha = prev_state['chosen_agent']['agent_private_alpha']\n\n # print(\"new_private_alpha = \", new_private_alpha)\n return 'private_alpha', new_private_alpha\n\n\ndef update_agent_beliefs(params, substep, state_history, prev_state, policy_input):\n\n agent = prev_state['chosen_agent']\n timestep = prev_state['timestep']\n # params = params[0]\n alpha_bias = params['alpha_bias']\n price_bias = params['price_bias']\n\n #rv = np.random.normal(0, signal['sigma'])\n #new_private_price = 
price+price*rv\n #new_private_price = agent['agent_private_price']\n\n #new_private_price = agent['agent_private_price']\n #new_private_alpha = agent['agent_private_alpha']\n \n b_alpha = 0.0 # bias\n\n public_alpha_signal = 0.5 + ((1/1000)*timestep)\n private_alpha_signal = 0.5 + ((1/1000)*timestep)\n #private_alpha_signal = 0.5 - ((1/1000)*timestep)\n\n private_alpha = (b_alpha)*public_alpha_signal + (1-b_alpha)*private_alpha_signal\n\n b_price = 0.0 # bias\n\n public_price_signal = 0.5 + ((1/1000)*timestep)\n private_price_signal = 0.5 + ((1/1000)*timestep) \n #private_price_signal = 1.5 - ((1/1000)*timestep)\n\n private_price = (b_price)*public_price_signal + (1-b_price)*private_price_signal\n\n agent['agent_private_price_signal'] = private_price_signal\n agent['agent_private_alpha_signal'] = private_alpha_signal\n agent['agent_public_price_signal'] = public_price_signal\n agent['agent_public_alpha_signal'] = public_alpha_signal\n \n agent['agent_private_price'] = private_price\n agent['agent_private_alpha'] = private_alpha\n\n #print(\"agent['agent_private_price'] = \", agent['agent_private_price'])\n #print(\"agent['agent_private_alpha'] = \", agent['agent_private_alpha'])\n\n return 'chosen_agent', agent\n", "id": "7017565", "language": "Python", "matching_score": 2.9378533363342285, "max_stars_count": 31, "path": "src/sim/model/parts/private_beliefs.py" }, { "content": "# One Timestep can contain multiple PSUBs\n# At the end of each PSUB (called a substep), cadCAD returns the state of the system\n\nfrom src.sim.model.parts.private_beliefs import *\nfrom src.sim.model.parts.bondburn import *\nfrom src.sim.model.parts.attest import *\nfrom src.sim.model.parts.choose_action import set_action\nfrom src.sim.model.parts.monthly_instalment import add_instalment\nfrom src.sim.model.parts.choose_agent import choose_agent\nfrom src.sim.model.parts.put_agent_back_to_df import put_agent_back_to_df\n\n# print(\"-----------------PSUB---------------------\")\n\npartial_state_update_block = [\n {\n 'policies': {\n # 'agent': choose_agent\n },\n 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # Initialization and exogenous processes\n 'chosen_agent': choose_agent,\n #'public_alpha': update_public_alpha\n }\n },\n {\n 'policies': {\n # 'act': set_action\n },\n 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # Agent signaling\n # Capture any private signals eg. 
sine wave\n #'agent_private_price': update_private_price,\n #'agent_private_alpha': update_private_alpha,\n 'chosen_agent': update_agent_beliefs\n }\n },\n {\n 'policies': {\n 'act': set_action,\n 'add_instalment': add_instalment,\n },\n 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # Bond-to-mint or burn-to-withdraw\n 'reserve': update_R,\n 'supply': update_S,\n 'spot_price': update_P_bondburn,\n 'pbar': update_pbar,\n 'invariant_I': update_I_bondburn,\n 'chosen_agent': update_agent_BC\n }\n },\n {\n 'policies': {\n 'act': set_action\n },\n 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # Attest positive or attest negative\n 'attestations': update_Q,\n 'attestations_1': update_Q1,\n 'attestations_0': update_Q0,\n 'supply_1': update_S1,\n 'supply_0': update_S0,\n 'supply_free': update_S_free,\n 'chosen_agent': update_agent_PM,\n 'alpha': update_alpha,\n 'kappa': update_kappa,\n 'spot_price': update_P_attest,\n 'invariant_V': update_V\n }\n },\n # {\n # 'policies': {\n # },\n # 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # # Resolve metrics\n\n # }\n # },\n {\n 'policies': {\n\n },\n 'variables': {\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # Close out\n 'agents': put_agent_back_to_df\n }\n }\n]\n", "id": "2480421", "language": "Python", "matching_score": 1.7582769393920898, "max_stars_count": 31, "path": "src/sim/model/partial_state_update_block.py" }, { "content": "import random\nimport math\n\n\ndef add_instalment(params, substep, state_history, prev_state):\n # params = params[0]\n monthly_instalment = params['monthly_instalment']\n timestep = prev_state['timestep']\n\n if timestep%25 == 0:\n monthly_instalment = monthly_instalment\n else:\n monthly_instalment = 0\n\n return {\n 'monthly_instalment': monthly_instalment\n }\n\n", "id": "9386480", "language": "Python", "matching_score": 0.25212299823760986, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/monthly_instalment.py" }, { "content": "import numpy as np \n\ndef synthetic_alpha_test(params, substep, state_history, prev_state):\n\n previous_public_alpha = prev_state['public_alpha']\n\n alpha_noise = round(np.random.normal(0.1,0.05,1)[0],2) / 15\n\n if params['alpha_test'] == 'success':\n new_public_alpha = 1 - (1- (alpha_noise)) * (1 - previous_public_alpha)\n\n elif params['alpha_test'] == 'failure': \n new_public_alpha = (1- (alpha_noise)) * (previous_public_alpha)\n \n elif params['alpha_test'] == 'constant': \n new_public_alpha = previous_public_alpha\n\n delta_public_alpha = new_public_alpha - previous_public_alpha\n\n return {'public_alpha_update': new_public_alpha, 'delta_public_alpha' : delta_public_alpha}\n\n\ndef delta_public_alpha_update(params, substep, state_history, prev_state, policy_input):\n '''\n Tracks delta public alpha signal, used to generate alpha\n '''\n # previous_alpha = prev_state['public_alpha']\n delta_public_alpha = policy_input['delta_public_alpha']\n\n return 'delta_public_alpha', delta_public_alpha\n\ndef public_alpha_update(params, substep, state_history, prev_state, policy_input):\n '''\n Tracks public alpha signal, used to generate alpha\n '''\n # previous_alpha = prev_state['public_alpha']\n new_alpha = policy_input['public_alpha_update']\n\n return 'public_alpha', new_alpha", "id": "9656516", "language": "Python", "matching_score": 
2.171975612640381, "max_stars_count": 31, "path": "Pilot/src/sim/model/parts/public_alpha.py" }, { "content": "import random\nimport numpy as np \n\ndef update_Q(params, substep, state_history, prev_state, policy_input):\n Q = prev_state['attestations']\n dQ = policy_input['amt_Q1'] + policy_input['amt_Q0']\n\n Q = Q + dQ\n return 'attestations', Q\n\n\ndef update_Q1(params, substep, state_history, prev_state, policy_input):\n Q1 = prev_state['attestations_1']\n\n Q1 = Q1 + policy_input['amt_Q1']\n return 'attestations_1', Q1\n\n\ndef update_Q0(params, substep, state_history, prev_state, policy_input):\n Q0 = prev_state['attestations_0']\n\n Q0 = Q0 + policy_input['amt_Q0']\n return 'attestations_0', Q0\n\n\ndef update_S1(params, substep, state_history, prev_state, policy_input):\n # action = _input['action']\n S1 = prev_state['supply_1']\n # amt_pos is a key in a dict of dicts\n\n S1 = S1 + policy_input['amt_pos']\n return 'supply_1', S1\n\n\ndef update_S0(params, substep, state_history, prev_state, policy_input):\n S0 = prev_state['supply_0']\n\n S0 = S0 + policy_input['amt_neg']\n return 'supply_0', S0\n\n\ndef update_S_free(params, substep, state_history, prev_state, policy_input):\n S_free = prev_state['supply_free']\n\n S_free = S_free - (policy_input['amt_neg'] + policy_input['amt_pos'])\n return 'supply_free', S_free\n\n\ndef compute_q1(q1, amt_Q1):\n\n q1 = q1 + amt_Q1\n\n #print(\"amt_Q1 from compute_q1: \", amt_Q1)\n\n return q1\n\n\ndef compute_q0(q0, amt_Q0):\n\n q0 = q0 + amt_Q0\n\n return q0\n\n\ndef compute_s1(s1, amt_pos):\n\n s1 = s1 + amt_pos\n\n return s1\n\n\ndef compute_s0(s0, amt_neg):\n\n s0 = s0 + amt_neg\n\n return s0\n\n\ndef compute_s_free(s_free, delta_s_free):\n\n s_free = s_free - delta_s_free\n\n return s_free\n\n\ndef update_agent_PM(params, substep, state_history, prev_state, policy_input):\n agent = prev_state['chosen_agent']\n q1 = agent['agent_attestations_1']\n q0 = agent['agent_attestations_0']\n s1 = agent['agent_supply_1']\n s0 = agent['agent_supply_0']\n s_free = agent['agent_supply_free']\n\n amt_Q1 = policy_input['amt_Q1']\n amt_Q0 = policy_input['amt_Q0']\n amt_pos = policy_input['amt_pos']\n amt_neg = policy_input['amt_neg']\n delta_s_free = policy_input['amt_pos'] + policy_input['amt_neg']\n\n #print(\"amt_Q1 from update_agent: \", amt_Q1)\n\n agent['agent_attestations_1'] = compute_q1(q1, amt_Q1)\n agent['agent_attestations_0'] = compute_q0(q0, amt_Q0)\n agent['agent_supply_1'] = compute_s1(s1, amt_pos)\n agent['agent_supply_0'] = compute_s0(s0, amt_neg)\n agent['agent_supply_free'] = compute_s_free(s_free, delta_s_free)\n\n return 'chosen_agent', agent\n\n\ndef attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0, q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s):\n\n S1 = S1 + delta_s\n\n new_alpha = S1 * R / (S1 * R - S0 * R + S0*C)\n\n return new_alpha\n\n\ndef attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0, q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s):\n\n S0 = S0 + delta_s\n\n new_alpha = S1 * R / (S1 * R - S0 * R + S0*C)\n\n return new_alpha\n\n# Remove prediction market - use generated data instead\n# def update_alpha(params, substep, state_history, prev_state, policy_input):\n\n# # params = params[0]\n# R = prev_state['reserve']\n# C = params['C']\n# E = params['E']\n# alpha = prev_state['alpha']\n\n# Q = prev_state['attestations_1'] + prev_state['attestations_0']\n# Q1 = prev_state['attestations_1']\n# Q0 = prev_state['attestations_0']\n# S = prev_state['supply']\n# S1 = prev_state['supply_1']\n# S0 = 
prev_state['supply_0']\n\n# q1 = prev_state['chosen_agent']['agent_attestations_1']\n# q0 = prev_state['chosen_agent']['agent_attestations_0']\n# s_free = prev_state['chosen_agent']['agent_supply_free']\n# s1 = prev_state['chosen_agent']['agent_supply_1']\n# s0 = prev_state['chosen_agent']['agent_supply_0']\n\n# s = s_free + s1 + s0\n\n# attest_action = policy_input['mech_pm']\n# delta_q1 = policy_input['amt_Q1']\n# delta_q0 = policy_input['amt_Q0']\n# delta_s = policy_input['amt_pos'] + policy_input['amt_neg']\n\n# new_alpha = S1 * R / (S1 * R - S0 * R + S0*C)\n\n# if attest_action == 'attest_pos': # positive attestation\n# new_alpha = attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n# q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n# # print(\"Positive attestation 1\")\n\n# elif attest_action == 'attest_neg': # negative attestation\n# new_alpha = attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n# q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n# # print(\"Negative attestation 1\")\n\n# else:\n# new_alpha = alpha\n\n# #print(\"new_alpha = \", new_alpha)\n# return 'alpha', new_alpha\ndef synthetic_alpha_test(params, substep, state_history, prev_state):\n\n alpha_noise = round(np.random.normal(0.5,0.2,1)[0],2) / 100\n \n # JUST FIXED FOR NOW\n # TEST CONSTANT ALPHA\n\n new_alpha = prev_state['alpha']\n previous_value = prev_state['alpha']\n # new_alpha = policy_input['new_alpha']\n value = previous_value + new_alpha / 140\n\n\n if params['alpha_test'] == 'success':\n value = 1 - (1- (alpha_noise)) * (1 - previous_value)\n\n elif params['alpha_test'] == 'failure':\n value = (1- (alpha_noise)) * (previous_value)\n\n\n # kappa = round(kappa)\n\n # elif params['kappa_rule'] == 'fixed':\n # kappa = params['starting_kappa']\n\n return {'new_alpha': value}\n\n\ndef synthetic_alpha(params, substep, state_history, prev_state):\n\n new_alpha = round(np.random.normal(0.5,0.2,1)[0],2)\n \n # JUST FIXED FOR NOW\n # TEST CONSTANT ALPHA\n\n # new_alpha = prev_state['alpha']\n previous_value = prev_state['alpha']\n # new_alpha = policy_input['new_alpha']\n value = previous_value + new_alpha / 140\n\n return {'new_alpha': value}\n\ndef update_alpha(params, substep, state_history, prev_state, policy_input):\n # previous_value = prev_state['alpha']\n value = policy_input['new_alpha']\n # value = previous_value + new_alpha / 140\n\n\n return 'alpha', value\n\n\n\ndef update_kappa(params, substep, state_history, prev_state, policy_input):\n\n # # params = params[0]\n # R = prev_state['reserve']\n C = params['C']\n # E = params['E']\n\n # alpha = prev_state['alpha']\n\n # Q = prev_state['attestations_1'] + prev_state['attestations_0']\n # Q1 = prev_state['attestations_1']\n # Q0 = prev_state['attestations_0']\n # S = prev_state['supply']\n # S1 = prev_state['supply_1']\n # S0 = prev_state['supply_0']\n I = prev_state['invariant_I']\n\n # q1 = prev_state['chosen_agent']['agent_attestations_1']\n # q0 = prev_state['chosen_agent']['agent_attestations_0']\n # s_free = prev_state['chosen_agent']['agent_supply_free']\n # s1 = prev_state['chosen_agent']['agent_supply_1']\n # s0 = prev_state['chosen_agent']['agent_supply_0']\n\n # s = s_free + s1 + s0\n\n # delta_q1 = policy_input['amt_Q1']\n # delta_q0 = policy_input['amt_Q0']\n # delta_s = policy_input['amt_pos'] + policy_input['amt_neg']\n\n # if delta_q1 > 0: # positive attestation\n # new_alpha = attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n # q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n # elif delta_q0 > 0: # negative 
attestation\n\n # new_alpha = attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n # q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n # else:\n # new_alpha = alpha\n\n\n\n new_alpha = policy_input['new_alpha']\n\n# if not used, price and s_free go very negative at the outset\n########################################################\n kappa = I / (I - (C*new_alpha))\n\n # if params['kappa_rule'] == 'round':\n # kappa = round(kappa)\n\n # elif params['kappa_rule'] == 'fixed':\n # kappa = params['starting_kappa']\n\n # elif params['kappa_rule'] == 'none':\n # kappa = kappa\n \n #print(\"kappa = \", kappa)\n return 'kappa', kappa\n\n\ndef update_I_attest(params, substep, state_history, prev_state, policy_input):\n\n # params = params[0]\n R = prev_state['reserve']\n C = params['C']\n E = params['E']\n\n alpha = prev_state['alpha']\n\n Q = prev_state['attestations_1'] + prev_state['attestations_0']\n Q1 = prev_state['attestations_1']\n Q0 = prev_state['attestations_0']\n S = prev_state['supply']\n S1 = prev_state['supply_1']\n S0 = prev_state['supply_0']\n\n q1 = prev_state['chosen_agent']['agent_attestations_1']\n q0 = prev_state['chosen_agent']['agent_attestations_0']\n s_free = prev_state['chosen_agent']['agent_supply_free']\n s1 = prev_state['chosen_agent']['agent_supply_1']\n s0 = prev_state['chosen_agent']['agent_supply_0']\n\n s = s_free + s1 + s0\n\n delta_q1 = policy_input['amt_Q1']\n delta_q0 = policy_input['amt_Q0']\n delta_s = policy_input['amt_pos'] + policy_input['amt_neg']\n\n if delta_q1 > 0: # positive attestation\n new_alpha = attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n elif delta_q0 > 0: # negative attestation\n\n new_alpha = attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n else:\n new_alpha = alpha\n\n new_alpha = policy_input['new_alpha']\n\n I = R + (C*new_alpha)\n\n #print(\"I (attest) = \", I)\n return \"invariant_I\", I\n\n\ndef update_P_attest(params, substep, state_history, prev_state, policy_input):\n\n # params = params[0]\n R = prev_state['reserve']\n C = params['C']\n E = params['E']\n I = prev_state['invariant_I']\n alpha = prev_state['alpha']\n\n Q = prev_state['attestations_1'] + prev_state['attestations_0']\n Q1 = prev_state['attestations_1']\n Q0 = prev_state['attestations_0']\n S = prev_state['supply']\n S1 = prev_state['supply_1']\n S0 = prev_state['supply_0']\n\n q1 = prev_state['chosen_agent']['agent_attestations_1']\n q0 = prev_state['chosen_agent']['agent_attestations_0']\n s_free = prev_state['chosen_agent']['agent_supply_free']\n s1 = prev_state['chosen_agent']['agent_supply_1']\n s0 = prev_state['chosen_agent']['agent_supply_0']\n\n s = s_free + s1 + s0\n\n delta_q1 = policy_input['amt_Q1']\n delta_q0 = policy_input['amt_Q0']\n delta_s = policy_input['amt_pos'] + policy_input['amt_neg']\n\n if delta_q1 > 0: # positive attestation\n new_alpha = attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n elif delta_q0 > 0: # negative attestation\n\n new_alpha = attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n else:\n new_alpha = alpha\n\n # I = R + (C*new_alpha)\n kappa = I / (I - (C*new_alpha))\n\n P = kappa * (R/S)\n # print(\"PRICE (ATTEST): \", P)\n\n #print(\"Spot Price P (attest) = \", P)\n return 'spot_price', P\n\n\ndef update_V(params, substep, state_history, prev_state, policy_input):\n\n # params 
= params[0]\n R = prev_state['reserve']\n C = params['C']\n E = params['E']\n I = prev_state['invariant_I']\n # alpha = prev_state['alpha']\n\n # Q = prev_state['attestations_1'] + prev_state['attestations_0']\n # Q1 = prev_state['attestations_1']\n # Q0 = prev_state['attestations_0']\n S = prev_state['supply']\n # S1 = prev_state['supply_1']\n # S0 = prev_state['supply_0']\n\n # q1 = prev_state['chosen_agent']['agent_attestations_1']\n # q0 = prev_state['chosen_agent']['agent_attestations_0']\n # s_free = prev_state['chosen_agent']['agent_supply_free']\n # s1 = prev_state['chosen_agent']['agent_supply_1']\n # s0 = prev_state['chosen_agent']['agent_supply_0']\n\n # s = s_free + s1 + s0\n\n # delta_q1 = policy_input['amt_Q1']\n # delta_q0 = policy_input['amt_Q0']\n # delta_s = policy_input['amt_pos'] + policy_input['amt_neg']\n\n # if delta_q1 > 0: # positive attestation\n # new_alpha = attest_pos(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n # q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n # elif delta_q0 > 0: # negative attestation\n\n # new_alpha = attest_neg(R, C, E, alpha, Q, Q1, Q0, S, S1, S0,\n # q0, q1, s_free, s1, s0, s, delta_q1, delta_q0, delta_s)\n\n # else:\n # new_alpha = alpha\n\n new_alpha = policy_input['new_alpha']\n # I = R + (C*new_alpha)\n kappa = I / (I - (C*new_alpha))\n # print(\"S = \", S)\n # print(\"KAPPA = \", kappa)\n # print(\"R = \", R)\n V = (S**(kappa))/R\n\n return 'invariant_V', V\n", "id": "10460596", "language": "Python", "matching_score": 2.9771759510040283, "max_stars_count": 31, "path": "Code_With_Us/src/sim/model/parts/attest.py" }, { "content": "# RUN\n# The following imports NEED to be in the exact order\nimport numpy as np\nfrom copy import deepcopy\nfrom cadCAD.configuration.utils import config_sim\nfrom cadCAD.configuration import append_configs\nfrom cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n# import config\nfrom cadCAD import configs\nimport pandas as pd\n\n\ndef run(drop_midsteps=True):\n exec_mode = ExecutionMode()\n multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)\n run = Executor(exec_context=multi_proc_ctx, configs=configs)\n results = pd.DataFrame()\n i = 0\n for raw_result, _ in run.execute():\n params = configs[i].sim_config['M']\n result_record = pd.DataFrame.from_records(\n [tuple([i for i in params.values()])], columns=list(params.keys()))\n\n df = pd.DataFrame(raw_result)\n # keep only last substep of each timestep\n if drop_midsteps:\n max_substep = max(df.substep)\n is_droppable = (df.substep != max_substep) & (df.substep != 0)\n df.drop(df[is_droppable].index, inplace=True)\n\n result_record['dataset'] = [df]\n results = results.append(result_record)\n i += 1\n return results.reset_index()\n\n# STATE VAR UPDATE: PRIVATE ALPHA\n\n\ndef update_private_alpha(params, substep, state_history, prev_state, policy_input):\n # Private alpha belief signal is a ramp\n # sign = (-1)**int((2*prev_state['timestep']/signal['period']))\n # new_private_alpha = prev_state['alpha'] + signal['dP']*sign\n new_private_alpha = P0[0] + signal['dP'] * \\\n np.sin(2*np.pi*prev_state['timestep']/signal['period'])\n # plt.plot(new_private_alpha, substep)\n # plt.show()\n print(\"new_private_alpha = \", new_private_alpha)\n return 'private_alpha', new_private_alpha\n\n# POLICY: SET ACTION\n\n\ndef set_action(params, substep, state_history, prev_state):\n private_alpha = prev_state['private_alpha']\n start_alpha = params['starting_alpha']\n alpha = prev_state['alpha']\n s = prev_state['agent_supply']\n\n if alpha > 
private_alpha:\n mech = 'attest_neg'\n print(\"Agent attests negative. alpha = \",\n alpha, \"private_alpha = \", private_alpha)\n amt_Q1 = 0\n amt_Q0 = alpha - private_alpha # units\n amt_neg = amt_Q0 # VERIFY THIS # units\n amt_pos = 0\n S0 = S0 + amt_neg\n Q0 = Q0 + amt_Q0\n\n elif alpha < private_alpha:\n mech = 'attest_pos'\n print(\"Agent attests positive. alpha = \",\n alpha, \"private_alpha = \", private_alpha)\n amt_Q1 = private_alpha - alpha # units\n amt_Q0 = 0\n amt_neg = 0\n amt_pos = amt_Q1 # VERIFY THIS\n S1 = S1 + amt_pos\n Q1 = Q1 + amt_Q1\n\n else:\n # don't attest\n mech = None\n amt_Q1 = 0\n amt_Q0 = 0\n amt_pos = 0\n amt_neg = 0\n print(\"No attestation. alpha = \", alpha,\n \"private_alpha = \", private_alpha)\n\n return {\n 'mech': mech,\n 'amt_Q1': amt_Q1,\n 'amt_Q0': amt_Q0,\n 'amt_pos': amt_pos,\n 'amt_neg': amt_neg\n }\n\n# STATE VAR UPDATE: ALPHA\n\n\ndef update_alpha(params, substep, state_history, prev_state, policy_input):\n\n alpha = prev_state['alpha']\n E = 1.3\n R = 300\n C = 700\n Q1 = [1]\n q1 = [0]\n S1 = [1]\n S0 = [0]\n s1 = [0]\n s = [20]\n deltaq1 = [1] # deltaq1 = deltas\n deltas = [1, 4, 5, 6, 3, 2, 0, 4]\n\n for i in range(len(deltas)):\n A = (1/(Q1[i]*(Q1[i]+deltaq1[i]))) * \\\n ((q1[i]*(Q1[i]*deltas[i]) - (deltaq1[i]*s[i])) +\n deltaq1[i]*((Q1[i]*s1[i]) + (Q1[i]*deltas[i])))\n\n alpha_bar = (deltas[i]*R)/(A*(C+R) - (deltas[i]*C))\n\n new_alpha = E*(alpha[i]) + (1-E)*(alpha[i])*((S1[i]+S0[i])/(S1[i]+S0[i]+deltas[i])) + \\\n (alpha_bar)*(deltas[i]/(S1[i]+S0[i]+deltas[i]))\n\n print(\"A = \", A)\n print(\"alpha_bar = \", alpha_bar)\n print(\"new_alpha = \", new_alpha)\n\n # Update operations\n Q1.append(Q1[i] + deltaq1[i])\n S1.append(S1[i] + deltas[i])\n S0.append(S0[i])\n s1.append(s1[i] + deltas[i])\n s.append(s[i] - deltas[i])\n q1.append(q1[i] + deltaq1[i])\n alpha.append(new_alpha)\n deltaq1.append(deltas[i])\n\n\n# PSUBS\npartial_state_update_blocks = [\n {\n 'policies': {\n 'act': set_action\n },\n 'variables': {\n 'private_alpha': update_private_alpha\n }\n },\n {\n 'policies': {\n 'act': set_action\n },\n 'variables': {\n 'alpha': update_alpha\n # 'agent_supply': update_s\n }\n }\n]\n\n# CONFIG.PY\ntime_periods_per_run = 400\nmonte_carlo_runs = 1\n\nALPHA = [0.5]\nC = [700]\nQ1 = 1\nQ0 = 1\n\nreserve = 300\ninvariant_I = reserve + (C[0]*ALPHA[0])\n\n# params\nparams = {\n 'starting_alpha': ALPHA, # initial alpha\n}\n\n# initial conditions\ninitial_conditions = {\n 'private_alpha': 0,\n 'alpha': ALPHA,\n 'supply_0': 0,\n 'supply_1': 0,\n 'attestations_0': Q0,\n 'attestations_1': Q1,\n 'invariant_I': invariant_I,\n 'agent_attestations_1': 0,\n 'agent_attestations_0': 0,\n 'agent_supply': 0,\n 'agent_supply_1': 0,\n 'agent_supply_0': 0\n}\n\nprint(\"Initial Conditions (config.py) : \", initial_conditions)\n\nsim_config = config_sim({\n 'T': range(time_periods_per_run),\n 'N': monte_carlo_runs,\n 'M': params\n})\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The configurations above are packaged into a `Configuration` object\nappend_configs(\n # dict containing variable names and initial values\n initial_state=initial_conditions,\n # dict containing state update functions\n partial_state_update_blocks=partial_state_update_blocks,\n sim_configs=sim_config # dict containing simulation parameters\n)\n\nfor c in configs:\n c.initial_state = deepcopy(c.initial_state)\n\n print(\"Params (config.py) : \", c.sim_config['M'])\n\n c.initial_state['alpha'] = 
c.sim_config['M']['starting_alpha']\n\n", "id": "9702380", "language": "Python", "matching_score": 4.088482856750488, "max_stars_count": 31, "path": "src/sim/model/parts/old/alpha_test.py" }, { "content": "from cadCAD.configuration import append_configs # 4.18: append_configs. Later: Experiment\nfrom cadCAD.configuration.utils import config_sim\n\nfrom src.sim.model.state_variables import initial_conditions\nfrom src.sim.model.partial_state_update_block import partial_state_update_block\nfrom src.sim.model.sys_params import params\n\nfrom src.sim.sim_setup import SIMULATION_TIME_STEPS, MONTE_CARLO_RUNS\n\nfrom copy import deepcopy\nfrom cadCAD import configs\n\nsim_config = config_sim({\n 'T': range(SIMULATION_TIME_STEPS),\n 'N': MONTE_CARLO_RUNS,\n 'M': params\n})\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# The configurations above are packaged into a `Configuration` object\n\n###### 4.18 conversion #######################\n#exp = Experiment()\nappend_configs(\n###### 4.18 conversion #######################\n # dict containing variable names and initial values\n initial_state=initial_conditions,\n # dict containing state update functions\n partial_state_update_blocks=partial_state_update_block,\n # dict containing simulation parameters\n sim_configs=sim_config\n)\n\n# pprint(configs)\n\nfor c in configs:\n c.initial_state = deepcopy(c.initial_state)\n\n # print(\"Params (config.py) : \", c.sim_config['M'])\n\n c.initial_state['kappa'] = c.sim_config['M']['starting_kappa']\n c.initial_state['alpha'] = c.sim_config['M']['starting_alpha']\n c.initial_state['reserve'] = c.sim_config['M']['money_raised']\n c.initial_state['supply'] = c.initial_state['kappa'] * \\\n c.sim_config['M']['money_raised'] / c.initial_state['spot_price']\n c.initial_state['supply_free'] = c.initial_state['supply']\n c.initial_state['invariant_V'] = (\n c.initial_state['supply']**c.initial_state['kappa']) / c.initial_state['reserve']\n c.initial_state['invariant_I'] = c.initial_state['reserve'] + \\\n (c.sim_config['M']['C'] * c.initial_state['alpha'])\n", "id": "3932672", "language": "Python", "matching_score": 3.8810641765594482, "max_stars_count": 31, "path": "Pilot/src/sim/config.py" }, { "content": "from cadCAD.configuration.utils import config_sim\nfrom cadCAD.configuration import Experiment\nimport importlib\n\nclass ConfigWrapper:\n '''\n The ConfigWrapper allows you to pass a model as an argument, and update the simulation configuration.\n Maps (params, states) would be merge updated, and all other options are overrides.\n '''\n def __init__(\n self,\n model,\n N=None,\n T=None,\n M={},\n initial_state={},\n partial_state_update_blocks=None,\n env_processes={},\n exp=Experiment()\n ):\n m_state_variables = importlib.import_module(f'{model.__name__}.model.state_variables').initial_conditions\n m_psubs = importlib.import_module(f'{model.__name__}.model.partial_state_update_block').partial_state_update_block\n m_params = importlib.import_module(f'{model.__name__}.model.sys_params').params\n m_sim_params = importlib.import_module(f'{model.__name__}.sim_setup')\n \n self.N = N if N else m_sim_params.MONTE_CARLO_RUNS\n self.T = T if T else range(m_sim_params.SIMULATION_TIME_STEPS)\n \n m_params.update(M)\n self.M = m_params\n \n m_state_variables.update(initial_state)\n self.initial_state = m_state_variables\n \n self.partial_state_update_blocks = partial_state_update_blocks if partial_state_update_blocks else m_psubs\n self.env_processes = env_processes\n 
self.exp = exp\n\n def get_config(self):\n return config_sim(\n {\n 'N': self.N,\n 'T': self.T,\n 'M': self.M,\n }\n )\n\n def get_initial_conditions(self):\n return self.initial_state\n\n def append(self, sim_configs=None): #, initial_state=None\n if not isinstance(sim_configs, list):\n sim_configs = config_sim({'N': self.N, 'T': self.T, 'M': self.M})\n\n # if not isinstance(initial_state, list):\n # self.initial_state = initial_state\n\n self.exp.append_configs(\n sim_configs=sim_configs,\n initial_state=self.initial_state,\n partial_state_update_blocks=self.partial_state_update_blocks,\n env_processes=self.env_processes,\n )\n\n return self.exp", "id": "10060047", "language": "Python", "matching_score": 3.7669382095336914, "max_stars_count": 31, "path": "Pilot/src/config_wrapper.py" }, { "content": "# import libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as sts\nimport seaborn as sns\n\nSIMULATION_TIME_STEPS = 500\nMONTE_CARLO_RUNS = 1\n", "id": "4028205", "language": "Python", "matching_score": 0.31995293498039246, "max_stars_count": 31, "path": "src/sim/sim_setup.py" } ]
2.75032
Architector4
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# TAKEN FROM: https://github.com/i3/i3status/blob/master/contrib/wrapper.py\n# MUTILATED BY: Architector #4\n# This script is a simple wrapper which prefixes each i3status line with custom\n# information. It is a python reimplementation of:\n# http://code.stapelberg.de/git/i3status/tree/contrib/wrapper.pl\n#\n# To use it, ensure your ~/.i3status.conf contains this line:\n# output_format = \"i3bar\"\n# in the 'general' section.\n# Then, in your ~/.i3/config, use:\n# status_command i3status | ~/i3status/contrib/wrapper.py\n# In the 'bar' section.\n#\n# In its current version it will display the cpu frequency governor, but you\n# are free to change it to display whatever you like, see the comment in the\n# source code below.\n#\n# © 2012 <NAME> <<EMAIL>>\n#\n# This program is free software. It comes without any warranty, to the extent\n# permitted by applicable law. You can redistribute it and/or modify it under\n# the terms of the Do What The Fuck You Want To Public License (WTFPL), Version\n# 2, as published by Sam Hocevar. See http://sam.zoy.org/wtfpl/COPYING for more\n# details.\n\nimport sys\nimport subprocess\nimport json\n# Playerctl bindings: https://github.com/altdesktop/playerctl\nimport gi\ngi.require_version('Playerctl', '2.0')\nfrom gi.repository import Playerctl\nimport time\n\nsi_units = [\"\", \"K\", \"M\", \"G\", \"T\"]\n\ndef conv_si(num):\n \"\"\"\n Convert input number to a shorter string with a SI unit postfix.\n Does not support float values.\n \"\"\"\n for iteration, unit in enumerate(si_units):\n if num < 10000 or unit == si_units[-1]:\n return str(num)+unit\n num = num//1000\n\ndef get_max_frequency():\n \"\"\" Get the maximum allowed frequency for cpu0, assuming all CPUs use the same. \"\"\"\n with open ('/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq') as fp:\n return int(fp.readlines()[0].strip())*1000\n\ndef get_governor():\n \"\"\" Get the current governor for cpu0, assuming all CPUs use the same. 
\"\"\"\n with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor') as fp:\n return fp.readlines()[0].strip()\n\n ### This old code was querying the window title of a window of class \"umpv\", was hacky and dumb.\n##vidname=\"\" # When rapidly switching workspaces, it fails to find the window.\n##vidstop=0 # This system here is to use what the previous query gave in case last one failed.\n#def get_playing_video_name():\n# sub = subprocess.Popen(\"xprop -id $(xdotool search --classname umpv) WM_NAME | cut -d\\\\\\\" -f2-\",\n# shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n# stdout=sub.stdout.readlines()\n# if len(stdout) == 0:\n# #if vidstop>2:\n# #vidname=\"\"\n# #vidstop+=1\n# #return vidname\n# return \"\"\n# line=stdout[0].decode(\"utf-8\")[:-2]#[:-7]\n# vidname=line\n# vidstop=0\n# return line\n\n# Get full media player status line using playerctl\ndef get_playing_media_name():\n players = []\n man=Playerctl.PlayerManager().props.player_names\n for name in man: # Iterate over every media player\n try: # A media player's properties may sometimes be unavailable for some reason\n player = Playerctl.Player.new_from_name(name)\n try: # Try getting title\n title = player.get_title()\n if len(title)==0: # Title is empty\n continue\n except Exception: # Couldn't get title - happens when there is no media player.\n continue\n artist = player.get_artist()\n pos = time.strftime(\"%H:%M:%S\", time.gmtime(max(player.get_position()/1000000, 0)))\n if 'mpris:length' in player.props.metadata.keys():\n length = time.strftime(\"%H:%M:%S\", time.gmtime(player.props.metadata['mpris:length']/1000000))\n else:\n length=None\n if (artist is None) & (title is None):\n continue\n #return \"\" # No valid media data yet\n output=\"\"\n if artist is not None:\n if len(artist)>0:\n output+=artist+\" - \"\n output+=title\n output+=\" (\"+pos\n if length is not None:\n output+=\"/\"+length\n output+=\")\"\n players.append( (output, player.props.status) )\n except Exception:\n players.append( (\"?\", \"?\") )\n continue\n return players\n\n\ndef print_line(message):\n \"\"\" Non-buffered printing to stdout. \"\"\"\n sys.stdout.write(message + '\\n')\n sys.stdout.flush()\n\ndef read_line():\n \"\"\" Interrupted respecting reader for stdin. 
\"\"\"\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()\n\nif __name__ == '__main__':\n # Skip the first line which contains the version header.\n print_line(read_line())\n\n # The second line contains the start of the infinite array.\n print_line(read_line())\n\n while True:\n line, prefix = read_line(), ''\n # ignore comma at start of lines\n if line.startswith(','):\n line, prefix = line[1:], ','\n\n j = json.loads(line)\n # insert information into the start of the json, but could be anywhere\n # CHANGE THIS LINE TO INSERT SOMETHING ELSE\n #j.insert(0, {'full_text' : '%s' % get_governor(), 'name' : 'gov'})\n j.insert(0, {\n 'name' : 'max_freq',\n 'color': '#CCCCCC',\n 'full_text': conv_si(get_max_frequency())+\"Hz\"\n })\n\n players = get_playing_media_name()\n for i, media in enumerate(players):\n if media[0] != \"\":\n\n # Probably can be helped with Python 3.10 match-case...\n if media[1] == 'Playing':\n color = '#AAFFAA'\n else:\n if i%2 == 0:\n color = '#CCCCCC'\n else:\n color = '#AAAAAA'\n\n\n j.insert(0, {\n 'name' : 'media',\n #'markup' : 'pango', # Breaks with video title containing &\n 'color' : color,\n #'full_text' : '<span rise=\"3073\">%s</span>' % video\n 'full_text' : media[0]\n })\n\n # and echo back new encoded json\n print_line(prefix+json.dumps(j))\n", "id": "11471971", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "bin/i3status-wrapper.py" }, { "content": "#!/usr/bin/env python3\n\nimport random\nimport subprocess\n\nresponses = [\n \"ЧО\",\n \"НЕ\",\n \"НИХАЧУ\",\n \"ДАВАЙ ПОТОМ\",\n \"ПАЛЕГЧЕ\",\n \"НЕ ТУДА ПОПАЛ\",\n \"ЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭЭ\",\n \"Боюсь я тебе не могу это позволить, Дэйв.\",\n \"ЧАВО\",\n \"НИ ТЫКАЙ\\n!!!!!!!!!!!!!!!!!!!!!\",\n \"НЕЕЕЕЕЕЕ\",\n \"КОГДА\",\n \"К ЧЕМУ ЭТО\",\n \"КУДА ТЫКНУЛ?? НЕ ТУДА ТЫКНУЛ!!\",\n \"А\"\n ]\n\nresponse = random.choice(responses)\nsubprocess.call(['notify-send', response], shell=False)\n", "id": "4636982", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "bin/shutdown.py" } ]
0
johnbeard
[ { "content": "\"\"\"\nClasses for representation and export of devhelp books\n\"\"\"\n\nimport lxml.etree as ET\n\nclass Keyword(object):\n\n def __init__(self, name, link, type):\n self.name = name\n self.link = link\n self.type = type\n\n def getXml(self):\n e = ET.Element('keyword', attrib={'name':self.name, 'link':self.link, 'type':self.type})\n return e\n\nclass Subsection(object):\n\n def __init__(self, name, link):\n self.name = name\n self.link = link\n\n self.subs = []\n\n def addSub(self, sub):\n self.subs.append(sub)\n\n def getXml(self):\n\n e = ET.Element('sub', attrib={'name':self.name, 'link':self.link})\n\n for s in self.subs:\n e.append(s.getXml());\n\n return e\n\nclass FunctionList(object):\n\n def __init__(self):\n\n # list of keyword items\n self.functions = []\n\n def addKeyword(self, kw):\n self.functions.append(kw)\n\n def getXml(self):\n\n e = ET.Element('functions')\n\n for f in self.functions:\n e.append(f.getXml())\n\n return e\n\nclass ChapterList(object):\n\n def __init__(self):\n\n # list of keyword items\n self.chapters = []\n\n def addChapter(self, sub):\n self.chapters.append(sub)\n\n def getXml(self):\n\n e = ET.Element('chapters')\n\n for f in self.chapters:\n e.append(f.getXml())\n\n return e\n\nclass DevhelpBook(object):\n\n xmlVersion = 2\n xmlns=\"http://www.devhelp.net/book\"\n\n def __init__(self, name, title, base, link, language=None, author=\"\"):\n self.name = name\n self.title = title\n self.base = base\n self.link = link\n self.language = language\n self.author = author\n\n self.funcs = FunctionList()\n self.chaps = ChapterList()\n\n def getXml(self):\n\n tree = ET.Element('book', attrib= {\n 'language':self.language,\n 'author':self.author,\n 'name':self.name,\n 'title':self.title,\n 'base':self.base,\n 'link':self.link,\n 'version':str(self.xmlVersion)\n })\n\n tree.append(self.chaps.getXml())\n tree.append(self.funcs.getXml())\n\n return ET.ElementTree(tree)\n\n def addChapter(self, sub):\n self.chaps.addChapter(sub)\n\n def addKeyword(self, kw):\n self.funcs.addKeyword(kw)\n\n def write(self, fn):\n tree = self.getXml()\n tree.write(fn, encoding='utf-8', standalone=False, pretty_print=True)\n", "id": "5196263", "language": "Python", "matching_score": 0.1946531981229782, "max_stars_count": 0, "path": "converters/common/pydevhelp/devhelp.py" }, { "content": "#! /usr/bin/env python\n\nimport plistlib\nimport sys\n\nimport argparse\nimport logging\n\n\ndef open_infile(filename):\n # can't use stdin, not seekable. 
Could read to a buffer, but..eh\n return open(filename, 'rb')\n\n\ndef open_outfile(filename):\n return open(filename, 'wb') if filename else sys.stdout.buffer\n\n\ndef close_file(handle):\n\n if handle is not sys.stdout and handle is not sys.stdin:\n handle.close()\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='A very simple PList editor.')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='show debugging information')\n parser.add_argument('-i', '--infile', required=True,\n help='the input file')\n parser.add_argument('-o', '--outfile',\n help='the output file, omit for stdout')\n parser.add_argument('-d', '--delete', action='append', metavar=\"KEY\",\n help='delete entries with the given key')\n parser.add_argument('-s', '--string', action='append', nargs=2,\n metavar=(\"KEY\", \"VALUE\"),\n help='add a string value or change an existing one')\n parser.add_argument('-t', '--true', action='append', metavar=\"KEY\",\n help='add a boolean true with the given key')\n parser.add_argument('-f', '--false', action='append', metavar=\"KEY\",\n help='add a boolean false with the given key')\n args = parser.parse_args()\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n logging.basicConfig(level=log_level)\n\n logging.debug(\"Modifying plist {}\".format(args.infile))\n\n ifhandle = open_infile(args.infile)\n plist = plistlib.load(ifhandle)\n close_file(ifhandle)\n\n if args.string:\n for key, val in args.string:\n logging.debug(\"Adding key {}: {}\".format(key, val))\n plist[key] = val\n\n if args.delete:\n for key in args.delete:\n logging.debug(\"Deleting key {}\".format(key))\n del plist[key]\n\n if args.true:\n for key in args.true:\n logging.debug(\"Adding true key {}\".format(key))\n plist[key] = True\n\n if args.false:\n for key in args.false:\n logging.debug(\"Adding false key {}\".format(key))\n plist[key] = False\n\n ofhandle = open_outfile(args.outfile)\n plistlib.dump(plist, ofhandle)\n close_file(ofhandle)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "4257668", "language": "Python", "matching_score": 1.727773904800415, "max_stars_count": 0, "path": "mod_plist.py" }, { "content": "#! 
/usr/bin/env python\n\nimport pydevhelp.devhelp as devhelp\nimport pydevhelp.dox_tagfile\n\nimport os\nimport argparse\n\nimport lxml.html as HTML\n\ndef getModules(fileList):\n \"\"\"\n These aren't in the tag file, and they don't even come out properly\n in Doxygen XML, just just manually mess with the files\n \"\"\"\n\n modList = {}\n\n for d in fileList:\n readModuleCrumbs(d, modList)\n\n mod = devhelp.Subsection(\"Modules\", modList['modules']['href'])\n\n addModules(mod, modList['modules'])\n\n return mod\n\ndef addModules(parent, module):\n\n for m in module['sub']:\n submod = module['sub'][m]\n s = devhelp.Subsection(m, submod['href'])\n parent.addSub(s)\n\n addModules(s, submod)\n\ndef readModuleCrumbs(file, modList):\n html = HTML.parse(file)\n\n # <li class=\"navelem\"><a class=\"el\" href=\"dir_e05d7e2b1ecd646af5bb94391405f3b5.html\">modules</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_fd421517ec8f709274e931dda731313f.html\">juce_core</a></li><li class=\"navelem\"><a class=\"el\" href=\"dir_0d31e411142695dc4add552e9ff0c68a.html\">streams</a></li> </ul>\n\n crumbs = html.iterfind(\"//li[@class='navelem']/a[@class='el']\")\n\n currMod = modList\n\n # add the crumbs to the module list, adding each level if not\n # there already\n for c in crumbs:\n\n name = c.text\n\n if name not in currMod:\n currMod[name] = {'name':name, 'href': c.attrib['href'], \"sub\": {}}\n\n currMod = currMod[name][\"sub\"]\n\nclass JuceDoc(object):\n\n def __init__(self, doc_src, doc_root):\n self.dsrc = doc_src\n\n self.db = devhelp.DevhelpBook(title = \"JUCE 4.3.0 Reference Manual\",\n name = \"juce-doc\",\n base = doc_root,\n link = \"index.html\",\n language = \"c++\")\n\n mods = self.getModules()\n self.db.addChapter(mods)\n\n tf = pydevhelp.dox_tagfile.DoxygenTagFile(os.path.join(self.dsrc, 'juce.tag'), filterFunc=self.filterTags)\n\n tf.extractKeywords( lambda kw: self.db.addKeyword(kw))\n\n nsChap = devhelp.Subsection('Namespaces', 'index.html')\n tf.extractNamespaces(lambda ns: nsChap.addSub(ns))\n self.db.addChapter(nsChap)\n\n classChap = devhelp.Subsection('Classes', 'classes.html')\n tf.extractClasses(None, lambda s: classChap.addSub(s))\n self.db.addChapter(classChap)\n\n def filterTags(self, tree):\n\n # the juce namespace is a bit wierd and just duplicates a subset of the classes\n for remove in tree.xpath(\"/tagfile/compound[@kind='namespace' and name = 'juce']\"):\n remove.getparent().remove(remove)\n\n # get rid of one of the duplicate littlfoot namespaces\n for remove in tree.xpath(\"/tagfile/compound[@kind='namespace' and name = 'juce::littlefoot']\"):\n remove.getparent().remove(remove)\n\n\n def getModules(self):\n \"\"\"\n These aren't in the tag file, and they don't even come out properly\n in Doxygen XML, just just manually mess with the files\n \"\"\"\n\n fileList = [os.path.join(self.dsrc, d) for d in os.listdir(self.dsrc) if d.startswith('dir_')];\n mod = getModules(fileList)\n\n return mod\n\n\n def output(self, fn):\n data = self.db.write(fn)\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Convert JUCE Doxygen documentation into devhelp documentation')\n\n parser.add_argument('-d', '--doc-src', metavar='DOCSRC',\n type=str, required=True,\n help='the root of existing generated JUCE Doxygen documentation (e.g. 
\"doxygen/doc\" under your JUCE path, or an installed location in /usr/share/doc)')\n parser.add_argument('-r', '--doc-root', metavar='DOCROOT',\n type=str, required=True,\n help='the root of the documentation when installed (probably an installed location in /usr/share/doc)')\n parser.add_argument('-o', '--output', metavar='OUTFILE',\n type=str, default='juce-doc.devhelp2',\n help='output devhelp2 file, default is current dir file called juce-doc.devhelp2')\n args = parser.parse_args()\n\n jd = JuceDoc(args.doc_src, args.doc_root)\n\n jd.output(args.output)\n", "id": "1914417", "language": "Python", "matching_score": 4.168394088745117, "max_stars_count": 0, "path": "converters/juce/python/juce_dox.py" }, { "content": "\"\"\"\nDoxygen Tag File converter\n\"\"\"\n\nfrom . import devhelp\n\nimport lxml.etree as ET\n\nclass DoxygenTagFile(object):\n\n def __init__(self, f, filterFunc):\n\n self.tags = ET.parse(f)\n\n filterFunc(self.tags)\n\n def _getLink(self, elem):\n\n link = elem.find('anchorfile').text\n\n anchor = elem.find('anchor').text\n\n if anchor:\n link += '#' + anchor\n\n return link\n\n def extractKeywords(self, handler):\n\n functions = self.tags.iterfind(\"//compound/member\")\n\n for f in functions:\n\n # there are the kinds of members that get keyword entries\n if f.attrib['kind'] not in ['function', 'enumvalue', 'typedef', 'define']:\n continue\n\n link = self._getLink(f)\n\n kw = devhelp.Keyword(f.find('name').text, link, f.attrib['kind'])\n\n handler(kw)\n\n def extractNamespaces(self, handler):\n\n ns = self.tags.iterfind(\"/compound[@kind='namespace']\")\n\n for n in ns:\n\n name = n.find('name').text\n\n s = devhelp.Subsection(name, n.find('filename').text)\n handler(s)\n\n def extractClasses(self, elem, handler):\n\n if elem is None:\n cs = self.tags.iterfind(\"./compound\")\n else:\n cs = elem.iterfind(\"./member\")\n\n for c in cs:\n\n kind = c.attrib['kind']\n\n # this should be in the xpath, but it says's 'or' is an invalid predicate\n # these are the element types that get subentries under 'classes'\n if kind not in ['class', 'struct', 'enumeration']:\n continue;\n\n name = c.find('name').text\n\n # we don't want template classes\n if name.endswith('>') and '<' in name:\n continue;\n\n try:\n fn = c.find('filename').text\n except AttributeError:\n fn = self._getLink(c)\n\n s = devhelp.Subsection(name, fn)\n handler(s)\n\n # add add sub elements in the same way\n self.extractClasses(c, lambda sub: s.addSub(sub))\n", "id": "2561949", "language": "Python", "matching_score": 0.5597792863845825, "max_stars_count": 0, "path": "converters/common/pydevhelp/dox_tagfile.py" }, { "content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCalibre Plugin for CrossRef metadata import\n\nSomewhat derived from https://github/maxchaos/calibre-crossref, but without\nthe dependency on Habanero and Requests.\n\nLicenced under the MIT licence\n\"\"\"\nfrom __future__ import division\n\nfrom calibre.ebooks.metadata.sources.base import Source\nfrom calibre.ebooks.metadata.book.base import Metadata\n\nimport json\nimport urllib\nimport datetime\n\n\nclass CrossrefSource(Source):\n \"\"\"Crossref Metatdata Source plugin\"\"\"\n\n name = 'CrossRef'\n description = 'Query crossref.org for metadata'\n action_type = 'current'\n supported_platforms = ['windows', 'osx', 'linux']\n author = '<NAME>'\n version = (1, 0, 0)\n minimum_calibre_version = (0, 8, 0)\n\n capabilities = frozenset(['identify'])\n touched_fields = frozenset(\n ['title', 'authors', 'publisher', 'pubdate', 
'series', 'series_index']\n )\n\n def identify(self,\n log, result_queue, abort,\n title=None, authors=None, identifiers=None,\n timeout=30):\n \"\"\"Implement the Calibre Metadata Source plugin indentify method:\n\n Take information about the work (title, authors, DOI, etc) and\n look up on Crossref for matches, present these to the user\n \"\"\"\n\n getter = CrossrefApiShim(CrossrefBasicApiProvider(), log)\n\n cands = []\n\n # Dispatch to the correct API interface according to the data we have\n if identifiers is not None and \"doi\" in identifiers:\n cands = getter.query_doi(identifiers['doi'])\n elif title:\n cands = getter.query_title(title, authors)\n\n for c in cands:\n result_queue.put(c)\n\n\nclass CrossrefApiShim(object):\n \"\"\"Shim class between the plugin and a CrossRef API provider. The provider\n is currently a basic implementation, but could be expanded to something\n like Habanero in future.\n\n This class returns Calibre-compatible metadata, translated from whatever\n data structures the Crossref API provides (JSON).\n \"\"\"\n\n def __init__(self, provider, log):\n self.api = provider\n self.log = log\n\n # list of fields we actually care about\n self.select = [\n 'DOI', 'title', 'author', 'container-title', 'issued',\n 'published-print', 'publisher',\n 'volume', 'issue', 'page']\n\n self.limit = 5\n\n def _log_json_error(self, json):\n self.log.error(\n 'Received message with status \"{status}\" '\n 'and message type {messagetype}.\\n '\n 'Message is \"{message}\".'\n .format(status=json['status'],\n messagetype=json['message-type'],\n message=json['message'][0]['message']))\n\n def query_doi(self, doi):\n \"\"\"\n Look up a work by DOI - should provide 0 or 1 results\n \"\"\"\n\n self.log.debug(\"Getting work metadata by DOI: %s\" % doi)\n\n id_dict = {'doi': doi}\n\n json = self.api.works(\n ids=id_dict, select=self.select, limit=self.limit)\n\n if json['status'] != 'ok':\n self._log_json_error(json)\n return None\n\n works = [json['message']]\n return [self._parse_work(work) for work in works]\n\n def query_title(self, title, authors):\n\n author_str = ' '.join(authors) if authors else None\n\n self.log.debug('Getting work by title \"{title}\" and authors \"{author}\"'\n .format(title=title, author=author_str))\n\n json = self.api.works(query_author=author_str, query_title=title,\n select=self.select, limit=self.limit)\n\n if json['status'] != 'ok':\n self._log_json_error(json)\n return None\n\n works = json['message']['items']\n return [self._parse_work(work) for work in works]\n\n def _parse_work(self, work):\n \"\"\"Convert a list of works returned in CrossRef JSON to Calibre\n Metadata objects\n \"\"\"\n\n title = work.get('title')[0]\n authors = self._parse_authors(work)\n\n # Now we have a title - init Calibre Metadata\n mi = Metadata(title, authors)\n\n doi = work.get('DOI')\n if doi:\n mi.set_identifier('doi', doi)\n\n pubdate = self._parse_pubdate(work)\n if pubdate:\n mi.pubdate = pubdate\n\n publisher = self._parse_publisher(work)\n if publisher:\n mi.publisher = publisher\n\n series = self._parse_series(work)\n if series:\n mi.series = series[0]\n mi.series_index = series[1]\n\n return mi\n\n def _parse_authors(self, json):\n json_authors = json.get('author', [])\n authors = []\n\n for author in json_authors:\n author_parts = [\n author.get('given', ''),\n author.get('family', '')]\n # Trim empty parts\n author_parts = [a for a in author_parts if a]\n authors.append(' '.join(author_parts))\n\n if not authors:\n authors = u'Unknown'\n\n return authors\n\n 
def _parse_pubdate(self, json):\n \"\"\"Get publication date from json info\"\"\"\n\n date = None\n\n issued = json.get('issued')\n if issued is not None:\n date = self._parse_date(issued)\n\n pubprint = json.get('published-print')\n if date is None and pubprint is not None:\n date = self._parse_date(pubprint)\n\n event = json.get('work')\n if date is None and event is not None:\n # prefer start date\n event_date = event.get('start') or event.get('end')\n\n if event_date is not None:\n date = self._parse_date(event_date)\n\n # return whatever we found, or None if all failed\n return date\n\n def _parse_date(self, json_date):\n\n date_parts = json_date.get('date-parts')[0][:3]\n\n if len(date_parts) < 1:\n return None\n\n date = None\n\n # Default dates\n date_ints = [int(date_parts[0]), 1, 1]\n\n for i in range(1, len(date_parts)):\n date_ints[i] = int(date_parts[i])\n\n from calibre.utils.date import utc_tz\n date = datetime.datetime(*(d for d in date_ints), tzinfo=utc_tz)\n return date\n\n def _parse_publisher(self, json):\n\n return json.get('publisher')\n\n def _parse_series(self, json):\n\n series = json.get('container-title')\n\n if series is None:\n return None\n\n series = series[0]\n\n vol = int(json.get('volume', '1'))\n\n # hack for issues A-B which happens sometimes\n iss = int(json.get('issue', '1').split('-')[0])\n\n print(vol, iss)\n\n s_index = vol + (iss / 100)\n\n return (series, s_index)\n\n\nclass CrossrefBasicApiProvider(object):\n \"\"\"\n Implement a very basic CrossRef API service, somewhat like Habanero, but\n just for the bits we need for this plugin.\n\n This class returns raw JSON from the Crossref API\n\n TODO: Allow to add email/API key\n \"\"\"\n\n def works(self, ids=None, query=None, limit=None, select=None, **kwargs):\n \"\"\"Basic implementation of the parts of the Habanero works interface\n that we need.\n\n If IDs is given with a DOI, this is used directly to return a single\n work.\n\n Otherwise, if a query is given, this is passed in directly\n \"\"\"\n\n if ids is not None and 'doi' in ids:\n return self._work_by_doi(ids['doi'])\n else:\n # construct a generic query request\n rq_data = {\n 'query': query,\n 'rows': limit,\n 'select': ','.join(select),\n }\n\n # strip empty fields\n rq_data = dict((k, v) for k, v in rq_data.items() if v)\n\n # Add kwarg queries (like query_title)\n rq_data.update(self._filter_query_dict(kwargs))\n\n # Rename query filters\n rq_data = self._rename_query_filters(rq_data)\n\n return self._works_by_query(rq_data)\n\n # unknown parameter combination\n return None\n\n def _filter_query_dict(self, x):\n \"\"\"Find query_ prefixed dict items and return dict of only them\"\"\"\n return dict((k, x[k]) for k, v in x.items() if k.find('query_') == 0)\n\n def _rename_query_filters(self, x):\n \"\"\"Transform kwarg parameter names into equivalents for the CrossRef\n API (stolen from Habanero)\"\"\"\n newkeys = [v.replace('container_title', 'container-title') for v in x]\n newkeys = [v.replace('query_', 'query.') for v in newkeys]\n mapping = dict(zip(x.keys(), newkeys))\n return {mapping[k]: v for k, v in x.items()}\n\n def _get_api_json(self, url):\n \"\"\"Get JSON from an API URL.\n\n Returns None when there's an error\n \"\"\"\n\n handle = urllib.urlopen(url)\n\n # Failed to get a good API hit - could just be unknown DOI, or a\n # malformed API query\n if handle.code != 200:\n return None\n\n data = handle.read()\n\n try:\n json_data = json.loads(data)\n except ValueError:\n # JSON decode error\n return None\n\n return 
json_data\n\n def _work_by_doi(self, doi):\n\n # Note: DOI is _not_ escaped, the slash is correct\n url = \"https://api.crossref.org/works/\" + doi\n\n return self._get_api_json(url)\n\n def _works_by_query(self, query_dict):\n\n query_str = urllib.urlencode(query_dict)\n url = \"https://api.crossref.org/works?{query}\".format(query=query_str)\n\n print(url)\n\n return self._get_api_json(url)\n", "id": "12580141", "language": "Python", "matching_score": 4.155075550079346, "max_stars_count": 0, "path": "crossref-source/crossref_source.py" }, { "content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n#\n# CrossRef plugin tests for calibre\n# Copyright 2018 <NAME> <<EMAIL>>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import (unicode_literals, division)\n\n# Tests\n# To run these tests, install the plugin, then use:\n# calibre-debug -e test.py\n\nif __name__ == '__main__':\n from calibre.ebooks.metadata.sources.test import (\n test_identify_plugin, title_test, authors_test)\n\n # List of wprks we match in the tests\n work_tests = {\n \"paskin1999\": [\n title_test(\"Toward unique identifiers\", exact=True),\n authors_test([\"<NAME>\"])\n ],\n }\n\n tests_list = [\n (\n {\n 'title': 'Toward unique identifiers',\n 'authors': [\"<NAME>\"]\n },\n work_tests[\"paskin1999\"]\n ),\n (\n {\n 'identifiers': {\n 'doi': '10.1109/5.771073',\n },\n },\n work_tests[\"paskin1999\"]\n ),\n ]\n\n test_identify_plugin(\"CrossRef\", tests_list,\n fail_missing_meta=False)\n\n\n# vim: expandtab:shiftwidth=4:tabstop=4:softtabstop=4:textwidth=80\n", "id": "8914612", "language": "Python", "matching_score": 2.812460422515869, "max_stars_count": 0, "path": "crossref-source/test.py" } ]
2.270117
vbgl
[ { "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n Coq Lexer\n\"\"\"\n\nfrom setuptools import setup\n\nentry_points = \"\"\"\n[pygments.lexers]\ncoqlex = coqlex:CoqLex\n\"\"\"\n\nsetup(name = 'pygments-coqlex',\n version = '0.1',\n description = __doc__,\n author = \"Vincent\",\n packages = ['coqlex'],\n entry_points = entry_points)\n\n", "id": "11944962", "language": "Python", "matching_score": 1.8287237882614136, "max_stars_count": 2, "path": "koko/setup.py" }, { "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n Goto Lexer\n\"\"\"\n\nfrom setuptools import setup\n\nentry_points = \"\"\"\n[pygments.lexers]\ngotolex = gotolex:GotoLex\n\"\"\"\n\nsetup(name = 'pygments-gotolex',\n version = '0.1',\n description = __doc__,\n author = \"Vincent\",\n packages = ['gotolex'],\n entry_points = entry_points)\n\n", "id": "7571330", "language": "Python", "matching_score": 2.514580249786377, "max_stars_count": 2, "path": "gotostar/setup.py" }, { "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n Grey-scale style\n\"\"\"\n\nfrom setuptools import setup\n\nentry_points = \"\"\"\n[pygments.styles]\ngs = gs:GS\n\"\"\"\n\nsetup(name = 'pygments-gs',\n version = '0.1',\n description = __doc__,\n author = \"Vincent\",\n packages = ['gs'],\n entry_points = entry_points)\n\n", "id": "6060828", "language": "Python", "matching_score": 2.086676597595215, "max_stars_count": 2, "path": "grize/setup.py" }, { "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n Not a formatter, for further processing.\n\"\"\"\n\nfrom setuptools import setup\n\nentry_points = \"\"\"\n[pygments.formatters]\nnoformat = noformat:NoFormat\n\"\"\"\n\nsetup(name = 'pygments-noformat',\n version = '0.1',\n description = __doc__,\n author = \"Vincent\",\n packages = ['noformat'],\n entry_points = entry_points)\n\n", "id": "2529777", "language": "Python", "matching_score": 2.164472818374634, "max_stars_count": 2, "path": "noformat/setup.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n Not a formatter, for further processing.\n\"\"\"\n\nfrom pygments.formatter import Formatter\n\nclass NoFormat(Formatter):\n u\"\"\"\n Format tokens as pairs type⋅content separated by a single\n tabulation.\n \"\"\"\n\n name = 'NoFormat'\n aliases = ['noformat']\n\n def format_unencoded(self, tokensource, outfile):\n for ttype, value in tokensource:\n lines = value.split('\\n')\n for line in lines[:-1]:\n outfile.write(str(ttype))\n outfile.write('\\t')\n outfile.write(unicode(line))\n # .replace('\\\\', '\\\\\\\\').replace('\\n', '\\\\n'))\n outfile.write('\\n')\n outfile.write('Token.Newline\\t\\n')\n outfile.write(str(ttype))\n outfile.write('\\t')\n outfile.write(unicode(lines[-1]))\n outfile.write('\\n')\n", "id": "4764521", "language": "Python", "matching_score": 0.24825651943683624, "max_stars_count": 2, "path": "noformat/noformat/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n A Goto lexer\n\"\"\"\n\nfrom pygments.lexer import RegexLexer\nfrom pygments.token import Comment, Keyword, Name, Punctuation, String, Text\n\nclass GotoLex(RegexLexer):\n \"\"\"\n A Goto lexer\n \"\"\"\n name = 'Goto'\n aliases = ['goto']\n filenames = ['*.gts']\n mimetypes = ['text/x-goto']\n\n tokens = {\n 'root': [\n (r'\\s+', Text),\n (r'\\(\\*', Comment, 'comment'),\n (r'([^.\\s]+|\\.\\S)+', Text),\n ],\n 'comment': [\n (r'[^(*)]+', Comment),\n (r'\\(\\*', Comment, '#push'),\n (r'\\*\\)', Comment, '#pop'),\n (r'[(*)]', Comment),\n ],\n }\n", "id": "6394988", 
"language": "Python", "matching_score": 3.487293004989624, "max_stars_count": 2, "path": "gotostar/gotolex/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n A Coq lexer\n\"\"\"\n\nfrom pygments.lexer import RegexLexer\nfrom pygments.token import Comment, Keyword, Name, Punctuation, String, Text\n\nclass CoqLex(RegexLexer):\n \"\"\"\n A Coq lexer\n \"\"\"\n name = 'Coq'\n aliases = ['coqq', 'koko']\n filenames = ['*.v']\n mimetypes = ['text/x-coq']\n\n tokens = {\n 'root': [\n (r'\\s+', Text),\n (r'\\(\\*', Comment.Special, 'comment'),\n (r'\\.(?=\\s)', Punctuation),\n (r'[()_,|=<>]', Punctuation),\n (r':=?', Punctuation),\n (r'({{|}})', Punctuation),\n (r'{\\|?', Punctuation, 'record'),\n (r'Proof\\s*\\.', Keyword, 'proof'),\n (r'((Local|Set|Unset|Open|Scope|Close|Hint|Resolve|Unfold|Constructors|Eval|Compute)\\s+)+', Keyword),\n (r'(Notation|Infix)', Keyword),\n (r'(Context|Variables?|Hypothes[ei]s)', Keyword),\n (r'(Require(\\s+(Import|Export))?\\s+)', Keyword, 'names'),\n (r'((Import|Export)\\s+)', Keyword, 'names'),\n (r'((Module(\\s+(Import|Export))?|Section|End|Transparent|Opaque|Arguments)\\s+)+', Keyword, 'name'),\n (r'((Let|Example|Definition|Fixpoint|Inductive|Parameter|Lemma|Theorem|Corollary|Class|Existing|Instances?|Record|Canonical|Structure)\\s+)+', Keyword, 'name'),\n (r'(match|let|in|if|then|else|return|with|end)(?=\\b)', Keyword),\n (ur'[α-ωa-zA-Z_][₁₂₃₄₅₆₇₈₉₀α-ωa-zA-Z0-9._]*', Text),\n (ur'(⟨|⌊|⌈|⌉|⌋|⟩)', Text),\n (ur'(⊤|⊥|∅)', Text),\n (r'([^.\\s]+|\\.\\S)+', Text),\n ],\n 'comment': [\n (r'[^(*)]+', Comment.Multiline),\n (r'\\(\\*', Comment.Special, '#push'),\n (r'\\*\\)', Comment.Special, '#pop'),\n (r'[(*)]', Comment.Multiline),\n ],\n 'name': [\n (ur'[α-ωa-zA-Z_][₁₂₃₄₅₆₇₈₉₀a-zA-Z0-9._]*', Name.Builtin, '#pop'),\n ],\n 'names': [\n (r'\\s+', Text),\n (ur'[a-zA-Z_][₁₂₃₄₅₆₇₈₉₀a-zA-Z0-9_]*', Name.Builtin),\n (ur'[a-zA-Z._]([₁₂₃₄₅₆₇₈₉₀a-zA-Z0-9_]*\\.)*[₁₂₃₄₅₆₇₈₉₀a-zA-Z0-9_]+', Name.Builtin),\n (r'\\.(?=\\s)', Punctuation, '#pop'),\n ],\n 'proof': [\n (r'(Qed|Defined)\\s*\\.', Keyword, '#pop'),\n (r'\\(\\*', Comment.Special, 'comment'),\n (r'\\s+', String),\n (r'\\.(?=\\s)', Punctuation),\n (r'([^.]+|\\.\\S)+', String),\n ],\n 'record': [\n (r'\\(\\*', Comment.Special, 'comment'),\n (ur'[α-ωa-zA-Z_][₁₂₃₄₅₆₇₈₉₀a-zA-Z0-9._]*', Name.Builtin, 'record-def'),\n (r'\\s+', Text),\n ('', Text, '#pop'),\n ],\n 'record-def': [\n (r'\\(\\*', Comment.Special, 'comment'),\n (r'\\s+', Text),\n (r';', Punctuation, '#pop'),\n (r'({{|}})', Punctuation),\n (r'{\\|?', Punctuation, 'record'),\n (r'}', Punctuation, '#pop'),\n (r':=?', Punctuation),\n (r'=>', Punctuation),\n (r'\\'', Punctuation),\n (r'[(),<>=!?_|]', Punctuation),\n (r'(match|let|in|if|then|else|return|with|end)(?=\\b)', Keyword),\n (ur'[α-ωa-zA-Z_][₁₂₃₄₅₆₇₈₉₀α-ωa-zA-Z0-9._]*', Text),\n (ur'(∀|∃|∈|→|∨|∧)', Text),\n (r'([^(]\\*|[^(;}]|\\([^*])+', Text),\n ],\n }\n", "id": "10783526", "language": "Python", "matching_score": 2.4042184352874756, "max_stars_count": 2, "path": "koko/coqlex/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n Style gris\n\"\"\"\n\nfrom pygments.style import Style\nfrom pygments.token import Keyword, Name, Comment, String, Error, \\\n Number, Operator, Generic\n\nclass GS(Style):\n \"\"\"\n A grey-scale style\n \"\"\"\n default_style = \"\"\n styles = {\n Comment.Special: '#444',\n Comment.Preproc: '#000',\n Comment.Multiline: 'italic #444',\n Comment.Single: 'italic #444',\n Keyword: '#444',\n Name.Builtin: 'bg:#eee #000',\n String: 'italic #000'\n }\n", "id": "6480050", 
"language": "Python", "matching_score": 2.0127134323120117, "max_stars_count": 2, "path": "grize/gs/__init__.py" } ]
2.125575
diwanawabi
[ { "content": "# Date: 11/02/2020\r\n# Purpose: This program is to rewrite the payroll using methods that prints instructions\r\n# Start:\r\n# Define the marital status function\r\ndef getMaritalStatus():\r\n # Return the boolean for the marital status\r\n print(bool(getMarital))\r\n\r\n# Define the children function\r\ndef askNumKids():\r\n print(\"You have \", numKids ,\" kids so you get \"\\\r\n , extraPayment, \" extra payment for total number of kids.\")\r\n \r\n# Define the hours worked function\r\ndef getHours():\r\n # Return the hours\r\n print(hoursWorked)\r\n\r\n# Define the rate function\r\ndef calcRate(getMarital, hoursWorked):\r\n # Return pay rate for the user\r\n print(\"Your pay rate is \", payRate ,\" dollars per hour.\")\r\n\r\n\r\n# Define the gross pay function\r\ndef calcGrossPay(payRate, hoursWorked):\r\n print(\"Your gross pay is \", round(grossPay, 3) ,\" dollars.\")\r\n\r\n# Define the net pay function\r\ndef calcNetPay(grossPay, extraPayment):\r\n print(\"Your net pay is \", netPay ,\" dollars.\\n\")\r\n\r\n# Define the payroll function\r\ndef printStub(getMarital, payRate, numKids, hoursWorked, grossPay, netPay):\r\n print(\"Your payroll information is accordingly: \")\r\n # Print the payroll information if single\r\n if getMarital == 0:\r\n print(\"You have a net pay of \", netPay ,\" dollars, and gross pay of \",\\\r\n grossPay ,\" dollars because you are single and got \", payRate ,\\\r\n \" dollars pay rate per hour and got \", numKids ,\" kids and you \"\\\r\n \"worked \", hoursWorked ,\" hours.\")\r\n # Print the payroll information if married\r\n elif getMarital == 1:\r\n print(\"You have a net pay of \", netPay ,\" dollars, and gross pay of \",\\\r\n grossPay ,\" dollars because you are married and got \", payRate ,\\\r\n \" dollars pay rate per hour and got \", numKids ,\" kids and you \"\\\r\n \"worked \", hoursWorked ,\" hours.\")\r\n \r\n# Ask for an input to the marital status\r\n# 0 for single and 1 for married\r\ngetMarital = int(input(\"Please enter 0 if you are single and 1 if you are married: \"))\r\n# If user inputs 0 which means single\r\n# Return False boolean\r\nif getMarital == 0:\r\n False\r\n getMarital = 0\r\n# If the user inputs 1 then it will be true and return True boolean\r\n# If not then it will be false and asked for another input\r\nwhile getMarital != 1 and 0:\r\n False\r\n print(\"!!!!WRONG VALUE PLEASE TRY AGAIN!!!!\")\r\n getMarital = int(input(\"Please enter 0 if you are single and 1 if you are married: \"))\r\n\r\n# Ask for the number of hours worked\r\nhoursWorked = float(input(\"Please enter number of hours that you worked: \"))\r\n\r\n# Ask for the number of children the user has\r\nnumKids = int(input(\"Please enter number of children you have: \"))\r\n# If the number of kids is 1 to 4\r\nif 0 < numKids == 4:\r\n extraPayment = hoursWorked * numKids\r\n# If the number of kids is more than 4 just calculate the extra payment for\r\n# First 4 kids\r\nelif numKids > 4:\r\n extraPayment = 4 * hoursWorked\r\n# If the user enters 0 or negative then the extra payment is 0\r\nelse:\r\n extraPayment = 0\r\n\r\n# Pay rate formula\r\npayRate = 10 + getMarital * 5\r\n\r\n# Gross pay formula\r\ngrossPay = payRate * hoursWorked\r\n\r\n# Net pay formula\r\nnetPay = grossPay + extraPayment\r\n\r\n# Call all the functions\r\ngetMaritalStatus();\r\ngetHours();\r\naskNumKids();\r\ncalcRate(getMarital, hoursWorked);\r\ncalcGrossPay(payRate, hoursWorked);\r\ncalcNetPay(grossPay, extraPayment);\r\nprintStub(getMarital, payRate, numKids, 
hoursWorked, grossPay, netPay)\r\n", "id": "8860661", "language": "Python", "matching_score": 4.4864959716796875, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #9.py" }, { "content": "# Author: <NAME>\r\n# Date: 11/23/2020\r\n# Purpose: This program is to (hopefully) make the last change to the payroll program\r\n# Start:\r\n# Open the file which has the information\r\nmyFile = open(\"payrollFile.txt\", \"r\")\r\nmyFileOutput = open(\"payrollFileOutput.txt\", \"w\")\r\n\r\n# Read the marital status of the employee\r\nmaritalStatus = myFile.readline()\r\nwhile maritalStatus != str(maritalStatus):\r\n maritalStatus = int(maritalStatus)\r\n\r\n# Read the number of hours worked in the line on the file that we opened\r\nhoursWorked = myFile.readline()\r\nwhile hoursWorked != str(hoursWorked):\r\n hoursWorked = float(hoursWorked)\r\n\r\n# Read the number of kids in the line on file that we opened\r\nnumKids = myFile.readline()\r\nwhile numKids != str(numKids):\r\n numKids = int(numKids)\r\n # If the number of kids is between 1 to 4\r\n if numKids > 0 or numkids == 4:\r\n extraPayment = hoursWorked * numKids\r\n # If the number of kids is 4 or bigger\r\n elif numKids > 4:\r\n extraPayment = 4 * hoursWorked\r\n # If the number of kids is smaller or equal to zero\r\n # The extra payment is 0\r\n else:\r\n extraPayment = 0\r\n\r\nfor readline in myFile:\r\n # Calculate the pay rate formula\r\n payRate = 10 + maritalStatus * 5\r\n\r\n # Calculate the gross pay formula\r\n grossPay = payRate * hoursWorked\r\n\r\n # Calculate the net pay formula\r\n netPay = grossPay + extraPayment\r\n # Print the payroll information if single\r\n\r\nif maritalStatus == 0:\r\n print(\"You have a net pay of \", netPay ,\" dollars, and gross pay of \",\\\r\n grossPay ,\" dollars because you are single and got \", payRate ,\\\r\n \" dollars pay rate per hour and got \", numKids ,\" kids and you \"\\\r\n \"worked \", hoursWorked ,\" hours.\", file = myFileOutput)\r\n # Print the payroll information if married\r\nelif maritalStatus == 1:\r\n print(\"You have a net pay of \", netPay ,\" dollars, and gross pay of \",\\\r\n grossPay ,\" dollars because you are married and got \", payRate ,\\\r\n \" dollars pay rate per hour and got \", numKids ,\" kids and you \"\\\r\n \"worked \", hoursWorked ,\" hours.\", file = myFileOutput)\r\n\r\n # Close files\r\nmyFile.close()\r\nmyFileOutput.close()\r\n\r\n#END\r\n", "id": "2350704", "language": "Python", "matching_score": 2.443948984146118, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #10.py" }, { "content": "# Author: <NAME>\r\n# Date: 10/09/2020\r\n# Purpose: This program is to read payroll with income tax\r\n# Start:\r\nwhile True:\r\n # Ask the user if they are married or single\r\n # If the user is married tell them to enter 1 and if single ask them to enter 0\r\n status = int(input(\"Please enter 0 if you are single and 1 if you are married: \"))\r\n # If the user is single (enters 0)\r\n if status == 0:\r\n # Ask how many hours did he/she worked\r\n hours = float(input(\"How many hours did you work? \"))\r\n # Make sure the user doesn't have any children\r\n children = int(input(\"Do you have any children? If yes how many if no enter 0: \"))\r\n # If the user is married (enters 1)\r\n elif status == 1:\r\n # Ask the user how many children he/she has\r\n children = int(input(\"How many children do you have? \"))\r\n # Ask how many hours did he/she worked\r\n hours = float(input(\"How many hours did you work? 
\"))\r\n # If the user enters neither 0 nor 1\r\n else:\r\n # Tell them that they entered an invalid value\r\n print (\"Invalid value please try again\")\r\n # And ask for the status again until they enter a valid value which is either 0 or 1\r\n continue\r\n # Net payment formula\r\n netPay = (10 * hours) + (5 * status) + (children / 4)\r\n # If the netPay is greater than or equal to \r\n if netPay >= 600:\r\n # The tax rate would be 10%\r\n netPay = netPay * 10 / 100\r\n # Print the final net payment value\r\n print (\"Your net payment is \", netPay ,\" dollars without tax.\")\r\n # If the netPay is greater than or equal to 400\r\n elif netPay >= 400:\r\n # The tax rate would be 8%\r\n netPay = netPay * 8 / 100\r\n # Print the final net payment value\r\n print (\"Your net payment is \", netPay ,\" dollars without tax.\")\r\n # If the netPay is greater than or equal to 300\r\n elif netPay >= 300:\r\n # The tax rate would be 5%\r\n netPay = netPay * 5 / 100\r\n # Print the final net payment value\r\n print (\"Your net payment is \", netPay ,\" dollars without tax.\")\r\n # If the netPay is smaller than 300\r\n elif netPay < 300:\r\n # Print the final net payment value\r\n print (\"Your net payment is \", netPay ,\" dollars with zero tax.\")\r\n break\r\n \r\n", "id": "9471523", "language": "Python", "matching_score": 2.02840518951416, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #5.py" }, { "content": "# Date: 10/03/2020\r\n# Purpose: This program is to repeat until the user enters a number that isnt positive for hours\r\n# Ask the user for number of hours that they worked\r\nhoursWorked = float(input(\"Please enter the number of hours that you worked: \"))\r\n\r\n# While the hoursWorked is positive print the statements below\r\nwhile hoursWorked > 0:\r\n # Ask fo the number of dependents\r\n dependents = int(input(\"Please enter the number of dependents that you have: \"))\r\n # If dependents are 3 or more than 3\r\n if dependents >= 3:\r\n insurance = 35\r\n else: \r\n insurance = 0\r\n # The pay rate\r\n payRate = 16.78\r\n # The social tax\r\n socialTax = 6/100 or 0.06\r\n # The federal tax\r\n federalTax = 14/100 or 0.14\r\n # The state tax\r\n stateTax = 5/100 or 0.05\r\n # Union dues\r\n unionDues = 10\r\n # Total taxes\r\n totalTaxes = (socialTax + federalTax + stateTax)\r\n # Total wage\r\n wage = hoursWorked * 40 + 1.5 * payRate * (hoursWorked + 40)\r\n # Net Wage\r\n netWage = wage - (wage * totalTaxes) - insurance - unionDues\r\n # Print all the information\r\n print (\"Your total wage is \", wage ,\" and your net wage is \",\\\r\n netWage ,\" dollars because you worked \", hoursWorked ,\\\r\n \" hours and have \", dependents ,\" dependents which \",\\\r\n \"you should pay \", insurance ,\" dollars for their \",\\\r\n \"insurance and have to pay a total of \", totalTaxes ,\\\r\n \" taxes which is consist of \", socialTax ,\" percent \"\\\r\n \"social tax, \", federalTax ,\" percent federal tax, and \",\\\r\n stateTax ,\" percent state tax with \", unionDues ,\" dollars union dues.\")\r\n # Ask for the hours work again \r\n hoursWorked = float(input(\"Please enter the number of hours that you worked: \"))\r\n \r\n# If the hours worked is non positive print the statement below\r\nelse:\r\n print (\"Error! 
You entered a non positive value so the program ends.\")\r\n", "id": "2385832", "language": "Python", "matching_score": 1.521480917930603, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #7.py" }, { "content": "# Date: 10/14/2020\r\n# Purpose: This program is to replace 'is' occurance in line that the user inputs with 'was'\r\n# this program will run until the user inputs 'The end' phrase\r\n# Start:\r\n# Ask the user for an input\r\ninputLine = input(\"Please write some text: \")\r\n# The line which makes the program end\r\nterminationLine = \"The end\"\r\n\r\n# Open a while loop for when the user inputs something else other than 'The end'\r\nwhile inputLine != terminationLine:\r\n # Replace the 'is' with the 'was'\r\n # If the 'is' appears in the start and is not capitalized\r\n if inputLine[:3] == \"is \":\r\n inputLine = \"was\" + inputLine[2:]\r\n # If the 'is' appears in the start and is capitalized\r\n if inputLine[:3] == \"Is \":\r\n inputLine = \"Was\" + inputLine[2:]\r\n # If the 'is' appears in the end\r\n if inputLine[-3:] == \" is\":\r\n inputLine = inputLine[:-2] + \"was\"\r\n # If the 'is' appears in the middle\r\n inputLine = inputLine.replace(\" is \", \" was \")\r\n # Print out the statement\r\n print (inputLine)\r\n # Ask for the input of the user again\r\n inputLine = input(\"Please write some text: \")\r\n \r\n# If the user inputs 'The end'\r\nprint (\"Your program has been ended!\")\r\n", "id": "4856425", "language": "Python", "matching_score": 1.1668062210083008, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #8.py" }, { "content": "# Date: 12/10/2020\r\n# Purpose: Programming Assignment #11\r\n# This program is to \"Calculate a diver's score in a competition\"\r\n# START:\r\n\r\n# 1.\r\n# Define the degree function\r\ndef degree():\r\n return difficultyLevel\r\n# 2.\r\n# Define the \r\ndef calculation(difficultyLevel):\r\n # Judges scores for 7 judges\r\n # Create a list\r\n judgeScores = []\r\n # Ask for the judges' scores 7 times\r\n for judgeScore in range(7):\r\n # Ask for score from judges\r\n judgeScore = float(input(\"Judge please enter a score for the diver: \"))\r\n # Check for bad data\r\n while judgeScore < 0 or judgeScore > 10:\r\n # Statement that says its wrong\r\n print(\"Wrong score for the diver in the game!\"\\\r\n \"\\nPlease Try again!\")\r\n # Ask the judge score again until correct\r\n judgeScore = float(input(\"Judge please enter a score for the diver: \"))\r\n # Add the values to the list\r\n judgeScores.append(judgeScore)\r\n judgeScores.sort()\r\n # Print out the list\r\n print(\"The list of scores in order is: \", judgeScores)\r\n \r\n # 3.\r\n # Throw out the highest and lowest scores\r\n judgeScores.remove(min(judgeScores))\r\n judgeScores.remove(max(judgeScores))\r\n # Print the list again\r\n print(\"The list of scores in order and clear form: \", judgeScores)\r\n\r\n # 4.\r\n # To find sum of scores later\r\n sumScores = sum(judgeScores)\r\n # Find the sum of the remaining elements\r\n print(\"The sum of the remaining elements is: \", sumScores)\r\n\r\n # 5.\r\n # Find total by multiplying degree of difficulty with sum and 0.6\r\n total = sumScores * difficultyLevel\r\n total = total * 0.6\r\n print(\"The diver's score is: \", total, \" as a total.\")\r\n\r\n# 6. \r\n# Main program ask for the 1. 
Difficulty level\r\n# Ask for the degree of difficulty of the dive make a defined function\r\ndifficultyLevel = float(input(\"Please enter the degree level of difficulty\"\\\r\n \" for the diving game between 1.2 and 3.8: \"))\r\n# Check for bad data\r\n# The diving's difficulty should be betweeen 1.2 to 3.8 points\r\nwhile difficultyLevel < 1.2 or difficultyLevel > 3.8:\r\n # Print the statement that says its wrong\r\n print(\"You entered a wrong number please try again!\")\r\n # Ask for the degree of difficulty again until the right answer is entered\r\n difficultyLevel = float(input(\"Please enter the degree level of difficulty\"\\\r\n \" for the diving game: \"))\r\n\r\n# 7.\r\n# Call the functions\r\ndegree();\r\ncalculation(difficultyLevel);\r\n\r\n# END\r\n", "id": "12027846", "language": "Python", "matching_score": 1.8208141326904297, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #11.py" }, { "content": "# Author: <NAME>\r\n# Date: 10/03/2020\r\n# Purpose: This program is to find out the batting average for the starting line-up\r\n# Start:\r\n# Define the loop \r\n# Ask for the number of times at bat\r\nfor i in range(9):\r\n # Ask for the number of times at the bat of the player\r\n numTimesBat = int(input(\"Please enter the number of times at bat of \"\\\r\n \"this player: \"))\r\n # Ask for the number of hits of this player\r\n numHits = int(input(\"Please enter the number of hits of this player: \"))\r\n # Batting Average formula to calculate \r\n battingAverage = numHits / numTimesBat\r\n # If the Batting Average is between 0 and 1 then it is a true statement\r\n if 0 <= battingAverage <= 1:\r\n True\r\n # Else it is false\r\n else:\r\n print (\"Error! This player has \", numHits ,\" and an unexpected \"\\\r\n \"batting average. Please enter the values for the next player.\")\r\n False\r\n continue\r\n # Print the number of hits and batting average of this player\r\n print (\"This player has \", numHits ,\" number of hits at bats and \",\\\r\n # Round Batting Average to 3 decimal places\r\n \" a batting average of \", round(battingAverage, 3))\r\n \r\n#END\r\n", "id": "10287765", "language": "Python", "matching_score": 1.6633437871932983, "max_stars_count": 0, "path": "Programming Assignments/Programming Assignment #6.py" } ]
1.820814
viral-medialab
[ { "content": "# -*- coding: utf-8 -*-\n'''\n\tExtends the FeedGenerator to produce segments specific feeds.\n'''\n\nfrom lxml import etree\nfrom feedgen.ext.base import BaseExtension,BaseEntryExtension\n\nSEGMENT_NS = 'https://github.com/Viral-MediaLab/superglue-rss'\n\nclass SegmentExtension(BaseExtension):\n\t'''FeedGenerator extension for segment feeds.\n\t'''\n\tdef extend_ns(self):\n\t\treturn {'segment' : SEGMENT_NS}\n\n\nclass SegmentEntryExtension(BaseEntryExtension):\n\t'''FeedEntry extention for segment feeds\n\t'''\n\tdef __init__(self):\n\t\tself.__segment_duration = None\n\n\tdef extend_rss(self, entry):\n\t\t'''Add additional fields to an RSS item.\n\t\t:param feed: The RSS item XML element to use.\n\t\t'''\n\t\tif self.__segment_duration:\n\t\t\tduration = etree.SubElement(entry, '{%s}duration' % SEGMENT_NS)\n\t\t\tduration.text = self.__segment_duration\n\n\tdef duration(self, segment_duration=None):\n\t\t'''Get or set the duration of the video segment.\n\t\t:param segment_duration: The duration of the video segment in seconds.\n\t\t:returns: The duration of the video segment in seconds.\n\t\t'''\n\t\tif not segment_duration is None:\n\t\t\tself.__segment_duration = segment_duration\n\t\treturn self.__segment_duration\n", "id": "12594135", "language": "Python", "matching_score": 2.088341474533081, "max_stars_count": 0, "path": "segment_extension.py" }, { "content": "import time\nimport datetime\nimport pymongo\nimport pytz\nfrom feedgen.feed import FeedGenerator\nfrom pymongo import MongoClient\nfrom channels import get_channel_data\nfrom segment_extension import SegmentExtension, SegmentEntryExtension\n\nDAY = 86400000\nHOUR = 3600000\nTEXT_LENGHT_THRESHOLD = 10\ndef millis():\n return int(round(time.time() * 1000))\n\ndef millis_since(num_days=2):\n return millis() - num_days*DAY\n\n\ndef get_segments_by_channel (channel, since_time=10):\n MONGO_URL = 'mongodb://um.media.mit.edu:27017/super-glue'\n collection = MongoClient(MONGO_URL).get_default_database()['media']\n videos_limit = 100\n pipe = {\n \"story_segments\":{\"$exists\": True},\n \"is_news\":{\"$eq\": True},\n \"channel\": {\"$eq\":channel}}\n if since_time>0:\n pipe[\"date_added\"] = {\"$gt\":millis_since(since_time)}\n limit = videos_limit\n media_with_segments = collection.find(\n pipe,\n sort=[('date_added', pymongo.DESCENDING)],\n limit=videos_limit)\n else:\n media_with_segments = collection.find(\n pipe,\n sort=[('date_added', pymongo.DESCENDING)])\n segments = []\n for media in media_with_segments:\n for i, segment in enumerate(media['story_segments']):\n if 'text' in segment and len(segment['text'])>TEXT_LENGHT_THRESHOLD:\n segments.append({\n 'title':media['title'],\n 'link': \"%s#t=%.2f,%.2f\"%(media['media_url_no_comm'],segment['start']/1000.0,segment['end']/1000.0),\n 'description':segment['text'],\n 'pubDate':media['date_added'],\n 'guid': '%s_%d'%(media['_id'],i),\n 'segment:duration': segment['end']/1000.0-segment['start']/1000.0,\n 'enclosure': segment['thumbnail_image'] if 'thumbnail_image' in segment else ''\n })\n return segments\n\ndef generate_rss_feed (channel, since_time=10):\n channel_data = get_channel_data(channel)\n if channel_data:\n segments = get_segments_by_channel(channel, since_time)\n fg = FeedGenerator()\n fg.title(channel_data['name'])\n fg.link(href=channel_data['link'])\n fg.description(channel_data['description'])\n\n fg.register_extension('segment', SegmentExtension, SegmentEntryExtension)\n\n for segment in segments:\n fe = fg.add_entry()\n fe.guid(segment['guid'])\n 
fe.title(segment['title'])\n fe.description(segment['description'])\n fe.enclosure(segment['enclosure'], 0, 'image/jpeg')\n fe.link(href=segment['link'])\n fe.pubdate(datetime.datetime.fromtimestamp(segment['pubDate']/1000.0, pytz.utc))\n fe.segment.duration(str(segment['segment:duration']))\n\n return fg.rss_str(pretty=True)\n else:\n # channel was not found\n return \"<title>error: channel not found</title>\"\n", "id": "8740107", "language": "Python", "matching_score": 2.89574933052063, "max_stars_count": 0, "path": "generate_rss.py" }, { "content": "import csv\nimport urllib2\n\ndef get_channel_data(channel):\n\n url = 'https://docs.google.com/spreadsheets/d/1UtA_exZn515gYwv1Sfs0JBq2nOWnlR42Fb0Vw3eMpTs/pub?output=csv'\n response = urllib2.urlopen(url)\n cr = csv.reader(response)\n\n for row in cr:\n if row[0]==channel:\n return {\n \"name\": row[1],\n \"link\":row[4],\n \"description\": row[5]\n }\n # nothing was found, return None\n return None\n\nif __name__ == '__main__':\n main()\n", "id": "7105423", "language": "Python", "matching_score": 0.3302830159664154, "max_stars_count": 0, "path": "channels.py" }, { "content": "# a script to generate an RSS file fore each channel from the begining of time,\n# in order to backfill Media Cloud data\nfrom generate_rss import generate_rss_feed\n\nchannels = [\n\"002\",\n\"025\",\n\"202\",\n\"206\",\n\"229\",\n\"231\",\n\"232\",\n\"237\",\n\"242\",\n\"244\",\n\"249\",\n\"264\",\n\"276\",\n\"278\",\n\"284\",\n\"349\",\n\"350\",\n\"351\",\n\"353\",\n\"355\",\n\"356\",\n\"357\",\n\"360\",\n\"375\"]\n\nfor channel in channels:\n rss_str = generate_rss_feed(channel, since_time=-1)\n filename = \"backfilling_data/%s.rss\"%channel\n if rss_str:\n with open(filename, \"w\") as f:\n f.write(rss_str)\n else:\n print \"channel %s is empty\"%channel\n", "id": "6065023", "language": "Python", "matching_score": 2.5221285820007324, "max_stars_count": 0, "path": "backfilling_data.py" }, { "content": "from flask import Flask, render_template, jsonify\nfrom generate_rss import generate_rss_feed\n\napp = Flask(__name__)\n\n@app.route(\"/<channel>\")\ndef get_channel_rss_feed(channel):\n rss_str = generate_rss_feed(channel)\n return rss_str, 200, {'Content-Type': 'text/xml; charset=utf-8'}\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "id": "10870604", "language": "Python", "matching_score": 2.5082671642303467, "max_stars_count": 0, "path": "app.py" } ]
2.508267
OSP4DISS
[ { "content": "#! /usr/bin/env python3\n# Usage: python3 IncludeCode.py < inputFile > outputFile\n# Example: python3 IncludeCode.py < 000.pmd > 000.md\n#\n# Based on: https://fractallambda.com/2011/08/17/pincpy-including-files-and-script-output.html\n\nimport os\nimport re\nimport sys\n\ninclude_pattern = re.compile(\"(\\(>)(.*)(<\\))\")\n\nif __name__ == \"__main__\":\n output_lines = []\n\n for line in sys.stdin:\n include_match = re.match(include_pattern, line)\n if include_match:\n filename = include_match.group(2).strip()\n if os.path.isfile(filename):\n with open(filename, \"r\") as file:\n output_lines.extend(file.readlines())\n else:\n output_lines.append(\"\")\n else:\n output_lines.append(line)\n\n sys.stdout.writelines(output_lines)\n", "id": "1584888", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "assets/scripts/IncludeCode.py" } ]
0
Tresillo2017
[ { "content": "import serial\n\nser = serial.Serial('COM1',9600)\nser.open()\nwhile True:\n new_data = ser.read()\n if new_data:\n # flush serial\n ser.flushInput()\n print(\"new data:\", new_data)\n ser.write(b\"ACK\\r\")", "id": "7484493", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "python/server.py" }, { "content": "preguntas = [\n \"¿Quién descubrió América?\", # pregunta 1\n \"¿Cuándo terminó la Segunda Guerra Mundial?\", # pregunta 2 \n \"¿Cuántas especies de perros hay?\", # pregunta 3\n \"¿Quién salvó al pueblo hebreo del faraón egipcio?\", # pregunta 4 \n \"¿Quién es el tenista con más Grand Slam?\", # pregunta 5\n \"¿Cómo se llama el actor de <NAME>?\", # pregunta 6\n \"¿En qué continente se encuentran las Islas del Caribe?\", # pregunta 7\n \"¿Quién inventó la bombilla?\", # pregunta 8\n \"¿Cuál es el territorio con mayor población?\", # pregunta 9\n \"¿En qué ciudad española está el estadio conocido como Wanda Metropolitano?\", # pregunta 10\n \"¿Cuál es el río más caudaloso del mundo?\", # pregunta 11\n \"¿Quienes fueron los dos hermanos fundadores de Roma?\", # pregunta 12\n \"¿Quién era conocido como “el manco de Lepanto”?\", # pregunta 13\n \"¿Quién fue el famoso cantante del grupo musical Queen?\", # pregunta 14\n \"¿Quién es el YouTuber con más suscriptores?\", # pregunta 15\n \"¿Qué gran artista pintó la Capilla Sixtina?\", # pregunta 16\n \"¿Quién descubrió los satélites de Júpiter?\", # pregunta 17\n \"¿Qué país tiene más copas del mundo de fútbol?\", # pregunta 18\n \"¿De qué país es <NAME>?\", # pregunta 19\n \"¿Dónde jugó en la mayoría de su carrera <NAME>?\", # pregunta 20\n \"¿Qué equipo de fútbol tiene más Champions?\", # pregunta 21\n \"¿En qué continente se encuentra Suiza?\", # pregunta 22\n \"¿Cómo se llama el nuevo virus que nos ha tenido en pandemia durante 2 años?\", # pregunta 23\n \"¿Cuál era la moneda utilizada en España antes del Euro?\", # pregunta 24\n \"¿Cuál es la película más taquillera de la historia?\", # pregunta 25\n \"¿En qué año ganó España el mundial?\", # pregunta 26\n \"¿En qué continente se encuentra China?\", # pregunta 27\n \"¿Qué idioma es el más utilizado en el planeta?\", # pregunta 28\n \"¿Cómo se llamaba la enamorada de Don Quijote?\", # pregunta 29\n \"¿En qué serie sale el Demogorgon?\", # pregunta 30\n \"¿En qué ciudad se encuentra el Camp Nou?\", # pregunta 31\n \"¿Cómo se llama el rey de España?\", # pregunta 32\n \"¿Qué país está entre Perú y Colombia?\", # pregunta 33\n \"¿Quién fue la ganadora de los Grammy latinos en 2018?\", # pregunta 34\n \"¿Qué mide en París 333 metros?\", # pregunta 35\n \"¿Cuál es el pájaro símbolo de La Paz?\", # pregunta 36\n \"¿Cuántos jugadores tiene un equipo de voleibol?\", # pregunta 37\n \"¿Qué mes tiene menos días?\", # pregunta 38\n \"¿Quién fue el ganador de MotoGP en 2017?\", # pregunta 39\n \"¿Qué planeta se encuentra más cerca del Sol?\" # pregunta 40\n]\n\nans = [ \n 1, # pregunta 1\n 1, # pregunta 2\n 1, # pregunta 3\n 2, # pregunta 4 \n 1, # pregunta 5\n 1, # pregunta 6\n 2, # pregunta 7\n 1, # pregunta 8\n 1, # pregunta 9\n 1, # pregunta 10\n 1, # pregunta 11\n 1, # pregunta 12 \n 2, # pregunta 13\n 1, # pregunta 14\n 1, # pregunta 15\n 1, # pregunta 16\n 2, # pregunta 17\n 2, # pregunta 18\n 2, # pregunta 19\n 2, # pregunta 20\n 1, # pregunta 21\n 2, # pregunta 22\n 2, # pregunta 23\n 2, # pregunta 24\n 2, # pregunta 25\n 2, # pregunta 26\n 2, # pregunta 27\n 2, # pregunta 28\n 1, # pregunta 29\n 2, # pregunta 30\n 2, # pregunta 31\n 2, # pregunta 
32\n 2, # pregunta 33\n 1, # pregunta 34\n 2, # pregunta 35\n 2, # pregunta 36\n 2, # pregunta 37\n 2, # pregunta 38\n 2, # pregunta 39\n 2, # pregunta 40\n] \n\nopcionesa = [ \n \"<NAME>\", # pregunta 1\n\n \"1945\", # pregunta 2\n\n \"343\", # pregunta 3\n\n \"Abraham\", # pregunta 4\n\n \"<NAME>\", # pregunta 5\n\n \"<NAME>\", # pregunta 6\n\n \"Europa\", # pregunta 7\n\n \"<NAME>\", # pregunta 8\n\n \"China\", # pregunta 9\n\n \"Madrid\", # pregunta 10\n\n \"Amazonas\", # pregunta 11\n\n \"<NAME>\", # pregunta 12\n\n \"<NAME>\", # pregunta 13\n\n \"<NAME>\", # pregunta 14\n\n \"Pewdiepie\", # pregunta 15\n\n \"<NAME>\", # pregunta 16\n\n \"Mozart\", # pregunta 17\n\n \"Francia\", # pregunta 18\n\n \"Francia\", # pregunta 19\n\n \"Barcelona\", # pregunta 20\n\n \"Real Madrid\", # pregunta 21\n\n \"América\", # pregunta 22\n \n \"Estoma Virus\", # pregunta 23\n \n \"Libra\", # pregunta 24\n \n \"Peppa Pig\", # pregunta 25\n \n \"2002\", # pregunta 26\n \n \"Europa\", # pregunta 27\n \n \"Español\", # pregunta 28\n \n \"Dulcinea\", # pregunta 29\n \n \"Vikingos\", # pregunta 30\n \n \"Valencia\", # pregunta 31\n \n \"<NAME>\", # pregunta 32\n \n \"Alemania\", # pregunta 33\n \n \"Rosalia\", # pregunta 34\n \n \"<NAME>\", # pregunta 35\n \n \"<NAME>\", # pregunta 36\n \n \"20\", # pregunta 37\n \n \"Octubre\", # pregunta 38\n \n \"<NAME>\", # pregunta 39\n \n \"Pluton\" # pregunta 40\n]\n\nopcionesb = [\n \"<NAME>\", # pregunta 1\n \n \"2000\", # pregunta 2\n \n \"50\", # pregunta 3\n \n \"Moises\", # pregunta 4\n \n \"<NAME>\", # pregunta 5\n \n \"<NAME>\", # pregunta 6\n \n \"America\", # pregunta 7\n \n \"<NAME>\", # pregunta 8\n \n \"Teruel\", # pregunta 9\n\n \"Barcelona\", # pregunta 10\n\n \"Duero\", # pregunta 11\n\n \"<NAME>\", # pregunta 12\n\n \"<NAME>\", # pregunta 13\n\n \"<NAME>\", # pregunta 14\n\n \"Djmariio\", # pregunta 15\n\n \"Beethoven\", # pregunta 16\n\n \"Galileo\", # pregunta 17\n\n \"Brasil\", # pregunta 18\n\n \"Argentina\", # pregunta 19\n\n \"Lakers\", # pregunta 20\n\n \"Alcoyano\", # pregunta 21\n \n \"Europa\", # pregunta 22\n \n \"Coronavirus\", # pregunta 23\n \n \"Peseta\", # pregunta 24\n \n \"Avengers:Endgame\", # pregunta 25\n \n \"2010\", # pregunta 26\n \n \"Asia\", # pregunta 27\n \n \"Mandarin\", # pregunta 28\n \n \"Eustaquia\", # pregunta 29\n \n \"Stranger Things\", # pregunta 30\n \n \"Barcelona\", # pregunta 31\n \n \"Felipe VI\", # pregunta 32\n \n \"Ecuador\", # pregunta 33\n \n \"Anita\", # pregunta 34\n \n \"<NAME>\", # pregunta 35\n \n \"Paloma\", # pregunta 36\n \n \"6\", # pregunta 37\n \n \"Febrero\", # pregunta 38\n \n \"<NAME>\", # pregunta 39\n \n \"Mercurio\" # pregunta 40\n]", "id": "2540025", "language": "Python", "matching_score": 0.9827951788902283, "max_stars_count": 0, "path": "python/arrays.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \n\nprint(\"Programa que comprueba si un año es bisiesto o no\")\nprint(\"Ingrese el año\")\nyear = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nif year % 4!= 0: \n print(bcolors.OKCYAN, year, bcolors.ENDC, \"No es bisiesto\")\nelif year % 4 == 0 and year % 100 != 0:\n print(bcolors.HEADER, year, bcolors.ENDC,\"Es bisiesto\")\nelif year % 4 == 0 and year % 100 == 0 and year % 400 != 0: #divisible entre 4 y 10 y no entre 400\n\tprint(bcolors.OKCYAN, year, bcolors.ENDC, \"No es bisiesto\")\nelif 
year % 4 == 0 and year % 100 == 0 and year % 400 == 0: #divisible entre 4, 100 y 400\n\tprint(bcolors.HEADER, year, bcolors.ENDC, \"Es bisiesto\")", "id": "4615705", "language": "Python", "matching_score": 3.0288245677948, "max_stars_count": 0, "path": "Python Codigos/TomasPython12.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \nprint(bcolors.HEADER, \"Programa que reordena palindromos\", bcolors.ENDC)\nprint(\"Ingresa el palindromo\")\npalindromo = input(bcolors.OKBLUE)\nprint(bcolors.ENDC)\n\n# def espalindromo(s):\nfor s in palindromo:\n if s == s[::-1]:\n print(\"es palindromo\")\n # return True\n else:\n print(\"No es palindromo\")\n # return False\n\ncomprobacion = espalindromo(palindromo)\n\nif comprobacion:\n print(bcolors.OKGREEN,\"Si es un palindromo\", bcolors.ENDC, \"(\", bcolors.BOLD, espalindromo(palindromo), bcolors.ENDC, \")\")\nelse:\n print(bcolors.FAIL, \"No es un palindromo\", bcolors.ENDC, \"(\", bcolors.BOLD, espalindromo(palindromo), bcolors.ENDC, \")\")", "id": "4290983", "language": "Python", "matching_score": 0.5003412961959839, "max_stars_count": 0, "path": "Python Codigos/TomasPython14.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Programa sobre numeros pares\\n\")\nprint(\"Escribe un numero que sea par\", bcolors.OKBLUE)\nnumero = input()\nprint(bcolors.ENDC)\nresultado = int(numero) % 2\n# print(resultado) Debug only don't uncoment this unless you know what are you doing\nif resultado == 0:\n print(bcolors.OKBLUE,\"CORRECTO !!!\", bcolors.ENDC)\n print(bcolors.HEADER,\"El numero\", numero, \"es un numero par\", bcolors.ENDC)\n\nelif resultado == 1:\n print(bcolors.OKBLUE,\"INCORRECTO !!!\", bcolors.ENDC)\n print(bcolors.HEADER,\"El numero\", numero,\"es un numero\",bcolors.WARNING,\"impar\", bcolors.ENDC)\n\nelse:\n print(bcolors.FAIL,\"Lo siento el numero \",numero, \"no puedo clasificarlo\", bcolors.ENDC)\n\nprint(int(numero), \"/ \", \"2\", \"= \", int(numero) / 2)\n\n", "id": "4615553", "language": "Python", "matching_score": 1.490324854850769, "max_stars_count": 0, "path": "Python Codigos/TomasPython03.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Numeros pares entre 1, 1000\")\n\nfor n in range(1,1000):\n if (n % 2 == 0):\n print(bcolors.OKBLUE,n, bcolors.ENDC,\"es un numero par\\n\")\n else:\n print(\"\")\n", "id": "9308747", "language": "Python", "matching_score": 1.4131581783294678, "max_stars_count": 0, "path": "Python Codigos/TomasPython06.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Numeros impares entre 1, 1000\")\nprint(\"Introduce el numero\")\nnumero = eval(input(bcolors.OKBLUE))\nresultado = True\nprint(bcolors.ENDC)\ncontador = 0 \nwhile resultado:\n for n in range(1,numero):\n if (numero % n == 0):\n print(bcolors.OKCYAN, numero, bcolors.ENDC, \"es 
divisible por\", bcolors.WARNING, n, bcolors.ENDC)\n contador = contador + 1\n else:\n resultado = False\nprint(\"El numero total de divisores es\", bcolors.HEADER, contador, bcolors.ENDC)", "id": "5478399", "language": "Python", "matching_score": 1.7906875610351562, "max_stars_count": 0, "path": "Python Codigos/TomasPython08.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Programa para comprobar si un numero es divisible o no por: 2, 3, 5, 7, 11\\n\")\nprint(\"Escribe un numero\", bcolors.OKBLUE)\nnumero = input()\nprint(bcolors.ENDC, \"\\n\")\n\nnumero = int(numero)\n# print(numero)\n\nif (numero % 2 == 0):\n print(\"El numero\", bcolors.OKGREEN , numero , bcolors.ENDC ,bcolors.OKBLUE ,\"SI\" , bcolors.ENDC , \"es divisible por 2\")\nelse:\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.WARNING,\"NO\", bcolors.ENDC, \"es divisible por 2\")\n\nif (numero % 3 == 0):\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.OKBLUE, \"SI\", bcolors.ENDC, \"es divisible por 3\")\nelse:\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.WARNING, \"NO\", bcolors.ENDC, \"es divisible por 3\")\n\nif (numero % 5 == 0):\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.OKBLUE, \"SI\", bcolors.ENDC, \"es divisible por 5\")\nelse:\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.WARNING, \"NO\", bcolors.ENDC, \"es divisible por 5\")\n\nif (numero % 7 == 0):\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.OKBLUE, \"SI\", bcolors.ENDC, \"es divisible por 7\")\nelse:\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.WARNING, \"NO\", bcolors.ENDC, \"es divisible por 7\") \n\nif (numero % 11 == 0):\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.OKBLUE, \"SI\", bcolors.ENDC, \"es divisible por 11\")\nelse:\n print(\"El numero\", bcolors.OKGREEN, numero, bcolors.ENDC, bcolors.WARNING, \"NO\", bcolors.ENDC, \"es divisible por 11\")\n\n", "id": "5471356", "language": "Python", "matching_score": 3.546211004257202, "max_stars_count": 0, "path": "Python Codigos/TomasPython04.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\npositivo = 0\nnegativo = 0\n\n\n\nprint(\"Escribe numeros (el 0 termina el programa)\")\n\nwhile True:\n number = int(input(bcolors.HEADER))\n print(bcolors.ENDC)\n if number == 0:\n break\n elif number >= 0:\n positivo = positivo + 1\n elif number <= 0:\n negativo = negativo + 1\n \n\nprint(bcolors.BOLD, \"Positivos: \", bcolors.ENDC, 'o' * positivo)\n\nprint(bcolors.BOLD, \"Negativo: \", bcolors.ENDC, 'o' * negativo)", "id": "6147718", "language": "Python", "matching_score": 0.5728912353515625, "max_stars_count": 0, "path": "Python Codigos/TomasPython11.py" }, { "content": "from math import *\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(bcolors.HEADER, \"Introduce el valor del radio\")\nradio = 
eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce el valor de x centro\")\nxc = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce el valor de y centro\")\nyc = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce el valor de x\")\nx = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce el valor de y\")\ny = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\n# abs(distancia - radio) <= pow(10, -2)\n# distancia == radio\n\ndef esta_dentro(xc, yc, x, y, radio):\n distancia = sqrt(pow(x-xc, 2)+ pow(y-yc, 2))\n if distancia < radio:\n return \"dentro del circulo\"\n elif distancia == radio: # abs(distancia - radio) <= 10^(-d) siendo d el numero de digitos decimales\n return \"sobre el circulo\"\n else:\n return \"fuera del circulo\"\n \nwhile True:\n print(bcolors.OKBLUE, \"Esta\", esta_dentro(xc, yc, x, y, radio) , bcolors.ENDC, \"\\n\")\n print(\"Quieres introducir otro punto (y/n)\")\n sigo = input(bcolors.OKCYAN)\n print(bcolors.ENDC) \n if sigo == \"y\" or sigo == \"Y\":\n print(bcolors.HEADER, \"Introduce el valor de x\")\n x = eval(input(bcolors.OKBLUE))\n print(bcolors.ENDC)\n\n print(bcolors.HEADER, \"Introduce el valor de y\")\n x = eval(input(bcolors.OKBLUE))\n print(bcolors.ENDC)\n else:\n break\n\nprint(bcolors.ENDC)", "id": "1012548", "language": "Python", "matching_score": 2.1854159832000732, "max_stars_count": 0, "path": "Python Codigos/TomasPython15.py" }, { "content": "from math import *\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \nprint(bcolors.HEADER, \"Introduce las cordenadas de A\")\nxa = eval(input(bcolors.OKBLUE))\nya = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce las cordenadas de B\")\nxb = eval(input(bcolors.OKBLUE))\nyb = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\nprint(bcolors.HEADER, \"Introduce las cordenadas de C\")\nxc = eval(input(bcolors.OKBLUE))\nyc = eval(input(bcolors.OKBLUE))\nprint(bcolors.ENDC)\n\n\ndef distancia(x1, y1, x2, y2):\n return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))\n\ndef maximos_minimos(a, b, c):\n maximo = max(a, b, c)\n if max == a:\n cat1 = b\n cat2 = c\n elif max == b:\n cat1 = a\n cat2 = c \n else:\n cat1 = a\n cat2 = b\n return maximo, cat1, cat2\n\ndef tipo_lados(a, b, c):\n if a+b+c == 180:\n return \"El tipo de triangulo segun sus lados es equilatero\"\n elif a == c and a != b and c != b:\n return \"El tipo de triangulo segun sus lados es isosceles\"\n else:\n return \"El tipo de triangulo segun sus lados es escaleno\"\n\ndef tipo_angulos(a, b, c):\n a2 = round(pow(a, 2), 6)\n b2 = round(pow(b, 2), 6)\n c2 = round(pow(c, 2), 6)\n if a2 > b2 + c2:\n return \"El tipo de triangulo segun sus angulos es Obtusangulo\"\n elif a2 < b2 + c2:\n return \"El tipo de triangulo segun sus angulos es Acutangulo\"\n else:\n return \"El tipo de triangulo segun sus angulos es Rectangulo\"\n\nAB = distancia(xa, ya, xb, yb)\nAC = distancia(xa, ya, xc, yc)\nBC = distancia(xb, yb, xc, yc)\n\nh, c1, c2 = maximos_minimos(AB, AC, BC)\n\nprint (bcolors.OKCYAN,tipo_lados(AB,AC,BC), bcolors.ENDC)\nprint (bcolors.OKBLUE,tipo_angulos(h, c1, c2), bcolors.ENDC)\n\nprint(bcolors.ENDC)", "id": "8770444", "language": "Python", "matching_score": 1.0454254150390625, "max_stars_count": 0, "path": "Python 
Codigos/TomasPython16.py" }, { "content": "import math\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Programa que calcula una ecuacion de segundo grado\\n\")\nprint(\"A continuacion se le pedira el valor de a, b y c\")\nprint(\"Introduce el valor de a\", bcolors.OKBLUE)\na = eval(input())\nprint(bcolors.ENDC,\"\\n\",\"Introduce el valor de b\", bcolors.OKCYAN)\nb = eval(input())\nprint(bcolors.ENDC,\"\\n\",\"Introduce el valor de c\", bcolors.OKCYAN)\nc = eval(input())\nprint(bcolors.ENDC)\n\n\nx1 = (b*b) - (4*a*c)\nprint(x1)\nif x1 < 0:\n print(bcolors.FAIL, \"No hay soluciones reales\", bcolors.ENDC)\nelif x1 == 0:\n x4 = -b/2*a\n print(\"La unica solucion es\", bcolors.BOLD, x4, bcolors.ENDC)\nelse:\n x3 = math.sqrt(x1)\n a1 = (-b + x3)/2*a \n a2 = (-b - x3)/2*a\n print(\"La primera solucion es\", bcolors.OKCYAN, a1, bcolors.ENDC)\n print(\"La segunda solucion es\", bcolors.OKBLUE, a2, bcolors.ENDC)\n \n", "id": "3553909", "language": "Python", "matching_score": 1.4182612895965576, "max_stars_count": 0, "path": "Python Codigos/TomasPython10.py" }, { "content": "import time\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Programa que calcula todos los numeros primos desde 2 hasta el numero dado\")\nprint(\"Escribe un numero\")\nk = eval(input(bcolors.OKBLUE))\ncontador = 0\nprint(bcolors.ENDC)\n\n\ndef esprimo(k):\n if k> 1:\n for n in range(2, k): # Define un rango desde 2 hasta k \n if (k % n) == 0: # Comprueba si k / n su resto da 0\n return False # No es primo\n return True # Es primo\n else:\n return False # No es primo\n\nfor n in range(2, k): # Define un rango desde 2 hasta k\n if esprimo(n) == True: # Si la funcion 'esprimo' da true entra en el bucle\n print(bcolors.HEADER, n, bcolors.ENDC, \"es primo\", bcolors.WARNING,\"(True)\", bcolors.ENDC) # Imprime en pantalla que n es primo\n contador = contador + 1 # Suma 1 al contador\n time.sleep(0.1) # Espera 0.1 segundos\n else:\n time.sleep(0) # Espera 0 segundos\n\nprint(bcolors.BOLD,\"en total hay\",bcolors.ENDC,bcolors.OKGREEN,contador,bcolors.ENDC,bcolors.BOLD,\"numeros que son primos desde el 2 hasta\",bcolors.OKCYAN,k,bcolors.ENDC)\n", "id": "9316191", "language": "Python", "matching_score": 1.7626559734344482, "max_stars_count": 0, "path": "Python Codigos/TomasPython09.py" }, { "content": "from random import randint\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef aleatorio():\n print(bcolors.OKBLUE)\n print(\"Primer numero\")\n a = randint(1,100)\n print(a)\n print(\"Segundo el numero\")\n b = randint(1,100)\n print(b)\n print(bcolors.ENDC)\n\n\n suma = (a + b) \n resta = (a - b)\n multiplicacion = (a * b)\n division = (a / b)\n division_entera = (a // b)\n resto_division = (a % b)\n\n def texto():\n print (a, \"+\", b, \"= \", suma)\n print (a, \"-\", b, \"= \", resta)\n print (a, \"*\", b, \"= \", multiplicacion)\n print (a, \"/\", b, \"= \", division)\n print (a, \"//\", b, \"= \", division_entera)\n print (a, \"%\", b, \"= \", resto_division)\n def resultado():\n 
print(\"Operaciones disponibles con \", a, \"y\", b)\n\n resultado()\n texto()\n\n\n\ndef automatico():\n print(bcolors.OKBLUE)\n a = input(\" Escribe el primer numero (#aleatorio#) \\n\")\n if a == \"aleatorio\":\n a1 = randint(1,100)\n b = input(\"Escribe el segundo numero (#aleatorio#) \\n\")\n if b == \"aleatorio\":\n b2 = randint(1,100)\n if a and b != \"aleatorio\":\n a = eval(a)\n b = eval(b)\n print(bcolors.ENDC)\n\n suma = (a1 + b2) \n resta = (a1 - b2)\n multiplicacion = (a1 * b2)\n division = (a1 / b2)\n division_entera = (a1 // b2)\n resto_division = (a1 % b2)\n\n\n def texto():\n print (a, \"+\", b, \"= \", suma)\n print (a, \"-\", b, \"= \", resta)\n print (a, \"*\", b, \"= \", multiplicacion)\n print (a, \"/\", b, \"= \", division)\n print (a, \"//\", b, \"= \", division_entera)\n print (a, \"%\", b, \"= \", resto_division)\n def resultado():\n print(\"Operaciones disponibles con \", a, \"y\", b)\n\n resultado()\n texto()\n\n def texto():\n print (a, \"+\", b, \"= \", suma)\n print (a, \"-\", b, \"= \", resta)\n print (a, \"*\", b, \"= \", multiplicacion)\n print (a, \"/\", b, \"= \", division)\n print (a, \"//\", b, \"= \", division_entera)\n print (a, \"%\", b, \"= \", resto_division)\n def resultado():\n print(\"Operaciones disponibles con \", a, \"y\", b)\n\n resultado()\n texto()\n\ndef manual():\n print(\"Has elegido el modo Manual \", \"\\n Hay 6 opciones disponibles \", bcolors.OKBLUE, \"\\n suma , resta , multiplicacion , division , division_entera , resto_division\" , bcolors.ENDC, bcolors.WARNING, \"TIENES QUE ESCRIBIRLO TAL Y COMO ESTA \\n\", bcolors.ENDC )\n a = input(\"Elige operacion: \")\n print(bcolors.OKBLUE)\n n1 = input(\"Escribe el primer numero (#aleatorio#) \")\n #if n1 == \"aleatorio\":\n # s1 = randint(1, 100)\n n2 = input(\"Escribe el segundo numero (#aleatorio#) \")\n #if n2 == \"aleatorio\":\n #s2 = randint(1,100)\n #if n1 and n2 != \"aleatorio\":\n s1 = eval(n1)\n s2 = eval(n2)\n print(bcolors.ENDC)\n \n if a == \"suma\":\n print(s1, \"+\", s2, \"= \", s1 + s2)\n if a == \"resta\":\n print(s1, \"-\", s2, \"= \", s1 - s2)\n if a == \"multiplicacion\":\n print(s1, \"*\", s2, \"= \", s1 * s2)\n if a == \"division\":\n print(s1, \"/\", s2, \"= \", s1 / s2)\n if a == \"division_entera\":\n print(s1, \"//\", s2, \"= \", s1 // s2)\n if a == \"resto_division\":\n print(s1, \"%\", s2, \"= \", s1 % s2)\n\n\nprint(\"Elige el modo que quieres\", bcolors.BOLD, \"'Automatico, Manual, Aleatorio' \", bcolors.ENDC, \"\\n En el\", bcolors.BOLD, \"Modo Automatico\", bcolors.ENDC, \"se haran todas las operaciones \", \"\\n Mientras que en el\", bcolors.BOLD , \"Modo Manual\", bcolors.ENDC , \"tu eliges que operacion queires hacer\", \"\\n\", \"En el\", bcolors.BOLD, \"modo aleatorio\", bcolors.ENDC, \"el primer y segundo numero es un valor aleatorio entre 1,100\" )\nmodo = input(\"Elige el modo que quieras: \")\n\nif modo == \"automatico\":\n automatico()\nif modo == \"manual\":\n manual()\nif modo == \"aleatorio\":\n aleatorio()\n# else:\n# print (bcolors.FAIL, \"Lo siento, no he encontrado ese modo en mi base de datos\")\n\n\n# He puesto una clase para cambiar el color de las letras para que sea mas comodo a la vista\n\n\n", "id": "3463521", "language": "Python", "matching_score": 1.2711493968963623, "max_stars_count": 0, "path": "_site/TomasPython01.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n 
UNDERLINE = '\\033[4m'\n \nlista = list()\nespacios = 0\nprint(bcolors.BOLD, \"Programa que calcula la longitud de un texto\", bcolors.ENDC)\nprint(\"Introduce el texto\")\ntexto = input(bcolors.OKCYAN)\nprint(bcolors.ENDC)\n# print(lista)\nfor i in texto: \n if i == \" \": \n espacios += 1 \n\nprint(\"El texto tiene\", bcolors.OKGREEN, espacios+1, bcolors.ENDC, \"palabras\")", "id": "12581752", "language": "Python", "matching_score": 1.469090461730957, "max_stars_count": 0, "path": "Python Codigos/TomasPython13.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(\"Programa que analiza una palabra\\n\")\nprint(\"Escribe una palabra: \")\npalabra = input(bcolors.OKBLUE)\nprint(bcolors.ENDC)\nlongitud = len(palabra[:])\n\nif longitud <= 5: \n print(\"La palabra\", bcolors.OKBLUE,palabra , bcolors.ENDC,\"tiene\", bcolors.OKCYAN, len(palabra[:5]), bcolors.ENDC, \"letras\")\n print(\"La primera letra es\", bcolors.OKGREEN, palabra[0], bcolors.ENDC)\n print(\"Las ultima letra es\", bcolors.OKGREEN, palabra[3:], bcolors.ENDC)\n\nif longitud > 5:\n print(\"La palabra\", bcolors.OKBLUE,palabra , bcolors.ENDC,\"tiene\", bcolors.OKCYAN, len(palabra[:10]), bcolors.ENDC, \"letras\")\n print(\"Las cuatro primeras letras son\", bcolors.OKGREEN, palabra[0:4], bcolors.ENDC)\n print(\"Las cuatros ultimas letras son\", bcolors.OKGREEN, palabra[-4:], bcolors.ENDC)\n", "id": "1534928", "language": "Python", "matching_score": 2.9721803665161133, "max_stars_count": 0, "path": "Python Codigos/TomasPython05.py" }, { "content": "class bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nprint(\"Programa que analiza una palabra\")\nprint(\"Escribe la palabra \", bcolors.OKBLUE)\npalabra =input() # Informatica\nprint(bcolors.ENDC)\ncaracteres = len(palabra[:])\nprint(\"La palabra\", bcolors.OKBLUE,\"'\", palabra,\"'\", bcolors.ENDC, \"Tiene\", caracteres, \"Letras\", bcolors.ENDC)\nprint(\"La primera letra es\", bcolors.OKGREEN, palabra[0], bcolors.ENDC)\nprint(\"La ultima letra es\", bcolors.OKGREEN, palabra[-1], bcolors.ENDC)\nprint(\"Las dos primeras letras son\", bcolors.OKGREEN, palabra[0:2], bcolors.ENDC)\nprint(\"Las dos ultimas letras son\", bcolors.OKGREEN, palabra[9:], bcolors.ENDC)", "id": "1522937", "language": "Python", "matching_score": 0.03593788295984268, "max_stars_count": 0, "path": "_site/TomasPython02.py" }, { "content": "from re import A, T\nfrom sys import exit\nimport time\nimport pygame\nimport random\nimport os\nfrom arrays import *\n\ngreen = (124,252,0)\nwhite = (250,250,250)\nred = (255,0,0)\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nturno = 0\npuntuacion = 0\npygame.init()\nfont = pygame.font.SysFont(None, 70)\n\ntextAlignLeft = 0\ntextAlignRight = 1\ntextAlignCenter = 2\ntextAlignBlock = 3\n\n# light shade of the button\ncolor_light = (170,170,170)\n\n# dark shade of the button\ncolor_dark = (100,100,100)\n\n# white color\ncolor = (255,255,255)\n\n# add pygame.FULLSCREEN for fullscreen\nwindow = pygame.display.set_mode((480, 320))\n\"\"\"\nStart of the 
width and height variables\n\"\"\"\n# stores the width of the\n# screen into a variable\nwidth = 360 # window.get_width()\n\n# stores the height of the\n# screen into a variable\nheight = 500 # window.get_height()\n\"\"\"\nEnd of the width and height variables\n\"\"\"\n# stores the (x,y) coordinates into\n# the variable as a tuple\nmouse = pygame.mouse.get_pos()\n\nsmallfont = pygame.font.SysFont('Corbel',80)\n\n# rendering a text written in\n# this font\ntext = smallfont.render('Corregir' , True , color)\n\n\n\n# Don't touch it (works)\ndef drawText(surface, text, color, rect, font, align=textAlignLeft, aa=False, bkg=None):\n lineSpacing = -2\n spaceWidth, fontHeight = font.size(\" \")[0], font.size(\"Tg\")[1]\n\n listOfWords = text.split(\" \")\n if bkg:\n imageList = [font.render(word, 1, color, bkg) for word in listOfWords]\n for image in imageList: image.set_colorkey(bkg)\n else:\n imageList = [font.render(word, aa, color) for word in listOfWords]\n\n maxLen = rect[2]\n lineLenList = [0]\n lineList = [[]]\n for image in imageList:\n width = image.get_width()\n lineLen = lineLenList[-1] + len(lineList[-1]) * spaceWidth + width\n if len(lineList[-1]) == 0 or lineLen <= maxLen:\n lineLenList[-1] += width\n lineList[-1].append(image)\n else:\n lineLenList.append(width)\n lineList.append([image])\n\n lineBottom = rect[1]\n lastLine = 0\n for lineLen, lineImages in zip(lineLenList, lineList):\n lineLeft = rect[0]\n if align == textAlignRight:\n lineLeft += + rect[2] - lineLen - spaceWidth * (len(lineImages)-1)\n elif align == textAlignCenter:\n lineLeft += (rect[2] - lineLen - spaceWidth * (len(lineImages)-1)) // 2\n elif align == textAlignBlock and len(lineImages) > 1:\n spaceWidth = (rect[2] - lineLen) // (len(lineImages)-1)\n if lineBottom + fontHeight > rect[1] + rect[3]:\n break\n lastLine += 1\n for i, image in enumerate(lineImages):\n x, y = lineLeft + i*spaceWidth, lineBottom\n surface.blit(image, (round(x), y))\n lineLeft += image.get_width()\n lineBottom += fontHeight + lineSpacing\n\n if lastLine < len(lineList):\n drawWords = sum([len(lineList[i]) for i in range(lastLine)])\n remainingText = \"\"\n for text in listOfWords[drawWords:]: remainingText += text + \" \"\n return remainingText\n return \"\"\n\ndef botton_siguiente(indice):\n pygame.init()\n while True:\n for ev in pygame.event.get():\n\n if ev.type == pygame.QUIT:\n pygame.quit()\n\n #checks if a mouse is clicked\n if ev.type == pygame.MOUSEBUTTONDOWN:\n \n #if the mouse is clicked on the\n # button the game is terminated\n if width/2 <= mouse[0] <= width/2+140 and height/2 <= mouse[1] <= height/2+40:\n print(\"Boton pulsado\")\n # siguiente()\n\n # fills the screen with a color\n #window.fill((60,25,60))\n\n # stores the (x,y) coordinates into\n # the variable as a tuple\n mouse = pygame.mouse.get_pos()\n \n # if mouse is hovered on a button it\n # changes to lighter shade \n if width/2 <= mouse[0] <= width/2+140 and height/2 <= mouse[1] <= height/2+40:\n pygame.draw.rect(window,color_light,[width/2,height/2,140,40])\n else:\n pygame.draw.rect(window,color_dark,[width/2,height/2,140,40])\n # superimposing the text onto our button\n window.blit(text , (width/2+50,height/2))\n \n # updates the frames of the game\n pygame.display.update()\n\ndef boton(ventana,texto,x,y,width,height):\n # texto A\n texto = smallfont.render(texto,True, color)\n botonc = pygame.Rect(x,y,width,height)\n pygame.draw.rect(ventana,color_dark, botonc) # 10, 140, 250, 50\n # else:\n # pygame.draw.rect(window,color_dark,[10,140,250,50]) # left, 
top, width, height\n # superimposing the text onto our button\n window.blit(texto , (x+10,y+10))\n \n # updates the frames of the game\n pygame.display.flip()\n return botonc\n\n\npygame.display.set_caption('Gana y Juega')\norden_preguntas = random.sample(range(0,39), 20)\n\n\n\ntextRect = pygame.Rect(50, 50, 430, 270)\n\n# add pygame.FULLSCREEN for fullscreen\nwindow = pygame.display.set_mode((800, 600), pygame.FULLSCREEN)\n\nrun = True\n\nfor turno in range(0,19): # 0,40 default\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n break\n \n print(bcolors.WARNING,\"Orden de preguntas: \",orden_preguntas, bcolors.ENDC, \"\\n\")\n msg = preguntas[orden_preguntas[turno]] \n window.fill((255, 255, 255)) \n textRect = pygame.Rect(70, 10, 600, 250)\n pygame.draw.rect(window, (255, 255, 255), textRect, 1)\n drawTextRect = textRect.inflate(-5, -5)\n drawText(window, msg, (0, 0, 0), drawTextRect, font, textAlignCenter, True)\n \n print(\"Antes de botones \", orden_preguntas[turno])\n \n # opciones\n botona = boton(window,opcionesa[orden_preguntas[turno]],10, 300, 700, 100)\n \n print(bcolors.HEADER,turno, bcolors.ENDC) # print to serial text of ans\n \n print(bcolors.OKCYAN,opcionesa[orden_preguntas[turno]],bcolors.ENDC) # print to serial text of botona\n \n botonb = boton(window,opcionesb[orden_preguntas[turno]],10, 450, 700, 100)\n \n print(bcolors.OKBLUE,opcionesb[orden_preguntas[turno]],bcolors.ENDC) # print to serial text of botonb\n \n print(bcolors.HEADER,turno, bcolors.ENDC) # print to serial text of ans\n \n print(bcolors.HEADER,ans[orden_preguntas[turno]], bcolors.ENDC) # print to serial text of ans\n \n print(\"Despues de botones \",orden_preguntas[turno])\n \n # fin de opciones\n \"\"\"\n Dibujar los dos botones con las soluciones\n Entrar en un bucle comprobando los eventos\n si el raton esta pulsado\n coger la posicion del raton \n if (boton1.collide(x,y))\n respuesta a\n elif (boton2.collide(x,y)) \n respuesta b \n \"\"\"\n pulsacion = False\n while not pulsacion:\n for ev in pygame.event.get():\n\n if ev.type == pygame.QUIT:\n pygame.quit()\n \n if ev.type == pygame.MOUSEBUTTONDOWN:\n x,y = pygame.mouse.get_pos()\n if botona.collidepoint(x,y):\n pulsacion = True\n respuesta = \"A\"\n print(bcolors.UNDERLINE, \"Pulsado a\", bcolors.ENDC)\n elif botonb.collidepoint(x,y):\n pulsacion = True\n respuesta = \"B\"\n print(bcolors.UNDERLINE,\"Pulsado b\", bcolors.ENDC)\n # clean event queue \n pygame.event.clear()\n pygame.display.flip()\n if respuesta == \"A\" and ans[orden_preguntas[turno]] == 1:\n puntuacion = puntuacion +1\n # Green color\n window.fill(green)\n pygame.display.flip()\n time.sleep(1)\n # default color\n window.fill(white)\n pygame.display.flip()\n elif respuesta == \"B\" and ans[orden_preguntas[turno]] == 2:\n puntuacion = puntuacion +1\n # Green color\n window.fill(green)\n pygame.display.flip()\n time.sleep(1)\n # default color\n window.fill(white)\n else:\n print(bcolors.FAIL,\"Respuesta incorrecta\", bcolors.ENDC)\n puntuacion = puntuacion - 1\n # Red background\n window.fill(red)\n pygame.display.update()\n time.sleep(1)\n # default background\n window.fill(white)\n pygame.display.flip() # left, top, width, height\n # puntuacion\n \n \n\n# if event.key == 1073741882 or 282:\n# pygame.quit()\n# exit()\n\n\n#Limpiar la pantalla\n## Escribir contador de respuestas correcta\nboton(window,\"Tu puntuacion es de\", 0,100,800,100)\nboton(window,str(puntuacion),0,230,800,100)\n\njugardenuevo = boton(window,\"Jugar de nuevo\", 100,450,500,100)\n# if the boton 
is pulsed start again the program\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n x,y = pygame.mouse.get_pos()\n if jugardenuevo.collidepoint(x,y):\n print(\"Boton pulsado\")\n os.system('python \"main.py\"')\n pygame.quit()\n exit()\n else:\n print(\"Boton no pulsado\")\n pygame.quit()\n exit()\n\n\n\npygame.display.flip()\ntime.sleep(5)\npygame.quit()\nexit()\n\n", "id": "10844166", "language": "Python", "matching_score": 7.05513858795166, "max_stars_count": 0, "path": "python/main.py" }, { "content": "from re import A\nimport pygame\nimport random\nimport os\nfrom arrays import *\n\n\nturno = 0\npygame.init()\nfont = pygame.font.SysFont(None, 40)\n\ntextAlignLeft = 0\ntextAlignRight = 1\ntextAlignCenter = 2\ntextAlignBlock = 3\n\n# light shade of the button\ncolor_light = (170,170,170)\n\n# dark shade of the button\ncolor_dark = (100,100,100)\n\n# white color\ncolor = (255,255,255)\n\nsmallfont = pygame.font.SysFont('Corbel',35)\n\n\n# rendering a text written in\n# this font\ntext = smallfont.render('Corregir' , True , color)\n\ndef drawText(surface, text, color, rect, font, align=textAlignLeft, aa=False, bkg=None):\n lineSpacing = -2\n spaceWidth, fontHeight = font.size(\" \")[0], font.size(\"Tg\")[1]\n\n listOfWords = text.split(\" \")\n if bkg:\n imageList = [font.render(word, 1, color, bkg) for word in listOfWords]\n for image in imageList: image.set_colorkey(bkg)\n else:\n imageList = [font.render(word, aa, color) for word in listOfWords]\n\n maxLen = rect[2]\n lineLenList = [0]\n lineList = [[]]\n for image in imageList:\n width = image.get_width()\n lineLen = lineLenList[-1] + len(lineList[-1]) * spaceWidth + width\n if len(lineList[-1]) == 0 or lineLen <= maxLen:\n lineLenList[-1] += width\n lineList[-1].append(image)\n else:\n lineLenList.append(width)\n lineList.append([image])\n\n lineBottom = rect[1]\n lastLine = 0\n for lineLen, lineImages in zip(lineLenList, lineList):\n lineLeft = rect[0]\n if align == textAlignRight:\n lineLeft += + rect[2] - lineLen - spaceWidth * (len(lineImages)-1)\n elif align == textAlignCenter:\n lineLeft += (rect[2] - lineLen - spaceWidth * (len(lineImages)-1)) // 2\n elif align == textAlignBlock and len(lineImages) > 1:\n spaceWidth = (rect[2] - lineLen) // (len(lineImages)-1)\n if lineBottom + fontHeight > rect[1] + rect[3]:\n break\n lastLine += 1\n for i, image in enumerate(lineImages):\n x, y = lineLeft + i*spaceWidth, lineBottom\n surface.blit(image, (round(x), y))\n lineLeft += image.get_width()\n lineBottom += fontHeight + lineSpacing\n\n if lastLine < len(lineList):\n drawWords = sum([len(lineList[i]) for i in range(lastLine)])\n remainingText = \"\"\n for text in listOfWords[drawWords:]: remainingText += text + \" \"\n return remainingText\n return \"\"\n\ndef botton_siguiente():\n pygame.init()\n while True:\n for ev in pygame.event.get():\n\n if ev.type == pygame.QUIT:\n pygame.quit()\n\n #checks if a mouse is clicked\n if ev.type == pygame.MOUSEBUTTONDOWN:\n \n #if the mouse is clicked on the\n # button the game is terminated\n if width/2 <= mouse[0] <= width/2+140 and height/2 <= mouse[1] <= height/2+40:\n print(\"Boton pulsado\")\n # siguiente()\n\n # fills the screen with a color\n #window.fill((60,25,60))\n\n # stores the (x,y) coordinates into\n # the variable as a tuple\n mouse = pygame.mouse.get_pos()\n \n # if mouse is hovered on a button it\n # changes to lighter shade \n if width/2 <= mouse[0] <= width/2+140 and height/2 <= 
mouse[1] <= height/2+40:\n pygame.draw.rect(window,color_light,[width/2,height/2,140,40])\n else:\n pygame.draw.rect(window,color_dark,[width/2,height/2,140,40])\n # superimposing the text onto our button\n window.blit(text , (width/2+50,height/2))\n \n # updates the frames of the game\n pygame.display.update()\n\n# def correcta():\n\n\npygame.display.set_caption('Gana y Juega')\norden_preguntas = random.sample(range(0,20), 20)\n\ntextRect = pygame.Rect(50, 50, 430, 270)\n\n# add pygame.FULLSCREEN for fullscreen\nwindow = pygame.display.set_mode((480, 320))\n\n# stores the width of the\n# screen into a variable\nwidth = window.get_width()\n\n# stores the height of the\n# screen into a variable\nheight = window.get_height()\n\n# stores the (x,y) coordinates into\n# the variable as a tuple\nmouse = pygame.mouse.get_pos()\n\nrun = True\n\nfor turno in range(0,20):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n break\n\n msg = preguntas[orden_preguntas[turno]]\n window.fill((255, 255, 255))\n textRect = pygame.Rect(25, 50, 430, 100)\n pygame.draw.rect(window, (255, 255, 255), textRect, 1)\n drawTextRect = textRect.inflate(-5, -5)\n drawText(window, msg, (0, 0, 0), drawTextRect, font, textAlignCenter, True)\n # opciones\n \n textRect = pygame.Rect(50,160,150,100) # left, top, width, height\n drawTextRect = textRect.inflate(-5, -5)\n drawText(window, opcionesa[orden_preguntas[turno]], (0, 0, 0), drawTextRect, font, textAlignCenter, True)\n \n textRect = pygame.Rect(300,170,150,100)\n drawTextRect = textRect.inflate(-5, -5)\n \n drawText(window, opcionesb[orden_preguntas[turno]], (0, 0, 0), drawTextRect, font, textAlignCenter, True)\n pygame.display.flip()\n # fin de opciones\n #Esperar respuesta por bluetooth\n \n botton_siguiente()\n\n# if event.key == 1073741882 or 282:\n# pygame.quit()\n# exit()\n\n\n#Limpiar la pantalla\n## Escribir contador de respuestas correcta\npygame.quit()\nexit()", "id": "12683342", "language": "Python", "matching_score": 7.049841403961182, "max_stars_count": 0, "path": "python/antiguo.py" } ]
1.46909
matheusAparicio
[ { "content": "import PySimpleGUI as sg\n\nclass Layout:\n\n def __init__(self):\n # Define the window's contents.\n self.layout = [[sg.Text(\"Tecla alfanumérica 1\"), sg.Input(enable_events=True, key='key1', size=[5, 5])],\n [sg.Text(\"Duração (seg):\"), sg.Input(default_text=5, enable_events=True, key='duration1', size=[8,5]),\n sg.Text(\"Delay (seg):\"), sg.Input(default_text=0, enable_events=True, key='delay1', size=[5,5])],\n\n [sg.Text('')],\n\n [sg.Text(\"Tecla alfanumérica 2\"), sg.Input(enable_events=True, key='key2', size=[5, 5])],\n\n [sg.Text('')],\n\n [sg.Text(\"Mouse\"), sg.Combo(['nenhum', 'left', 'right'], default_value='nenhum', key='mouseButton')],\n [sg.Text(\"Duração (seg):\"), sg.Input(default_text=5, enable_events=True, key='duration3', size=[8, 5]),\n sg.Text(\"Delay (seg):\"), sg.Input(default_text=0, enable_events=True, key='delay3', size=[5, 5])],\n\n [sg.Text('')],\n\n [sg.Text(\"Atalho para iniciar o macro:\")],\n [sg.Text(\"Tecla 1:\"), sg.Input(default_text='h', enable_events=True, key='keyInit1', size=[5, 5])],\n\n [sg.Text('')],\n\n [sg.Text(\"Delay para iniciar o macro:\")],\n [sg.Text(\"Delay (seg):\"), sg.Input(default_text=0, enable_events=True, key='delayInit', size=[5, 5])],\n\n [sg.Text('')],\n\n [sg.Button('Aceitar definições', key='startMacro'), sg.Button('Fechar macro', key='closeMacro')]]\n\n # Create the window\n self.window = sg.Window('simpleMacro', self.layout, size=[400, 400])\n self.event, self.values = self.window.read()\n\n def run(self):\n # Display and interact with the Window using an Event Loop\n while True:\n event, values = self.window.read()\n\n if event == sg.WINDOW_CLOSED or event == 'closeMacro':\n break\n\n if len(values['key1']) > 1:\n self.window['key1'].Update(values['key1'][:-1])\n if len(values['key2']) > 1:\n self.window['key2'].Update(values['key2'][:-1])\n\n if len(values['keyInit1']) > 1:\n self.window['keyInit1'].Update(values['keyInit1'][:-1])\n\n if values['duration1'] == \"\":\n values['duration1'] = 1\n self.window['duration1'].Update(values['duration1'])\n\n if values['delay1'] == \"\":\n values['delay1'] = 0\n self.window['delay1'].Update(values['delay1'])\n\n if values['duration3'] == \"\":\n values['duration3'] = 1\n self.window['duration3'].Update(values['duration3'])\n if values['delay3'] == \"\":\n values['delay3'] = 0\n self.window['delay3'].Update(values['delay3'])\n\n if event == 'startMacro':\n try:\n if (values['key1'] != \"\" or values['key2'] != \"\" or values['mouseButton'] != 'nenhum') and values['keyInit1'] != '':\n break\n except:\n pass\n\n self.event, self.values = self.window.read()\n\n # Finish up by removing from the screen\n self.window.close()\n", "id": "10742767", "language": "Python", "matching_score": 1.904349684715271, "max_stars_count": 0, "path": "layout.py" }, { "content": "from macro import Macro\nfrom pynput.keyboard import Key, Listener\nimport time\nfrom layout import Layout\n\n\nclass Main:\n\n def __init__(self):\n self.macro = Macro()\n self.layout = Layout()\n self.timer = 0\n\n def startLayout(self):\n self.layout.run()\n\n def run(self):\n time.sleep(int(self.layout.values['delayInit']))\n while self.macro.macroAtivo:\n try:\n if self.layout.values['key2'] == \"\":\n self.macro.press_key(int(self.layout.values['duration1']), self.layout.values['key1'],\n int(self.layout.values['delay1']))\n elif self.layout.values['key1'] == \"\":\n self.macro.press_key(int(self.layout.values['duration1']), self.layout.values['key2'],\n int(self.layout.values['delay1']))\n else:\n 
self.macro.press_two_keys(int(self.layout.values['duration1']), self.layout.values['key1'],\n self.layout.values['key2'], int(self.layout.values['delay1']))\n\n if self.layout.values['mouseButton'] != 'nenhum':\n self.macro.click_mouse(int(self.layout.values['duration3']), self.layout.values['mouseButton'],\n int(self.layout.values['delay3']))\n\n break\n\n except:\n pass\n\n\nmain = Main()\nmain.startLayout()\n\n\ndef on_press(key):\n if 'char' in dir(key): # check if char method exists,\n if key.char == main.layout.values['keyInit1']:\n main.macro.macroAtivo = True\n return False\n\n\ndef on_release(key):\n pass\n\n\nwith Listener(\n on_press=on_press,\n on_release=on_release) as listener:\n listener.join()\n\nmain.run()\n\n\n\n\n", "id": "10356071", "language": "Python", "matching_score": 2.86194109916687, "max_stars_count": 0, "path": "main.py" }, { "content": "import pyautogui\nimport time\n\n\nclass Macro:\n\n def __init__(self):\n self.__macroAtivo = False\n\n def press_key(self, hold_time: float, key: str, delay=0):\n start = time.time()\n while time.time() - start < hold_time:\n pyautogui.press(key)\n time.sleep(delay)\n\n def press_two_keys(self, hold_time: float, key1: str, key2: str, delay=0):\n start = time.time()\n while time.time() - start < hold_time:\n pyautogui.press(key1)\n pyautogui.press(key2)\n time.sleep(delay)\n\n def click_mouse(self, hold_time: int, button='left', delay=0):\n start = time.time()\n while time.time() - start < hold_time:\n pyautogui.click(button=button)\n time.sleep(delay)\n", "id": "4650819", "language": "Python", "matching_score": 0.6522568464279175, "max_stars_count": 0, "path": "macro.py" } ]
1.90435
xuzheliang135
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2021 <NAME> <<EMAIL>>\n# All rights reserved.\n#\n# This code is licensed under the MIT License.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files(the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions :\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nimport re\nimport os\nimport six\nimport requests\nimport mock\nfrom json import dumps\nfrom random import choice\nfrom string import ascii_uppercase as str_alpha\nfrom string import digits as str_num\n\nfrom apprise import plugins\nfrom apprise import NotifyType\nfrom apprise import Apprise\nfrom apprise import AppriseAsset\nfrom apprise import AppriseAttachment\nfrom apprise.common import OverflowMode\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n\nclass AppriseURLTester(object):\n\n # Some exception handling we'll use\n req_exceptions = (\n requests.ConnectionError(\n 0, 'requests.ConnectionError() not handled'),\n requests.RequestException(\n 0, 'requests.RequestException() not handled'),\n requests.HTTPError(\n 0, 'requests.HTTPError() not handled'),\n requests.ReadTimeout(\n 0, 'requests.ReadTimeout() not handled'),\n requests.TooManyRedirects(\n 0, 'requests.TooManyRedirects() not handled'),\n )\n\n # Attachment Testing Directory\n __test_var_dir = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), 'var')\n\n # Our URLs we'll test against\n __tests = []\n\n # Define how many characters exist per line\n row = 80\n\n # Some variables we use to control the data we work with\n body_len = 1024\n title_len = 1024\n\n def __init__(self, tests=None, *args, **kwargs):\n \"\"\"\n Our initialization\n \"\"\"\n # Create a large body and title with random data\n self.body = ''.join(\n choice(str_alpha + str_num + ' ') for _ in range(self.body_len))\n self.body = '\\r\\n'.join(\n [self.body[i: i + self.row]\n for i in range(0, len(self.body), self.row)])\n\n # Create our title using random data\n self.title = ''.join(\n choice(str_alpha + str_num) for _ in range(self.title_len))\n\n if tests:\n self.__tests = tests\n\n def add(self, url, meta):\n \"\"\"\n Adds a test suite to our object\n \"\"\"\n self.__tests.append({\n 'url': url,\n 'meta': meta,\n })\n\n def run_all(self):\n \"\"\"\n Run all of our tests\n \"\"\"\n # iterate over our dictionary and test it out\n for (url, meta) in self.__tests:\n self.run(url, meta)\n\n @mock.patch('requests.get')\n @mock.patch('requests.post')\n def run(self, url, meta, mock_post, mock_get):\n \"\"\"\n Run a specific test\n \"\"\"\n # Disable Throttling to speed testing\n 
plugins.NotifyBase.request_rate_per_sec = 0\n\n # Our expected instance\n instance = meta.get('instance', None)\n\n # Our expected server objects\n _self = meta.get('self', None)\n\n # Our expected Query response (True, False, or exception type)\n response = meta.get('response', True)\n\n # Our expected privacy url\n # Don't set this if don't need to check it's value\n privacy_url = meta.get('privacy_url')\n\n # Our regular expression\n url_matches = meta.get('url_matches')\n\n # Allow us to force the server response code to be something other then\n # the defaults\n requests_response_code = meta.get(\n 'requests_response_code',\n requests.codes.ok if response else requests.codes.not_found,\n )\n\n # Allow us to force the server response text to be something other then\n # the defaults\n requests_response_text = meta.get('requests_response_text')\n if not isinstance(requests_response_text, six.string_types):\n # Convert to string\n requests_response_text = dumps(requests_response_text)\n\n # Whether or not we should include an image with our request; unless\n # otherwise specified, we assume that images are to be included\n include_image = meta.get('include_image', True)\n if include_image:\n # a default asset\n asset = AppriseAsset()\n\n else:\n # Disable images\n asset = AppriseAsset(image_path_mask=False, image_url_mask=False)\n asset.image_url_logo = None\n\n test_requests_exceptions = meta.get(\n 'test_requests_exceptions', False)\n\n # Mock our request object\n robj = mock.Mock()\n robj.content = u''\n mock_get.return_value = robj\n mock_post.return_value = robj\n\n if test_requests_exceptions is False:\n # Handle our default response\n mock_post.return_value.status_code = requests_response_code\n mock_get.return_value.status_code = requests_response_code\n\n # Handle our default text response\n mock_get.return_value.content = requests_response_text\n mock_post.return_value.content = requests_response_text\n mock_get.return_value.text = requests_response_text\n mock_post.return_value.text = requests_response_text\n\n # Ensure there is no side effect set\n mock_post.side_effect = None\n mock_get.side_effect = None\n\n else:\n # Handle exception testing; first we turn the boolean flag\n # into a list of exceptions\n test_requests_exceptions = self.req_exceptions\n\n try:\n # We can now instantiate our object:\n obj = Apprise.instantiate(\n url, asset=asset, suppress_exceptions=False)\n\n except Exception as e:\n # Handle our exception\n if instance is None:\n print('%s %s' % (url, str(e)))\n raise e\n\n if not isinstance(e, instance):\n print('%s %s' % (url, str(e)))\n raise e\n\n # We're okay if we get here\n return\n\n if obj is None:\n if instance is not None:\n # We're done (assuming this is what we were\n # expecting)\n print(\"{} didn't instantiate itself \"\n \"(we expected it to be a {})\".format(\n url, instance))\n assert False\n # We're done because we got the results we expected\n return\n\n if instance is None:\n # Expected None but didn't get it\n print('%s instantiated %s (but expected None)' % (\n url, str(obj)))\n assert False\n\n if not isinstance(obj, instance):\n print('%s instantiated %s (but expected %s)' % (\n url, type(instance), str(obj)))\n assert False\n\n if isinstance(obj, plugins.NotifyBase):\n # We loaded okay; now lets make sure we can reverse\n # this url\n assert isinstance(obj.url(), six.string_types) is True\n\n # Test url() with privacy=True\n assert isinstance(\n obj.url(privacy=True), six.string_types) is True\n\n # Some Simple Invalid Instance 
Testing\n assert instance.parse_url(None) is None\n assert instance.parse_url(object) is None\n assert instance.parse_url(42) is None\n\n if privacy_url:\n # Assess that our privacy url is as expected\n if not obj.url(privacy=True).startswith(privacy_url):\n raise AssertionError(\n \"Privacy URL: '{}' != expected '{}'\".format(\n obj.url(privacy=True)[:len(privacy_url)],\n privacy_url))\n\n if url_matches:\n # Assess that our URL matches a set regex\n assert re.search(url_matches, obj.url())\n\n # Instantiate the exact same object again using the URL\n # from the one that was already created properly\n obj_cmp = Apprise.instantiate(obj.url())\n\n # Our object should be the same instance as what we had\n # originally expected above.\n if not isinstance(obj_cmp, plugins.NotifyBase):\n # Assert messages are hard to trace back with the\n # way these tests work. Just printing before\n # throwing our assertion failure makes things\n # easier to debug later on\n print('TEST FAIL: {} regenerated as {}'.format(\n url, obj.url()))\n assert False\n\n # Tidy our object\n del obj_cmp\n\n if _self:\n # Iterate over our expected entries inside of our\n # object\n for key, val in self.items():\n # Test that our object has the desired key\n assert hasattr(key, obj) is True\n assert getattr(key, obj) == val\n\n try:\n self.__notify(url, obj, meta, asset)\n\n except AssertionError:\n # Don't mess with these entries\n print('%s AssertionError' % url)\n raise\n\n # Tidy our object and allow any possible defined deconstructors to\n # be executed.\n del obj\n\n @mock.patch('requests.get')\n @mock.patch('requests.post')\n def __notify(self, url, obj, meta, asset, mock_post, mock_get):\n \"\"\"\n Perform notification testing against object specified\n \"\"\"\n #\n # Prepare our options\n #\n\n # Allow notification type override, otherwise default to INFO\n notify_type = meta.get('notify_type', NotifyType.INFO)\n\n # Whether or not we're testing exceptions or not\n test_requests_exceptions = meta.get('test_requests_exceptions', False)\n\n # Our expected Query response (True, False, or exception type)\n response = meta.get('response', True)\n\n # Our expected Notify response (True or False)\n notify_response = meta.get('notify_response', response)\n\n # Our expected Notify Attachment response (True or False)\n attach_response = meta.get('attach_response', notify_response)\n\n # Test attachments\n # Don't set this if don't need to check it's value\n check_attachments = meta.get('check_attachments', True)\n\n # Allow us to force the server response code to be something other then\n # the defaults\n requests_response_code = meta.get(\n 'requests_response_code',\n requests.codes.ok if response else requests.codes.not_found,\n )\n\n # Allow us to force the server response text to be something other then\n # the defaults\n requests_response_text = meta.get('requests_response_text')\n if not isinstance(requests_response_text, six.string_types):\n # Convert to string\n requests_response_text = dumps(requests_response_text)\n\n # A request\n robj = mock.Mock()\n robj.content = u''\n mock_get.return_value = robj\n mock_post.return_value = robj\n\n if test_requests_exceptions is False:\n # Handle our default response\n mock_post.return_value.status_code = requests_response_code\n mock_get.return_value.status_code = requests_response_code\n\n # Handle our default text response\n mock_get.return_value.content = requests_response_text\n mock_post.return_value.content = requests_response_text\n mock_get.return_value.text = 
requests_response_text\n mock_post.return_value.text = requests_response_text\n\n # Ensure there is no side effect set\n mock_post.side_effect = None\n mock_get.side_effect = None\n\n else:\n # Handle exception testing; first we turn the boolean flag\n # into a list of exceptions\n test_requests_exceptions = self.req_exceptions\n\n try:\n if test_requests_exceptions is False:\n # Disable throttling\n obj.request_rate_per_sec = 0\n\n # check that we're as expected\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type) == notify_response\n\n # check that this doesn't change using different overflow\n # methods\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type,\n overflow=OverflowMode.UPSTREAM) == notify_response\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type,\n overflow=OverflowMode.TRUNCATE) == notify_response\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type,\n overflow=OverflowMode.SPLIT) == notify_response\n\n #\n # Handle varations of the Asset Object missing fields\n #\n\n # First make a backup\n app_id = asset.app_id\n app_desc = asset.app_desc\n\n # now clear records\n asset.app_id = None\n asset.app_desc = None\n\n # Notify should still work\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type) == notify_response\n\n # App ID only\n asset.app_id = app_id\n asset.app_desc = None\n\n # Notify should still work\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type) == notify_response\n\n # App Desc only\n asset.app_id = None\n asset.app_desc = app_desc\n\n # Notify should still work\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type) == notify_response\n\n # Restore\n asset.app_id = app_id\n asset.app_desc = app_desc\n\n if check_attachments:\n # Test single attachment support; even if the service\n # doesn't support attachments, it should still\n # gracefully ignore the data\n attach = os.path.join(\n self.__test_var_dir, 'apprise-test.gif')\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type,\n attach=attach) == attach_response\n\n # Same results should apply to a list of attachments\n attach = AppriseAttachment((\n os.path.join(self.__test_var_dir, 'apprise-test.gif'),\n os.path.join(self.__test_var_dir, 'apprise-test.png'),\n os.path.join(self.__test_var_dir, 'apprise-test.jpeg'),\n ))\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=notify_type,\n attach=attach) == attach_response\n\n else:\n # Disable throttling\n obj.request_rate_per_sec = 0\n\n for _exception in self.req_exceptions:\n mock_post.side_effect = _exception\n mock_get.side_effect = _exception\n\n try:\n assert obj.notify(\n body=self.body, title=self.title,\n notify_type=NotifyType.INFO) is False\n\n except AssertionError:\n # Don't mess with these entries\n raise\n\n except Exception:\n # We can't handle this exception type\n raise\n\n except AssertionError:\n # Don't mess with these entries\n raise\n\n except Exception as e:\n # Check that we were expecting this exception to happen\n try:\n if not isinstance(e, response):\n raise e\n\n except TypeError:\n print('%s Unhandled response %s' % (url, type(e)))\n raise e\n\n #\n # Do the test again but without a title defined\n #\n try:\n if test_requests_exceptions is False:\n # check that we're as expected\n assert obj.notify(body='body', notify_type=notify_type) \\\n == notify_response\n\n else:\n 
for _exception in self.req_exceptions:\n mock_post.side_effect = _exception\n mock_get.side_effect = _exception\n\n try:\n assert obj.notify(\n body=self.body,\n notify_type=NotifyType.INFO) is False\n\n except AssertionError:\n # Don't mess with these entries\n raise\n\n except Exception:\n # We can't handle this exception type\n raise\n\n except AssertionError:\n # Don't mess with these entries\n raise\n\n except Exception as e:\n # Check that we were expecting this exception to happen\n if not isinstance(e, response):\n raise e\n\n return True\n", "id": "7726478", "language": "Python", "matching_score": 5.514811992645264, "max_stars_count": 0, "path": "test/helpers/rest.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020 <NAME> <<EMAIL>>\n# All rights reserved.\n#\n# This code is licensed under the MIT License.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files(the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions :\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport platform\nimport subprocess\nimport os\n\nfrom .NotifyBase import NotifyBase\nfrom ..common import NotifyImageSize\nfrom ..common import NotifyType\nfrom ..utils import parse_bool\nfrom ..AppriseLocale import gettext_lazy as _\n\n# Default our global support flag\nNOTIFY_MACOSX_SUPPORT_ENABLED = False\n\nif platform.system() == 'Darwin':\n # Check this is Mac OS X 10.8, or higher\n major, minor = platform.mac_ver()[0].split('.')[:2]\n\n # Toggle our enabled flag if verion is correct and executable\n # found. This is done in such a way to provide verbosity to the\n # end user so they know why it may or may not work for them.\n NOTIFY_MACOSX_SUPPORT_ENABLED = \\\n (int(major) > 10 or (int(major) == 10 and int(minor) >= 8))\n\n\nclass NotifyMacOSX(NotifyBase):\n \"\"\"\n A wrapper for the MacOS X terminal-notifier tool\n\n Source: https://github.com/julienXX/terminal-notifier\n \"\"\"\n\n # Set our global enabled flag\n enabled = NOTIFY_MACOSX_SUPPORT_ENABLED\n\n requirements = {\n # Define our required packaging in order to work\n 'details': _(\n 'Only works with Mac OS X 10.8 and higher. 
Additionally '\n ' requires that /usr/local/bin/terminal-notifier is locally '\n 'accessible.')\n }\n\n # The default descriptive name associated with the Notification\n service_name = _('MacOSX Notification')\n\n # The services URL\n service_url = 'https://github.com/julienXX/terminal-notifier'\n\n # The default protocol\n protocol = 'macosx'\n\n # A URL that takes you to the setup/help of the specific protocol\n setup_url = 'https://github.com/caronc/apprise/wiki/Notify_macosx'\n\n # Allows the user to specify the NotifyImageSize object\n image_size = NotifyImageSize.XY_128\n\n # Disable throttle rate for MacOSX requests since they are normally\n # local anyway\n request_rate_per_sec = 0\n\n # Limit results to just the first 10 line otherwise there is just to much\n # content to display\n body_max_line_count = 10\n\n # The path to the terminal-notifier\n notify_path = '/usr/local/bin/terminal-notifier'\n\n # Define object templates\n templates = (\n '{schema}://',\n )\n\n # Define our template arguments\n template_args = dict(NotifyBase.template_args, **{\n 'image': {\n 'name': _('Include Image'),\n 'type': 'bool',\n 'default': True,\n 'map_to': 'include_image',\n },\n # Play the NAME sound when the notification appears.\n # Sound names are listed in Sound Preferences.\n # Use 'default' for the default sound.\n 'sound': {\n 'name': _('Sound'),\n 'type': 'string',\n },\n })\n\n def __init__(self, sound=None, include_image=True, **kwargs):\n \"\"\"\n Initialize MacOSX Object\n \"\"\"\n\n super(NotifyMacOSX, self).__init__(**kwargs)\n\n # Track whether or not we want to send an image with our notification\n # or not.\n self.include_image = include_image\n\n # Set sound object (no q/a for now)\n self.sound = sound\n return\n\n def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):\n \"\"\"\n Perform MacOSX Notification\n \"\"\"\n\n if not os.access(self.notify_path, os.X_OK):\n self.logger.warning(\n \"MacOSX Notifications require '{}' to be in place.\"\n .format(self.notify_path))\n return False\n\n # Start with our notification path\n cmd = [\n self.notify_path,\n '-message', body,\n ]\n\n # Title is an optional switch\n if title:\n cmd.extend(['-title', title])\n\n # The sound to play\n if self.sound:\n cmd.extend(['-sound', self.sound])\n\n # Support any defined images if set\n image_path = None if not self.include_image \\\n else self.image_url(notify_type)\n if image_path:\n cmd.extend(['-appIcon', image_path])\n\n # Always call throttle before any remote server i/o is made\n self.throttle()\n\n # Capture some output for helpful debugging later on\n self.logger.debug('MacOSX CMD: {}'.format(' '.join(cmd)))\n\n # Send our notification\n output = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n # Wait for process to complete\n output.wait()\n\n if output.returncode:\n self.logger.warning('Failed to send MacOSX notification.')\n self.logger.exception('MacOSX Exception')\n return False\n\n self.logger.info('Sent MacOSX notification.')\n return True\n\n def url(self, privacy=False, *args, **kwargs):\n \"\"\"\n Returns the URL built dynamically based on specified arguments.\n \"\"\"\n\n # Define any URL parametrs\n params = {\n 'image': 'yes' if self.include_image else 'no',\n }\n\n # Extend our parameters\n params.update(self.url_parameters(privacy=privacy, *args, **kwargs))\n\n if self.sound:\n # Store our sound\n params['sound'] = self.sound\n\n return '{schema}://_/?{params}'.format(\n schema=self.protocol,\n 
params=NotifyMacOSX.urlencode(params),\n )\n\n @staticmethod\n def parse_url(url):\n \"\"\"\n There are no parameters nessisary for this protocol; simply having\n gnome:// is all you need. This function just makes sure that\n is in place.\n\n \"\"\"\n\n results = NotifyBase.parse_url(url, verify_host=False)\n\n # Include images with our message\n results['include_image'] = \\\n parse_bool(results['qsd'].get('image', True))\n\n # Support 'sound'\n if 'sound' in results['qsd'] and len(results['qsd']['sound']):\n results['sound'] = NotifyMacOSX.unquote(results['qsd']['sound'])\n\n return results\n", "id": "10616014", "language": "Python", "matching_score": 4.237544059753418, "max_stars_count": 0, "path": "apprise/plugins/NotifyMacOSX.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020 <NAME> <<EMAIL>>\n# All rights reserved.\n#\n# This code is licensed under the MIT License.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files(the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions :\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport os\nimport six\nimport sys\nimport mock\n\nimport apprise\n\ntry:\n # Python v3.4+\n from importlib import reload\nexcept ImportError:\n try:\n # Python v3.0-v3.3\n from imp import reload\n except ImportError:\n # Python v2.7\n pass\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n\n@mock.patch('subprocess.Popen')\n@mock.patch('platform.system')\n@mock.patch('platform.mac_ver')\ndef test_plugin_macosx_general(mock_macver, mock_system, mock_popen, tmpdir):\n \"\"\"\n NotifyMacOSX() General Checks\n\n \"\"\"\n\n # Create a temporary binary file we can reference\n script = tmpdir.join(\"terminal-notifier\")\n script.write('')\n # Give execute bit\n os.chmod(str(script), 0o755)\n mock_cmd_response = mock.Mock()\n\n # Set a successful response\n mock_cmd_response.returncode = 0\n\n # Simulate a Mac Environment\n mock_system.return_value = 'Darwin'\n mock_macver.return_value = ('10.8', ('', '', ''), '')\n mock_popen.return_value = mock_cmd_response\n\n # Ensure our enviroment is loaded with this configuration\n reload(sys.modules['apprise.plugins.NotifyMacOSX'])\n reload(sys.modules['apprise.plugins'])\n reload(sys.modules['apprise.Apprise'])\n reload(sys.modules['apprise'])\n\n # Point our object to our new temporary existing file\n apprise.plugins.NotifyMacOSX.notify_path = str(script)\n\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?image=True', suppress_exceptions=False)\n assert isinstance(obj, apprise.plugins.NotifyMacOSX) is True\n\n # Test url() call\n assert 
isinstance(obj.url(), six.string_types) is True\n\n # test notifications\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is True\n\n # test notification without a title\n assert obj.notify(title='', body='body',\n notify_type=apprise.NotifyType.INFO) is True\n\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?image=True', suppress_exceptions=False)\n assert isinstance(obj, apprise.plugins.NotifyMacOSX) is True\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is True\n\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?image=False', suppress_exceptions=False)\n assert isinstance(obj, apprise.plugins.NotifyMacOSX) is True\n assert isinstance(obj.url(), six.string_types) is True\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is True\n\n # Test Sound\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?sound=default', suppress_exceptions=False)\n assert isinstance(obj, apprise.plugins.NotifyMacOSX) is True\n assert obj.sound == 'default'\n assert isinstance(obj.url(), six.string_types) is True\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is True\n\n # If our binary is inacccessible (or not executable), we can\n # no longer send our notifications\n os.chmod(str(script), 0o644)\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is False\n\n # Restore permission\n os.chmod(str(script), 0o755)\n\n # But now let's disrupt the path location\n obj.notify_path = 'invalid_missing-file'\n assert not os.path.isfile(obj.notify_path)\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is False\n\n # Test cases where the script just flat out fails\n mock_cmd_response.returncode = 1\n obj = apprise.Apprise.instantiate(\n 'macosx://', suppress_exceptions=False)\n assert isinstance(obj, apprise.plugins.NotifyMacOSX) is True\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is False\n\n # Restore script return value\n mock_cmd_response.returncode = 0\n\n # Test case where we simply aren't on a mac\n mock_system.return_value = 'Linux'\n reload(sys.modules['apprise.plugins.NotifyMacOSX'])\n reload(sys.modules['apprise.plugins'])\n reload(sys.modules['apprise.Apprise'])\n reload(sys.modules['apprise'])\n\n # Point our object to our new temporary existing file\n apprise.plugins.NotifyMacOSX.notify_path = str(script)\n\n # Our object is disabled\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?sound=default', suppress_exceptions=False)\n assert obj is None\n\n # Restore mac environment\n mock_system.return_value = 'Darwin'\n\n # Now we must be Mac OS v10.8 or higher...\n mock_macver.return_value = ('10.7', ('', '', ''), '')\n reload(sys.modules['apprise.plugins.NotifyMacOSX'])\n reload(sys.modules['apprise.plugins'])\n reload(sys.modules['apprise.Apprise'])\n reload(sys.modules['apprise'])\n\n # Point our object to our new temporary existing file\n apprise.plugins.NotifyMacOSX.notify_path = str(script)\n\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?sound=default', suppress_exceptions=False)\n assert obj is None\n\n # A newer environment to test edge case where this is tested\n mock_macver.return_value = ('9.12', ('', '', ''), '')\n reload(sys.modules['apprise.plugins.NotifyMacOSX'])\n reload(sys.modules['apprise.plugins'])\n reload(sys.modules['apprise.Apprise'])\n reload(sys.modules['apprise'])\n\n # Point our object to our new temporary 
existing file\n apprise.plugins.NotifyMacOSX.notify_path = str(script)\n\n # This is just to test that the the minor (in this case .12)\n # is only weighed with respect to the major number as wel\n # with respect to the versioning\n obj = apprise.Apprise.instantiate(\n 'macosx://_/?sound=default', suppress_exceptions=False)\n assert obj is None\n", "id": "4325120", "language": "Python", "matching_score": 0.4230988025665283, "max_stars_count": 0, "path": "test/test_plugin_macosx.py" }, { "content": "# -*_ coding: utf-8 -*-\n\"\"\"mimecat - Easy catalogue of MIME types and extensions.\n\"\"\"\n#\n# taken from mimetypes.py\n#\nfrom pathlib import Path\n_KNOWNFILES = [\n \"/etc/mime.types\",\n \"/etc/httpd/mime.types\", # Mac OS X\n \"/etc/httpd/conf/mime.types\", # Apache\n \"/etc/apache/mime.types\", # Apache 1\n \"/etc/apache2/mime.types\", # Apache 2\n \"/usr/local/etc/httpd/conf/mime.types\",\n \"/usr/local/lib/netscape/mime.types\",\n \"/usr/local/etc/httpd/conf/mime.types\", # Apache 1.2\n \"/usr/local/etc/mime.types\", # Apache 1.3\n Path(__file__).parent/\"mime.types\", # default\n ]\n\n\nclass Catalogue(object):\n \"\"\"A Catalogue object represents a list of known MIME types and\n extensions. It can be initialized with a given filename or list of\n filenames. The files are expected to be in the format of a mime.types\n file.\n\n This class does not know about, care about, or possess the ability to\n process, parameters after the initial MIME type. For example,\n \"text/plain; charset=us-ascii.\"\n\n \"\"\"\n\n def __init__(self, filenames = None, filep = None):\n \"\"\"Initializes this catalogue from the filename or filenames in\n ``filenames`` or from the file or files in ``filep``\n\n If ``filenames`` and ``filep`` are None, then a list of common\n locations is tried to find ``mime.types`` when one is found, the MIME\n type definitions are loaded and the object is finished initializing. If\n none of the filenames can be found, IOError will be raised.\n\n If ``filenames`` is a list, then _all_ the files listed will be\n loaded. If ``filenames`` is a string, then the named file will be\n loaded. If none of the files can be found, IOError will be raised.\n\n If ``filep`` is not None, then that file-like object will be\n read. Note: Files will *not* be closed after reading. 
It is the\n caller's responsibility.\n\n If both ``filenames`` and ``filep`` are specified, the ``filep``\n is loaded first, followed by ``filenames``\n\n :param filenames: a filename or a list of filenames\n containing MIMEtype definitions in the style of mime.types\n :param filep: a file-like object to read definitions from.\n\n :raises: IOError If unable to find any of the files.\n\n \"\"\"\n self._types_to_exts = None\n self._exts_to_types = None\n self._known_mediatypes = None\n self._known_mimetypes = None\n self._known_extensions = None\n\n self.clear()\n\n if filenames is None and filep is None:\n self.load_filenames(_KNOWNFILES, True)\n else:\n if filep is not None:\n self.load_file(filep)\n\n if filenames is not None:\n if isinstance(filenames, (str, unicode)):\n filenames = [filenames]\n self.load_filenames(filenames)\n\n def clear(self):\n \"\"\"Clears out catalogue of known types.\n \"\"\"\n self._types_to_exts = {}\n self._exts_to_types = {}\n self._known_mediatypes = set()\n self._known_mimetypes = set()\n self._known_extensions = set()\n\n def load_filenames(self, filenames, stop_on_successful_load = False):\n \"\"\"Loads in MIME type defitions from ``filenames`` If\n ``stop_on_successful_load`` is True, then will stop on the first\n successful loading, else it will load all the files listed.\n\n :param filenames: List of files that could potentially contain\n MIME type defitions.\n\n :param stop_on_successful_load: If False, then load all the files.\n\n :raises: IOError If None of the listed files can be loaded.\n\n \"\"\"\n successful_load = False\n for filename in filenames:\n try:\n self.load_filename(filename)\n successful_load = True\n if stop_on_successful_load:\n break\n except IOError:\n pass\n\n if not successful_load:\n raise IOError(\"Could not locate a suitable mime.types file.\")\n\n def load_filename(self, filename):\n \"\"\"Loads in MIME type definitions from ``filename``.\n\n :param filename: The filename to load into the class\n \"\"\"\n with open(filename, \"r\") as filep:\n self.load_file(filep)\n\n def load_file(self, filep):\n \"\"\"Loads in MIME type definitions from open ``filep``\n :param filep: The file to load into the class\n \"\"\"\n for (mime_type, extensions) in _parse_file(filep):\n self.add_type(mime_type, extensions)\n\n @property\n def known_mediatypes(self):\n \"\"\"Returns the set of known media types (mediatype/subtype)\n\n :returns: frozen set of media types\n \"\"\"\n return frozenset(self._known_mediatypes)\n\n @property\n def known_mimetypes(self):\n \"\"\"Returns the set of known mimetypes.\n\n :returns: frozen set of mimetypes\n \"\"\"\n return frozenset(self._known_mimetypes)\n\n @property\n def known_extensions(self):\n \"\"\"Returns the set of known extensions.\n\n :returns: frozen set of extensions\n \"\"\"\n return frozenset(self._known_extensions)\n\n def get_extensions(self, typename):\n \"\"\"Returns an ordered list of known extensions to the given MIME type.\n Order is determined by the order in which the extensions were\n listed in the ``mime.types`` file. First extension encountered,\n then second, and so forth.\n\n :param typename: String of the MIME type.\n :returns: List of known extensions. 
These will include a leading .\n :raises: KeyError If MIME type is unknown.\n\n \"\"\"\n return self._types_to_exts[typename]\n\n def get_types(self, extension):\n \"\"\"Returns an ordered list of known MIME types for the given extension.\n Order is determined by the order in which the MIME types were\n added in the ``mime.types`` file.\n\n :param extension: String of the extension. This can include the\n leading . or omit it.\n :returns: List of known MIME types that use the given extension.\n :raises: KeyError If the extension is unknown.\n\n \"\"\"\n return self._exts_to_types[_canonicalize_extension(extension)]\n\n def add_type(self, typename, extensions):\n \"\"\"Adds a new entry for ``typename`` for the given list of\n ``extensions.`` If ``typename`` is already registered, then\n appends list of extensions to existing entry.\n\n :param typename: The MIME type to add.\n\n :param extensions: String of extension or list of extensions to\n add. This can include the leading . or omit it.\n\n :raises: ValueError If ``typename`` is not of the format type/subtype\n\n \"\"\"\n (mediatype, _) = typename.split(\"/\")\n\n if isinstance(extensions, str):\n extensions = [extensions]\n\n self._known_mediatypes |= set([mediatype])\n self._known_mimetypes |= set([typename])\n self._known_extensions |= set(_canonicalize_extension(ext) \\\n for ext in extensions)\n\n if typename not in self._types_to_exts:\n self._types_to_exts[typename] = []\n\n existing_exts = self._types_to_exts[typename]\n for ext in extensions:\n ext = _canonicalize_extension(ext)\n if ext not in existing_exts:\n existing_exts.append(ext)\n\n if ext not in self._exts_to_types:\n self._exts_to_types[ext] = []\n existing_types = self._exts_to_types[ext]\n\n if typename not in existing_types:\n existing_types.append(typename)\n\ndef _parse_file(filep):\n \"\"\"Returns a generator which yields parsed lines from a ``mime.types``\n file.\n\n :param filep: A file-like object opened for reading\n :yields: A tuple containing the mime_type and associated extensions.\n \"\"\"\n for line in filep:\n parsed_line = _parse_line(line)\n if parsed_line is None:\n continue\n yield parsed_line\n\ndef _parse_line(line):\n \"\"\"Parses a line from ``mime.types``\n\n :param line: The line to parse.\n :returns: Tuple with mimetype and a list of extensions. If line is blank,\n return None\n :raises: ValueError If mimetype is invalid (not type/subtype)\n \"\"\"\n if \"#\" in line:\n line = line[:line.find(\"#\")]\n\n parts = line.split()\n\n if not parts:\n return None\n\n mimetype = parts[0]\n\n mimetype.index(\"/\") # check for /, raise ValueError if not found\n\n extensions = []\n if len(parts) > 1:\n extensions = [_canonicalize_extension(ext) for ext in parts[1:]]\n\n return (mimetype, extensions)\n\ndef _canonicalize_extension(ext):\n \"\"\"Returns a transformed ext that has a uniform pattern.\n Specifically, if ``ext`` has a leading . then it is simply returned.\n If ``ext`` doesn't have a leading . then it is prepended.\n Exceptions to this are if ``ext`` is ``None`` or \"\". If ``ext``\n is \"\" then \"\" is return. 
If ``ext`` is None then None is returned.\n\n :param ext: The extension to canonicalize.\n :returns: The canonicalized extension.\n\n \"\"\"\n if ext is None or ext == \"\" or ext.startswith(\".\"):\n return ext\n return \".\" + ext\n", "id": "10751159", "language": "Python", "matching_score": 4.418646335601807, "max_stars_count": 0, "path": "mimecat.py" }, { "content": "# -*- coding: utf-8 -*-\nimport os\nimport unittest\nfrom io import StringIO\n\nfrom mimecat import (Catalogue, _canonicalize_extension,\n _parse_file, _parse_line)\n\nTEST_MIME_TYPES = \"\"\"\n# This file maps Internet media types to unique file extension(s).\n# Although created for httpd, this file is used by many software systems\n# and has been placed in the public domain for unlimited redistribution.\n#\n# The table below contains both registered and (common) unregistered types.\n# A type that has no unique extension can be ignored -- they are listed\n# here to guide configurations toward known types and to make it easier to\n# identify \"new\" types. File extensions are also commonly used to indicate\n# content languages and encodings, so choose them carefully.\n#\n# Internet media types should be registered as described in RFC 4288.\n# The registry is at <http://www.iana.org/assignments/media-types/>.\n#\n# MIME type (lowercased)\t\t\tExtensions\n# ============================================\t==========\n# application/activemessage\napplication/andrew-inset\t\t\tez\napplication/json\t\t\t\tjson\n# application/kpml-request+xml\n# audio/amr\naudio/midi\t\t\t\t\tmid midi kar rmi\n# audio/mobile-xmf\naudio/mp4\t\t\t\t\tmp4a\naudio/mp4a-latm\t\t\tm4a m4p\naudio/ogg\t\t\t\t\toga ogg spx\nimage/jpeg\t\t\t\t\tjpeg jpg jpe\n# image/jpm\n# message/cpim\n# message/delivery-status\nmessage/rfc822\t\t\t\t\teml mime\ntext/css\t\t\t\t\tcss\ntext/plain\t\t\t\t\ttxt text conf def list log in\n# text/xml\nvideo/3gpp\t\t\t\t\t3gp\nvideo/3gpp2\t\t\t\t\t3g2\nvideo/ogg\t\t\t\t\togv\n\"\"\"\n\nclass CatalogueTests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.test_filename = \"test.mime.types\"\n cls.test_filename_shibboleth = \"test-shibboleth.mime.types\"\n with open(cls.test_filename, \"w\") as filep:\n filep.write(TEST_MIME_TYPES)\n\n with open(cls.test_filename_shibboleth, \"w\") as filep:\n filep.write(\"text/plain2 txt\\n\")\n filep.write(\"text/plain txt2\\n\")\n\n @classmethod\n def tearDownClass(cls):\n os.unlink(cls.test_filename)\n os.unlink(cls.test_filename_shibboleth)\n\n def setUp(self):\n self.catalogue = Catalogue(self.test_filename)\n\n self.empty_catalogue = Catalogue(self.test_filename)\n self.empty_catalogue.clear()\n\n def test_init(self):\n cat = Catalogue(self.test_filename)\n self.assertIn(\"message/rfc822\",\n cat._known_mimetypes)\n\n def test_init_with_filep(self):\n with open(self.test_filename, \"r\") as filep:\n cat = Catalogue(filep = filep)\n self.assertIn(\"message/rfc822\",\n cat._known_mimetypes)\n\n def test_init_with_order(self):\n with open(self.test_filename, \"r\") as filep:\n cat = Catalogue(self.test_filename_shibboleth, filep)\n\n # test_filename should've been used first, so text/plain2 should\n # come after text/plain in the extensions to type map\n type_list = cat._exts_to_types[\".txt\"]\n self.assertGreater(type_list.index(\"text/plain2\"),\n type_list.index(\"text/plain\"))\n\n\n def test_init_fails(self):\n cat = None\n with self.assertRaises(IOError):\n cat = Catalogue([\"BOGUS_FILE\"])\n self.assertIsNone(cat)\n\n def test_clear(self):\n self.catalogue.clear()\n
self.assertEqual( {}, self.catalogue._types_to_exts)\n self.assertEqual( {}, self.catalogue._exts_to_types)\n self.assertEqual(set(), self.catalogue._known_mediatypes)\n self.assertEqual(set(), self.catalogue._known_mimetypes)\n self.assertEqual(set(), self.catalogue._known_extensions)\n\n def test_load_filenames_stops(self):\n self.empty_catalogue.load_filenames([self.test_filename_shibboleth,\n self.test_filename],\n True)\n\n self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)\n self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)\n self.assertEqual(len(self.empty_catalogue._known_extensions), 2)\n\n def test_load_filenames_does_not_stop(self):\n self.empty_catalogue.load_filenames([self.test_filename_shibboleth,\n self.test_filename], False)\n\n self.assertGreater(len(self.empty_catalogue._known_mediatypes), 1)\n self.assertGreater(len(self.empty_catalogue._known_mimetypes), 2)\n self.assertGreater(len(self.empty_catalogue._known_extensions), 2)\n\n def test_load_filenames_fail(self):\n with self.assertRaises(IOError):\n self.empty_catalogue.load_filenames([\"BOGUS_FILE\", \"BOGUS_FILE2\"])\n\n def test_load_filename(self):\n self.empty_catalogue.load_filename(self.test_filename_shibboleth)\n self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)\n self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)\n self.assertEqual(len(self.empty_catalogue._known_extensions), 2)\n\n def test_load_filename_fails(self):\n with self.assertRaises(IOError):\n self.empty_catalogue.load_filename(\"BOGUS_FILE\")\n\n def test_load_file(self):\n with open(self.test_filename_shibboleth) as filep:\n self.empty_catalogue.load_file(filep)\n\n self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)\n self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)\n self.assertEqual(len(self.empty_catalogue._known_extensions), 2)\n\n def test_parse_file(self):\n with open(self.test_filename_shibboleth) as filep:\n items = [item for item in _parse_file(filep) if item is not None]\n self.assertEqual(len(items), 2)\n\n with open(self.test_filename) as filep:\n items = [item for item in _parse_file(filep) if item is not None]\n self.assertEqual(len(items), 13)\n\n def test_parse_line(self):\n result = _parse_line(\"#\")\n self.assertIsNone(result)\n\n result = _parse_line(\"# more\")\n self.assertIsNone(result)\n\n result = _parse_line(\"text/plain\")\n self.assertEqual((\"text/plain\", []), result)\n\n result = _parse_line(\"text/plain ext1 ext2 ext3\")\n self.assertEqual((\"text/plain\", [\".ext1\", \".ext2\", \".ext3\"]), result)\n\n result = _parse_line(\"text/plain ext1 ext2 ext3 # with comment\")\n self.assertEqual((\"text/plain\", [\".ext1\", \".ext2\", \".ext3\"]), result)\n\n result = _parse_line(\"# text/plain ext1 ext2 ext3\")\n self.assertIsNone(result)\n\n result = _parse_line(\"# text/plain ext1 ext2 ext3 # with comment\")\n self.assertIsNone(result)\n\n def test_parse_line_fails(self):\n with self.assertRaises(ValueError):\n _ = _parse_line(\"invalid exts\")\n\n def test_known_mediatypes(self):\n self.assertIn(\"application\", self.catalogue.known_mediatypes)\n self.assertIn(\"text\", self.catalogue.known_mediatypes)\n\n def test_known_mimetypes(self):\n self.assertIn(\"application/json\", self.catalogue.known_mimetypes)\n self.assertIn(\"audio/mp4\", self.catalogue.known_mimetypes)\n\n def test_known_extensions(self):\n self.assertIn(\".ez\", self.catalogue.known_extensions)\n self.assertIn(\".m4a\", self.catalogue.known_extensions)\n\n def 
test_get_extensions(self):\n exts = self.catalogue.get_extensions(\"audio/midi\")\n self.assertEqual(len(exts), 4)\n\n def test_get_extensions_fails(self):\n with self.assertRaises(KeyError):\n self.catalogue.get_extensions(\"bad/type\")\n\n def test_get_types(self):\n types = self.catalogue.get_types(\".txt\")\n self.assertEqual(len(types), 1)\n\n types = self.catalogue.get_types(\"txt\")\n self.assertEqual(len(types), 1)\n\n def test_get_types_with_duplicate(self):\n self.catalogue.add_type(\"text/plain2\", \".txt\")\n types = self.catalogue.get_types(\"txt\")\n self.assertIn(\"text/plain\", types)\n self.assertIn(\"text/plain2\", types)\n\n def test_get_types_fails(self):\n with self.assertRaises(KeyError):\n self.catalogue.get_types(\"asdf\")\n\n def test_add_type(self):\n self.empty_catalogue.add_type(\"text/plain\", \"txt\")\n self.assertIn(\"text\", self.empty_catalogue._known_mediatypes)\n self.assertIn(\"text/plain\", self.empty_catalogue._known_mimetypes)\n self.assertIn(\".txt\", self.empty_catalogue._known_extensions)\n\n self.empty_catalogue.clear()\n self.empty_catalogue.add_type(\"text/plain\", \".txt\")\n self.assertIn(\"text\", self.empty_catalogue._known_mediatypes)\n self.assertIn(\"text/plain\", self.empty_catalogue._known_mimetypes)\n self.assertIn(\".txt\", self.empty_catalogue._known_extensions)\n\n self.empty_catalogue.clear()\n self.empty_catalogue.add_type(\"text/plain\", [\".txt\"])\n self.assertIn(\"text\", self.empty_catalogue._known_mediatypes)\n self.assertIn(\"text/plain\", self.empty_catalogue._known_mimetypes)\n self.assertIn(\".txt\", self.empty_catalogue._known_extensions)\n\n def test_add_types_with_duplicate_extensions(self):\n self.empty_catalogue.add_type(\"text/plain\", \"txt\")\n self.empty_catalogue.add_type(\"text/doc\", \"txt\")\n self.assertIn(\"text/plain\", self.empty_catalogue._exts_to_types[\".txt\"])\n self.assertIn(\"text/doc\", self.empty_catalogue._exts_to_types[\".txt\"])\n\n self.assertIn(\".txt\", self.empty_catalogue._types_to_exts[\"text/plain\"])\n self.assertIn(\".txt\", self.empty_catalogue._types_to_exts[\"text/doc\"])\n\n def test_add_type_fails(self):\n with self.assertRaises(ValueError):\n self.empty_catalogue.add_type(\"textplain\", \".txt\")\n\n def test_canonicalize_extension(self):\n ret = _canonicalize_extension(\"test\")\n self.assertEqual(ret, \".test\")\n\n ret = _canonicalize_extension(\".test\")\n self.assertEqual(ret, \".test\")\n\n ret = _canonicalize_extension(\"\")\n self.assertEqual(ret, \"\")\n\n ret = _canonicalize_extension(None)\n self.assertIsNone(ret)\n", "id": "12851199", "language": "Python", "matching_score": 2.2854526042938232, "max_stars_count": 0, "path": "test_mimecat.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nsetup(name=\"mimecat\",\n version=\"0.1.1\",\n author=\"<NAME>\",\n description=(\"A simple module for handling a catalogue of MIME types and\"\n \" extensions\"),\n url = \"https://github.com/mizhi/mimecat\",\n license=\"MIT\",\n keywords=\"MIME types extensions\",\n py_modules=[\"mimecat\"],\n classifiers = [\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development\"\n ])\n", "id": "10973353", "language": "Python", "matching_score": 0.3756240904331207, "max_stars_count": 0, "path": "setup.py" } ]
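A minimal usage sketch of the Catalogue API documented in mimecat.py above; the file name "my.mime.types" and the "application/x-demo" type below are hypothetical examples, not part of the package:

from mimecat import Catalogue

# Load from an explicit mime.types-style file, or call Catalogue() to search _KNOWNFILES.
cat = Catalogue("my.mime.types")
print(cat.get_extensions("text/plain"))    # canonicalized extensions, e.g. ['.txt', '.text', ...]
print(cat.get_types("txt"))                # leading dot is optional -> ['text/plain']
cat.add_type("application/x-demo", ["demo", ".dmo"])   # register a new mapping
print(".demo" in cat.known_extensions)     # True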
3.261498
musicnova
[ { "content": "import opentracing\n\nclass PyJobsEngine():\n pass", "id": "4329602", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "pyspark/pyjobs/apache_pyjobs/pyjobs_engine.py" }, { "content": "def run(self, msg):\n print(\"init commit\")\n", "id": "999069", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "qt_launcher.py" }, { "content": "import itertools\n\nG = {1:[6,7,8], 2: [5,7,8], 3: [5,6,8], 4:[5,6,7],\n 5:[2,3,4], 6:[1,3,4], 7:[1,2,4], 8:[1,2,3]}\n\nCOLORS = set(G.keys())\ndef color_it(order):\n def get_neighb_colors(v, dict_colors):\n return {dict_colors[n] for n in G[v] if dict_colors[n]!=None}\n cur_colors = dict.fromkeys(G.keys(), None)\n for v in order:\n cur_colors[v] = min(COLORS - get_neighb_colors(v, cur_colors))\n return set(cur_colors.values())\n\nfor order in itertools.permutations(G.keys()):\n if len(color_it(order))==4:\n print(order)\n break\n", "id": "3045688", "language": "Python", "matching_score": 0.24957327544689178, "max_stars_count": 0, "path": "stepik.py" }, { "content": "# IDIOT ROBOT\r\n# VERSION 1.0\r\n\r\n#!pip install websocket-client\r\n# https://bablofil.ru/binance-webscokets/\r\n# https://github.com/jsappme/node-binance-trader\r\n\r\n\r\nimport websocket\r\n\r\n\r\ndef on_open(ws):\r\n print(\"### connected ###\")\r\n\r\n\r\ndef on_message(ws, message):\r\n print(message)\r\n\r\n\r\ndef on_error(ws, error):\r\n print(error)\r\n\r\n\r\ndef on_close(ws):\r\n print(\"### closed ###\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Subscribe to the combined aggTrade streams and print every incoming message.\r\n ws = websocket.WebSocketApp(\"wss://stream.binance.com:9443/ws/itcbtc@aggTrade/ethbtc@aggTrade\"\r\n ,on_message=on_message\r\n ,on_error=on_error\r\n ,on_close=on_close)\r\n ws.on_open = on_open\r\n ws.run_forever()\r\n\r\n#!/usr/bin/env node\r\n\r\n# /* ============================================================\r\n# * node-binance-trader\r\n# * https://github.com/jsappme/node-binance-trader\r\n# * ============================================================\r\n# \r\n# const chalk = require('chalk')\r\n# const ora = require('ora')\r\n# const moment = require('moment')\r\n# const _ = require('lodash')\r\n# const numeral = require('numeral')\r\n# const clear = require('clear')\r\n# const figlet = require('figlet')\r\n# const Configstore = require('configstore')\r\n# const binance = require('binance-api-node').default\r\n# const inquirer = require(\"inquirer\")\r\n# const setTitle = require('node-bash-title')\r\n# \r\n# //////////////////////////////////////////////////////////////////////////////////\r\n# // https://www.binance.com/restapipub.html\r\n# // REPLACE xxx with your own API key and secret.\r\n# //\r\n# const APIKEY = 'XXXX'\r\n# const APISECRET = 'XXXX'\r\n# //////////////////////////////////////////////////////////////////////////////////\r\n# \r\n# let pnl = 0\r\n# let step = 0\r\n# let trade_count = 0\r\n# let order_id = 0\r\n# let buy_price = 0.00\r\n# let bid_price = 0.00\r\n# let ask_price = 0.00\r\n# let switch_price = 0.00\r\n# let stop_price = 0.00\r\n# let loss_price = 0.00\r\n# let sell_price = 0.00\r\n# let buy_amount = 0.00\r\n# let stepSize = 0\r\n# let tickSize = 8\r\n# let tot_cancel = 0\r\n# let pair = \"\"\r\n# let buying_method = \"\"\r\n# let selling_method = \"\"\r\n# let init_buy_filled = false\r\n# \r\n# //////////////////////////////////////////////////////////////////////////////////\r\n# \r\n# // Binance API initialization //\r\n# const client = binance({apiKey: APIKEY, apiSecret: APISECRET, useServerTime: true})\r\n# \r\n# const 
conf = new Configstore('nbt')\r\n# let base_currency = conf.get('nbt.base_currency')?conf.get('nbt.base_currency'):\"USDT\"\r\n# let budget = conf.get('nbt.budget')?parseFloat(conf.get('nbt.budget')):1.00\r\n# let fixed_buy_price = conf.get('nbt.fixed_buy_price')?parseFloat(conf.get('nbt.fixed_buy_price')):0.00\r\n# let currency_to_buy = conf.get('nbt.currency_to_buy')?conf.get('nbt.currency_to_buy'):\"BTC\"\r\n# let profit_pourcent = conf.get('nbt.profit_pourcent')?conf.get('nbt.profit_pourcent'):0.80\r\n# let loss_pourcent = conf.get('nbt.loss_pourcent')?conf.get('nbt.loss_pourcent'):0.40\r\n# let trailing_pourcent = conf.get('nbt.trailing_pourcent')?conf.get('nbt.trailing_pourcent'):0.40\r\n# \r\n# clear()\r\n# \r\n# console.log(chalk.yellow(figlet.textSync('_N_B_T_', { horizontalLayout: 'fitted' })))\r\n# console.log(' ')\r\n# console.log(\" 🐬 \".padEnd(10) + ' ' + \" 🐬 \".padStart(11))\r\n# console.log(\" 🐬 \".padEnd(10) + chalk.bold.underline.cyan('Node Binance Trader') + \" 🐬 \".padStart(11))\r\n# console.log(\" 🐬 \".padEnd(10) + ' ' + \" 🐬 \".padStart(11))\r\n# console.log(' ')\r\n# console.log(chalk.yellow(' ⚠️ USE THIS APP AT YOUR OWN RISK ⚠️'))\r\n# console.log(' ')\r\n# \r\n# var buy_info_request = [\r\n# {\r\n# type: 'input',\r\n# name: 'base_currency',\r\n# message: chalk.cyan('What base currency would you use for the trade? (USDT, BTC, BNB or ETH)'),\r\n# default: base_currency,\r\n# validate: function(value) {\r\n# var valid = ((value.toUpperCase()==='BTC')||(value.toUpperCase()==='USDT')||(value.toUpperCase()==='ETH')||(value.toUpperCase()==='BNB'))\r\n# return valid || 'Currency not valid, please chose between USDT, BTC, BNB, ETH'\r\n# },\r\n# },\r\n# {\r\n# type: 'input',\r\n# name: 'budget',\r\n# default: budget,\r\n# message: chalk.cyan('What is your budget for this trade? (in base currency)(total value. 
> 15 USD.)'),\r\n# validate: function(value) {\r\n# var valid = !isNaN(parseFloat(value)) && (value>0)\r\n# return valid || 'Please enter a number superior than 0'\r\n# },\r\n# filter: Number\r\n# },\r\n# {\r\n# type: 'input',\r\n# name: 'currency_to_buy',\r\n# message: chalk.cyan('What currency would you like to buy?'),\r\n# default: currency_to_buy,\r\n# },\r\n# ]\r\n# \r\n# \r\n# const report = ora(chalk.grey('Starting the trade...'))\r\n# \r\n# ask_pair_budget = () => {\r\n# inquirer.prompt(buy_info_request).then(answers => {\r\n# pair = (answers.currency_to_buy + answers.base_currency).toUpperCase()\r\n# conf.set('nbt.base_currency', (answers.base_currency).toUpperCase())\r\n# conf.set('nbt.budget', answers.budget)\r\n# conf.set('nbt.currency_to_buy', (answers.currency_to_buy).toUpperCase())\r\n# base_currency = (answers.base_currency).toUpperCase()\r\n# currency_to_buy = (answers.currency_to_buy).toUpperCase()\r\n# budget = parseFloat(answers.budget)\r\n# buy_info_request[0].default = base_currency\r\n# buy_info_request[1].default = budget\r\n# buy_info_request[2].default = currency_to_buy\r\n# // FIND OUT IF PAIR EXISTS AND THE PAIR QUOTE INFO:\r\n# client.exchangeInfo().then(results => {\r\n# // CHECK IF PAIR IS UNKNOWN:\r\n# if (_.filter(results.symbols, {symbol: pair}).length > 0) {\r\n# setTitle('🐬 ' + pair + ' 🐬 ')\r\n# tickSize = _.filter(results.symbols, {symbol: pair})[0].filters[0].tickSize.indexOf(\"1\") - 1\r\n# stepSize = _.filter(results.symbols, {symbol: pair})[0].filters[1].stepSize\r\n# // GET ORDER BOOK\r\n# client.book({ symbol: pair }).then(results => {\r\n# // SO WE CAN TRY TO BUY AT THE 1ST BID PRICE + %0.02:\r\n# bid_price = parseFloat(results.bids[0].price)\r\n# ask_price = parseFloat(results.asks[0].price)\r\n# console.log( chalk.grey(moment().format('h:mm:ss').padStart(8))\r\n# + chalk.yellow(pair.padStart(10))\r\n# + chalk.grey(\" CURRENT 1ST BID PRICE: \" + bid_price ))\r\n# fixed_buy_price_input[0].default = results.bids[0].price\r\n# ask_buy_sell_options()\r\n# })\r\n# }\r\n# else {\r\n# console.log(chalk.magenta(\"SORRY THE PAIR \") + chalk.green(pair) + chalk.magenta(\" IS UNKNOWN BY BINANCE. 
Please try another one.\"))\r\n# ask_pair_budget()\r\n# }\r\n# })\r\n# })\r\n# }\r\n# \r\n# var buy_sell_options = [\r\n# {\r\n# type: 'list',\r\n# name: 'buy_option',\r\n# message: chalk.cyan('How would you like to buy:'),\r\n# choices: ['Buy at Market Price', 'Set a Buy Order just above Bid Price', 'Set a Buy Order at a Fixed Buy Price'],\r\n# },\r\n# {\r\n# type: 'list',\r\n# name: 'sell_option',\r\n# message: chalk.cyan('How would you like to sell:'),\r\n# choices: ['Set a Trailing Stop Loss', 'Set Stop Loss and Profit Percentages'],\r\n# },\r\n# ]\r\n# \r\n# ask_buy_sell_options = () => {\r\n# console.log(\" \")\r\n# inquirer.prompt(buy_sell_options).then(answers => {\r\n# if (answers.buy_option.includes(\"Market\")) {\r\n# // MARKET PRICE BUY //\r\n# buying_method = \"Market\"\r\n# if (answers.sell_option.includes(\"Trailing\")) {\r\n# selling_method = \"Trailing\"\r\n# ask_trailing_percent()\r\n# }\r\n# else {\r\n# selling_method = \"Profit\"\r\n# ask_loss_profit_percents()\r\n# }\r\n# }\r\n# if (answers.buy_option.includes(\"Bid\")) {\r\n# // BID PRICE BUY //\r\n# buying_method = \"Bid\"\r\n# if (answers.sell_option.includes(\"Trailing\")) {\r\n# selling_method = \"Trailing\"\r\n# ask_trailing_percent()\r\n# }\r\n# else {\r\n# selling_method = \"Profit\"\r\n# ask_loss_profit_percents()\r\n# }\r\n# }\r\n# if (answers.buy_option.includes(\"Fixed\")) {\r\n# // FIXED PRICE BUY //\r\n# buying_method = \"Fixed\"\r\n# ask_fixed_buy_price(answers.sell_option)\r\n# }\r\n# })\r\n# }\r\n# \r\n# var fixed_buy_price_input = [\r\n# {\r\n# type: 'input',\r\n# name: 'fixed_buy_price',\r\n# default: fixed_buy_price,\r\n# message: chalk.cyan('What is Fixed Buy Price? (in base currency)'),\r\n# validate: function(value) {\r\n# var valid = !isNaN(parseFloat(value)) && (value>0)\r\n# return valid || 'Please enter a number superior than 0'\r\n# },\r\n# filter: Number\r\n# }\r\n# ]\r\n# \r\n# ask_fixed_buy_price = (sell_option) => {\r\n# console.log(\" \")\r\n# inquirer.prompt(fixed_buy_price_input).then(answers => {\r\n# conf.set('nbt.fixed_buy_price', answers.fixed_buy_price)\r\n# fixed_buy_price = parseFloat(answers.fixed_buy_price)\r\n# fixed_buy_price_input[0].default = fixed_buy_price\r\n# console.log(chalk.grey(\"The bot will set a buy order at \" + fixed_buy_price))\r\n# if (sell_option.includes(\"Trailing\")) {\r\n# selling_method = \"Trailing\"\r\n# ask_trailing_percent()\r\n# }\r\n# else {\r\n# selling_method = \"Profit\"\r\n# ask_loss_profit_percents()\r\n# }\r\n# })\r\n# }\r\n# \r\n# var loss_profit_inputs = [\r\n# {\r\n# type: 'input',\r\n# name: 'loss_pourcent',\r\n# default: loss_pourcent,\r\n# message: chalk.hex('#FF6347')('Enter the stop loss percentage:'),\r\n# validate: function(value) {\r\n# var valid = !isNaN(parseFloat(value)) && (value>0.10) && (value<100.00)\r\n# return valid || 'Please enter a number between 0.10 and 99.99'\r\n# },\r\n# filter: Number\r\n# },\r\n# {\r\n# type: 'input',\r\n# name: 'profit_pourcent',\r\n# default: profit_pourcent,\r\n# message: chalk.hex('#3CB371')('Enter the profit percentage:'),\r\n# validate: function(value) {\r\n# var valid = !isNaN(parseFloat(value)) && (value>0.10) && (value<100.00)\r\n# return valid || 'Please enter a number between 0.10 and 99.99'\r\n# },\r\n# filter: Number\r\n# },\r\n# {\r\n# type: 'confirm',\r\n# name: 'confirm',\r\n# message: chalk.cyan('Start the trade now?'),\r\n# default: true\r\n# },\r\n# ]\r\n# \r\n# ask_loss_profit_percents = () => {\r\n# console.log(\" \")\r\n# inquirer.prompt(loss_profit_inputs).then(answers => 
{\r\n# if (answers.confirm) {\r\n# conf.set('nbt.profit_pourcent', answers.profit_pourcent)\r\n# conf.set('nbt.loss_pourcent', answers.loss_pourcent)\r\n# profit_pourcent = parseFloat(answers.profit_pourcent)\r\n# loss_pourcent = parseFloat(answers.loss_pourcent)\r\n# loss_profit_inputs[0].default = loss_pourcent\r\n# loss_profit_inputs[1].default = profit_pourcent\r\n# start_trading()\r\n# }\r\n# else {\r\n# ask_pair_budget()\r\n# }\r\n# })\r\n# }\r\n# \r\n# \r\n# var trailing_loss_input = [\r\n# {\r\n# type: 'input',\r\n# name: 'trailing_pourcent',\r\n# default: trailing_pourcent,\r\n# message: chalk.hex('#FF6347')('Enter the Trailing Loss Percentage:'),\r\n# validate: function(value) {\r\n# var valid = !isNaN(parseFloat(value)) && (value>0.10) && (value<100.00)\r\n# return valid || 'Please enter a number between 0.10 and 99.99'\r\n# },\r\n# filter: Number\r\n# },\r\n# {\r\n# type: 'confirm',\r\n# name: 'confirm',\r\n# message: chalk.cyan('Start the trade now?'),\r\n# default: true\r\n# },\r\n# ]\r\n# \r\n# ask_trailing_percent = () => {\r\n# console.log(\" \")\r\n# inquirer.prompt(trailing_loss_input).then(answers => {\r\n# if (answers.confirm) {\r\n# conf.set('nbt.trailing_pourcent', answers.trailing_pourcent)\r\n# trailing_pourcent = parseFloat(answers.trailing_pourcent)\r\n# trailing_loss_input[0].default = trailing_pourcent\r\n# start_trading()\r\n# }\r\n# else {\r\n# ask_pair_budget()\r\n# }\r\n# })\r\n# }\r\n# \r\n# \r\n# start_trading = () => {\r\n# var precision = stepSize.toString().split('.')[1].length || 0\r\n# if (buying_method === \"Fixed\") {\r\n# buy_amount = (( ((budget / fixed_buy_price) / parseFloat(stepSize)) | 0 ) * parseFloat(stepSize)).toFixed(precision)\r\n# buy_price = parseFloat(fixed_buy_price)\r\n# console.log(chalk.grey(\"BUYING \" + buy_amount + \" OF \" + currency_to_buy + \" AT FIXED PRICE \") + chalk.green(buy_price.toFixed(tickSize)))\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'BUY',\r\n# quantity: buy_amount,\r\n# price: buy_price.toFixed(tickSize),\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# order_id = order_result.orderId\r\n# auto_trade()\r\n# })\r\n# .catch((error) => {\r\n# //console.error(JSON.stringify(error))\r\n# report.fail(error)\r\n# ask_pair_budget()\r\n# })\r\n# }\r\n# else if (buying_method === \"Bid\") {\r\n# buy_amount = (( ((parseFloat(budget) / (parseFloat(bid_price) * 1.0002)) / parseFloat(stepSize)) | 0 ) * parseFloat(stepSize)).toFixed(precision)\r\n# buy_price = parseFloat(bid_price) * 1.0002\r\n# console.log(chalk.grey(\"BUYING \" + buy_amount + \" OF \" + currency_to_buy + \" AT JUST ABOVE 1ST BID PRICE \") + chalk.green(buy_price.toFixed(tickSize)))\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'BUY',\r\n# quantity: buy_amount,\r\n# price: buy_price.toFixed(tickSize),\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# order_id = order_result.orderId\r\n# auto_trade()\r\n# })\r\n# .catch((error) => {\r\n# //console.error(JSON.stringify(error))\r\n# report.fail(error)\r\n# ask_pair_budget()\r\n# })\r\n# }\r\n# else if (buying_method === \"Market\") {\r\n# buy_amount = (( ((parseFloat(budget) / (parseFloat(ask_price) * 1.0002)) / parseFloat(stepSize)) | 0 ) * parseFloat(stepSize)).toFixed(precision)\r\n# buy_price = parseFloat(ask_price)\r\n# console.log(chalk.green(\"BUYING \" + buy_amount + \" OF \" + currency_to_buy + \" AT MARKET PRICE\" ))\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'BUY',\r\n# quantity: buy_amount,\r\n# type: 'MARKET',\r\n# recvWindow: 
1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# order_id = order_result.orderId\r\n# auto_trade()\r\n# })\r\n# .catch((error) => {\r\n# //console.error(JSON.stringify(error))\r\n# report.fail(error)\r\n# ask_pair_budget()\r\n# })\r\n# }\r\n# }\r\n# \r\n# auto_trade = () => {\r\n# step = 1\r\n# report.text = \"\"\r\n# report.start()\r\n# // LISTEN TO KEYBOARD PRSEED KEYS\r\n# process.stdin.resume()\r\n# process.stdin.setRawMode(true)\r\n# console.log(chalk.grey(\" ⚠️ Press [ CTRL + c ] or q to cancel the trade and sell everything at market price. ⚠️ \"))\r\n# console.log(\" \")\r\n# const curr_trade = trade_count\r\n# const clean_trades = client.ws.trades([pair], trade => {\r\n# \r\n# if (curr_trade !== trade_count) clean_trades()\r\n# report.text = add_status_to_trade_report(trade, \"\")\r\n# \r\n# // CHECK IF INITIAL BUY ORDER IS EXECUTED\r\n# if ( order_id && (step === 1) ) {\r\n# step = 99\r\n# checkBuyOrderStatus()\r\n# }\r\n# \r\n# // SWITCH PRICE REACHED SETTING UP SELL FOR PROFIT ORDER\r\n# if ( (selling_method === \"Profit\") && order_id && (step === 3) && (trade.price > switch_price) ) {\r\n# step = 99\r\n# console.log(chalk.grey(\" CANCEL STOP LOSS AND GO FOR PROFIT \"))\r\n# client.cancelOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then(() => {\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'SELL',\r\n# quantity: buy_amount,\r\n# price: sell_price,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then((order) => {\r\n# step = 5\r\n# order_id = order.orderId\r\n# var log_report = chalk.grey(\" SELL ORDER READY \")\r\n# console.log(log_report)\r\n# })\r\n# .catch((error) => {\r\n# var log_report = chalk.magenta(\" ERROR #555 \")\r\n# console.error(log_report + error)\r\n# })\r\n# })\r\n# .catch((error) => {\r\n# console.log(\" ERROR #547 \")\r\n# console.error(error)\r\n# })\r\n# }\r\n# \r\n# // INCREASE THE TRAILING STOP LOSS PRICE\r\n# if ( (selling_method === \"Trailing\") && order_id && (step === 3) && (trade.price > switch_price) ) {\r\n# step = 99\r\n# tot_cancel = tot_cancel + 1\r\n# console.log(chalk.grey(\" CANCEL CURRENT STOP LOSS \"))\r\n# client.cancelOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then(() => {\r\n# stop_price = (parseFloat(stop_price) + (parseFloat(stop_price) * trailing_pourcent / 100.00)).toFixed(tickSize)\r\n# loss_price = (parseFloat(stop_price) - (parseFloat(stop_price) * 0.040)).toFixed(tickSize)\r\n# set_stop_loss_order()\r\n# switch_price = (parseFloat(switch_price) + (parseFloat(switch_price) * trailing_pourcent / 100.00)).toFixed(tickSize)\r\n# console.log(chalk.grey(\" NEW TRAILING STOP LOSS SET @ \" + stop_price))\r\n# step = 3\r\n# })\r\n# .catch((error) => {\r\n# console.log(\" ERROR #547 \")\r\n# console.error(error)\r\n# })\r\n# }\r\n# \r\n# // PRICE BELLOW BUY PRICE SETTING UP STOP LOSS ORDER\r\n# if ( (selling_method==='Profit') && order_id && (step === 5) && (trade.price < buy_price) ) {\r\n# step = 99\r\n# console.log(chalk.grey(\" CANCEL PROFIT SETTING UP STOP LOSS \"))\r\n# tot_cancel = tot_cancel + 1\r\n# client.cancelOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then(() => {\r\n# set_stop_loss_order()\r\n# })\r\n# .catch((error) => {\r\n# pnl = 100.00*(buy_price - trade.price)/buy_price\r\n# var log_report = chalk.magenta(\" LOSS PRICE REACHED THE BOT SHOULD HAVE SOLD EVERYTHING #454 \")\r\n# report.fail(add_status_to_trade_report(trade, log_report))\r\n# reset_trade()\r\n# setTimeout( () => { 
ask_pair_budget(), 1000 } )\r\n# })\r\n# }\r\n# \r\n# // CURRENT PRICE REACHED SELL PRICE\r\n# if ( (selling_method === \"Profit\") && order_id && (step === 5) && (trade.price >= sell_price) ) {\r\n# step = 99\r\n# client.getOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# if ( parseFloat(order_result.executedQty) < parseFloat(order_result.origQty) ) {\r\n# var log_report = chalk.grey(\" PROFIT PRICE REACHED BUT NOT ALL EXECUTED \" + order_result.executedQty )\r\n# report.text = add_status_to_trade_report(trade, log_report)\r\n# step = 5\r\n# }\r\n# else {\r\n# clean_trades()\r\n# pnl = 100.00*(trade.price - buy_price)/buy_price\r\n# var log_report = chalk.greenBright(\" 🐬 !!! WE HAVE A WINNER !!! 🐬 \")\r\n# report.text = add_status_to_trade_report(trade, log_report)\r\n# reset_trade()\r\n# report.succeed()\r\n# setTimeout( () => { ask_pair_budget(), 1000 } )\r\n# }\r\n# })\r\n# .catch((error) => {\r\n# console.error(\" ERROR 8 \" + error)\r\n# })\r\n# }\r\n# \r\n# // CURRENT PRICE REACHED STOP PRICE\r\n# if ( order_id && (step === 3) && (trade.price <= stop_price) ) {\r\n# step = 99\r\n# client.getOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# if ( parseFloat(order_result.executedQty) < parseFloat(order_result.origQty) ) {\r\n# var log_report = chalk.grey(\" STOP PRICE REACHED BUT NOT ALL EXECUTED \" + order_result.executedQty )\r\n# report.text = add_status_to_trade_report(trade, log_report)\r\n# step = 5\r\n# }\r\n# else {\r\n# clean_trades()\r\n# pnl = 100.00*(buy_price - trade.price)/buy_price\r\n# var log_report = chalk.magenta(\" STOP LOSS ALL EXECUTED\")\r\n# report.text = add_status_to_trade_report(trade, log_report)\r\n# reset_trade()\r\n# report.succeed()\r\n# setTimeout( () => { ask_pair_budget(), 1400 } )\r\n# }\r\n# })\r\n# .catch((error) => {\r\n# console.error(\" API ERROR #9 \" + error)\r\n# clean_trades()\r\n# pnl = 100.00*(buy_price - trade.price)/buy_price\r\n# var log_report = chalk.magenta(\" TRADE STOPPED \")\r\n# report.text = add_status_to_trade_report(trade, log_report)\r\n# reset_trade()\r\n# report.fail()\r\n# setTimeout( () => { ask_pair_budget(), 1400 } )\r\n# })\r\n# }\r\n# })\r\n# }\r\n# \r\n# sell_at_market_price = () => {\r\n# console.log(chalk.keyword('orange')(\" SELLING AT MARKET PRICE \"))\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'SELL',\r\n# type: 'MARKET',\r\n# quantity: buy_amount,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( order => {\r\n# reset_trade()\r\n# report.succeed( chalk.magenta(\" THE BOT SOLD AT MARKET PRICE #777 \") )\r\n# setTimeout( () => { ask_pair_budget(), 2500 } )\r\n# })\r\n# .catch( error => {\r\n# report.fail( \" ERROR #7771 \" + buy_amount + \" :: \" + error )\r\n# reset_trade()\r\n# })\r\n# }\r\n# \r\n# checkBuyOrderStatus = () => {\r\n# client.getOrder({ symbol: pair, orderId: order_id, recvWindow: 1000000, })\r\n# .then( order => {\r\n# if (order.status === \"FILLED\") {\r\n# init_buy_filled = true\r\n# buy_amount = parseFloat(order.executedQty)\r\n# console.log(chalk.white(\" INITAL BUY ORDER FULLY EXECUTED \"))\r\n# client.myTrades({ symbol: pair, limit: 1, recvWindow: 1000000 }).then( mytrade => {\r\n# buy_price = parseFloat(mytrade[0].price)\r\n# console.log(chalk.gray(\" FINAL BUY PRICE @ \") + chalk.cyan(buy_price))\r\n# if (selling_method===\"Trailing\") {\r\n# stop_price = (buy_price - (buy_price * trailing_pourcent / 100.00)).toFixed(tickSize)\r\n# loss_price 
= (stop_price - (stop_price * 0.040)).toFixed(tickSize)\r\n# set_stop_loss_order()\r\n# switch_price = (buy_price + (buy_price * trailing_pourcent / 100.00)).toFixed(tickSize)\r\n# }\r\n# else {\r\n# stop_price = (buy_price - (buy_price * loss_pourcent / 100.00)).toFixed(tickSize)\r\n# loss_price = (stop_price - (stop_price * 0.040)).toFixed(tickSize)\r\n# set_stop_loss_order()\r\n# switch_price = (buy_price + (buy_price * profit_pourcent / 200.00)).toFixed(tickSize)\r\n# sell_price = (buy_price + (buy_price * profit_pourcent / 100.00)).toFixed(tickSize)\r\n# }\r\n# })\r\n# }\r\n# else {\r\n# console.log(chalk.gray(\" BUY ORDER NOT YET FULLY EXECUTED \"))\r\n# init_buy_filled = false\r\n# step = 1\r\n# }\r\n# })\r\n# }\r\n# \r\n# set_stop_loss_order = () => {\r\n# client.order({\r\n# symbol: pair,\r\n# side: 'SELL',\r\n# type: 'STOP_LOSS_LIMIT',\r\n# stopPrice: stop_price,\r\n# quantity: buy_amount,\r\n# price: loss_price,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then((order) => {\r\n# order_id = order.orderId\r\n# var log_report = chalk.grey(\" STOP LOSS READY (\" + tot_cancel + \") @ \") + chalk.cyan(stop_price)\r\n# console.log(log_report)\r\n# step = 3\r\n# })\r\n# .catch((error) => {\r\n# console.error(\" ERRROR #1233 STOP PRICE (\" + stop_price + \") \" + error )\r\n# if (String(error).includes(\"MIN_NOTIONAL\")) {\r\n# console.error(\"⚠️ PLEASE MAKE SURE YOUR BUDGET VALUE IS SUPERIOR THAN 15 USD ⚠️\")\r\n# }\r\n# sell_at_market_price()\r\n# })\r\n# }\r\n# \r\n# add_status_to_trade_report = (trade, status) => {\r\n# if (init_buy_filled) {\r\n# var pnl = 100.00*(parseFloat(trade.price)-parseFloat(buy_price))/parseFloat(buy_price)\r\n# }\r\n# else {\r\n# var pnl = 0.00\r\n# }\r\n# return chalk.grey(moment().format('h:mm:ss').padStart(8))\r\n# + chalk.yellow(trade.symbol.padStart(10))\r\n# + (!trade.maker?chalk.green((chalk.grey(\"qty:\")+numeral(trade.quantity).format(\"0.000\")).padStart(30)):chalk.red((chalk.grey(\"qty:\")+numeral(trade.quantity).format(\"0.000\")).padStart(30)))\r\n# + chalk.grey(\" @ \") + chalk.cyan(trade.price).padEnd(24)\r\n# + ((pnl >= 0)?chalk.green((chalk.grey(\"pnl:\")+numeral(pnl).format(\"0.000\")).padStart(20)):chalk.red((chalk.grey(\"pnl:\")+numeral(pnl).format(\"0.000\")).padStart(20)))\r\n# + chalk.white(status)\r\n# }\r\n# \r\n# reset_trade = () => {\r\n# step = 0\r\n# trade_count = trade_count + 1\r\n# order_id = 0\r\n# buy_price = 0.00\r\n# stop_price = 0.00\r\n# loss_price = 0.00\r\n# sell_price = 0.00\r\n# tot_cancel = 0\r\n# init_buy_filled = false\r\n# }\r\n# \r\n# ////////////////////////////////////////////////////////////////////\r\n# // LISTEN TO KEYBOARD AND CANCEL THE TRADE IF (CRTL + C) OR Q PRESSED\r\n# process.stdin.setEncoding( 'utf8' )\r\n# process.stdin.on('keypress', ( key ) => {\r\n# if ( (key === '\\u0003') || (key === 'q') ) {\r\n# if (order_id) {\r\n# trade_count = trade_count + 1\r\n# console.log(\" --- STOPPING THE TRADE --- \")\r\n# client.cancelOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order) => {\r\n# console.log(\" CURRENT ORDER CANCELED \")\r\n# client.getOrder({\r\n# symbol: pair,\r\n# orderId: order_id,\r\n# recvWindow: 1000000,\r\n# })\r\n# .then( (order_result) => {\r\n# if (order_result.status === \"FILLED\") {\r\n# console.log(\"PREV ORDER FILLED\")\r\n# sell_at_market_price()\r\n# }\r\n# else if (order_result.status === \"PARTIALLY_FILLED\") {\r\n# console.log(\"PREV ORDER PARTIALLY_FILLED\")\r\n# if (order_result.side === \"BUY\") {\r\n# buy_amount = 
parseFloat(order_result.executedQty)\r\n# sell_at_market_price()\r\n# }\r\n# else {\r\n# buy_amount = parseFloat(order_result.origQty) - parseFloat(order_result.executedQty)\r\n# sell_at_market_price()\r\n# }\r\n# }\r\n# else if (order_result.status === \"CANCELED\") {\r\n# if (order_result.side === \"SELL\") {\r\n# sell_at_market_price()\r\n# }\r\n# else {\r\n# sell_at_market_price()\r\n# reset_trade()\r\n# report.succeed( chalk.magenta(\" THE BOT STOPPED THE TRADE #3365 \") )\r\n# setTimeout( () => { ask_pair_budget(), 2500 } )\r\n# }\r\n# }\r\n# })\r\n# .catch((error) => {\r\n# console.error(\" GET FINAL ORDER ERROR : \" + error)\r\n# sell_at_market_price()\r\n# })\r\n# })\r\n# .catch((error) => {\r\n# console.error(\" FINAL CANCEL ERROR : \" + error)\r\n# sell_at_market_price()\r\n# })\r\n# }\r\n# }\r\n# })\r\n# ////////////////////////////////////////////////////////////////////\r\n# \r\n# const run = async () => {\r\n# ask_pair_budget()\r\n# }\r\n# \r\n# run()", "id": "1586875", "language": "Python", "matching_score": 1.5640666484832764, "max_stars_count": 0, "path": "Binance/idiot.py" }, { "content": "import sys\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nclass CustomWindow(QMainWindow):\n def paintEvent(self, event=None):\n painter = QPainter(self)\n\n painter.setOpacity(0.7)\n painter.setBrush(Qt.white)\n painter.setPen(QPen(Qt.white))\n painter.drawRect(self.rect())\n\n\napp = QApplication(sys.argv)\n\n# Create the main window\nwindow = CustomWindow()\n\nwindow.setWindowFlags(Qt.FramelessWindowHint)\nwindow.setAttribute(Qt.WA_NoSystemBackground, True)\nwindow.setAttribute(Qt.WA_TranslucentBackground, True)\n\n# Create the button\npushButton = QPushButton(window)\npushButton.setGeometry(QRect(240, 190, 90, 31))\npushButton.setText(\"Finished\")\npushButton.clicked.connect(app.quit)\n\n# Center the button\nqr = pushButton.frameGeometry()\ncp = QDesktopWidget().availableGeometry().center()\nqr.moveCenter(cp)\npushButton.move(qr.topLeft())\n\n# Run the application\nwindow.showFullScreen()\nsys.exit(app.exec_())\n", "id": "9293173", "language": "Python", "matching_score": 0.043658480048179626, "max_stars_count": 0, "path": "app.py" }, { "content": "import sys\r\nfrom datetime import timedelta, datetime\r\n\r\n\r\nfrom pyspark import HiveContext\r\nfrom pyspark.sql import functions as f, SparkSession\r\n\r\n\r\ndef algo(src, from_dt, to_dt):\r\n res = steps(src, from_dt, to_dt)\r\n return res\r\n\r\n\r\ndef steps(src, from_dt, to_dt):\r\n # Новые\r\n # фичи\r\n # Цифры\r\n # по\r\n # mcc\r\n # Погода\r\n # по\r\n # месту\r\n # расстояние\r\n # до\r\n # дальнейшего\r\n # соседа\r\n # максимальная\r\n # продолжительность\r\n # приобретений\r\n # в\r\n # данной\r\n # точке\r\n # по\r\n # дням\r\n #\r\n # ПРОССУМИРОВАТЬ\r\n # ДЕЛЬТЫ\r\n # ПО\r\n # РАЗНЫМ\r\n # КООРДИНАТАМ\r\n #\r\n # [Boosters]\r\n # Raiffeisen\r\n # Data\r\n # Cup.Baseline\r\n # Общий\r\n # подход:\r\n #\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_work(если\r\n # транзакция\r\n # находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # дома\r\n # клиента)\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_home(если\r\n # транзакция\r\n # находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # работы\r\n # клиента)\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_home == 1)\r\n # для\r\n # транзакции\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_work == 1)\r\n # 
для\r\n # транзакции\r\n # Точность\r\n # определения\r\n # местоположения:\r\n #\r\n # для\r\n # классификатора\r\n # is_home: ~3\r\n # x %\r\n # для\r\n # классификатора\r\n # is_work: ~2\r\n # x %\r\n # общая\r\n # оценка\r\n # на\r\n # Public\r\n # Leaderboard: ???\r\n # Примечание\r\n #\r\n # Требуется\r\n # Python\r\n # версии\r\n # 3.5\r\n # Требуется\r\n # библиотека\r\n # xgboost(для\r\n # обучения\r\n # использовалась\r\n # xgboost\r\n # версии\r\n # 0.7.post3)\r\n # Требуются\r\n # файлы: test_set.csv, train_set.csv\r\n # в\r\n # одном\r\n # каталоге\r\n # с\r\n # данным\r\n # скриптом\r\n # Требования\r\n # к\r\n # памяти: должно\r\n # работать\r\n # с\r\n # 2\r\n # Гб\r\n # свободного\r\n # RAM\r\n # Время\r\n # работы: ~3\r\n # минуты(тестировалось\r\n # на\r\n # процессоре\r\n # Intel\r\n # Core\r\n # i7 - 4770)\r\n #\r\n # % load_ext\r\n # autoreload\r\n # % autoreload\r\n # 2\r\n # ​\r\n # import sys\r\n # MODULES_PATH = '../code/'\r\n # if MODULES_PATH not in sys.path:\r\n # sys.path.append(MODULES_PATH)\r\n # import mfuncs\r\n #\r\n # import pandas as pd\r\n # import numpy as np\r\n # from tqdm import tqdm\r\n # tqdm.pandas()\r\n # pd.options.display.max_columns = 1000\r\n # pd.options.display.max_colwidth = -1\r\n # ​\r\n # import lightgbm as lgb\r\n # ​\r\n # ​\r\n # from sklearn.neighbors import NearestNeighbors\r\n # from sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, AgglomerativeClustering\r\n # from sklearn.metrics import silhouette_samples, silhouette_score\r\n # ​\r\n # from sklearn.metrics.pairwise import pairwise_distances\r\n # import gmaps\r\n # API_KEY = '<KEY>'\r\n # gmaps.configure(api_key=API_KEY) # Your Google API key\r\n # % pylab\r\n # inline\r\n #\r\n # # Определим типы колонок для экономии памяти\r\n # dtypes = {\r\n # 'transaction_date': str,\r\n # 'atm_address': str,\r\n # 'country': str,\r\n # 'city': str,\r\n # 'amount': np.float32,\r\n # 'currency': np.float32,\r\n # 'mcc': str,\r\n # 'customer_id': str,\r\n # 'pos_address': str,\r\n # 'atm_address': str,\r\n # 'pos_lat': np.float32,\r\n # 'pos_lon': np.float32,\r\n # 'atm_lat': np.float32,\r\n # 'atm_lon': np.float32,\r\n # 'home_lat': np.float32,\r\n # 'home_lon': np.float32,\r\n # 'work_lat': np.float32,\r\n # 'work_lon': np.float32,\r\n # }\r\n # df_all = pd.read_csv('../data/df_all.csv', dtype=dtypes)\r\n # Обрабатываем\r\n # дату\r\n # транзакции\r\n # и\r\n # категориальные\r\n # признаки\r\n #\r\n # df_all['currency'] = df_all['currency'].fillna(-1).astype(np.int32)\r\n # df_all['mcc'] = df_all['mcc'].apply(lambda x: int(x.replace(',', ''))).astype(np.int32)\r\n # df_all['city'] = df_all['city_name'].factorize()[0].astype(np.int32)\r\n # df_all['country'] = df_all['country'].factorize()[0].astype(np.int32)\r\n # df_all['amount'] = 10 ** df_all['amount']\r\n # Фичи\r\n # для\r\n # даты\r\n #\r\n # # удаляем транзакции без даты\r\n # df_all = df_all[~df_all['transaction_date'].isnull()]\r\n # df_all['transaction_date'] = pd.to_datetime(df_all['transaction_date'], format='%Y-%m-%d')\r\n # df_all.shape\r\n #\r\n # df_all['month'] = df_all.transaction_date.dt.month\r\n # df_all['day'] = df_all.transaction_date.dt.day\r\n # df_all['dayofyear'] = df_all.transaction_date.dt.dayofyear\r\n # df_all['dayofweek'] = df_all.transaction_date.dt.dayofweek\r\n # df_all.shape\r\n #\r\n # # праздники\r\n # holidays_df = pd.read_csv('../data/internal/all_holidays.csv', header=None)\r\n # holidays_df[0] = pd.to_datetime(holidays_df[0])\r\n # holidays_df = holidays_df[holidays_df[0].dt.year == 
2017]\r\n # holidays = holidays_df[0].dt.dayofyear.values\r\n # df_all['is_weekend'] = (df_all.dayofweek >= 6).astype(np.int8)\r\n # df_all['is_state_holiday'] = df_all['dayofyear'].isin(holidays).astype(np.int8)\r\n # df_all['is_holiday'] = df_all['is_weekend'] | df_all['is_state_holiday']\r\n # df_all.shape\r\n # Приводим\r\n # адрес\r\n # транзакции\r\n # для\r\n # pos\r\n # и\r\n # atm - транзакций\r\n # к\r\n # единообразному\r\n # виду\r\n # Просто\r\n # объединяем\r\n # в\r\n # одну\r\n # колонку\r\n # и\r\n # добавляем\r\n # фичу - это\r\n # атм\r\n # или\r\n # пос\r\n #\r\n # df_all['is_atm'] = (~df_all['atm_lat'].isnull()).astype(np.int8)\r\n # df_all['is_pos'] = (~df_all['pos_lat'].isnull()).astype(np.int8)\r\n # ​\r\n # df_all['add_lat'] = df_all['atm_lat'].fillna(0) + df_all['pos_lat'].fillna(0)\r\n # df_all['add_lon'] = df_all['atm_lon'].fillna(0) + df_all['pos_lon'].fillna(0)\r\n # ​\r\n # df_all.drop(['atm_lat', 'atm_lon', 'pos_lat', 'pos_lon'], axis=1, inplace=True)\r\n # ​\r\n # df_all = df_all[~((df_all['add_lon'] == 0) & (df_all['add_lon'] == 0))]\r\n # df_all.shape\r\n #\r\n # % % time\r\n # # грязный хак, чтобы не учить КНН на новом юзере каждый раз\r\n # df_all['fake_customer_id'] = (pd.factorize(df_all.customer_id)[0] + 1) * 100\r\n # ​\r\n # points = df_all[['fake_customer_id', 'add_lat', 'add_lon']].drop_duplicates().values\r\n # neigh = NearestNeighbors(2, radius=100000)\r\n # ​\r\n # # расстояние до уникальных точек\r\n # # neigh.fit(np.unique(points, axis=1))\r\n # neigh.fit(points)\r\n # ​\r\n # distances, indices = neigh.kneighbors(df_all[['fake_customer_id', 'add_lat', 'add_lon']].values)\r\n # df_all['distance_to_nearest_point'] = distances[:, 1]\r\n # del df_all['fake_customer_id']\r\n #\r\n # # кластерные фичи\r\n # df_cluster = pd.read_csv('../data/df_cluster.csv')\r\n # df_cluster.reset_index(drop=True, inplace=True)\r\n # df_all.reset_index(drop=True, inplace=True)\r\n # df_all = pd.concat([df_all, df_cluster.iloc[:, 3:]], axis=1)\r\n # df_cluster.head()\r\n # Генерируем\r\n # признаки\r\n # is_home, is_work\r\n # TODO: удалить\r\n # чуваков\r\n # у\r\n # которых\r\n # несколько\r\n # домов\r\n #\r\n # lat = df_all['home_lat'] - df_all['add_lat']\r\n # lon = df_all['home_lon'] - df_all['add_lon']\r\n # ​\r\n # df_all['is_home'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n # df_all['has_home'] = (~df_all['home_lon'].isnull()).astype(np.int8)\r\n # ​\r\n # lat = df_all['work_lat'] - df_all['add_lat']\r\n # lon = df_all['work_lon'] - df_all['add_lon']\r\n # df_all['is_work'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n # df_all['has_work'] = (~df_all['work_lon'].isnull()).astype(np.int8)\r\n # ​\r\n # # df_all.drop(['work_lat','work_lon','home_lat','home_lon'], axis=1, inplace=True)\r\n # Генерируем\r\n # категориальный\r\n # признак\r\n # для\r\n # адреса\r\n #\r\n # df_all['address'] = df_all['add_lat'].apply(lambda x: \"%.02f\" % x) + ';' + df_all['add_lon'].apply(\r\n # lambda x: \"%.02f\" % x)\r\n # df_all['address'] = df_all['address'].factorize()[0].astype(np.int32)\r\n #\r\n # df_all.sort_values(by=['customer_id', 'address', 'dayofyear'], inplace=True)\r\n #\r\n # def get_max_following_equal(arr, atype='eq'):\r\n # '''\r\n # types = eq, eq_gr, eq_gr_unique\r\n # '''\r\n # arr = arr.values\r\n # val_cur = 1\r\n # val_max = 1\r\n # if atype == 'eq_gr_unique':\r\n # arr = np.unique(arr)\r\n #\r\n # for i in range(arr.size - 1):\r\n # if atype in ['eq_gr', 'eq_gr_unique']:\r\n # if arr[i] + 1 >= arr[i + 1]:\r\n 
# val_cur += 1\r\n # else:\r\n # val_max = max(val_cur, val_max)\r\n # val_cur = 1\r\n # else:\r\n # if arr[i] == arr[i + 1]:\r\n # val_cur += 1\r\n # else:\r\n # val_max = max(val_cur, val_max)\r\n # val_cur = 1\r\n # return val_max\r\n #\r\n # # макс покупок подряд в день\r\n # gb = df_all.groupby(['customer_id', 'address'])\r\n # df_eq = gb['dayofyear'].apply(lambda x: get_max_following_equal(x)).reset_index()\r\n # df_eq.rename(columns={'dayofyear': 'dayofyear_streak_inday'}, inplace=True)\r\n # df_all = pd.merge(df_all, df_eq, on=['customer_id', 'address'], how='left')\r\n # # макс покупок дней подряд\r\n # gb = df_all.groupby(['customer_id', 'address'])\r\n # df_eq = gb['dayofyear'].apply(lambda x: get_max_following_equal(x, atype='eq_gr')).reset_index()\r\n # df_eq.rename(columns={'dayofyear': 'dayofyear_streak'}, inplace=True)\r\n # df_all = pd.merge(df_all, df_eq, on=['customer_id', 'address'], how='left')\r\n # # макс дней подряд\r\n # gb = df_all.groupby(['customer_id', 'address'])\r\n # df_eq = gb['dayofyear'].apply(lambda x: get_max_following_equal(x, atype='eq_gr_unique')).reset_index()\r\n # df_eq.rename(columns={'dayofyear': 'dayofyear_streak_days'}, inplace=True)\r\n # df_all = pd.merge(df_all, df_eq, on=['customer_id', 'address'], how='left')\r\n #\r\n # ​\r\n #\r\n # def get_num_closer(vals, unique=False, dist=0.02):\r\n # d = pairwise_distances(vals)\r\n # v = (d < dist).sum(axis=1)\r\n # if unique:\r\n # v -= (d == 0).sum(axis=1)\r\n # return pd.DataFrame(v, index=vals.index, columns=['num_neigh_dist{}_un{}'.format(dist, unique)])\r\n #\r\n # ​\r\n #\r\n # def get_ratio_closer(vals, unique=False, dist=0.02):\r\n # d = pairwise_distances(vals)\r\n # v = (d < dist).mean(axis=1)\r\n # if unique:\r\n # v -= (d == 0).mean(axis=1)\r\n # return pd.DataFrame(v, index=vals.index, columns=['ratio_neigh_dist{}_un{}'.format(dist, unique)])\r\n #\r\n # ​\r\n #\r\n # def get_num_far(vals, unique=False, dist=0.02):\r\n # d = pairwise_distances(vals)\r\n # v = (d >= dist).sum(axis=1)\r\n # return pd.DataFrame(v, index=vals.index, columns=['num_far_dist{}_un{}'.format(dist, unique)])\r\n #\r\n # ​\r\n # ​\r\n #\r\n # def get_median_closer(vals, unique=False, dist=0.02):\r\n # ind = vals.index\r\n # vals = vals.values\r\n # d = pairwise_distances(vals)\r\n # v = (d < dist)\r\n # if unique:\r\n # v = (d < dist) & (d != 0)\r\n # medians = []\r\n # for i in range(len(vals)):\r\n # medians.append(np.median(vals[v[i]], axis=0))\r\n #\r\n # ​\r\n # c1 = 'median_dist{}_un{}_lat'.format(dist, unique)\r\n # c2 = 'median_dist{}_un{}_lon'.format(dist, unique)\r\n # c3 = 'median_dist{}_un{}_lat_diff'.format(dist, unique)\r\n # c4 = 'median_dist{}_un{}_lon_diff'.format(dist, unique)\r\n # c5 = 'median_dist{}_un{}_diff'.format(dist, unique)\r\n # df_ = pd.DataFrame(medians, index=ind, columns=[c1, c2])\r\n # df_[c3] = np.abs(df_[c1] - vals[:, 0])\r\n # df_[c4] = np.abs(df_[c2] - vals[:, 1])\r\n # df_[c5] = df_[c3] + df_[c4]\r\n # return df_\r\n #\r\n #\r\n #df_all['add_lat_'] = (df_all['add_lat'] * 30).astype(np.int32)\r\n #df_all['add_lon_'] = (df_all['add_lon'] * 30).astype(np.int32)\r\n #\r\n #\r\n #def get_num_closer_complex(vals, unique=False, dist=0.01):\r\n # d = pairwise_distances(vals)\r\n # v = (d < dist).sum(axis=1)\r\n # if unique:\r\n # v -= (d == 0).sum(axis=1)\r\n # df_ = pd.DataFrame(v, index=vals.index, columns=['num_neigh_dist{}_un{}'.format(dist, unique)])\r\n # for dist in [0.02, 0.03]:\r\n # v = (d < dist).sum(axis=1)\r\n # if unique:\r\n # v -= (d == 0).sum(axis=1)\r\n # 
df_['num_neigh_dist{}_un{}'.format(dist, unique)] = v\r\n # for dist in [0.01, 0.02, 0.03]:\r\n # v = (d < dist).mean(axis=1)\r\n # if unique:\r\n # v -= (d == 0).mean(axis=1)\r\n # df_['ratio_neigh_dist{}_un{}'.format(dist, unique)] = v\r\n #\r\n # return df_\r\n #\r\n #​\r\n #df_clos = df_all.groupby(['add_lat_', 'add_lon_'])[['add_lat',\r\n # 'add_lon']].progress_apply(\r\n # lambda x: get_num_closer_complex(x, False, 0.01))\r\n #df_clos = df_clos.add_prefix('all_df_')\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n #% % time\r\n ## медианы в радиусе\r\n #​\r\n ## df_clos = df_all.groupby('customer_id')[['add_lat',\r\n ## 'add_lon']].apply(lambda x: get_median_closer(x, False, 0.01))\r\n ## df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n ## df_clos = df_all.groupby('customer_id')[['add_lat',\r\n ## 'add_lon']].apply(lambda x: get_median_closer(x, True, 0.01))\r\n ## df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n #df_clos = df_all.groupby('customer_id')[['add_lat',\r\n # 'add_lon']].apply(lambda x: get_median_closer(x, False, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n #df_clos = df_all.groupby('customer_id')[['add_lat',\r\n # 'add_lon']].apply(lambda x: get_median_closer(x, True, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n ## df_clos = df_all.groupby('customer_id')[['add_lat',\r\n ## 'add_lon']].apply(lambda x: get_median_closer(x, False, 0.05))\r\n ## df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n ## df_clos = df_all.groupby('customer_id')[['add_lat',\r\n ## 'add_lon']].apply(lambda x: get_median_closer(x, True, 0.05))\r\n ## df_all = pd.concat([df_all, df_clos], axis=1)\r\n #\r\n ## кол-ва соседей за радиусом\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.01))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.03))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.04))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.05))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 0.1))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_far(x, False, 1))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #\r\n ## кол-ва соседей внутри радиуса\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, False, 0.01))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, True, 0.01))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(get_num_closer)\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = 
df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, True, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, False, 0.03))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, True, 0.03))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, False, 0.04))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_num_closer(x, True, 0.04))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #\r\n ## доли соседей внутри радиуса\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, False, 0.01))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, True, 0.01))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, False, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, True, 0.02))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, False, 0.03))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, True, 0.03))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #​\r\n #​\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, False, 0.04))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #df_clos = df_all.groupby('customer_id')[['add_lat', 'add_lon']].apply(lambda x: get_ratio_closer(x, True, 0.04))\r\n #df_all = pd.concat([df_all, df_clos], axis=1)\r\n #Генерируем\r\n #абонентские\r\n #фичи\r\n #отвечающие\r\n #за\r\n #соотношения\r\n #между\r\n #точками\r\n #\r\n #df_all = df_all.merge(df_all.groupby('customer_id')['amount'].count().reset_index(name='cid_trans_count'), how='left')\r\n #df_all['cid_trans_count'] = df_all['cid_trans_count'].astype(np.int32)\r\n #​\r\n #df_all = df_all.merge(df_all.groupby('customer_id')['amount'].agg('sum').reset_index(name='cid_trans_sum'), how='left')\r\n #df_all['cid_trans_sum'] = df_all['cid_trans_sum'].astype(np.float32)\r\n #\r\n #\r\n #def add_count_sum_ratios(df_all, col):\r\n # col_count = 'cid_{}_trans_count'.format(col)\r\n # col_sum = 'cid_{}_trans_sum'.format(col)\r\n # df_ = df_all.groupby(['customer_id', col])['amount'].count().reset_index(name=col_count)\r\n # df_all = df_all.merge(df_, how='left')\r\n # df_all[col_count] = df_all[col_count].astype(np.int32)\r\n # df_all['ratio_{}_count'.format(col)] = df_all[col_count] / df_all['cid_trans_count']\r\n #\r\n # df_ = df_all.groupby(['customer_id', col])['amount'].agg('sum').reset_index(name=col_sum)\r\n # df_all = df_all.merge(df_, how='left')\r\n # df_all[col_sum] = df_all[col_sum].astype(np.float32)\r\n # df_all['ratio_{}_sum'.format(col)] = df_all[col_sum] / df_all['cid_trans_sum']\r\n # return df_all\r\n #\r\n #\r\n 
#df_all.city\r\n #\r\n #df_all = add_count_sum_ratios(df_all, 'address')\r\n #df_all = add_count_sum_ratios(df_all, 'terminal_id')\r\n #df_all = add_count_sum_ratios(df_all, 'mcc')\r\n #df_all = add_count_sum_ratios(df_all, 'is_holiday')\r\n #df_all = add_count_sum_ratios(df_all, 'city')\r\n #\r\n #for c in tqdm(df_all.columns):\r\n # if df_all[c].dtype == np.int64:\r\n # df_all[c] = df_all[c].astype(np.int32)\r\n # if df_all[c].dtype == np.float64:\r\n # df_all[c] = df_all[c].astype(np.float32)\r\n #\r\n #df_all['is_train'] = df_all['is_train'].astype(np.int8)\r\n #df_all['is_atm'] = df_all['is_atm'].astype(np.int8)\r\n #df_all['is_pos'] = df_all['is_pos'].astype(np.int8)\r\n #df_all['has_home'] = df_all['has_home'].astype(np.int8)\r\n #df_all['has_work'] = df_all['has_work'].astype(np.int8)\r\n #\r\n #df_all.dtypes.to_csv('../data/df_all_b11_dtypes.csv')\r\n #df_all.to_csv('../data/df_all_b11.csv', index=None)\r\n #\r\n #df_all.transaction_date.dtype\r\n #\r\n #df_all = pd.read_csv('../data/df_all_3983.csv')\r\n #\r\n #for c in tqdm(df_all.columns):\r\n # if df_all[c].dtype == np.int64:\r\n # df_all[c] = df_all[c].astype(np.int32)\r\n # if df_all[c].dtype == np.float64:\r\n # df_all[c] = df_all[c].astype(np.float32)\r\n #Мои\r\n #фичи\r\n #\r\n ## добавим признаки после групбая\r\n #df_gb = df_all[['customer_id', 'amount', 'add_lat', 'add_lon']].groupby('customer_id')\r\n #coord_stat_df = df_gb.agg(['mean', 'max', 'min'])\r\n #coord_stat_df['transactions_per_user'] = df_gb.agg('size')\r\n #coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]\r\n #coord_stat_df = coord_stat_df.astype(np.float32)\r\n #coord_stat_df.reset_index(inplace=True)\r\n #df_all = pd.merge(df_all, coord_stat_df, on='customer_id', how='left')\r\n #\r\n #cols = ['add_lat', 'add_lon']\r\n #types = ['min', 'max', 'mean']\r\n #for c in cols:\r\n # for t in types:\r\n # df_all['{}_diff_{}'.format(c, t)] = np.abs(df_all[c] - df_all['{}_{}'.format(c, t)], dtype=np.float32)\r\n #\r\n #df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n ## разности\r\n #df_all['lat_diff_cluster_lat'] = np.abs(df_all['add_lat'] - df_all['cl_lat'], dtype=np.float32)\r\n #df_all['lon_diff_cluster_lon'] = np.abs(df_all['add_lon'] - df_all['cl_lon'], dtype=np.float32)\r\n #df_all['lon_diff_cluster'] = (df_all['lat_diff_cluster_lat'] + df_all['lon_diff_cluster_lon']).astype(np.float32)\r\n #Фичи\r\n #mcc\r\n #\r\n ## категории\r\n #df_all['mcc_str'] = df_all['mcc'].astype(str).str.rjust(4, '0')\r\n #df_mcc = pd.read_csv('../data/internal/mcc.csv')\r\n #df_mcc = df_mcc.iloc[1:, :3]\r\n #df_mcc.columns = ['mcc_str', 'mcc_cat1', 'mcc_cat2']\r\n #df_mcc.drop_duplicates(subset=['mcc_str'], inplace=True)\r\n #df_mcc['mcc_cat1'] = pd.factorize(df_mcc['mcc_cat1'])[0].astype(np.int32)\r\n #df_mcc['mcc_cat2'] = pd.factorize(df_mcc['mcc_cat2'])[0].astype(np.int32)\r\n #df_mcc.fillna('none', inplace=True)\r\n #df_all = pd.merge(df_all, df_mcc, on='mcc_str', how='left')\r\n #del df_all['mcc_str']\r\n #df_mcc.head()\r\n #\r\n ## df_mcc['mcc_cat1'].fillna(-1, inplace=True)\r\n ## df_mcc['mcc_cat2'].fillna(-1, inplace=True)\r\n #​\r\n ## df_all = add_count_sum_ratios(df_all, 'mcc_cat1')\r\n ## df_all = add_count_sum_ratios(df_all, 'mcc_cat2')\r\n #\r\n #import geopandas as gpd\r\n #from shapely.geometry import Point, Polygon\r\n #\r\n #mos_shp = gpd.read_file('../data/internal/demography.shp')\r\n #​\r\n #_pnts = [Point(vals.T) for vals in df_all[df_all.city_name == 'Москва'][['add_lon', 'add_lat']].values]\r\n #pnts = 
gpd.GeoDataFrame(geometry=_pnts)\r\n #pnts.crs = mos_shp.crs\r\n #​\r\n #mos_shp.drop(['NAME', 'ABBREV_AO'], 1, inplace=True)\r\n #mos_shp['area'] = mos_shp['geometry'].area\r\n #for c in mos_shp.columns:\r\n # if c not in ['geometry', 'area'] and 'index' not in c:\r\n # mos_shp[c + 'dens'] = mos_shp[c] / mos_shp['area']\r\n #\r\n #% % time\r\n #cities_with_country = gpd.sjoin(pnts, mos_shp, how=\"left\", op='intersects')\r\n #\r\n #cols = cities_with_country.drop(['geometry', 'index_right'], 1).columns\r\n #for c in cols:\r\n # df_all[c] = -1\r\n #df_all.loc[df_all.city_name == 'Москва', cols] = cities_with_country\r\n #\r\n ## частота mcc\r\n #df_mcc = df_all['mcc'].value_counts(normalize=True).reset_index()\r\n #df_mcc.columns = ['mcc', 'mcc_freq']\r\n #df_all = pd.merge(df_all, df_mcc, on='mcc', how='left')\r\n #\r\n ## метро\r\n #mos_metro = pd.read_csv('../data/internal/moscow_metro.csv')\r\n #pet_metro = pd.read_csv('../data/internal/peter_metro.csv')\r\n #df_metro = pd.concat([mos_metro, pet_metro])\r\n #​\r\n #vals1 = df_all[['add_lat', 'add_lon']].values\r\n #vals2 = df_metro[['metro_lat', 'metro_lon']].values\r\n #X = pairwise_distances(vals1, vals2)\r\n #dist_to_min_metro = X.min(axis=1)\r\n #​\r\n #X[X == 0] = 10000\r\n #df_all['dist_to_minmetro'] = X.min(axis=1)\r\n #df_all['metro_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_all['metro_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_all['metro_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_all['metro_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_all['metro_in_03'] = (X < 0.03).sum(axis=1)\r\n #\r\n #df_cik = pd.read_csv('../data/internal/cik_uik.csv')\r\n #df_cik.dropna(subset=['lat_ik'], inplace=True)\r\n #df_cik.dropna(subset=['lon_ik'], inplace=True)\r\n #​\r\n #df_cik = df_cik[df_cik['lon_ik'] < 45]\r\n #vals1 = df_all[['add_lat', 'add_lon']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = df_cik[['lat_ik', 'lon_ik']].drop_duplicates().values.astype(np.float32)\r\n #​\r\n #vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #​\r\n #df_vals['dist_to_ciktro'] = X.min(axis=1)\r\n #df_vals['cik_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals['cik_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals['cik_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_vals['cik_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals['cik_in_03'] = (X < 0.03).sum(axis=1)\r\n #\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n ## погода\r\n ## буду смотреть погоду в 18-00\r\n #w1 = pd.read_csv('../data/internal/weather/moscow.csv', sep=';', index_col=False)\r\n #w1['city_name'] = 'Москва'\r\n #w1['transaction_date'] = pd.to_datetime(w1['Local time in Moscow'], format='%d.%m.%Y %H:%M')\r\n #del w1['Local time in Moscow']\r\n #w1 = w1[w1.transaction_date.dt.hour == 18].reset_index()\r\n #w1['transaction_date'] = w1['transaction_date'].dt.date\r\n #​\r\n #w2 = pd.read_csv('../data/internal/weather/peter.csv', sep=';', index_col=False)\r\n #w2['city_name'] = 'Санкт-Петербург '\r\n #w2['transaction_date'] = 
pd.to_datetime(w2['Local time in Moscow'], format='%d.%m.%Y %H:%M')\r\n #del w2['Local time in Moscow']\r\n #w2 = w2[w2.transaction_date.dt.hour == 18].reset_index()\r\n #w2['transaction_date'] = w2['transaction_date'].dt.date\r\n #​\r\n #df_weather = pd.concat([w1, w2], axis=0).reset_index()\r\n #df_weather['transaction_date'] = pd.to_datetime(df_weather['transaction_date'])\r\n #​\r\n #cn = df_weather['city_name'] # hardcode\r\n #df_weather = df_weather.select_dtypes(exclude=['object'])\r\n #df_weather['city_name'] = cn\r\n #for c in df_weather:\r\n # if df_weather[c].isnull().mean() > 0.9:\r\n # del df_weather[c]\r\n ## df_weather = df_weather.add_prefix('weather_')\r\n #df_all = pd.merge(df_all, df_weather, on=['city_name', 'transaction_date'], how='left')\r\n #\r\n #df_all['mcc_rm'] = df_all['mcc']\r\n #df_all.loc[~df_all['mcc_rm'].isin(df_all['mcc_rm'].value_counts().iloc[:32].index.values), 'mcc_rm'] = 99999\r\n #​\r\n #df_all['mcc_rm_cat1'] = df_all['mcc_cat1']\r\n #df_all.loc[~df_all['mcc_rm_cat1'].isin(df_all['mcc_rm_cat1'].value_counts().iloc[:32].index.values),\r\n # 'mcc_rm_cat1'] = 99999\r\n #\r\n #df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_rm'], prefix='mcc_rm_ohe').astype(np.int8)], axis=1)\r\n #del df_all['mcc_rm']\r\n #\r\n #df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_rm_cat1'], prefix='mcc_rm_cat1_ohe').astype(np.int8)], axis=1)\r\n #del df_all['mcc_rm_cat1']\r\n #​\r\n #df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_cat2'], prefix='mcc_cat2_ohe').astype(np.int8)], axis=1)\r\n #del df_all['mcc_cat2']\r\n #\r\n #df_all = df_all.reset_index(drop=True)\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_rm_ohe' in c]\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['mean', 'sum'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc = df_mcc.reset_index()\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n ## по объемам\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_rm_ohe' in c and 'mean' not in c and 'sum' not in c]\r\n #mcc_cols_ = [c + '_amount' for c in mcc_cols]\r\n #for c in mcc_cols:\r\n # df_all[c + '_amount'] = df_all[c] * df_all['amount']\r\n #\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols_].agg(['mean', 'sum'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc = df_mcc.reset_index()\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #​\r\n ## df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n ## df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n #​\r\n ## df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_].agg(['mean', 'sum'])\r\n ## df_mcc = df_mcc.add_suffix('_40coord')\r\n ## df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n ## df_mcc = df_mcc.astype(np.float32)\r\n ## df_mcc.reset_index(inplace=True)\r\n ## df_mcc.head()\r\n ## df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n ## del df_all['add_lat_']\r\n ## del df_all['add_lon_']\r\n #\r\n ## по объемам\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_rm_cat1_ohe' in c and 'mean' not in c and 'sum' not in c]\r\n #mcc_cols_ = [c + '_amount' for c in mcc_cols]\r\n #for c in mcc_cols:\r\n # df_all[c 
+ '_amount'] = df_all[c] * df_all['amount']\r\n #\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols_].agg(['mean', 'sum'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc = df_mcc.reset_index()\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #​\r\n ## df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n ## df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n #​\r\n ## df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_].agg(['mean', 'sum'])\r\n ## df_mcc = df_mcc.add_suffix('_40coord')\r\n ## df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n ## df_mcc = df_mcc.astype(np.float32)\r\n ## df_mcc.reset_index(inplace=True)\r\n ## df_mcc.head()\r\n ## df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n ## del df_all['add_lat_']\r\n ## del df_all['add_lon_']\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n ## mcc_cols = [c for c in df_all.columns if 'mcc_cat1' in c]\r\n ## df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['mean'])\r\n ## df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n ## df_mcc.reset_index(inplace=True)\r\n ## df_mcc.head()\r\n ## df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_cat2_ohe' in c]\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['mean', 'sum'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n ## РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n #df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n #df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n #​\r\n #df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols].agg(['mean', 'sum'])\r\n #df_mcc = df_mcc.add_suffix('_40coord')\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_rm_ohe' in c and 'mean' not in c and 'sum' not in c]\r\n ## РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n #df_all['add_lat_'] = (df_all['add_lat'] * 100).astype(np.int32)\r\n #df_all['add_lon_'] = (df_all['add_lon'] * 100).astype(np.int32)\r\n #​\r\n #df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols].agg(['mean', 'sum'])\r\n #df_mcc = df_mcc.add_suffix('_100coord')\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n ## РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА (ПРОВЕРИЛ-ЛУЧШЕ РАБОТАЕТ НА БОЛЬШИХ УЧАСТКАХ)\r\n #df_all['add_lat_'] = (df_all['add_lat'] * 200).astype(np.int32)\r\n #df_all['add_lon_'] = (df_all['add_lon'] * 200).astype(np.int32)\r\n 
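# Before the groupby below, a small self-contained sketch (hypothetical helper, toy data, never\r\n
    # called here) of the grid-cell idea used in this block: coordinates are snapped to an integer\r\n
    # grid (lat/lon times a scale factor, cast to int), the one-hot MCC columns are aggregated per\r\n
    # cell, and the cell profile is merged back so each transaction sees the MCC mix around it.\r\n
    def _grid_mcc_profile_sketch(scale=40):\r\n
        import pandas as pd\r\n
\r\n
        toy = pd.DataFrame({\r\n
            'add_lat': [55.751, 55.752, 55.790],\r\n
            'add_lon': [37.617, 37.618, 37.700],\r\n
            'mcc_ohe_5411': [1, 0, 1],\r\n
            'mcc_ohe_6011': [0, 1, 0],\r\n
        })\r\n
        toy['add_lat_'] = (toy['add_lat'] * scale).astype(int)\r\n
        toy['add_lon_'] = (toy['add_lon'] * scale).astype(int)\r\n
        ohe_cols = ['mcc_ohe_5411', 'mcc_ohe_6011']\r\n
        cell = toy.groupby(['add_lat_', 'add_lon_'])[ohe_cols].agg(['mean', 'sum'])\r\n
        cell.columns = ['_'.join(col) + '_%dcoord' % scale for col in cell.columns.values]\r\n
        cell = cell.reset_index()\r\n
        return toy.merge(cell, on=['add_lat_', 'add_lon_'], how='left')\r\n
    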
#​\r\n #df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols].agg(['mean', 'sum'])\r\n #df_mcc = df_mcc.add_suffix('_200coord')\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc = df_mcc.astype(np.float32)\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n ## РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n ## df_all['add_lat_'] = (df_all['add_lat'] * 100).astype(np.int32)\r\n ## df_all['add_lon_'] = (df_all['add_lon'] * 100).astype(np.int32)\r\n #​\r\n ## df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols].agg(['mean', 'sum'])\r\n ## df_mcc = df_mcc.add_suffix('_100coord')\r\n ## df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n ## df_mcc = df_mcc.astype(np.float32)\r\n ## df_mcc.reset_index(inplace=True)\r\n ## df_mcc.head()\r\n ## df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n #​\r\n ## del df_all['add_lat_']\r\n ## del df_all['add_lon_']\r\n #Игрушки\r\n #с\r\n #адресами\r\n #\r\n #df_all['string'] = df_all['string'].fillna('')\r\n #df_all['string'] = df_all['string'].str.lower()\r\n #\r\n #df_all['has_street'] = df_all['string'].str.contains('улиц').astype(np.int8)\r\n #df_all['has_pereul'] = df_all['string'].str.contains('переул').astype(np.int8)\r\n #df_all['has_bulvar'] = df_all['string'].str.contains('бульв').astype(np.int8)\r\n #df_all['has_prospekt'] = df_all['string'].str.contains('проспект').astype(np.int8)\r\n #df_all['has_shosse'] = df_all['string'].str.contains('шосс').astype(np.int8)\r\n #​\r\n #df_all['has_torg'] = df_all['string'].str.contains('торгов').astype(np.int8)\r\n #df_all['has_bus'] = df_all['string'].str.contains('бизн').astype(np.int8)\r\n #Медианы\r\n #по\r\n #юзеру\r\n #и\r\n #по\r\n #без\r\n #дубликатов\r\n #\r\n #df_med = df_all.groupby('customer_id')['add_lat', 'add_lon'].agg('median').reset_index()\r\n #df_med.columns = ['customer_id', 'add_lat_median', 'add_lon_median']\r\n #df_all = pd.merge(df_all, df_med, on='customer_id', how='left')\r\n #\r\n #df_med = df_all.drop_duplicates(subset=['customer_id',\r\n # 'add_lat', 'add_lon']).groupby('customer_id')['add_lat', 'add_lon'].agg(\r\n # 'median').reset_index()\r\n #df_med.columns = ['customer_id', 'add_lat_median_unique', 'add_lon_median_unique']\r\n #df_all = pd.merge(df_all, df_med, on='customer_id', how='left')\r\n #\r\n #df_all['lat_diff_median'] = np.abs(df_all['add_lat'] - df_all['add_lat_median'])\r\n #df_all['lon_diff_median'] = np.abs(df_all['add_lon'] - df_all['add_lat_median'])\r\n #df_all['lat_diff_median_unique'] = np.abs(df_all['add_lat'] - df_all['add_lat_median_unique'])\r\n #df_all['lon_diff_median_unique'] = np.abs(df_all['add_lon'] - df_all['add_lon_median_unique'])\r\n #​\r\n #df_all['diff_median'] = df_all['lat_diff_median'] + df_all['lon_diff_median']\r\n #df_all['diff_median_unique'] = df_all['lat_diff_median_unique'] + df_all['lon_diff_median_unique']\r\n #\r\n #df_all.to_csv('../data/df_all_2lvl.csv', index=None)\r\n #\r\n #df_all.dtypes.to_csv('../data/df_all_2lvl_dtypes.csv')\r\n #OSM\r\n #https: // wiki.openstreetmap.org / wiki / RU: % D0 % 9\r\n #E % D0 % B1 % D1 % 8\r\n #A % D0 % B5 % D0 % BA % D1 % 82 % D1 % 8\r\n #B_ % D0 % BA % D0 % B0 % D1 % 80 % D1 % 82 % D1 % 8\r\n #B # .D0.9A.D0.BE.D0.BC.D0.BC.D0.B5.D1.80.D1.87.D0.B5.D1.81.D0.BA.D0.B8.D0.B5\r\n #\r\n #import ogr\r\n #\r\n #driver = 
ogr.GetDriverByName('OSM')\r\n #data = driver.Open('../data/internal/map.osm')\r\n #\r\n #nlayer = data.GetLayerCount() # 5\r\n #print(nlayer)\r\n #features = []\r\n #for i in range(nlayer):\r\n # features += [x for x in data.GetLayerByIndex(i)]\r\n #fast_food\r\n #food_court\r\n #файзен\r\n #raiffeisen\r\n #railway\r\n #\r\n #расстояние\r\n #до\r\n #бизнес\r\n #центров\r\n #\r\n #coords = []\r\n #for f in tqdm(features):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'бизнес' in s and 'центр' in s:\r\n # el = f.ExportToJson(as_object=True)['geometry']['coordinates'][0]\r\n # if type(el) != float:\r\n # coords.append(el)\r\n #\r\n #vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = np.array(coords, dtype=np.float32)\r\n #vals1.shape, vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #\r\n #suf = 'bc'\r\n #df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n #df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n #​\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #до\r\n #фастфудов\r\n #http: // andrewgaidus.com / Convert_OSM_Data /\r\n #\r\n #driver = ogr.GetDriverByName('OSM')\r\n #data = driver.Open('../data/internal/map.osm')\r\n #layer_p = data.GetLayer('points') # 5\r\n #features_p = [x for x in layer_p]\r\n #\r\n #coords = []\r\n #for f in tqdm(features_p):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'fast_food' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n #\r\n #vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = np.array(coords, dtype=np.float32)\r\n #vals1.shape, vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #\r\n #suf = 'fastfood'\r\n #df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n #df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n #​\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #станции\r\n #\r\n #coords = []\r\n #for f in tqdm(features_p):\r\n # s = 
str(f.ExportToJson(as_object=True)).lower()\r\n # if 'railway' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n #\r\n #vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = np.array(coords, dtype=np.float32)\r\n #vals1.shape, vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #\r\n #suf = 'rail'\r\n #df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n #df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n #​\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #райф\r\n #\r\n #coords = []\r\n #for f in tqdm(features_p):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'райф' in s or 'raiffeisen' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n #\r\n #vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = np.array(coords, dtype=np.float32)\r\n #vals1.shape, vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #\r\n #suf = 'raif1'\r\n #df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n #df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n #df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n #​\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n #coords = []\r\n #for f in tqdm(features):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'райф' in s or 'raiffeisen' in s:\r\n # el = f.ExportToJson(as_object=True)['geometry']['coordinates'][0]\r\n # if type(el) != float:\r\n # coords.append(el)\r\n #\r\n #vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n #df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n #vals2 = np.array(coords, dtype=np.float32)\r\n #vals1.shape, vals2.shape\r\n #\r\n #X = pairwise_distances(vals1, vals2)\r\n #X[X == 0] = 10000\r\n #\r\n #suf = 'raif2'\r\n #df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n #df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n #df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n #df_vals[suf + '_in_02'] = (X < 
0.02).sum(axis=1)\r\n #df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n #df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n #​\r\n #df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n #df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n #df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n #df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n #del df_vals['add_lat']\r\n #del df_vals['add_lon']\r\n #​\r\n #df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n #del X\r\n #del df_all['add_lat_']\r\n #del df_all['add_lon_']\r\n #\r\n #del vals2\r\n #LightGBM\r\n #\r\n #df_all.shape, df_all.columns.duplicated().sum()\r\n #\r\n #df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n #from sklearn.model_selection import train_test_split\r\n #​\r\n #ys = ['is_home', 'is_work']\r\n #drop_cols = ['atm_address', 'customer_id', 'pos_address', 'terminal_id', 'transaction_date',\r\n # 'is_home', 'has_home', 'is_work', 'has_work', 'is_train', 'city_name']\r\n #drop_cols += ['work_lat', 'work_lon', 'home_lat', 'home_lon', 'string']\r\n #​\r\n #drop_cols += ['pred:is_home', 'pred:is_work']\r\n ## cols = [c for c in df_all.columns if 'median_dist' in c]\r\n ## cols = [c for c in df_all.columns if 'lat' in c or 'lon' in c and 'diff' not in c and 'median' not in c]\r\n ## cols += ['address']\r\n ## drop_cols += cols\r\n #​\r\n #cols = [c for c in df_all.columns if 'mcc_ohe' in c and 'mean' not in c]\r\n ## cols += ['address']\r\n #drop_cols += cols\r\n #​\r\n #​\r\n #y_cols = ['is_home', 'is_work']\r\n #usecols = df_all.drop(drop_cols, 1, errors='ignore').columns\r\n #\r\n #params = {\r\n # 'objective': 'binary',\r\n # 'num_leaves': 511,\r\n # 'learning_rate': 0.01,\r\n # 'metric': 'binary_logloss',\r\n # 'feature_fraction': 0.8,\r\n # 'bagging_fraction': 0.8,\r\n # 'bagging_freq': 1,\r\n # 'num_threads': 12,\r\n # 'verbose': 0,\r\n #}\r\n #​\r\n #model = {}\r\n #\r\n #y_col = 'is_home'\r\n #​\r\n #cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n #cust_train = cust_train[cust_train > 0].index\r\n #​\r\n #cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n #​\r\n #df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n #df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n #​\r\n #lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n #lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n #​\r\n #gbm_h = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=100)\r\n #​\r\n #model[y_col] = gbm_h\r\n #\r\n #y_col = 'is_work'\r\n #​\r\n #cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n #cust_train = cust_train[cust_train > 0].index\r\n #​\r\n #cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n #​\r\n #​\r\n #​\r\n #df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n #df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n #​\r\n #lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n #lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n #​\r\n #gbm_w = lgb.train(params,\r\n # lgb_train,\r\n # 
valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=100)\r\n #​\r\n #model[y_col] = gbm_w\r\n #Полезные\r\n #MCC\r\n #дом\r\n #6011 - финансы\r\n #5411 - придомовые\r\n #магазы\r\n #5814 - мак\r\n #5912 - аптеки\r\n #5921 - пиво\r\n #5499 - магазы\r\n #пяторочка\r\n #типа\r\n #5812 - рестроанчики\r\n #работа\r\n #\r\n #figsize(14, 10)\r\n #lgb.plot_importance(gbm_h, max_num_features=40)\r\n #\r\n #\r\n #def _best(x):\r\n # ret = None\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # if pred in x:\r\n # i = (x[pred].idxmax())\r\n # cols = [pred, 'add_lat', 'add_lon']\r\n # if col in x:\r\n # cols.append(col)\r\n # tmp = x.loc[i, cols]\r\n # tmp.rename({\r\n # 'add_lat': '%s:add_lat' % col,\r\n # 'add_lon': '%s:add_lon' % col,\r\n # }, inplace=True)\r\n # if ret is None:\r\n # ret = tmp\r\n # else:\r\n # ret = pd.concat([ret, tmp])\r\n # return ret\r\n #\r\n #​\r\n #​\r\n #\r\n #def predict_proba(dt, ys=['is_home', 'is_work']):\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # dt[pred] = model[col].predict(dt[usecols])\r\n # return dt.groupby('customer_id').apply(_best).reset_index()\r\n #\r\n #​\r\n #\r\n #def score(dt, ys=['is_home', 'is_work'], return_df=False):\r\n # dt_ret = predict_proba(dt, ys)\r\n # if return_df:\r\n # return dt_ret\r\n # mean = 0.0\r\n # for col in ys:\r\n # col_mean = dt_ret[col].mean()\r\n # mean += col_mean\r\n # if len(ys) == 2:\r\n # mean = mean / len(ys)\r\n # return mean\r\n #\r\n #\r\n #print(\"Train accuracy:\", score(df_train, ys=['is_home']))\r\n #print(\"Test accuracy:\", score(df_valid, ys=['is_home']))\r\n #​\r\n #print(\"Train accuracy:\", score(df_train, ys=['is_work']))\r\n #print(\"Test accuracy:\", score(df_valid, ys=['is_work']))\r\n #Train\r\n #accuracy: 0.5458070770722249\r\n #Test\r\n #accuracy: 0.5494186046511628\r\n #Train\r\n #accuracy: 0.4301987396994668\r\n #Test\r\n #accuracy: 0.3536821705426357\r\n #\r\n #Анализ\r\n #False - Negative\r\n #\r\n ## сколько вообще людей имеют хорошую точку\r\n #df_all[(df_all.is_train == 1)].groupby('customer_id')['is_work'].agg('max').mean()\r\n #\r\n #df_pred = score(df_valid, ys=['is_home'], return_df=True)\r\n #\r\n #df_pred.sample(5)\r\n #\r\n #cid = 'bf66305d0ec05abb6e6a6358acb8c2a1'\r\n #cid = df_pred[df_pred.is_home == 0].sample(1)['customer_id'].values[0]\r\n #​\r\n #df_an = df_all[df_all.customer_id == cid]\r\n #center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values\r\n #center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values\r\n #​\r\n #​\r\n #predicted_home = df_pred[df_pred.customer_id == cid][['is_home:add_lat', 'is_home:add_lon']].drop_duplicates().values\r\n #predicted_work = df_pred[df_pred.customer_id == cid][['is_work:add_lat', 'is_work:add_lon']].drop_duplicates().values\r\n #​\r\n #points_pos = df_an[df_an.is_pos == 1][['add_lat', 'add_lon']].dropna().values\r\n #points_atm = df_an[df_an.is_pos == 0][['add_lat', 'add_lon']].dropna().values\r\n #print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)\r\n #​\r\n ## синие - покупки\r\n ## красные - банкоматы\r\n #gmap = gmaps.Map()\r\n #if len(points_pos) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',\r\n # fill_color=\"blue\", stroke_color=\"blue\", scale=3))\r\n #if len(points_atm) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',\r\n # fill_color=\"red\", stroke_color=\"red\", scale=3))\r\n #​\r\n #if not np.isnan(center_home)[0][0]:\r\n # 
gmap.add_layer(gmaps.marker_layer(center_home, label='home'))\r\n #if not np.isnan(center_work)[0][0]:\r\n # gmap.add_layer(gmaps.marker_layer(center_work, label='work'))\r\n #​\r\n #gmap.add_layer(gmaps.marker_layer(predicted_home, label='predicted_home'))\r\n #gmap.add_layer(gmaps.marker_layer(predicted_work, label='predicted_work'))\r\n #\r\n #gmap\r\n #\r\n #df_all.to_csv('../data/dfpredict1903.csv', index=None)\r\n #Predict\r\n #\r\n #del cust_test\r\n #\r\n #cust_test = df_all.loc[df_all['is_train'] == 0, 'customer_id'].unique()\r\n ## df_test = pd.DataFrame(cust_test, columns = ['customer_id']).merge(df_all, how = 'left')\r\n #df_test = predict_proba(pd.DataFrame(cust_test, columns=['customer_id']).merge(df_all, how='left'))\r\n #df_test.rename(columns={\r\n # 'customer_id': '_ID_',\r\n # 'is_home:add_lat': '_HOME_LAT_',\r\n # 'is_home:add_lon': '_HOME_LON_',\r\n # 'is_work:add_lat': '_WORK_LAT_',\r\n # 'is_work:add_lon': '_WORK_LON_'}, inplace=True)\r\n #df_test = df_test[['_ID_', '_WORK_LAT_', '_WORK_LON_', '_HOME_LAT_', '_HOME_LON_']]\r\n #​\r\n #df_test.head()\r\n #Формируем\r\n #submission - файл\r\n #\r\n ## Заполняем пропуски\r\n #df_ = pd.read_csv('../data/test_set.csv', dtype=dtypes, usecols=['customer_id'])\r\n #submission = pd.DataFrame(df_['customer_id'].unique(), columns=['_ID_'])\r\n #​\r\n #submission = submission.merge(df_test, how='left').fillna(0)\r\n ## Пишем файл submission\r\n #submission.to_csv('../submissions/base_14_635_331.csv', index=None)\r\n #\r\n #\r\n return src\r\n\r\ndef update_last_partition(dst, from_dt, to_dt):\r\n prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)\r\n res = spark.table(dst[\"d_train\"]).checkpoint()\r\n res = res.where(res.day == to_dt)\r\n res = res.withColumn(\"period_to_dt\", f.lit(prev_day)).withColumn(\"day\", f.lit(prev_day.strftime('%Y-%m-%d')))\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_train\"], overwrite=True)\r\n\r\n\r\ndef calc_05(src, dst, from_dt, to_dt):\r\n res = algo(src, from_dt, to_dt)\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_subway_entrance\"], overwrite=True)\r\n\r\n\r\ndef sandbox_src():\r\n return {\r\n \"psg_train\": spark.table(\"sandbox_mck.train\"),\r\n \"psg_test\": spark.table(\"sandbox_mck.test\"),\r\n \"psg_dev\": spark.table(\"sandbox_mck.dev\")\r\n }\r\n\r\n\r\ndef sandbox_dst():\r\n return {\r\n \"psg_result\": \"sandbox_mck.psg_result\"\r\n }\r\n\r\n\r\ndef prod_src():\r\n return {\r\n \"psg_train\": spark.table(\"prod_data.psg_train\"),\r\n \"psg_test\": spark.table(\"prod_data.psg_test\"),\r\n \"psg_dev\": spark.table(\"prod_data.psg_dev\")\r\n }\r\n\r\n\r\ndef prod_dst():\r\n return {\r\n \"psg_result\": \"prod_data.psg_result\"\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n spark = SparkSession.builder.appName(\"calc_05_task\").enableHiveSupport().getOrCreate()\r\n spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\r\n hivecontext = HiveContext(spark.sparkContext)\r\n hivecontext.setConf(\"hive.exec.dynamic.partition\", \"true\")\r\n hivecontext.setConf(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\r\n spark.sparkContext.setCheckpointDir(\"hdfs:///user/airflow/psg/calc_05_task\")\r\n\r\n opts = {\r\n 'from_dt': sys.argv[1],\r\n \"to_dt\": \"9999-12-31\"\r\n }\r\n\r\n update_last_partition(prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n calc_05(prod_src(), prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n\r\n", "id": "8766242", "language": "Python", "matching_score": 7.015952110290527, 
"max_stars_count": 0, "path": "Raif/pyspark/calc_05.py" }, { "content": "import sys\r\nfrom datetime import timedelta, datetime\r\n\r\n\r\nfrom pyspark import HiveContext\r\nfrom pyspark.sql import functions as f, SparkSession\r\n\r\n\r\ndef algo(src, from_dt, to_dt):\r\n res = steps(src, from_dt, to_dt)\r\n return res\r\n\r\n\r\ndef steps(src, from_dt, to_dt):\r\n # Новые\r\n # фичи\r\n # Цифры\r\n # по\r\n # mcc\r\n # Погода\r\n # по\r\n # месту\r\n # расстояние\r\n # до\r\n # дальнейшего\r\n # соседа\r\n # максимальная\r\n # продолжительность\r\n # приобретений\r\n # в\r\n # данной\r\n # точке\r\n # по\r\n # дням\r\n #\r\n # ПРОССУМИРОВАТЬ\r\n # ДЕЛЬТЫ\r\n # ПО\r\n # РАЗНЫМ\r\n # КООРДИНАТАМ\r\n #\r\n # [Boosters]\r\n # Raiffeisen\r\n # Data\r\n # Cup.Baseline\r\n # Общий\r\n # подход:\r\n #\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_work(если\r\n # транзакция\r\n # находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # дома\r\n # клиента)\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_home(если\r\n # транзакция\r\n # находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # работы\r\n # клиента)\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_home == 1)\r\n # для\r\n # транзакции\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_work == 1)\r\n # для\r\n # транзакции\r\n # Точность\r\n # определения\r\n # местоположения:\r\n #\r\n # для\r\n # классификатора\r\n # is_home: ~3\r\n # x %\r\n # для\r\n # классификатора\r\n # is_work: ~2\r\n # x %\r\n # общая\r\n # оценка\r\n # на\r\n # Public\r\n # Leaderboard: ???\r\n # Примечание\r\n #\r\n # Требуется\r\n # Python\r\n # версии\r\n # 3.5\r\n # Требуется\r\n # библиотека\r\n # xgboost(для\r\n # обучения\r\n # использовалась\r\n # xgboost\r\n # версии\r\n # 0.7.post3)\r\n # Требуются\r\n # файлы: test_set.csv, train_set.csv\r\n # в\r\n # одном\r\n # каталоге\r\n # с\r\n # данным\r\n # скриптом\r\n # Требования\r\n # к\r\n # памяти: должно\r\n # работать\r\n # с\r\n # 2\r\n # Гб\r\n # свободного\r\n # RAM\r\n # Время\r\n # работы: ~3\r\n # минуты(тестировалось\r\n # на\r\n # процессоре\r\n # Intel\r\n # Core\r\n # i7 - 4770)\r\n #\r\n # % load_ext\r\n # autoreload\r\n # % autoreload\r\n # 2\r\n # ​\r\n # import sys\r\n # MODULES_PATH = '../code/'\r\n # if MODULES_PATH not in sys.path:\r\n # sys.path.append(MODULES_PATH)\r\n # import mfuncs\r\n #\r\n # import pandas as pd\r\n # import numpy as np\r\n # from tqdm import tqdm\r\n # tqdm.pandas()\r\n # pd.options.display.max_columns = 1000\r\n # pd.options.display.max_colwidth = -1\r\n # ​\r\n # import lightgbm as lgb\r\n # ​\r\n # ​\r\n # from sklearn.neighbors import NearestNeighbors\r\n # from sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, AgglomerativeClustering\r\n # from sklearn.metrics import silhouette_samples, silhouette_score\r\n # ​\r\n # from sklearn.metrics.pairwise import pairwise_distances\r\n # import gmaps\r\n # API_KEY = '<KEY>'\r\n # gmaps.configure(api_key=API_KEY) # Your Google API key\r\n # % pylab\r\n # inline\r\n # figsize(13, 13)\r\n # Populating\r\n # the\r\n # interactive\r\n # namespace\r\n # from numpy and matplotlib\r\n #\r\n # # Определим типы колонок для экономии памяти\r\n # dtypes = pd.read_csv('../data/df_all_b11_dtypes.csv', header=None, index_col=0).to_dict()[1]\r\n # dtypes.pop('transaction_date', None)\r\n # df_all = pd.read_csv('../data/df_all_b11.csv', dtype=dtypes, parse_dates=['transaction_date'])\r\n # Мои\r\n # фичи\r\n 
#\r\n # # добавим признаки после групбая\r\n # df_gb = df_all[['customer_id', 'amount', 'add_lat', 'add_lon']].groupby('customer_id')\r\n # coord_stat_df = df_gb.agg(['mean', 'max', 'min'])\r\n # coord_stat_df['transactions_per_user'] = df_gb.agg('size')\r\n # coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]\r\n # coord_stat_df = coord_stat_df.astype(np.float32)\r\n # coord_stat_df.reset_index(inplace=True)\r\n # df_all = pd.merge(df_all, coord_stat_df, on='customer_id', how='left')\r\n #\r\n # cols = ['add_lat', 'add_lon']\r\n # types = ['min', 'max', 'mean']\r\n # for c in cols:\r\n # for t in types:\r\n # df_all['{}_diff_{}'.format(c, t)] = np.abs(df_all[c] - df_all['{}_{}'.format(c, t)], dtype=np.float32)\r\n #\r\n # df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n # # разности\r\n # df_all['lat_diff_cluster_lat'] = np.abs(df_all['add_lat'] - df_all['cl_lat'], dtype=np.float32)\r\n # df_all['lon_diff_cluster_lon'] = np.abs(df_all['add_lon'] - df_all['cl_lon'], dtype=np.float32)\r\n # df_all['lon_diff_cluster'] = (df_all['lat_diff_cluster_lat'] + df_all['lon_diff_cluster_lon']).astype(np.float32)\r\n # Категории\r\n # mcc\r\n #\r\n # # категории\r\n # df_all['mcc_str'] = df_all['mcc'].astype(str).str.rjust(4, '0')\r\n # df_mcc = pd.read_csv('../data/internal/mcc.csv')\r\n # df_mcc = df_mcc.iloc[1:, :3]\r\n # df_mcc.columns = ['mcc_str', 'mcc_cat1', 'mcc_cat2']\r\n # df_mcc.drop_duplicates(subset=['mcc_str'], inplace=True)\r\n # df_mcc['mcc_cat1'] = pd.factorize(df_mcc['mcc_cat1'])[0].astype(np.int32)\r\n # df_mcc['mcc_cat2'] = pd.factorize(df_mcc['mcc_cat2'])[0].astype(np.int32)\r\n # df_mcc.fillna('none', inplace=True)\r\n # df_all = pd.merge(df_all, df_mcc, on='mcc_str', how='left')\r\n # del df_all['mcc_str']\r\n # df_mcc.head()\r\n # mcc_str\r\n # mcc_cat1\r\n # mcc_cat2\r\n # 1\r\n # 0001 - 1\r\n # 0\r\n # 2\r\n # 0002 - 1\r\n # 0\r\n # 3\r\n # 0003 - 1\r\n # 0\r\n # 4\r\n # 0004 - 1\r\n # 0\r\n # 5\r\n # 0005 - 1\r\n # 0\r\n # Плотности\r\n # населения\r\n # по\r\n # районам\r\n # МСК\r\n #\r\n # import geopandas as gpd\r\n # from shapely.geometry import Point, Polygon\r\n # mos_shp = gpd.read_file('../data/internal/demography.shp')\r\n # ​\r\n # _pnts = [Point(vals.T) for vals in df_all[df_all.city_name == 'Москва'][['add_lon', 'add_lat']].values]\r\n # pnts = gpd.GeoDataFrame(geometry=_pnts)\r\n # pnts.crs = mos_shp.crs\r\n # ​\r\n # mos_shp.drop(['NAME', 'ABBREV_AO'], 1, inplace=True)\r\n # mos_shp['area'] = mos_shp['geometry'].area\r\n # for c in mos_shp.columns:\r\n # if c not in ['geometry', 'area'] and 'index' not in c:\r\n # mos_shp[c + 'dens'] = mos_shp[c] / mos_shp['area']\r\n #\r\n # % % time\r\n # cities_with_country = gpd.sjoin(pnts, mos_shp, how=\"left\", op='intersects')\r\n # CPU\r\n # times: user\r\n # 44\r\n # s, sys: 260\r\n # ms, total: 44.3\r\n # s\r\n # Wall\r\n # time: 44.3\r\n # s\r\n #\r\n # cols = cities_with_country.drop(['geometry', 'index_right'], 1).columns\r\n # for c in cols:\r\n # df_all[c] = -1\r\n # df_all.loc[df_all.city_name == 'Москва', cols] = cities_with_country\r\n #\r\n # # частота mcc\r\n # df_mcc = df_all['mcc'].value_counts(normalize=True).reset_index()\r\n # df_mcc.columns = ['mcc', 'mcc_freq']\r\n # df_all = pd.merge(df_all, df_mcc, on='mcc', how='left')\r\n #\r\n # # метро\r\n # mos_metro = pd.read_csv('../data/internal/moscow_metro.csv')\r\n # pet_metro = pd.read_csv('../data/internal/peter_metro.csv')\r\n # df_metro = pd.concat([mos_metro, pet_metro])\r\n # ​\r\n # vals1 = 
df_all[['add_lat', 'add_lon']].values\r\n # vals2 = df_metro[['metro_lat', 'metro_lon']].values\r\n # X = pairwise_distances(vals1, vals2)\r\n # dist_to_min_metro = X.min(axis=1)\r\n # ​\r\n # # X[X == 0] = 10000\r\n # df_all['dist_to_minmetro'] = X.min(axis=1)\r\n # df_all['metro_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_all['metro_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_all['metro_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_all['metro_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_all['metro_in_03'] = (X < 0.03).sum(axis=1)\r\n #\r\n # # расстояние до участковых комиссий\r\n # df_cik = pd.read_csv('../data/internal/cik_uik.csv')\r\n # df_cik.dropna(subset=['lat_ik'], inplace=True)\r\n # df_cik.dropna(subset=['lon_ik'], inplace=True)\r\n # ​\r\n # df_cik = df_cik[df_cik['lon_ik'] < 45]\r\n # vals1 = df_all[['add_lat', 'add_lon']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = df_cik[['lat_ik', 'lon_ik']].drop_duplicates().values.astype(np.float32)\r\n # ​\r\n # print(vals2.shape)\r\n # X = pairwise_distances(vals1, vals2)\r\n # ​\r\n # df_vals['dist_to_ciktro'] = X.min(axis=1)\r\n # df_vals['cik_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals['cik_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals['cik_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals['cik_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals['cik_in_03'] = (X < 0.03).sum(axis=1)\r\n # (37481, 2)\r\n #\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # df_vals.drop_duplicates(subset=['add_lat_', 'add_lon_'], inplace=True)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # # погода в МСК и ПИТЕРЕ\r\n # # буду смотреть погоду в 18-00\r\n # w1 = pd.read_csv('../data/internal/weather/moscow.csv', sep=';', index_col=False)\r\n # w1['city_name'] = 'Москва'\r\n # w1['transaction_date'] = pd.to_datetime(w1['Local time in Moscow'], format='%d.%m.%Y %H:%M')\r\n # del w1['Local time in Moscow']\r\n # w1 = w1[w1.transaction_date.dt.hour == 18].reset_index(drop=True)\r\n # w1['transaction_date'] = w1['transaction_date'].dt.date\r\n # ​\r\n # w2 = pd.read_csv('../data/internal/weather/peter.csv', sep=';', index_col=False)\r\n # w2['city_name'] = 'Санкт-Петербург'\r\n # w2['transaction_date'] = pd.to_datetime(w2['Local time in Moscow'], format='%d.%m.%Y %H:%M')\r\n # del w2['Local time in Moscow']\r\n # w2 = w2[w2.transaction_date.dt.hour == 18].reset_index(drop=True)\r\n # w2['transaction_date'] = w2['transaction_date'].dt.date\r\n # ​\r\n # df_weather = pd.concat([w1, w2], axis=0).reset_index(drop=True)\r\n # df_weather['transaction_date'] = pd.to_datetime(df_weather['transaction_date'])\r\n # ​\r\n # cn = df_weather['city_name'] # hardcode\r\n # df_weather = df_weather.select_dtypes(exclude=['object'])\r\n # df_weather['city_name'] = cn\r\n # for c in df_weather:\r\n # if df_weather[c].isnull().mean() > 0.9:\r\n # del df_weather[c]\r\n # # df_weather = df_weather.add_prefix('weather_')\r\n # df_all = pd.merge(df_all, df_weather, on=['city_name', 'transaction_date'], how='left')\r\n #\r\n # # df_all.drop(['index', 'T', 'Po', 'P', 'Pa', 
'U', 'Ff', 'VV', 'Td', 'tR'], 1, inplace=True)\r\n #\r\n # # добавляем новые MCC OHE с самыми частыми категориями\r\n # df_all['mcc_rm'] = df_all['mcc']\r\n # df_all.loc[~df_all['mcc_rm'].isin(df_all['mcc_rm'].value_counts().iloc[:35].index.values), 'mcc_rm'] = 99999\r\n # ​\r\n # df_all['mcc_rm_cat1'] = df_all['mcc_cat1']\r\n # df_all.loc[~df_all['mcc_rm_cat1'].isin(df_all['mcc_rm_cat1'].value_counts().iloc[:35].index.values),\r\n # 'mcc_rm_cat1'] = 99999\r\n #\r\n # # OHE урезанных MCC\r\n # df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_rm'], prefix='mcc_rm_ohe').astype(np.int8)], axis=1)\r\n # del df_all['mcc_rm']\r\n # df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_rm_cat1'], prefix='mcc_rm_cat1_ohe').astype(np.int8)], axis=1)\r\n # del df_all['mcc_rm_cat1']\r\n # ​\r\n # df_all = pd.concat([df_all,\r\n # pd.get_dummies(df_all['mcc_cat2'], prefix='mcc_cat2_ohe').astype(np.int8)], axis=1)\r\n # del df_all['mcc_cat2']\r\n # df_all = df_all.reset_index(drop=True)\r\n #\r\n # mcc_cols_0 = [c for c in df_all.columns if 'mcc_rm_ohe' in c]\r\n # mcc_cols_1 = [c for c in df_all.columns if 'mcc_rm_cat1_ohe' in c]\r\n # mcc_cols_2 = [c for c in df_all.columns if 'mcc_cat2_ohe' in c]\r\n # ​\r\n # ​\r\n # mcc_cols_0_ = [c + '_amount' for c in mcc_cols_0]\r\n # mcc_cols_1_ = [c + '_amount' for c in mcc_cols_1]\r\n # mcc_cols_2_ = [c + '_amount' for c in mcc_cols_2]\r\n #\r\n # # сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n # df_mcc = df_all.groupby('customer_id')[mcc_cols_0].agg(['mean', 'sum'])\r\n # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # df_mcc = df_mcc.astype(np.float32).reset_index()\r\n # df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n # df_mcc.head()\r\n # customer_id\r\n # mcc_rm_ohe_4111_mean\r\n # mcc_rm_ohe_4111_sum\r\n # mcc_rm_ohe_4784_mean\r\n # mcc_rm_ohe_4784_sum\r\n # mcc_rm_ohe_5200_mean\r\n # mcc_rm_ohe_5200_sum\r\n # mcc_rm_ohe_5211_mean\r\n # mcc_rm_ohe_5211_sum\r\n # mcc_rm_ohe_5261_mean\r\n # mcc_rm_ohe_5261_sum\r\n # mcc_rm_ohe_5311_mean\r\n # mcc_rm_ohe_5311_sum\r\n # mcc_rm_ohe_5331_mean\r\n # mcc_rm_ohe_5331_sum\r\n # mcc_rm_ohe_5411_mean\r\n # mcc_rm_ohe_5411_sum\r\n # mcc_rm_ohe_5499_mean\r\n # mcc_rm_ohe_5499_sum\r\n # mcc_rm_ohe_5533_mean\r\n # mcc_rm_ohe_5533_sum\r\n # mcc_rm_ohe_5541_mean\r\n # mcc_rm_ohe_5541_sum\r\n # mcc_rm_ohe_5641_mean\r\n # mcc_rm_ohe_5641_sum\r\n # mcc_rm_ohe_5651_mean\r\n # mcc_rm_ohe_5651_sum\r\n # mcc_rm_ohe_5661_mean\r\n # mcc_rm_ohe_5661_sum\r\n # mcc_rm_ohe_5691_mean\r\n # mcc_rm_ohe_5691_sum\r\n # mcc_rm_ohe_5699_mean\r\n # mcc_rm_ohe_5699_sum\r\n # mcc_rm_ohe_5712_mean\r\n # mcc_rm_ohe_5712_sum\r\n # mcc_rm_ohe_5732_mean\r\n # mcc_rm_ohe_5732_sum\r\n # mcc_rm_ohe_5812_mean\r\n # mcc_rm_ohe_5812_sum\r\n # mcc_rm_ohe_5813_mean\r\n # mcc_rm_ohe_5813_sum\r\n # mcc_rm_ohe_5814_mean\r\n # mcc_rm_ohe_5814_sum\r\n # mcc_rm_ohe_5912_mean\r\n # mcc_rm_ohe_5912_sum\r\n # mcc_rm_ohe_5921_mean\r\n # mcc_rm_ohe_5921_sum\r\n # mcc_rm_ohe_5941_mean\r\n # mcc_rm_ohe_5941_sum\r\n # mcc_rm_ohe_5942_mean\r\n # mcc_rm_ohe_5942_sum\r\n # mcc_rm_ohe_5945_mean\r\n # mcc_rm_ohe_5945_sum\r\n # mcc_rm_ohe_5977_mean\r\n # mcc_rm_ohe_5977_sum\r\n # mcc_rm_ohe_5992_mean\r\n # mcc_rm_ohe_5992_sum\r\n # mcc_rm_ohe_5995_mean\r\n # mcc_rm_ohe_5995_sum\r\n # mcc_rm_ohe_5999_mean\r\n # mcc_rm_ohe_5999_sum\r\n # mcc_rm_ohe_6011_mean\r\n # mcc_rm_ohe_6011_sum\r\n # mcc_rm_ohe_7230_mean\r\n # mcc_rm_ohe_7230_sum\r\n # mcc_rm_ohe_7832_mean\r\n 
# ... (remaining columns and the five sample rows of this df_mcc.head() preview omitted; they are\r\n
    # just the per-MCC *_mean / *_sum values for the first few customers)\r\n
    #\r\n
    # # by transaction amounts\r\n
    # for i, c in enumerate(mcc_cols_0):\r\n
    #     df_all[mcc_cols_0_[i]] = (df_all[c] * df_all['amount']).astype(np.float32)\r\n
    # for i, c in enumerate(mcc_cols_2):\r\n
    #     df_all[mcc_cols_2_[i]] = (df_all[c] * df_all['amount']).astype(np.float32)\r\n
    #\r\n
    # # by amounts\r\n
    # df_mcc = df_all.groupby('customer_id')[mcc_cols_0_].agg(['mean', 'sum'])\r\n
    # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n
    # df_mcc = df_mcc.astype(np.float32).reset_index()\r\n
    # df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n
    # df_mcc.head()\r\n
    #\r\n
    # # df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n
    # # df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n
    #\r\n
    # # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_].agg(['mean', 'sum'])\r\n
    # # df_mcc = df_mcc.add_suffix('_40coord')\r\n
    # # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n
    # # df_mcc = df_mcc.astype(np.float32)\r\n
    # # df_mcc.reset_index(inplace=True)\r\n
    # # df_mcc.head()\r\n
    # # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n
    #\r\n
    # # del df_all['add_lat_']\r\n
    # # del df_all['add_lon_']\r\n
    # (df_mcc.head() preview of the per-MCC amount *_mean / *_sum columns omitted)\r\n
    #\r\n
    # # by amounts\r\n
    # df_mcc = df_all.groupby('customer_id')[mcc_cols_2_].agg(['mean', 'sum'])\r\n
    # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n
    # df_mcc = df_mcc.astype(np.float32).reset_index()\r\n
    # df_mcc.head()\r\n
    # df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n
    #\r\n
    # # df_all['add_lat_'] = 
(df_all['add_lat'] * 40).astype(np.int32)\r\n # # df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n # ​\r\n # # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_].agg(['mean', 'sum'])\r\n # # df_mcc = df_mcc.add_suffix('_40coord')\r\n # # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # # df_mcc = df_mcc.astype(np.float32)\r\n # # df_mcc.reset_index(inplace=True)\r\n # # df_mcc.head()\r\n # # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n # ​\r\n # # del df_all['add_lat_']\r\n # # del df_all['add_lon_']\r\n #\r\n # # сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n # # mcc_cols = [c for c in df_all.columns if 'mcc_cat1' in c]\r\n # # df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['mean'])\r\n # # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # # df_mcc.reset_index(inplace=True)\r\n # # df_mcc.head()\r\n # # df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n # # сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n # df_mcc = df_all.groupby('customer_id')[mcc_cols_2].agg(['mean', 'sum'])\r\n # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # df_mcc = df_mcc.astype(np.float32)\r\n # df_mcc.reset_index(inplace=True)\r\n # df_mcc.head()\r\n # df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n # # РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n # df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n # df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n # ​\r\n # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_0].agg(['mean', 'sum'])\r\n # df_mcc = df_mcc.add_suffix('_40coord')\r\n # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # df_mcc = df_mcc.astype(np.float32)\r\n # df_mcc.reset_index(inplace=True)\r\n # df_mcc.head()\r\n # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n # ​\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # mcc_cols = [c for c in df_all.columns if 'mcc_rm_ohe' in c and 'mean' not in c and 'sum' not in c]\r\n # # РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n # df_all['add_lat_'] = (df_all['add_lat'] * 100).astype(np.int32)\r\n # df_all['add_lon_'] = (df_all['add_lon'] * 100).astype(np.int32)\r\n # ​\r\n # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_0].agg(['mean', 'sum'])\r\n # df_mcc = df_mcc.add_suffix('_100coord')\r\n # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # df_mcc = df_mcc.astype(np.float32)\r\n # df_mcc.reset_index(inplace=True)\r\n # df_mcc.head()\r\n # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n # ​\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # # РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА (ПРОВЕРИЛ-ЛУЧШЕ РАБОТАЕТ НА БОЛЬШИХ УЧАСТКАХ)\r\n # df_all['add_lat_'] = (df_all['add_lat'] * 40).astype(np.int32)\r\n # df_all['add_lon_'] = (df_all['add_lon'] * 40).astype(np.int32)\r\n # ​\r\n # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols_2].agg(['mean', 'sum'])\r\n # df_mcc = df_mcc.add_suffix('_200coord')\r\n # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # df_mcc = df_mcc.astype(np.float32)\r\n # df_mcc.reset_index(inplace=True)\r\n # df_mcc.head()\r\n # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n # 
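 # The groupby -> flatten MultiIndex columns -> downcast -> merge pattern above is repeated\r\n
 # below for customer_id and for several coordinate grids. A commented sketch of it as one\r\n
 # reusable helper -- the name add_grouped_stats is hypothetical; pandas/numpy as imported above:\r\n
 #\r\n
 # def add_grouped_stats(df, keys, cols, aggs=('mean', 'sum'), suffix=''):\r\n
 #     grp = df.groupby(keys)[cols].agg(list(aggs))\r\n
 #     # flatten ('col', 'agg') tuples into col_agg names, optionally tagged with a suffix\r\n
 #     grp.columns = ['_'.join(col).strip() + suffix for col in grp.columns.values]\r\n
 #     grp = grp.astype(np.float32).reset_index()\r\n
 #     return pd.merge(df, grp, on=keys, how='left')\r\n
 #\r\n
 # # equivalent to the cells around this one, e.g.:\r\n
 # # df_all = add_grouped_stats(df_all, 'customer_id', mcc_cols_2)\r\n
 # # df_all = add_grouped_stats(df_all, ['add_lat_', 'add_lon_'], mcc_cols_0, suffix='_40coord')\r\n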
​\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # # РАСПРЕДЕЛЕНИЕ MCC В ОКРЕСТНОСТИ ЧУВАКА\r\n # # df_all['add_lat_'] = (df_all['add_lat'] * 100).astype(np.int32)\r\n # # df_all['add_lon_'] = (df_all['add_lon'] * 100).astype(np.int32)\r\n # ​\r\n # # df_mcc = df_all.groupby(['add_lat_', 'add_lon_'])[mcc_cols].agg(['mean', 'sum'])\r\n # # df_mcc = df_mcc.add_suffix('_100coord')\r\n # # df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n # # df_mcc = df_mcc.astype(np.float32)\r\n # # df_mcc.reset_index(inplace=True)\r\n # # df_mcc.head()\r\n # # df_all = pd.merge(df_all, df_mcc, on=['add_lat_', 'add_lon_'], how='left')\r\n # ​\r\n # # del df_all['add_lat_']\r\n # # del df_all['add_lon_']\r\n # Игрушки\r\n # с\r\n # адресами\r\n #\r\n # df_all['string'] = df_all['string'].fillna('')\r\n # df_all['string'] = df_all['string'].str.lower()\r\n #\r\n # df_all['has_street'] = df_all['string'].str.contains('улиц').astype(np.int8)\r\n # df_all['has_pereul'] = df_all['string'].str.contains('переул').astype(np.int8)\r\n # df_all['has_bulvar'] = df_all['string'].str.contains('бульв').astype(np.int8)\r\n # df_all['has_prospekt'] = df_all['string'].str.contains('проспект').astype(np.int8)\r\n # df_all['has_shosse'] = df_all['string'].str.contains('шосс').astype(np.int8)\r\n # ​\r\n # df_all['has_torg'] = df_all['string'].str.contains('торгов').astype(np.int8)\r\n # df_all['has_bus'] = df_all['string'].str.contains('бизн').astype(np.int8)\r\n # Медианы\r\n # по\r\n # юзеру\r\n # и\r\n # по\r\n # без\r\n # дубликатов\r\n #\r\n # dft = df_all.groupby('terminal_id')['add_lat'].agg('std').astype(np.float32).reset_index()\r\n # dft['moving_terminal'] = (dft['add_lat'] > 0).astype(np.int8)\r\n # del dft['add_lat']\r\n # df_all = pd.merge(df_all, dft, on='terminal_id', how='left')\r\n #\r\n # df_med = df_all.groupby('customer_id')['add_lat', 'add_lon'].agg('median').astype(np.float32).reset_index()\r\n # df_med.columns = ['customer_id', 'add_lat_median', 'add_lon_median']\r\n # df_all = pd.merge(df_all, df_med, on='customer_id', how='left')\r\n #\r\n # df_med = df_all.drop_duplicates(subset=['customer_id',\r\n # 'add_lat', 'add_lon']).groupby('customer_id')['add_lat', 'add_lon'].agg(\r\n # 'median').reset_index()\r\n # df_med.columns = ['customer_id', 'add_lat_median_unique', 'add_lon_median_unique']\r\n # df_all = pd.merge(df_all, df_med, on='customer_id', how='left')\r\n #\r\n # df_all['lat_diff_median'] = np.abs(df_all['add_lat'] - df_all['add_lat_median'])\r\n # df_all['lon_diff_median'] = np.abs(df_all['add_lon'] - df_all['add_lat_median'])\r\n # df_all['lat_diff_median_unique'] = np.abs(df_all['add_lat'] - df_all['add_lat_median_unique'])\r\n # df_all['lon_diff_median_unique'] = np.abs(df_all['add_lon'] - df_all['add_lon_median_unique'])\r\n # ​\r\n # df_all['diff_median'] = df_all['lat_diff_median'] + df_all['lon_diff_median']\r\n # df_all['diff_median_unique'] = df_all['lat_diff_median_unique'] + df_all['lon_diff_median_unique']\r\n #\r\n # del dft\r\n # del df_med\r\n # OSM\r\n # https: // wiki.openstreetmap.org / wiki / RU: % D0 % 9\r\n # E % D0 % B1 % D1 % 8\r\n # A % D0 % B5 % D0 % BA % D1 % 82 % D1 % 8\r\n # B_ % D0 % BA % D0 % B0 % D1 % 80 % D1 % 82 % D1 % 8\r\n # B # .D0.9A.D0.BE.D0.BC.D0.BC.D0.B5.D1.80.D1.87.D0.B5.D1.81.D0.BA.D0.B8.D0.B5\r\n #\r\n # import ogr\r\n # driver = ogr.GetDriverByName('OSM')\r\n # data_msk = driver.Open('../data/internal/moscow.osm')\r\n # data_peter = driver.Open('../data/internal/peter.osm')\r\n #\r\n # features = []\r\n 
# nlayer = data_msk.GetLayerCount() # 5\r\n # print(nlayer)\r\n # for i in range(nlayer):\r\n # features += [x for x in data_msk.GetLayerByIndex(i)]\r\n # nlayer = data_peter.GetLayerCount() # 5\r\n # print(nlayer)\r\n # for i in range(nlayer):\r\n # features += [x for x in data_peter.GetLayerByIndex(i)]\r\n # 5\r\n # 5\r\n # расстояние\r\n # до\r\n # бизнес\r\n # центров\r\n #\r\n # coords = []\r\n # for f in tqdm(features):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'бизнес' in s and 'центр' in s:\r\n # el = f.ExportToJson(as_object=True)['geometry']['coordinates'][0]\r\n # if type(el) != float:\r\n # coords.append(el)\r\n # 100 % |██████████ | 622083 / 622083[02:27 < 00:00, 4215.21\r\n # it / s]\r\n #\r\n # # coords = []\r\n # # for f in tqdm(features):\r\n # # s = str(f.ExportToJson(as_object=True)).lower()\r\n # # if 'running' in s:\r\n # # coords.append(s)\r\n # # print(s)\r\n # # el = f.ExportToJson(as_object=True)['geometry']['coordinates'][0]\r\n # # if type(el) != float:\r\n # # coords.append(el)\r\n #\r\n # vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = np.array(coords, dtype=np.float32)\r\n # vals1.shape, vals2.shape\r\n # ((160184, 2), (206, 2))\r\n #\r\n # X = pairwise_distances(vals1, vals2)\r\n # X[X == 0] = 10000\r\n #\r\n # suf = 'bc'\r\n # df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n # df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n # ​\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n # до\r\n # фастфудов\r\n # http: // andrewgaidus.com / Convert_OSM_Data /\r\n #\r\n # driver = ogr.GetDriverByName('OSM')\r\n # data_msk = driver.Open('../data/internal/moscow.osm')\r\n # data_peter = driver.Open('../data/internal/peter.osm')\r\n # layer_p = data_msk.GetLayer('points') # 5\r\n # features_p = [x for x in layer_p]\r\n # layer_p = data_peter.GetLayer('points') # 5\r\n # features_p += [x for x in layer_p]\r\n #\r\n # # coords = []\r\n # # for f in tqdm(features_p):\r\n # # s = str(f.ExportToJson(as_object=True)).lower()\r\n # # if 'run' in s:\r\n # # print(s)\r\n # # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n #\r\n # coords = []\r\n # for f in tqdm(features_p):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'fast_food' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n # 100 % |██████████ | 343972 / 343972[01:16 < 00:00, 4515.63\r\n # it / s]\r\n #\r\n # vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = np.array(coords, dtype=np.float32)\r\n # vals1.shape, vals2.shape\r\n # ((160184, 2), (2562, 2))\r\n #\r\n # X = pairwise_distances(vals1, vals2)\r\n 
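 # The cells around this one repeat the same proximity recipe for each POI type (business\r\n
 # centres, fast food, railway stations, Raiffeisen branches): distance to the nearest POI plus\r\n
 # counts of POIs within several radii, merged back onto df_all via rounded coordinates.\r\n
 # A commented sketch of that recipe as a single reusable helper -- the name add_poi_features\r\n
 # is hypothetical; pandas/numpy/sklearn are assumed to be imported as above:\r\n
 #\r\n
 # from sklearn.metrics import pairwise_distances\r\n
 #\r\n
 # def add_poi_features(df, poi_coords, prefix, radii=(0.001, 0.005, 0.01, 0.02, 0.03)):\r\n
 #     # poi_coords: array-like of (lat, lon) pairs for the POI type in question\r\n
 #     df = df.copy()  # avoid mutating the caller's frame\r\n
 #     pts = df[['add_lat', 'add_lon']].drop_duplicates().values.astype(np.float32)\r\n
 #     feats = pd.DataFrame(pts, columns=['add_lat', 'add_lon'])\r\n
 #     d = pairwise_distances(pts, np.asarray(poi_coords, dtype=np.float32))\r\n
 #     d[d == 0] = 10000  # same masking of exact zero distances as in the cells here\r\n
 #     feats[prefix + '_dist_to'] = d.min(axis=1)\r\n
 #     for r in radii:\r\n
 #         feats[prefix + '_in_' + str(r).replace('0.', '')] = (d < r).sum(axis=1)\r\n
 #     for c in ('add_lat', 'add_lon'):\r\n
 #         df[c + '_'] = np.round(df[c] * 10000).astype(int)\r\n
 #         feats[c + '_'] = np.round(feats[c] * 10000).astype(int)\r\n
 #     feats = feats.drop(columns=['add_lat', 'add_lon'])\r\n
 #     df = pd.merge(df, feats, on=['add_lat_', 'add_lon_'], how='left')\r\n
 #     return df.drop(columns=['add_lat_', 'add_lon_'])\r\n
 #\r\n
 # Note that pairwise_distances on raw latitude/longitude measures distance in degrees, not\r\n
 # kilometres: 0.01 deg of latitude is roughly 1.1 km, while 0.01 deg of longitude shrinks with\r\n
 # cos(latitude) (about 0.6 km around Moscow). If metric distances are wanted instead, a small\r\n
 # haversine sketch (assumes only numpy):\r\n
 #\r\n
 # def haversine_km(lat1, lon1, lat2, lon2):\r\n
 #     # great-circle distance in kilometres between points given in degrees\r\n
 #     lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))\r\n
 #     a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2\r\n
 #     return 2 * 6371.0 * np.arcsin(np.sqrt(a))\r\n
 #\r\n
 # # e.g. haversine_km(55.7537, 37.6198, 59.9354, 30.3246) -> roughly 630 (Moscow centre to St. Petersburg centre)\r\n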
# X[X == 0] = 10000\r\n #\r\n # suf = 'fastfood'\r\n # df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n # df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n # ​\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n # станции\r\n #\r\n # coords = []\r\n # for f in tqdm(features_p):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'railway' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n # 100 % |██████████ | 343972 / 343972[01:16 < 00:00, 4482.17\r\n # it / s]\r\n #\r\n # vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = np.array(coords, dtype=np.float32)\r\n # vals1.shape, vals2.shape\r\n # ((160184, 2), (8159, 2))\r\n #\r\n # X = pairwise_distances(vals1, vals2)\r\n # X[X == 0] = 10000\r\n #\r\n # suf = 'rail'\r\n # df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n # df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n # ​\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n # райф\r\n #\r\n # coords = []\r\n # for f in tqdm(features_p):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'райф' in s or 'raiffeisen' in s:\r\n # coords.append(f.ExportToJson(as_object=True)['geometry']['coordinates'])\r\n # 100 % |██████████ | 343972 / 343972[01:15 < 00:00, 4561.06\r\n # it / s]\r\n #\r\n # vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = np.array(coords, dtype=np.float32)\r\n # vals1.shape, vals2.shape\r\n # ((160184, 2), (194, 2))\r\n #\r\n # X = pairwise_distances(vals1, vals2)\r\n # X[X == 0] = 10000\r\n #\r\n # suf = 'raif1'\r\n # df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n # df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n # ​\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # 
df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # coords = []\r\n # for f in tqdm(features):\r\n # s = str(f.ExportToJson(as_object=True)).lower()\r\n # if 'райф' in s or 'raiffeisen' in s:\r\n # el = f.ExportToJson(as_object=True)['geometry']['coordinates'][0]\r\n # if type(el) != float:\r\n # coords.append(el)\r\n # 100 % |██████████ | 622083 / 622083[02:27 < 00:00, 4226.28\r\n # it / s]\r\n #\r\n # vals1 = df_all[['add_lon', 'add_lat']].drop_duplicates().values.astype(np.float32)\r\n # df_vals = pd.DataFrame(vals1, columns=['add_lat', 'add_lon'])\r\n # vals2 = np.array(coords, dtype=np.float32)\r\n # vals1.shape, vals2.shape\r\n # ((160184, 2), (1, 2))\r\n #\r\n # X = pairwise_distances(vals1, vals2)\r\n # X[X == 0] = 10000\r\n #\r\n # suf = 'raif2'\r\n # df_vals[suf + '_dist_to'] = X.min(axis=1)\r\n # df_vals[suf + '_in_01'] = (X < 0.01).sum(axis=1)\r\n # df_vals[suf + '_in_001'] = (X < 0.001).sum(axis=1)\r\n # df_vals[suf + '_in_02'] = (X < 0.02).sum(axis=1)\r\n # df_vals[suf + '_in_005'] = (X < 0.005).sum(axis=1)\r\n # df_vals[suf + '_in_03'] = (X < 0.03).sum(axis=1)\r\n # ​\r\n # df_all['add_lat_'] = np.round(df_all['add_lat'] * 10000).astype(int)\r\n # df_all['add_lon_'] = np.round(df_all['add_lon'] * 10000).astype(int)\r\n # df_vals['add_lat_'] = np.round(df_vals['add_lat'] * 10000).astype(int)\r\n # df_vals['add_lon_'] = np.round(df_vals['add_lon'] * 10000).astype(int)\r\n # del df_vals['add_lat']\r\n # del df_vals['add_lon']\r\n # ​\r\n # df_all = pd.merge(df_all, df_vals, on=['add_lat_', 'add_lon_'], how='left')\r\n # del X\r\n # del df_all['add_lat_']\r\n # del df_all['add_lon_']\r\n #\r\n # del vals2\r\n #\r\n # for c in tqdm(df_all.columns):\r\n # if df_all[c].dtype == np.int64:\r\n # df_all[c] = df_all[c].astype(np.int32)\r\n # if df_all[c].dtype == np.float64:\r\n # df_all[c] = df_all[c].astype(np.float32)\r\n # 100 % |██████████ | 699 / 699[00:03 < 00:00, 220.50\r\n # it / s]\r\n #\r\n # df_all.dtypes.to_csv('../data/df_all_b21_dtypes.csv')\r\n # df_all.to_csv('../data/df_all_b21.csv', index=None)\r\n #\r\n # df_all.shape\r\n # (2294265, 699)\r\n #\r\n # % % time\r\n # # # Определим типы колонок для экономии памяти\r\n # # dtypes = pd.read_csv('../data/df_all_b2_dtypes.csv', header=None, index_col=0).to_dict()[1]\r\n # # dtypes.pop('transaction_date', None)\r\n # # df_all = pd.read_csv('../data/df_all_b2.csv', dtype=dtypes, parse_dates=['transaction_date'])\r\n # Ранки\r\n #\r\n # gb = df_all.groupby('customer_id')\r\n #\r\n # df_all['rank_amount_cid'] = df_all.groupby('customer_id')['amount'].rank()\r\n #\r\n # df_all = pd.merge(df_all,\r\n # df_all.groupby('customer_id')['amount'].agg(['size']).reset_index(), on='customer_id', how='left')\r\n #\r\n # df_all['rank_amount_cid_percent'] = df_all['rank_amount_cid'] / df_all['size']\r\n #\r\n # del features_p\r\n # Расстояния\r\n # до\r\n # центров\r\n #\r\n # am_cols = [c for c in df_all if 'amount' in c]\r\n #\r\n # df_all['dist_to_center'] = -1\r\n #\r\n # vals1 = df_all[df_all.city_name == 'Санкт-Петербург'][['add_lat', 'add_lon']].values\r\n # vals2 = np.array([[59.935386, 30.324629]])\r\n # X = pairwise_distances(vals1, 
vals2)\r\n # df_all.loc[df_all.city_name == 'Санкт-Петербург', 'dist_to_center'] = X\r\n #\r\n # vals1 = df_all[df_all.city_name == 'Москва'][['add_lat', 'add_lon']].values\r\n # vals2 = np.array([[55.7537090, 37.6198133]])\r\n # X = pairwise_distances(vals1, vals2)\r\n # df_all.loc[df_all.city_name == 'Москва', 'dist_to_center'] = X\r\n # LightGBM\r\n #\r\n # df_all.shape, df_all.columns.duplicated().sum()\r\n # ((2294265, 703), 0)\r\n #\r\n # df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n # from sklearn.model_selection import train_test_split\r\n # ​\r\n # ys = ['is_home', 'is_work']\r\n # drop_cols = ['atm_address', 'customer_id', 'pos_address', 'terminal_id', 'transaction_date',\r\n # 'is_home', 'has_home', 'is_work', 'has_work', 'is_train', 'city_name']\r\n # drop_cols += ['work_lat', 'work_lon', 'home_lat', 'home_lon', 'string']\r\n # ​\r\n # drop_cols += ['pred:is_home', 'pred:is_work']\r\n # # cols = [c for c in df_all.columns if 'median_dist' in c]\r\n # # cols = [c for c in df_all.columns if 'lat' in c or 'lon' in c and 'diff' not in c and 'median' not in c]\r\n # # cols += ['address']\r\n # # drop_cols += cols\r\n # ​\r\n # cols = [c for c in df_all.columns if 'mcc_ohe' in c and 'mean' not in c]\r\n # # cols += ['address']\r\n # drop_cols += cols\r\n # ​\r\n # ​\r\n # y_cols = ['is_home', 'is_work']\r\n # usecols = df_all.drop(drop_cols, 1, errors='ignore').columns\r\n #\r\n # params = {\r\n # 'objective': 'binary',\r\n # 'num_leaves': 511,\r\n # 'learning_rate': 0.01,\r\n # 'metric': 'binary_logloss',\r\n # 'feature_fraction': 0.8,\r\n # 'bagging_fraction': 0.8,\r\n # 'bagging_freq': 1,\r\n # 'num_threads': 12,\r\n # 'verbose': 0,\r\n # }\r\n # ​\r\n # model = {}\r\n #\r\n # y_col = 'is_home'\r\n # ​\r\n # cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n # cust_train = cust_train[cust_train > 0].index\r\n # ​\r\n # cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n # ​\r\n # df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n # df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n # ​\r\n # lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n # lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n # ​\r\n # gbm_h = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=100)\r\n # ​\r\n # model[y_col] = gbm_h\r\n # Training\r\n # until\r\n # validation\r\n # scores\r\n # don\r\n # 't improve for 100 rounds.\r\n # [30]\r\n # valid_0\r\n # 's binary_logloss: 0.583925\r\n # [60]\r\n # valid_0\r\n # 's binary_logloss: 0.516789\r\n # [90]\r\n # valid_0\r\n # 's binary_logloss: 0.47214\r\n # [120]\r\n # valid_0\r\n # 's binary_logloss: 0.442272\r\n # [150]\r\n # valid_0\r\n # 's binary_logloss: 0.421676\r\n # [180]\r\n # valid_0\r\n # 's binary_logloss: 0.40754\r\n # [210]\r\n # valid_0\r\n # 's binary_logloss: 0.397597\r\n # [240]\r\n # valid_0\r\n # 's binary_logloss: 0.390218\r\n # [270]\r\n # valid_0\r\n # 's binary_logloss: 0.385013\r\n # [300]\r\n # valid_0\r\n # 's binary_logloss: 0.381261\r\n # [330]\r\n # valid_0\r\n # 's binary_logloss: 0.378859\r\n # [360]\r\n # valid_0\r\n # 's binary_logloss: 0.376952\r\n # [390]\r\n # valid_0\r\n # 's binary_logloss: 0.375839\r\n # [420]\r\n # valid_0\r\n # 's binary_logloss: 0.375117\r\n # [450]\r\n # valid_0\r\n # 's 
binary_logloss: 0.3746\r\n # [480]\r\n # valid_0\r\n # 's binary_logloss: 0.374571\r\n # [510]\r\n # valid_0\r\n # 's binary_logloss: 0.374397\r\n # [540]\r\n # valid_0\r\n # 's binary_logloss: 0.374263\r\n # [570]\r\n # valid_0\r\n # 's binary_logloss: 0.374408\r\n # [600]\r\n # valid_0\r\n # 's binary_logloss: 0.374786\r\n # [630]\r\n # valid_0\r\n # 's binary_logloss: 0.375561\r\n # Early\r\n # stopping, best\r\n # iteration is:\r\n # [556]\r\n # valid_0\r\n # 's binary_logloss: 0.374234\r\n #\r\n # y_col = 'is_work'\r\n # ​\r\n # cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n # cust_train = cust_train[cust_train > 0].index\r\n # ​\r\n # cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n # ​\r\n # ​\r\n # ​\r\n # df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n # df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n # ​\r\n # lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n # lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n # ​\r\n # gbm_w = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=100)\r\n # ​\r\n # model[y_col] = gbm_w\r\n # Training\r\n # until\r\n # validation\r\n # scores\r\n # don\r\n # 't improve for 100 rounds.\r\n # [30]\r\n # valid_0\r\n # 's binary_logloss: 0.561026\r\n # [60]\r\n # valid_0\r\n # 's binary_logloss: 0.481483\r\n # [90]\r\n # valid_0\r\n # 's binary_logloss: 0.430032\r\n # [120]\r\n # valid_0\r\n # 's binary_logloss: 0.396466\r\n # [150]\r\n # valid_0\r\n # 's binary_logloss: 0.374586\r\n # [180]\r\n # valid_0\r\n # 's binary_logloss: 0.359539\r\n # [210]\r\n # valid_0\r\n # 's binary_logloss: 0.349693\r\n # [240]\r\n # valid_0\r\n # 's binary_logloss: 0.343236\r\n # [270]\r\n # valid_0\r\n # 's binary_logloss: 0.339659\r\n # [300]\r\n # valid_0\r\n # 's binary_logloss: 0.337191\r\n # [330]\r\n # valid_0\r\n # 's binary_logloss: 0.336102\r\n # [360]\r\n # valid_0\r\n # 's binary_logloss: 0.33665\r\n # [390]\r\n # valid_0\r\n # 's binary_logloss: 0.337185\r\n # [420]\r\n # valid_0\r\n # 's binary_logloss: 0.339059\r\n # Early\r\n # stopping, best\r\n # iteration is:\r\n # [336]\r\n # valid_0\r\n # 's binary_logloss: 0.336068\r\n #\r\n # gbm_w\r\n # Полезные\r\n # MCC\r\n # дом\r\n # 6011 - финансы\r\n # 5411 - придомовые\r\n # магазы\r\n # 5814 - мак\r\n # 5912 - аптеки\r\n # 5921 - пиво\r\n # 5499 - магазы\r\n # пяторочка\r\n # типа\r\n # 5812 - рестроанчики\r\n # работа\r\n #\r\n # figsize(14, 10)\r\n # lgb.plot_importance(gbm_h, max_num_features=40)\r\n #\r\n # def _best(x):\r\n # ret = None\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # if pred in x:\r\n # i = (x[pred].idxmax())\r\n # cols = [pred, 'add_lat', 'add_lon']\r\n # if col in x:\r\n # cols.append(col)\r\n # tmp = x.loc[i, cols]\r\n # tmp.rename({\r\n # 'add_lat': '%s:add_lat' % col,\r\n # 'add_lon': '%s:add_lon' % col,\r\n # }, inplace=True)\r\n # if ret is None:\r\n # ret = tmp\r\n # else:\r\n # ret = pd.concat([ret, tmp])\r\n # return ret\r\n #\r\n # ​\r\n # ​\r\n #\r\n # def predict_proba(dt, ys=['is_home', 'is_work']):\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # dt[pred] = model[col].predict(dt[usecols])\r\n # return dt.groupby('customer_id').apply(_best).reset_index()\r\n #\r\n # ​\r\n #\r\n # def score(dt, ys=['is_home', 'is_work'], 
return_df=False):\r\n # dt_ret = predict_proba(dt, ys)\r\n # if return_df:\r\n # return dt_ret\r\n # mean = 0.0\r\n # for col in ys:\r\n # col_mean = dt_ret[col].mean()\r\n # mean += col_mean\r\n # if len(ys) == 2:\r\n # mean = mean / len(ys)\r\n # return mean\r\n #\r\n # print(\"Train accuracy:\", score(df_train, ys=['is_home']))\r\n # print(\"Test accuracy:\", score(df_valid, ys=['is_home']))\r\n # ​\r\n # print(\"Train accuracy:\", score(df_train, ys=['is_work']))\r\n # print(\"Test accuracy:\", score(df_valid, ys=['is_work']))\r\n # Train\r\n # accuracy: 0.640571982549685\r\n # Test\r\n # accuracy: 0.6443798449612403\r\n # Train\r\n # accuracy: 0.5368395540475036\r\n # Test\r\n # accuracy: 0.3536821705426357\r\n # Train\r\n # accuracy: 0.5458070770722249\r\n # Test\r\n # accuracy: 0.5494186046511628\r\n # Train\r\n # accuracy: 0.4301987396994668\r\n # Test\r\n # accuracy: 0.3536821705426357\r\n #\r\n # Анализ\r\n # False - Negative\r\n #\r\n # # сколько вообще людей имеют хорошую точку\r\n # df_all[(df_all.is_train == 1)].groupby('customer_id')['is_work'].agg('max').mean()\r\n #\r\n # df_pred = score(df_valid, ys=['is_home'], return_df=True)\r\n #\r\n # df_pred.sample(5)\r\n #\r\n # cid = 'bf66305d0ec05abb6e6a6358acb8c2a1'\r\n # cid = df_pred[df_pred.is_home == 0].sample(1)['customer_id'].values[0]\r\n # ​\r\n # df_an = df_all[df_all.customer_id == cid]\r\n # center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values\r\n # center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values\r\n # ​\r\n # ​\r\n # predicted_home = df_pred[df_pred.customer_id == cid][\r\n # ['is_home:add_lat', 'is_home:add_lon']].drop_duplicates().values\r\n # predicted_work = df_pred[df_pred.customer_id == cid][\r\n # ['is_work:add_lat', 'is_work:add_lon']].drop_duplicates().values\r\n # ​\r\n # points_pos = df_an[df_an.is_pos == 1][['add_lat', 'add_lon']].dropna().values\r\n # points_atm = df_an[df_an.is_pos == 0][['add_lat', 'add_lon']].dropna().values\r\n # print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)\r\n # ​\r\n # # синие - покупки\r\n # # красные - банкоматы\r\n # gmap = gmaps.Map()\r\n # if len(points_pos) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',\r\n # fill_color=\"blue\", stroke_color=\"blue\", scale=3))\r\n # if len(points_atm) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',\r\n # fill_color=\"red\", stroke_color=\"red\", scale=3))\r\n # ​\r\n # if not np.isnan(center_home)[0][0]:\r\n # gmap.add_layer(gmaps.marker_layer(center_home, label='home'))\r\n # if not np.isnan(center_work)[0][0]:\r\n # gmap.add_layer(gmaps.marker_layer(center_work, label='work'))\r\n # ​\r\n # gmap.add_layer(gmaps.marker_layer(predicted_home, label='predicted_home'))\r\n # gmap.add_layer(gmaps.marker_layer(predicted_work, label='predicted_work'))\r\n #\r\n # gmap\r\n #\r\n # df_all.to_csv('../data/dfpredict1903.csv', index=None)\r\n # Predict\r\n #\r\n # del cust_test\r\n #\r\n # cust_test = df_all.loc[df_all['is_train'] == 0, 'customer_id'].unique()\r\n # # df_test = pd.DataFrame(cust_test, columns = ['customer_id']).merge(df_all, how = 'left')\r\n # df_test = predict_proba(pd.DataFrame(cust_test, columns=['customer_id']).merge(df_all, how='left'))\r\n # df_test.rename(columns={\r\n # 'customer_id': '_ID_',\r\n # 'is_home:add_lat': '_HOME_LAT_',\r\n # 'is_home:add_lon': '_HOME_LON_',\r\n # 'is_work:add_lat': '_WORK_LAT_',\r\n # 'is_work:add_lon': '_WORK_LON_'}, inplace=True)\r\n # df_test = df_test[['_ID_', 
'_WORK_LAT_', '_WORK_LON_', '_HOME_LAT_', '_HOME_LON_']]\r\n
 #\r\n
 # df_test.head()\r\n
 # [df_test.head() preview omitted: _ID_, _WORK_LAT_, _WORK_LON_, _HOME_LAT_, _HOME_LON_ for the first 5 customers]\r\n
 # Build the submission file\r\n
 #\r\n
 # # Fill in missing customers\r\n
 # df_ = pd.read_csv('../data/test_set.csv', dtype=dtypes, usecols=['customer_id'])\r\n
 # submission = pd.DataFrame(df_['customer_id'].unique(), columns=['_ID_'])\r\n
 #\r\n
 # submission = submission.merge(df_test, how='left').fillna(0)\r\n
 # # Write the submission file\r\n
 # submission.to_csv('../submissions/base_16_644_353.csv', index=None)\r\n
 #\r\n
 # submission_2 = pd.read_csv('../submissions/base_11_625_34.csv')\r\n
 #\r\n
 # submission.head()\r\n
 #\r\n
 # submission_2.head()\r\n
 #\r\n
 # submission_3 = submission_2.copy()\r\n
 # submission_3['_WORK_LAT_'] = (submission['_WORK_LAT_'] + submission_2['_WORK_LAT_']) / 2\r\n
 # submission_3['_WORK_LON_'] = (submission['_WORK_LON_'] + submission_2['_WORK_LON_']) / 2\r\n
 # submission_3['_HOME_LAT_'] = (submission['_HOME_LAT_'] + submission_2['_HOME_LAT_']) / 2\r\n
 # submission_3['_HOME_LON_'] = (submission['_HOME_LON_'] + submission_2['_HOME_LON_']) / 2\r\n
 # submission_3.to_csv('../submissions/base_15and12/2.csv', index=None)\r\n
 #\r\n
 return src\r\n\r\n
def update_last_partition(dst, from_dt, to_dt):\r\n
 prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)\r\n
 res = spark.table(dst[\"d_train\"]).checkpoint()\r\n
 res = res.where(res.day == to_dt)\r\n
 res = res.withColumn(\"period_to_dt\", f.lit(prev_day)).withColumn(\"day\", f.lit(prev_day.strftime('%Y-%m-%d')))\r\n
 res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_train\"], overwrite=True)\r\n\r\n\r\n
def calc_07(src, dst, from_dt, to_dt):\r\n
 res = algo(src, from_dt, to_dt)\r\n
 res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_subway_entrance\"], overwrite=True)\r\n\r\n\r\n
def sandbox_src():\r\n
 return {\r\n
 \"psg_train\": spark.table(\"sandbox_mck.train\"),\r\n
 \"psg_test\": spark.table(\"sandbox_mck.test\"),\r\n
 \"psg_dev\": spark.table(\"sandbox_mck.dev\")\r\n
 }\r\n\r\n\r\n
def sandbox_dst():\r\n
 return {\r\n
 \"psg_result\": \"sandbox_mck.psg_result\"\r\n
 }\r\n\r\n\r\n
def prod_src():\r\n
 return {\r\n
 \"psg_train\": spark.table(\"prod_data.psg_train\"),\r\n
 \"psg_test\": spark.table(\"prod_data.psg_test\"),\r\n
 \"psg_dev\": spark.table(\"prod_data.psg_dev\")\r\n
 }\r\n\r\n\r\n
def prod_dst():\r\n
 return {\r\n
 \"psg_result\": \"prod_data.psg_result\"\r\n
 
}\r\n\r\n\r\nif __name__ == '__main__':\r\n
 spark = SparkSession.builder.appName(\"calc_07_task\").enableHiveSupport().getOrCreate()\r\n
 spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\r\n
 hivecontext = HiveContext(spark.sparkContext)\r\n
 hivecontext.setConf(\"hive.exec.dynamic.partition\", \"true\")\r\n
 hivecontext.setConf(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\r\n
 spark.sparkContext.setCheckpointDir(\"hdfs:///user/airflow/psg/calc_07_task\")\r\n\r\n
 opts = {\r\n
 'from_dt': sys.argv[1],\r\n
 \"to_dt\": \"9999-12-31\"\r\n
 }\r\n\r\n
 update_last_partition(prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n
 calc_07(prod_src(), prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n\r\n", "id": "5556850", "language": "Python", "matching_score": 5.67368745803833, "max_stars_count": 0, "path": "Raif/pyspark/calc_07.py" }, { "content": "import sys\r\n
from datetime import timedelta, datetime\r\n\r\n\r\n
from pyspark import HiveContext\r\n
from pyspark.sql import functions as f, SparkSession\r\n\r\n\r\n
def algo(src, from_dt, to_dt):\r\n
 res = steps(src, from_dt, to_dt)\r\n
 return res\r\n\r\n\r\n
def steps(src, from_dt, to_dt):\r\n
 # Overall approach:\r\n
 #\r\n
 # Add an is_home column to every transaction (1 if the transaction lies within 0.02 of the client's home)\r\n
 # Add an is_work column to every transaction (1 if the transaction lies within 0.02 of the client's work)\r\n
 # Train a classifier predicting the probability that is_home == 1 for a transaction\r\n
 # Train a classifier predicting the probability that is_work == 1 for a transaction\r\n
 #\r\n
 # Location-detection accuracy:\r\n
 #\r\n
 # for the is_home classifier: ~3x %\r\n
 # for the is_work classifier: ~2x %\r\n
 # overall score on the Public Leaderboard: ???\r\n
 #\r\n
 # Notes\r\n
 #\r\n
 # Requires Python version 3.5\r\n
 # Requires the xgboost library (xgboost version 0.7.post3 was used for training)\r\n
 # Requires the files test_set.csv and train_set.csv in the same directory as this script\r\n
 # Memory requirements: should work with 2 GB of free RAM\r\n
 # Running time: ~3 minutes (tested on an Intel Core i7-4770 CPU)\r\n
 #\r\n
 # % load_ext autoreload\r\n
 # % autoreload 2\r\n
 #\r\n
 # import sys\r\n
 # MODULES_PATH = '../code/'\r\n
 # if MODULES_PATH not in sys.path:\r\n
 #     sys.path.append(MODULES_PATH)\r\n
 # import mfuncs\r\n
 #\r\n
 # import pandas as pd\r\n
 # import numpy as np\r\n
 # from tqdm import tqdm\r\n
 # tqdm.pandas()\r\n
 # pd.options.display.max_columns = 1000\r\n
 #\r\n
 # import lightgbm as lgb\r\n
 #\r\n
 # from sklearn.neighbors import NearestNeighbors\r\n
 # % pylab inline\r\n
 # Populating the interactive namespace from numpy and matplotlib\r\n
 #\r\n
 # # Define the column types to save memory\r\n
 # dtypes = {\r\n
 # 'transaction_date': str,\r\n
 # 'atm_address': str,\r\n
 # 'country': str,\r\n
 # 'city': str,\r\n
 # 'amount': np.float32,\r\n
 # 'currency': np.float32,\r\n
 # 'mcc': str,\r\n
 # 'customer_id': str,\r\n
 # 'pos_address': str,\r\n
 # 'atm_address': str,\r\n
 # 
'pos_adress_lat': np.float32,\r\n # 'pos_adress_lon': np.float32,\r\n # 'pos_address_lat': np.float32,\r\n # 'pos_address_lon': np.float32,\r\n # 'atm_address_lat': np.float32,\r\n # 'atm_address_lon': np.float32,\r\n # 'home_add_lat': np.float32,\r\n # 'home_add_lon': np.float32,\r\n # 'work_add_lat': np.float32,\r\n # 'work_add_lon': np.float32,\r\n # }\r\n # ​\r\n # # для экономии памяти будем загружать только часть атрибутов транзакций\r\n # usecols_train = ['customer_id', 'transaction_date', 'amount', 'country', 'city', 'currency', 'mcc',\r\n # 'pos_adress_lat', 'pos_adress_lon', 'atm_address_lat', 'atm_address_lon', 'home_add_lat',\r\n # 'home_add_lon', 'work_add_lat', 'work_add_lon']\r\n # usecols_test = ['customer_id', 'transaction_date', 'amount', 'country', 'city', 'currency', 'mcc',\r\n # 'pos_address_lat', 'pos_address_lon', 'atm_address_lat', 'atm_address_lon']\r\n # Читаем\r\n # train_set, test_set, соединяем\r\n # в\r\n # один\r\n # датасет\r\n #\r\n # dtypes = {\r\n # 'transaction_date': str,\r\n # 'atm_address': str,\r\n # 'country': str,\r\n # 'city': str,\r\n # 'amount': np.float32,\r\n # 'currency': np.float32,\r\n # 'mcc': str,\r\n # 'customer_id': str,\r\n # 'pos_address': str,\r\n # 'atm_address': str,\r\n # 'pos_adress_lat': np.float32,\r\n # 'pos_adress_lon': np.float32,\r\n # 'pos_address_lat': np.float32,\r\n # 'pos_address_lon': np.float32,\r\n # 'atm_address_lat': np.float32,\r\n # 'atm_address_lon': np.float32,\r\n # 'home_add_lat': np.float32,\r\n # 'home_add_lon': np.float32,\r\n # 'work_add_lat': np.float32,\r\n # 'work_add_lon': np.float32,\r\n # }\r\n # ​\r\n # rnm = {\r\n # 'atm_address_lat': 'atm_lat',\r\n # 'atm_address_lon': 'atm_lon',\r\n # 'pos_adress_lat': 'pos_lat',\r\n # 'pos_adress_lon': 'pos_lon',\r\n # 'pos_address_lat': 'pos_lat',\r\n # 'pos_address_lon': 'pos_lon',\r\n # 'home_add_lat': 'home_lat',\r\n # 'home_add_lon': 'home_lon',\r\n # 'work_add_lat': 'work_lat',\r\n # 'work_add_lon': 'work_lon',\r\n # }\r\n #\r\n # df_train = pd.read_csv('../data/train_set.csv', dtype=dtypes)\r\n # df_test = pd.read_csv('../data/test_set.csv', dtype=dtypes)\r\n # ​\r\n # df_train.rename(columns=rnm, inplace=True)\r\n # df_test.rename(columns=rnm, inplace=True)\r\n #\r\n # # удалим чувак с множественными адресами\r\n # print(df_train.shape)\r\n # gb = df_train.groupby('customer_id')['work_lat'].agg('nunique')\r\n # cid_incorrect = gb[gb == 2].index\r\n # df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]\r\n # print(df_train.shape)\r\n # gb = df_train.groupby('customer_id')['home_lat'].agg('nunique')\r\n # cid_incorrect = gb[gb == 2].index\r\n # df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]\r\n # print(df_train.shape)\r\n # (1224734, 18)\r\n # (1207958, 18)\r\n # (1142653, 18)\r\n #\r\n # # соединяем test/train в одном DataFrame\r\n # df_train['is_train'] = np.int32(1)\r\n # df_test['is_train'] = np.int32(0)\r\n # df_all = pd.concat([df_train, df_test])\r\n # ​\r\n # del df_train, df_test\r\n # Обрабатываем\r\n # дату\r\n # транзакции\r\n # и\r\n # категориальные\r\n # признаки\r\n #\r\n # df_all['currency'] = df_all['currency'].fillna(-1).astype(np.int32)\r\n # df_all['mcc'] = df_all['mcc'].apply(lambda x: int(x.replace(',', ''))).astype(np.int32)\r\n # df_all['city'] = df_all['city'].factorize()[0].astype(np.int32)\r\n # df_all['country'] = df_all['country'].factorize()[0].astype(np.int32)\r\n # ​\r\n # # удаляем транзакции без даты\r\n # df_all = df_all[~df_all['transaction_date'].isnull()]\r\n # 
df_all['transaction_date'] = pd.to_datetime(df_all['transaction_date'], format='%Y-%m-%d')\r\n # Фичи\r\n # для\r\n # даты\r\n #\r\n # df_all['month'] = df_all.transaction_date.dt.month\r\n # df_all['day'] = df_all.transaction_date.dt.day\r\n # df_all['dayofyear'] = df_all.transaction_date.dt.dayofyear\r\n # df_all['dayofweek'] = df_all.transaction_date.dt.dayofweek\r\n # Приводим\r\n # адрес\r\n # транзакции\r\n # для\r\n # pos\r\n # и\r\n # atm - транзакций\r\n # к\r\n # единообразному\r\n # виду\r\n # Просто\r\n # объединяем\r\n # в\r\n # одну\r\n # колонку\r\n # и\r\n # добавляем\r\n # фичу - это\r\n # атм\r\n # или\r\n # пос\r\n #\r\n # dfs = []\r\n # for cid in tqdm(df_all.customer_id.unique()):\r\n # df_an = df_all[df_all.customer_id == cid]\r\n # df_an = mfuncs.add_dist_to_neighbours(df_an)\r\n # dfs.append(df_an)\r\n # 100 % |██████████ | 19642 / 19642[2:17:07 < 00: 00, 6.49\r\n # it / s]\r\n #\r\n # df_knn = pd.concat(dfs)\r\n # df_knn.head()\r\n #\r\n # df_knn['pos2pos_1', 'pos2pos_2', 'atm2pos_1', 'atm2pos_2', 'pos2atm_1',\r\n # 'pos2atm_2', 'pos2atm_1', 'pos2atm_2']\r\n # Index(['amount', 'atm_address', 'atm_lat', 'atm_lon', 'city', 'country',\r\n # 'currency', 'customer_id', 'home_lat', 'home_lon', 'is_train', 'mcc',\r\n # 'pos_address', 'pos_lat', 'pos_lon', 'terminal_id', 'transaction_date',\r\n # 'work_lat', 'work_lon', 'month', 'day', 'dayofyear', 'dayofweek',\r\n # 'pos2pos_1', 'pos2pos_2', 'atm2pos_1', 'atm2pos_2', 'pos2atm_1',\r\n # 'pos2atm_2', 'pos2atm_1', 'pos2atm_2'],\r\n # dtype='object')\r\n #\r\n # df_knn.to_csv('../data/df_knn.csv', index=None)\r\n #\r\n # df_all = df_knn.copy()\r\n #\r\n # df_all['is_atm'] = (~df_all['atm_lat'].isnull()).astype(np.int8)\r\n # df_all['is_pos'] = (~df_all['pos_lat'].isnull()).astype(np.int8)\r\n # ​\r\n # df_all['add_lat'] = df_all['atm_lat'].fillna(0) + df_all['pos_lat'].fillna(0)\r\n # df_all['add_lon'] = df_all['atm_lon'].fillna(0) + df_all['pos_lon'].fillna(0)\r\n # ​\r\n # df_all.drop(['atm_lat', 'atm_lon', 'pos_lat', 'pos_lon'], axis=1, inplace=True)\r\n # ​\r\n # df_all = df_all[~((df_all['add_lon'] == 0) & (df_all['add_lon'] == 0))]\r\n # Генерируем\r\n # признаки\r\n # is_home, is_work\r\n # TODO: удалить\r\n # чуваков\r\n # у\r\n # которых\r\n # несколько\r\n # домов\r\n #\r\n # lat = df_all['home_lat'] - df_all['add_lat']\r\n # lon = df_all['home_lon'] - df_all['add_lon']\r\n # ​\r\n # df_all['is_home'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n # df_all['has_home'] = (~df_all['home_lon'].isnull()).astype(np.int8)\r\n # ​\r\n # lat = df_all['work_lat'] - df_all['add_lat']\r\n # lon = df_all['work_lon'] - df_all['add_lon']\r\n # df_all['is_work'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n # df_all['has_work'] = (~df_all['work_lon'].isnull()).astype(np.int8)\r\n # ​\r\n # df_all.drop(['work_lat', 'work_lon', 'home_lat', 'home_lon'], axis=1, inplace=True)\r\n # Генерируем\r\n # категориальный\r\n # признак\r\n # для\r\n # адреса\r\n #\r\n # df_all['address'] = df_all['add_lat'].apply(lambda x: \"%.02f\" % x) + ';' + df_all['add_lon'].apply(\r\n # lambda x: \"%.02f\" % x)\r\n # df_all['address'] = df_all['address'].factorize()[0].astype(np.int32)\r\n # Генерируем\r\n # несколько\r\n # абонентских\r\n # фич\r\n #\r\n # # количество транзакций каждого клиента\r\n # df_all = df_all.merge(df_all.groupby('customer_id')['amount'].count().reset_index(name='cid_trans_count'),\r\n # how='left')\r\n # df_all['cid_trans_count'] = df_all['cid_trans_count'].astype(np.int32)\r\n # ​\r\n # 
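 # The next cell counts a client's transactions at each address and derives ratio1 (the share\r\n
 # of the client's transactions that fall at that address), with a TODO asking for more features\r\n
 # of this kind. A commented sketch of two more shares in the same spirit -- the column names\r\n
 # ratio_amount and ratio_days are hypothetical; customer_id / address / amount /\r\n
 # transaction_date are the columns built above:\r\n
 #\r\n
 # # share of the client's total spend that happens at this address\r\n
 # amt_at_addr = df_all.groupby(['customer_id', 'address'])['amount'].transform('sum')\r\n
 # df_all['ratio_amount'] = amt_at_addr / df_all.groupby('customer_id')['amount'].transform('sum')\r\n
 #\r\n
 # # share of the client's distinct transaction days seen at this address\r\n
 # days_at_addr = df_all.groupby(['customer_id', 'address'])['transaction_date'].transform('nunique')\r\n
 # df_all['ratio_days'] = days_at_addr / df_all.groupby('customer_id')['transaction_date'].transform('nunique')\r\n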
 # df_all = df_all.merge(\r\n
 # df_all.groupby(['customer_id', 'address'])['amount'].count().reset_index(name='cid_add_trans_count'),\r\n
 # how='left')\r\n
 # df_all['cid_add_trans_count'] = df_all['cid_add_trans_count'].astype(np.int32)\r\n
 #\r\n
 # # what share of the client's transactions falls at this particular address\r\n
 # # TODO: MORE FEATURES LIKE THIS\r\n
 # df_all['ratio1'] = df_all['cid_add_trans_count'] / df_all['cid_trans_count']\r\n
 # My features\r\n
 #\r\n
 # df_gb[['amount', 'add_lat', 'add_lon']].agg(['mean', 'max', 'min'])\r\n
 # [ValueError traceback omitted: "cannot reindex from a duplicate axis" -- df_all apparently has duplicated column names at this point; they are dropped later with df_all.loc[:, ~df_all.columns.duplicated()]]\r\n
 #\r\n
 # df_all[['customer_id', 'amount', 'add_lat', 'add_lon']]\r\n
 # [frame preview omitted: 2215382 rows x 4 columns]\r\n
 #\r\n
 # df_gb['amount', 'add_lat', 'add_lon'].agg(['mean', 'max', 'min'])\r\n
 #\r\n
 # df_all.reset_index(inplace=True, drop=True)\r\n
 #\r\n
 # df_all[['customer_id', 'amount', 'add_lat', 'add_lon']].groupby('customer_id').agg('max')\r\n
 # [per-customer max preview truncated; last row and shape:]\r\n
 # 
ffebf4ea02c72183128d966721976ec9\r\n # 5.096354\r\n # 56.328468\r\n # 44.083584\r\n # 19642\r\n # rows × 3\r\n # columns\r\n #\r\n # # добавим признаки после групбая\r\n # df_gb = df_all[['customer_id', 'amount', 'add_lat', 'add_lon']].groupby('customer_id')\r\n # coord_stat_df = df_gb.agg(['mean', 'max', 'min'])\r\n # coord_stat_df['transactions_per_user'] = df_gb.agg('size')\r\n # coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]\r\n # coord_stat_df.reset_index(inplace=True)\r\n # df_all = pd.merge(df_all, coord_stat_df, on='customer_id', how='left')\r\n #\r\n # cols = ['add_lat', 'add_lon']\r\n # types = ['min', 'max', 'mean']\r\n # for c in cols:\r\n # for\r\n # t in types:\r\n # df_all['{}_ratio_{}'.format(c, t)] = np.abs(df_all[c] / df_all['{}_{}'.format(c, t)])\r\n #\r\n # df_all = pd.concat([df_all, pd.get_dummies(df_all['mcc'], prefix='mcc')], axis=1)\r\n # del df_all['mcc']\r\n # LightGBM\r\n #\r\n # df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n # from sklearn.model_selection import train_test_split\r\n # ​\r\n # ys = ['is_home', 'is_work']\r\n # drop_cols = ['atm_address', 'customer_id', 'pos_address', 'terminal_id', 'transaction_date',\r\n # 'is_home', 'has_home', 'is_work', 'has_work', 'is_train']\r\n # ​\r\n # drop_cols += ['pred:is_home', 'pred:is_work']\r\n # y_cols = ['is_home', 'is_work']\r\n # usecols = df_all.drop(drop_cols, 1, errors='ignore').columns\r\n #\r\n # params = {\r\n # 'objective': 'binary',\r\n # 'num_leaves': 63,\r\n # 'learning_rate': 0.01,\r\n # 'metric': 'binary_logloss',\r\n # 'feature_fraction': 0.8,\r\n # 'bagging_fraction': 0.8,\r\n # 'bagging_freq': 1,\r\n # 'num_threads': 12,\r\n # 'verbose': 0,\r\n # }\r\n # ​\r\n # model = {}\r\n #\r\n # y_col = 'is_home'\r\n # ​\r\n # cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n # cust_train = cust_train[cust_train > 0].index\r\n # ​\r\n # cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n # ​\r\n # df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n # df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n # ​\r\n # lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n # lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n # ​\r\n # gbm_h = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=300)\r\n # ​\r\n # model[y_col] = gbm_h\r\n # Training\r\n # until\r\n # validation\r\n # scores\r\n # don\r\n # 't improve for 300 rounds.\r\n # [30]\r\n # valid_0\r\n # 's binary_logloss: 0.60709\r\n # [60]\r\n # valid_0\r\n # 's binary_logloss: 0.55481\r\n # [90]\r\n # valid_0\r\n # 's binary_logloss: 0.52188\r\n # [120]\r\n # valid_0\r\n # 's binary_logloss: 0.500737\r\n # [150]\r\n # valid_0\r\n # 's binary_logloss: 0.486256\r\n # [180]\r\n # valid_0\r\n # 's binary_logloss: 0.476052\r\n # [210]\r\n # valid_0\r\n # 's binary_logloss: 0.46878\r\n # [240]\r\n # valid_0\r\n # 's binary_logloss: 0.463383\r\n # [270]\r\n # valid_0\r\n # 's binary_logloss: 0.458828\r\n # [300]\r\n # valid_0\r\n # 's binary_logloss: 0.455549\r\n # [330]\r\n # valid_0\r\n # 's binary_logloss: 0.452839\r\n # [360]\r\n # valid_0\r\n # 's binary_logloss: 0.450904\r\n # [390]\r\n # valid_0\r\n # 's binary_logloss: 0.448986\r\n # [420]\r\n # valid_0\r\n # 's binary_logloss: 0.447638\r\n # 
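# Minimal, self-contained sketch of the per-customer aggregate features built in the
# commented cell above: groupby statistics merged back onto every transaction, plus
# per-row ratios against the customer's own min/max/mean.  Column names mirror the
# notebook (customer_id, amount, add_lat, add_lon); the toy values are invented.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'customer_id': ['a', 'a', 'a', 'b', 'b'],
    'amount':      [3.1, 4.0, 2.5, 5.2, 4.8],
    'add_lat':     [59.85, 59.86, 59.84, 55.75, 55.76],
    'add_lon':     [30.24, 30.25, 30.23, 37.60, 37.61],
})

grp = toy.groupby('customer_id')[['amount', 'add_lat', 'add_lon']]
stats = grp.agg(['mean', 'max', 'min'])
stats.columns = ['_'.join(c) for c in stats.columns]      # amount_mean, add_lat_max, ...
stats['transactions_per_user'] = grp.size()
stats = stats.reset_index()

toy = toy.merge(stats, on='customer_id', how='left')

# the wrapped "for c in cols: / for t in types:" loop from the notebook, unwrapped:
for c in ['add_lat', 'add_lon']:
    for t in ['min', 'max', 'mean']:
        toy[f'{c}_ratio_{t}'] = np.abs(toy[c] / toy[f'{c}_{t}'])

print(toy.filter(like='add_lat').head())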
[450]\r\n # valid_0\r\n # 's binary_logloss: 0.446429\r\n # [480]\r\n # valid_0\r\n # 's binary_logloss: 0.445285\r\n # [510]\r\n # valid_0\r\n # 's binary_logloss: 0.444496\r\n # [540]\r\n # valid_0\r\n # 's binary_logloss: 0.443656\r\n # [570]\r\n # valid_0\r\n # 's binary_logloss: 0.443121\r\n # [600]\r\n # valid_0\r\n # 's binary_logloss: 0.44271\r\n # [630]\r\n # valid_0\r\n # 's binary_logloss: 0.442535\r\n # [660]\r\n # valid_0\r\n # 's binary_logloss: 0.442225\r\n # [690]\r\n # valid_0\r\n # 's binary_logloss: 0.441854\r\n # [720]\r\n # valid_0\r\n # 's binary_logloss: 0.44158\r\n # [750]\r\n # valid_0\r\n # 's binary_logloss: 0.441439\r\n # [780]\r\n # valid_0\r\n # 's binary_logloss: 0.441242\r\n # [810]\r\n # valid_0\r\n # 's binary_logloss: 0.441204\r\n # [840]\r\n # valid_0\r\n # 's binary_logloss: 0.441231\r\n # [870]\r\n # valid_0\r\n # 's binary_logloss: 0.441259\r\n # [900]\r\n # valid_0\r\n # 's binary_logloss: 0.441044\r\n # [930]\r\n # valid_0\r\n # 's binary_logloss: 0.440819\r\n # [960]\r\n # valid_0\r\n # 's binary_logloss: 0.440762\r\n # [990]\r\n # valid_0\r\n # 's binary_logloss: 0.440506\r\n # [1020]\r\n # valid_0\r\n # 's binary_logloss: 0.440368\r\n # [1050]\r\n # valid_0\r\n # 's binary_logloss: 0.440271\r\n # [1080]\r\n # valid_0\r\n # 's binary_logloss: 0.440108\r\n # [1110]\r\n # valid_0\r\n # 's binary_logloss: 0.440068\r\n # [1140]\r\n # valid_0\r\n # 's binary_logloss: 0.440076\r\n # [1170]\r\n # valid_0\r\n # 's binary_logloss: 0.440098\r\n # [1200]\r\n # valid_0\r\n # 's binary_logloss: 0.440219\r\n # [1230]\r\n # valid_0\r\n # 's binary_logloss: 0.440263\r\n # [1260]\r\n # valid_0\r\n # 's binary_logloss: 0.440486\r\n # [1290]\r\n # valid_0\r\n # 's binary_logloss: 0.440679\r\n # [1320]\r\n # valid_0\r\n # 's binary_logloss: 0.440664\r\n # [1350]\r\n # valid_0\r\n # 's binary_logloss: 0.440489\r\n # [1380]\r\n # valid_0\r\n # 's binary_logloss: 0.44051\r\n # [1410]\r\n # valid_0\r\n # 's binary_logloss: 0.440349\r\n # Early\r\n # stopping, best\r\n # iteration is:\r\n # [1124]\r\n # valid_0\r\n # 's binary_logloss: 0.439947\r\n #\r\n # y_col = 'is_work'\r\n # ​\r\n # cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n # cust_train = cust_train[cust_train > 0].index\r\n # ​\r\n # cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n # ​\r\n # ​\r\n # ​\r\n # df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n # df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n # ​\r\n # lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n # lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n # ​\r\n # gbm_w = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=300)\r\n # ​\r\n # model[y_col] = gbm_w\r\n # Training\r\n # until\r\n # validation\r\n # scores\r\n # don\r\n # 't improve for 300 rounds.\r\n # [30]\r\n # valid_0\r\n # 's binary_logloss: 0.577203\r\n # [60]\r\n # valid_0\r\n # 's binary_logloss: 0.505615\r\n # [90]\r\n # valid_0\r\n # 's binary_logloss: 0.46027\r\n # [120]\r\n # valid_0\r\n # 's binary_logloss: 0.429884\r\n # [150]\r\n # valid_0\r\n # 's binary_logloss: 0.409737\r\n # [180]\r\n # valid_0\r\n # 's binary_logloss: 0.395657\r\n # [210]\r\n # valid_0\r\n # 's binary_logloss: 0.385574\r\n # [240]\r\n # valid_0\r\n # 's binary_logloss: 0.378012\r\n # 
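# The training cell above splits customers (not rows) into train/valid, so that all
# transactions of one customer stay on the same side of the split, then trains a binary
# LightGBM model with early stopping on validation logloss.  A hedged, self-contained
# sketch on synthetic data: the notebook uses the older verbose_eval /
# early_stopping_rounds keyword arguments of lgb.train(); the equivalent callbacks are
# used here.
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n = 5000
df = pd.DataFrame({
    'customer_id': rng.integers(0, 500, n),
    'f1': rng.normal(size=n),
    'f2': rng.normal(size=n),
})
df['is_home'] = (df['f1'] + rng.normal(scale=0.5, size=n) > 0).astype(int)

# split on unique customers, then map the split back to rows
cust_train, cust_valid = train_test_split(
    df['customer_id'].unique(), test_size=0.2, shuffle=True, random_state=111)
df_train = df[df['customer_id'].isin(cust_train)]
df_valid = df[df['customer_id'].isin(cust_valid)]

feats = ['f1', 'f2']
params = {'objective': 'binary', 'metric': 'binary_logloss',
          'learning_rate': 0.05, 'num_leaves': 31, 'verbose': -1}
gbm = lgb.train(
    params,
    lgb.Dataset(df_train[feats], df_train['is_home']),
    valid_sets=[lgb.Dataset(df_valid[feats], df_valid['is_home'])],
    num_boost_round=500,
    callbacks=[lgb.early_stopping(50), lgb.log_evaluation(100)],
)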
[270]\r\n # valid_0\r\n # 's binary_logloss: 0.372507\r\n # [300]\r\n # valid_0\r\n # 's binary_logloss: 0.367728\r\n # [330]\r\n # valid_0\r\n # 's binary_logloss: 0.364101\r\n # [360]\r\n # valid_0\r\n # 's binary_logloss: 0.36112\r\n # [390]\r\n # valid_0\r\n # 's binary_logloss: 0.359536\r\n # [420]\r\n # valid_0\r\n # 's binary_logloss: 0.357894\r\n # [450]\r\n # valid_0\r\n # 's binary_logloss: 0.35652\r\n # [480]\r\n # valid_0\r\n # 's binary_logloss: 0.3556\r\n # [510]\r\n # valid_0\r\n # 's binary_logloss: 0.354502\r\n # [540]\r\n # valid_0\r\n # 's binary_logloss: 0.353899\r\n # [570]\r\n # valid_0\r\n # 's binary_logloss: 0.353522\r\n # [600]\r\n # valid_0\r\n # 's binary_logloss: 0.353235\r\n # [630]\r\n # valid_0\r\n # 's binary_logloss: 0.353279\r\n # [660]\r\n # valid_0\r\n # 's binary_logloss: 0.353185\r\n # [690]\r\n # valid_0\r\n # 's binary_logloss: 0.353026\r\n # [720]\r\n # valid_0\r\n # 's binary_logloss: 0.353079\r\n # [750]\r\n # valid_0\r\n # 's binary_logloss: 0.353015\r\n # [780]\r\n # valid_0\r\n # 's binary_logloss: 0.35353\r\n # [810]\r\n # valid_0\r\n # 's binary_logloss: 0.353381\r\n # [840]\r\n # valid_0\r\n # 's binary_logloss: 0.353388\r\n # [870]\r\n # valid_0\r\n # 's binary_logloss: 0.353113\r\n # [900]\r\n # valid_0\r\n # 's binary_logloss: 0.353014\r\n # [930]\r\n # valid_0\r\n # 's binary_logloss: 0.35277\r\n # [960]\r\n # valid_0\r\n # 's binary_logloss: 0.353147\r\n # [990]\r\n # valid_0\r\n # 's binary_logloss: 0.3532\r\n # [1020]\r\n # valid_0\r\n # 's binary_logloss: 0.353182\r\n # [1050]\r\n # valid_0\r\n # 's binary_logloss: 0.353325\r\n # [1080]\r\n # valid_0\r\n # 's binary_logloss: 0.353466\r\n # [1110]\r\n # valid_0\r\n # 's binary_logloss: 0.353251\r\n # [1140]\r\n # valid_0\r\n # 's binary_logloss: 0.353296\r\n # [1170]\r\n # valid_0\r\n # 's binary_logloss: 0.353393\r\n # [1200]\r\n # valid_0\r\n # 's binary_logloss: 0.353586\r\n # [1230]\r\n # valid_0\r\n # 's binary_logloss: 0.353567\r\n # Early\r\n # stopping, best\r\n # iteration is:\r\n # [930]\r\n # valid_0\r\n # 's binary_logloss: 0.35277\r\n #\r\n # lgb.plot_importance(gbm_w, max_num_features=15)\r\n # < matplotlib.axes._subplots.AxesSubplot\r\n # at\r\n # 0x7f1889ff2400 >\r\n #\r\n # def _best(x):\r\n # ret = None\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # if pred in x:\r\n # i = (x[pred].idxmax())\r\n # cols = [pred, 'add_lat', 'add_lon']\r\n # if col in x:\r\n # cols.append(col)\r\n # tmp = x.loc[i, cols]\r\n # tmp.rename({\r\n # 'add_lat': '%s:add_lat' % col,\r\n # 'add_lon': '%s:add_lon' % col,\r\n # }, inplace=True)\r\n # if ret is None:\r\n # ret = tmp\r\n # else:\r\n # ret = pd.concat([ret, tmp])\r\n # return ret\r\n #\r\n # ​\r\n #\r\n # def predict_proba(dt, ys=['is_home', 'is_work']):\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # dt[pred] = model[col].predict(dt[usecols])\r\n # return dt.groupby('customer_id').apply(_best).reset_index()\r\n #\r\n # ​\r\n #\r\n # def score(dt, ys=['is_home', 'is_work']):\r\n # dt_ret = predict_proba(dt, ys)\r\n # mean = 0.0\r\n # for col in ys:\r\n # col_mean = dt_ret[col].mean()\r\n # mean += col_mean\r\n # if len(ys) == 2:\r\n # mean = mean / len(ys)\r\n # return mean\r\n #\r\n # print(\"Train accuracy:\", score(df_train, ys=['is_home']))\r\n # print(\"Test accuracy:\", score(df_valid, ys=['is_home']))\r\n # ​\r\n # print(\"Train accuracy:\", score(df_train, ys=['is_work']))\r\n # print(\"Test accuracy:\", score(df_valid, ys=['is_work']))\r\n # Train\r\n # accuracy: 0.5152524993591386\r\n # Test\r\n # 
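# The _best / predict_proba / score helpers above reduce per-transaction probabilities
# to one answer per customer: they pick the transaction with the highest predicted
# score and report its coordinates, and the "accuracy" is the share of customers for
# whom the picked transaction really carried is_home == 1 (resp. is_work).  A hedged
# toy sketch of that reduction; the values are invented.
import pandas as pd

toy = pd.DataFrame({
    'customer_id':  ['a', 'a', 'a', 'b', 'b'],
    'pred:is_home': [0.1, 0.8, 0.3, 0.6, 0.2],
    'add_lat':      [59.85, 59.86, 59.84, 55.75, 55.76],
    'add_lon':      [30.24, 30.25, 30.23, 37.60, 37.61],
    'is_home':      [0, 1, 0, 0, 0],
})

best_idx = toy.groupby('customer_id')['pred:is_home'].idxmax()
picked = toy.loc[best_idx, ['customer_id', 'add_lat', 'add_lon', 'is_home']]

print(picked)                                   # one predicted location per customer
print('accuracy:', picked['is_home'].mean())    # 0.5 here: 'a' is correct, 'b' is not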
accuracy: 0.5204918032786885\r\n # Predict\r\n #\r\n # cust_test = df_all[df_all['is_train'] == 0]['customer_id'].unique()\r\n # df_test = pd.DataFrame(cust_test, columns=['customer_id']).merge(df_all, how='left')\r\n # df_test = predict_proba(df_test)\r\n # df_test.rename(columns={\r\n # 'customer_id': '_ID_',\r\n # 'is_home:add_lat': '_HOME_LAT_',\r\n # 'is_home:add_lon': '_HOME_LON_',\r\n # 'is_work:add_lat': '_WORK_LAT_',\r\n # 'is_work:add_lon': '_WORK_LON_'}, inplace=True)\r\n # df_test = df_test[['_ID_', '_WORK_LAT_', '_WORK_LON_', '_HOME_LAT_', '_HOME_LON_']]\r\n # ​\r\n # df_test.head()\r\n # Формируем\r\n # submission - файл\r\n #\r\n # # Заполняем пропуски\r\n # df_ = pd.read_csv('../data/test_set.csv', dtype=dtypes, usecols=['customer_id'])\r\n # submission = pd.DataFrame(df_['customer_id'].unique(), columns=['_ID_'])\r\n # ​\r\n # submission = submission.merge(df_test, how='left').fillna(0)\r\n # # Пишем файл submission\r\n # submission.to_csv('../submissions/base_2_47_32.csv', index=None)\r\n return src\r\n\r\ndef update_last_partition(dst, from_dt, to_dt):\r\n prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)\r\n res = spark.table(dst[\"d_train\"]).checkpoint()\r\n res = res.where(res.day == to_dt)\r\n res = res.withColumn(\"period_to_dt\", f.lit(prev_day)).withColumn(\"day\", f.lit(prev_day.strftime('%Y-%m-%d')))\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_train\"], overwrite=True)\r\n\r\n\r\ndef calc_04(src, dst, from_dt, to_dt):\r\n res = algo(src, from_dt, to_dt)\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_subway_entrance\"], overwrite=True)\r\n\r\n\r\ndef sandbox_src():\r\n return {\r\n \"psg_train\": spark.table(\"sandbox_mck.train\"),\r\n \"psg_test\": spark.table(\"sandbox_mck.test\"),\r\n \"psg_dev\": spark.table(\"sandbox_mck.dev\")\r\n }\r\n\r\n\r\ndef sandbox_dst():\r\n return {\r\n \"psg_result\": \"sandbox_mck.psg_result\"\r\n }\r\n\r\n\r\ndef prod_src():\r\n return {\r\n \"psg_train\": spark.table(\"prod_data.psg_train\"),\r\n \"psg_test\": spark.table(\"prod_data.psg_test\"),\r\n \"psg_dev\": spark.table(\"prod_data.psg_dev\")\r\n }\r\n\r\n\r\ndef prod_dst():\r\n return {\r\n \"psg_result\": \"prod_data.psg_result\"\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n spark = SparkSession.builder.appName(\"calc_04_task\").enableHiveSupport().getOrCreate()\r\n spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\r\n hivecontext = HiveContext(spark.sparkContext)\r\n hivecontext.setConf(\"hive.exec.dynamic.partition\", \"true\")\r\n hivecontext.setConf(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\r\n spark.sparkContext.setCheckpointDir(\"hdfs:///user/airflow/psg/calc_04_task\")\r\n\r\n opts = {\r\n 'from_dt': sys.argv[1],\r\n \"to_dt\": \"9999-12-31\"\r\n }\r\n\r\n update_last_partition(prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n calc_04(prod_src(), prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n\r\n", "id": "12154991", "language": "Python", "matching_score": 4.954290390014648, "max_stars_count": 0, "path": "Raif/pyspark/calc_04.py" }, { "content": "import sys\r\nfrom datetime import timedelta, datetime\r\n\r\n\r\nfrom pyspark import HiveContext\r\nfrom pyspark.sql import functions as f, SparkSession\r\n\r\n\r\ndef algo(src, from_dt, to_dt):\r\n res = steps(src, from_dt, to_dt)\r\n return res\r\n\r\n\r\ndef steps(src, from_dt, to_dt):\r\n # Общий\r\n # подход:\r\n #\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_work(если\r\n # транзакция\r\n # 
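# The approach being described here labels every transaction with is_home / is_work = 1
# when its coordinates fall within a 0.02-degree Euclidean radius of the customer's
# known home / work point (on the order of 1-2 km at these latitudes, ignoring the
# lat/lon scale difference).  A hedged sketch of that labelling rule on invented data:
import numpy as np
import pandas as pd

def label_within_radius(df, lat_col, lon_col, target_lat, target_lon, radius=0.02):
    """1 if the transaction lies within `radius` degrees of the target point."""
    d = np.sqrt((df[lat_col] - df[target_lat]) ** 2 + (df[lon_col] - df[target_lon]) ** 2)
    return (d <= radius).astype(np.int8)

toy = pd.DataFrame({
    'add_lat':  [59.860, 59.900],
    'add_lon':  [30.250, 30.400],
    'home_lat': [59.858, 59.858],
    'home_lon': [30.249, 30.249],
})
toy['is_home'] = label_within_radius(toy, 'add_lat', 'add_lon', 'home_lat', 'home_lon')
print(toy)   # the first row is within 0.02 degrees of home, the second is not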
находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # дома\r\n # клиента)\r\n # Добавляем\r\n # к\r\n # каждой\r\n # транзакции\r\n # столбец: is_home(если\r\n # транзакция\r\n # находится\r\n # в\r\n # пределах\r\n # 0.02\r\n # от\r\n # работы\r\n # клиента)\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_home == 1)\r\n # для\r\n # транзакции\r\n # Обучаем\r\n # классификатор\r\n # предсказывающий\r\n # вероятность(is_work == 1)\r\n # для\r\n # транзакции\r\n # Точность\r\n # определения\r\n # местоположения:\r\n #\r\n # для\r\n # классификатора\r\n # is_home: ~3\r\n # x %\r\n # для\r\n # классификатора\r\n # is_work: ~2\r\n # x %\r\n # общая\r\n # оценка\r\n # на\r\n # Public\r\n # Leaderboard: ???\r\n # Примечание\r\n #\r\n # Требуется\r\n # Python\r\n # версии\r\n # 3.5\r\n # Требуется\r\n # библиотека\r\n # xgboost(для\r\n # обучения\r\n # использовалась\r\n # xgboost\r\n # версии\r\n # 0.7.post3)\r\n # Требуются\r\n # файлы: test_set.csv, train_set.csv\r\n # в\r\n # одном\r\n # каталоге\r\n # с\r\n # данным\r\n # скриптом\r\n # Требования\r\n # к\r\n # памяти: должно\r\n # работать\r\n # с\r\n # 2\r\n # Гб\r\n # свободного\r\n # RAM\r\n # Время\r\n # работы: ~3\r\n # минуты(тестировалось\r\n # на\r\n # процессоре\r\n # Intel\r\n # Core\r\n # i7 - 4770)\r\n #\r\n # % load_ext\r\n # autoreload\r\n # % autoreload\r\n # 2\r\n # ​\r\n # import sys\r\n # MODULES_PATH = '../code/'\r\n # if MODULES_PATH not in sys.path:\r\n # sys.path.append(MODULES_PATH)\r\n # import mfuncs\r\n #\r\n # import pandas as pd\r\n # import numpy as np\r\n # from tqdm import tqdm\r\n # tqdm.pandas()\r\n # pd.options.display.max_columns = 1000\r\n # pd.options.display.max_colwidth = -1\r\n # ​\r\n # import lightgbm as lgb\r\n # ​\r\n # ​\r\n # from sklearn.neighbors import NearestNeighbors\r\n # from sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, AgglomerativeClustering\r\n # from sklearn.metrics import silhouette_samples, silhouette_score\r\n # ​\r\n # import gmaps\r\n # API_KEY = '<KEY>'\r\n # gmaps.configure(api_key=API_KEY) # Your Google API key\r\n # % pylab\r\n # inline\r\n # The\r\n # autoreload\r\n # extension is already\r\n # loaded.To\r\n # reload\r\n # it, use:\r\n # % reload_ext\r\n # autoreload\r\n #\r\n #\r\n #Populating\r\n #the\r\n #interactive\r\n #namespace\r\n #from numpy and matplotlib\r\n #/ usr / local / lib / python3\r\n #.5 / dist - packages / IPython / core / magics / pylab.py: 160: UserWarning: pylab\r\n #import has\r\n #\r\n #clobbered\r\n #these\r\n #variables: ['f']\r\n #` % matplotlib\r\n #` prevents\r\n #importing *\r\n #from pylab and numpy\r\n #\"\\n`%matplotlib` prevents importing * from pylab and numpy\"\r\n #\r\n #import pickle\r\n #\r\n #\r\n #def get_city(x):\r\n # if 'Москва' in x:\r\n # return 'Москва'\r\n # if 'Санкт-Петербург' in x:\r\n # return 'Санкт-Петербург'\r\n #\r\n # x = x.split(',')\r\n # if len(x) < 6:\r\n # return 'none'\r\n # else:\r\n # return x[-6]\r\n #\r\n #\r\n #with open('../data/internal/test_coords.pcl', 'rb') as f:\r\n # coords1 = pickle.load(f, encoding='latin1')\r\n #with open('../data/internal/train_coords.pcl', 'rb') as f:\r\n # coords2 = pickle.load(f, encoding='latin1')\r\n #​\r\n #coords = pd.concat([coords1, coords2])\r\n #coords['city_name'] = coords['string'].progress_apply(get_city)\r\n #​\r\n #coords['add_lat'] = (coords['action_lat'].round(4).fillna(0) * 1000).astype(int)\r\n #coords['add_lon'] = (coords['action_lon'].round(4).fillna(0) * 1000).astype(int)\r\n #​\r\n 
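# The geocoded address strings are joined to transactions not on raw floats but on an
# integer key built as round(coord, 4) * 1000 cast to int, so tiny float differences do
# not break the merge (effectively a ~100 m grid).  A hedged, self-contained sketch of
# that join-key trick; the sample coordinates and city names are invented.
import pandas as pd

def coord_key(s):
    # quantise a latitude/longitude Series into an integer join key
    return (s.round(4).fillna(0) * 1000).astype(int)

tx = pd.DataFrame({'tx_id': [1, 2],
                   'lat': [55.791915, 59.930872],
                   'lon': [49.109698, 30.318425]})
geo = pd.DataFrame({'lat': [55.79192, 59.93087],
                    'lon': [49.10970, 30.31843],
                    'city_name': ['Казань', 'Санкт-Петербург']})

for df in (tx, geo):
    df['key_lat'] = coord_key(df['lat'])
    df['key_lon'] = coord_key(df['lon'])

joined = tx.merge(geo[['key_lat', 'key_lon', 'city_name']],
                  on=['key_lat', 'key_lon'], how='left')
print(joined[['tx_id', 'city_name']])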
#(df_all['atm_lat'].round(4).fillna(0) * 1000).astype(int) + (df_all['pos_lat'].round(4).fillna(0) * 1000).astype(int)\r\n #​\r\n #​\r\n #coords.drop_duplicates(inplace=True)\r\n #coords[coords.string.str.contains('азан')].head(20)\r\n ## del coords['string']\r\n #100 % |██████████ | 243861 / 243861[00:00 < 00:00, 710780.37\r\n #it / s]\r\n #action_lat\r\n #action_lon\r\n #string\r\n #city_name\r\n #add_lat\r\n #add_lon\r\n #27\r\n #55.791915\r\n #49.109698\r\n #DoubleTree\r\n #by\r\n #Hilton\r\n #Hotel\r\n #Kazan\r\n #City\r\n #Center, 21, улица\r\n #Чернышевского, Старо - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420111, РФ\r\n #Казань\r\n #55791\r\n #49109\r\n #680\r\n #55.852706\r\n #49.189704\r\n #44, 4 - я\r\n #Станционная\r\n #улица, Малые\r\n #Дербышки, Советский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420133, РФ\r\n #Казань\r\n #55852\r\n #49189\r\n #855\r\n #56.291382\r\n #44.075155\r\n #улица\r\n #Касьянова, Казанское\r\n #шоссе, Верхние\r\n #Печёры, Нижегородский\r\n #район, Нижний\r\n #Новгород, городской\r\n #округ\r\n #Нижний\r\n #Новгород, Нижегородская\r\n #область, Приволжский\r\n #федеральный\r\n #округ, 603163, РФ\r\n #Нижний\r\n #Новгород\r\n #56291\r\n #44075\r\n #889\r\n #55.781112\r\n #49.116999\r\n #Татарская\r\n #усадьба, 8, улица\r\n #Шигабутдина\r\n #Марджани, Старо - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420021, РФ\r\n #Казань\r\n #55781\r\n #49117\r\n #914\r\n #55.794052\r\n #49.114874\r\n #15 / 25, Кремлёвская\r\n #улица, Старо - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420066, РФ\r\n #Казань\r\n #55794\r\n #49114\r\n #1032\r\n #55.916448\r\n #49.148837\r\n #Осиновая\r\n #улица, Щербаково, Авиастроительный\r\n #район, Дачное\r\n #сельское\r\n #поселение, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420099, РФ\r\n #Дачное\r\n #сельское\r\n #поселение\r\n #55916\r\n #49148\r\n #1200\r\n #55.722931\r\n #37.803331\r\n #1\r\n #Б, улица\r\n #Красный\r\n #Казанец, Вешняки, район\r\n #Вешняки, Восточный\r\n #административный\r\n #округ, Москва, ЦФО, 111395, РФ\r\n #Москва\r\n #55722\r\n #37803\r\n #1222\r\n #59.930872\r\n #30.318425\r\n #17 - 19, Казанская\r\n #улица, Апраксин\r\n #двор, округ № 78, Центральный\r\n #район, Санкт - Петербург, Северо - Западный\r\n #федеральный\r\n #округ, 191014, РФ\r\n #Санкт - Петербург\r\n #59930\r\n #30318\r\n #1343\r\n #55.818358\r\n #49.132824\r\n #Шамиль, 3, проспект\r\n #Фатыха\r\n #Амирхана, Дружба, Ново - Савиновский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420133, РФ\r\n #Казань\r\n #55818\r\n #49132\r\n #1501\r\n #59.930528\r\n #30.317599\r\n #26 / 27, Казанская\r\n #улица, Сенной\r\n #округ, Адмиралтейский\r\n #район, Санкт - Петербург, Северо - Западный\r\n #федеральный\r\n #округ, 190000, РФ\r\n #Санкт - Петербург\r\n #59930\r\n #30317\r\n #2021\r\n #55.851614\r\n #49.234232\r\n #2 - я\r\n #Клеверная\r\n #улица, Нагорный, Советский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420075, РФ\r\n #Казань\r\n #55851\r\n #49234\r\n #2329\r\n #56.298490\r\n #44.079637\r\n #Подновье, Казанское\r\n 
#шоссе, Верхние\r\n #Печёры, Нижегородский\r\n #район, Нижний\r\n #Новгород, городской\r\n #округ\r\n #Нижний\r\n #Новгород, Нижегородская\r\n #область, Приволжский\r\n #федеральный\r\n #округ, 603163, РФ\r\n #Нижний\r\n #Новгород\r\n #56298\r\n #44079\r\n #2411\r\n #55.788924\r\n #49.117883\r\n #70\r\n #А, улица\r\n #Баумана, Старо - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420066, РФ\r\n #Казань\r\n #55788\r\n #49117\r\n #2585\r\n #59.932740\r\n #30.322259\r\n #8 - 10, Казанская\r\n #улица, Апраксин\r\n #двор, округ № 78, Центральный\r\n #район, Санкт - Петербург, Северо - Западный\r\n #федеральный\r\n #округ, 191014, РФ\r\n #Санкт - Петербург\r\n #59932\r\n #30322\r\n #3076\r\n #55.787008\r\n #49.161250\r\n #ул.Достоевского, улица\r\n #Абжалилова, Калуга, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420133, РФ\r\n #Казань\r\n #55787\r\n #49161\r\n #3534\r\n #55.780012\r\n #49.211031\r\n #Crazy\r\n #Park, 141, проспект\r\n #Победы, Азино - 1, Советский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420100, РФ\r\n #Казань\r\n #55780\r\n #49211\r\n #3541\r\n #55.780478\r\n #49.213015\r\n #Мега, 141, проспект\r\n #Победы, Азино - 1, Советский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420100, РФ\r\n #Казань\r\n #55780\r\n #49213\r\n #3544\r\n #55.750827\r\n #49.209874\r\n #Проспект\r\n #Победы, проспект\r\n #Победы, Горки - 3, Приволжский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420110, РФ\r\n #Казань\r\n #55750\r\n #49209\r\n #3546\r\n #55.791920\r\n #49.099091\r\n #Макдоналдс, 4, улица\r\n #Саид - Галеева, Ново - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420111, РФ\r\n #Казань\r\n #55791\r\n #49099\r\n #3550\r\n #55.799080\r\n #49.119108\r\n #9, улица\r\n #Бехтерева, Старо - Татарская\r\n #слобода, Вахитовский\r\n #район, Казань, городской\r\n #округ\r\n #Казань, Татарстан, Приволжский\r\n #федеральный\r\n #округ, 420066, РФ\r\n #Казань\r\n #55799\r\n #49119\r\n #\r\n ## Определим типы колонок для экономии памяти\r\n #dtypes = {\r\n # 'transaction_date': str,\r\n # 'atm_address': str,\r\n # 'country': str,\r\n # 'city': str,\r\n # 'amount': np.float32,\r\n # 'currency': np.float32,\r\n # 'mcc': str,\r\n # 'customer_id': str,\r\n # 'pos_address': str,\r\n # 'atm_address': str,\r\n # 'pos_adress_lat': np.float32,\r\n # 'pos_adress_lon': np.float32,\r\n # 'pos_address_lat': np.float32,\r\n # 'pos_address_lon': np.float32,\r\n # 'atm_address_lat': np.float32,\r\n # 'atm_address_lon': np.float32,\r\n # 'home_add_lat': np.float32,\r\n # 'home_add_lon': np.float32,\r\n # 'work_add_lat': np.float32,\r\n # 'work_add_lon': np.float32,\r\n #}\r\n #​\r\n ## для экономии памяти будем загружать только часть атрибутов транзакций\r\n #usecols_train = ['customer_id', 'transaction_date', 'amount', 'country', 'city', 'currency', 'mcc', 'pos_adress_lat',\r\n # 'pos_adress_lon', 'atm_address_lat', 'atm_address_lon', 'home_add_lat', 'home_add_lon', 'work_add_lat',\r\n # 'work_add_lon']\r\n #usecols_test = ['customer_id', 'transaction_date', 'amount', 'country', 'city', 'currency', 'mcc', 'pos_address_lat',\r\n # 'pos_address_lon', 'atm_address_lat', 
'atm_address_lon']\r\n #Читаем\r\n #train_set, test_set, соединяем\r\n #в\r\n #один\r\n #датасет\r\n #\r\n #dtypes = {\r\n # 'transaction_date': str,\r\n # 'atm_address': str,\r\n # 'country': str,\r\n # 'city': str,\r\n # 'amount': np.float32,\r\n # 'currency': np.float32,\r\n # 'mcc': str,\r\n # 'customer_id': str,\r\n # 'pos_address': str,\r\n # 'atm_address': str,\r\n # 'pos_adress_lat': np.float32,\r\n # 'pos_adress_lon': np.float32,\r\n # 'pos_address_lat': np.float32,\r\n # 'pos_address_lon': np.float32,\r\n # 'atm_address_lat': np.float32,\r\n # 'atm_address_lon': np.float32,\r\n # 'home_add_lat': np.float32,\r\n # 'home_add_lon': np.float32,\r\n # 'work_add_lat': np.float32,\r\n # 'work_add_lon': np.float32,\r\n #}\r\n #​\r\n #rnm = {\r\n # 'atm_address_lat': 'atm_lat',\r\n # 'atm_address_lon': 'atm_lon',\r\n # 'pos_adress_lat': 'pos_lat',\r\n # 'pos_adress_lon': 'pos_lon',\r\n # 'pos_address_lat': 'pos_lat',\r\n # 'pos_address_lon': 'pos_lon',\r\n # 'home_add_lat': 'home_lat',\r\n # 'home_add_lon': 'home_lon',\r\n # 'work_add_lat': 'work_lat',\r\n # 'work_add_lon': 'work_lon',\r\n #}\r\n #\r\n #df_train = pd.read_csv('../data/train_set.csv', dtype=dtypes)\r\n #df_test = pd.read_csv('../data/test_set.csv', dtype=dtypes)\r\n #​\r\n #df_train.rename(columns=rnm, inplace=True)\r\n #df_test.rename(columns=rnm, inplace=True)\r\n #\r\n ## соединяем test/train в одном DataFrame\r\n #df_train['is_train'] = np.int32(1)\r\n #df_test['is_train'] = np.int32(0)\r\n #df_all = pd.concat([df_train, df_test])\r\n #​\r\n #del df_train, df_test\r\n #\r\n #df_all['add_lat'] = (df_all['atm_lat'].round(4).fillna(0) * 1000).astype(int) + (\r\n # df_all['pos_lat'].round(4).fillna(0) * 1000).astype(int)\r\n #df_all['add_lon'] = (df_all['atm_lon'].round(4).fillna(0) * 1000).astype(int) + (\r\n # df_all['pos_lon'].round(4).fillna(0) * 1000).astype(int)\r\n #\r\n #% % time\r\n #df_all = pd.merge(df_all, coords, on=['add_lat', 'add_lon'], how='left')\r\n #CPU\r\n #times: user\r\n #513\r\n #ms, sys: 40\r\n #ms, total: 553\r\n #ms\r\n #Wall\r\n #time: 552\r\n #ms\r\n #\r\n #coords.drop_duplicates(subset=['add_lat', 'add_lon'], inplace=True)\r\n #\r\n #df_all.drop(['add_lat', 'add_lon', 'action_lat', 'action_lon', ], axis=1, inplace=True)\r\n #\r\n #df_all.columns\r\n #Index(['amount', 'atm_address', 'atm_lat', 'atm_lon', 'city', 'country',\r\n # 'currency', 'customer_id', 'home_lat', 'home_lon', 'is_train', 'mcc',\r\n # 'pos_address', 'pos_lat', 'pos_lon', 'terminal_id', 'transaction_date',\r\n # 'work_lat', 'work_lon', 'string', 'city_name'],\r\n # dtype='object')\r\n #\r\n #df_all.to_csv('../data/df_all.csv', index=None)\r\n #Замена\r\n #городов\r\n #Чето\r\n #слегка\r\n #ухудшило\r\n #скор\r\n #\r\n #city_replace = [\r\n # ['peter|stpete|spb', 'SANKT-PETERBU'],\r\n # ['moscow|moskva|mosocw|moskow', 'MOSCOW'],\r\n # ['novosib|nvsibr', 'NOVOSIBIRSK'],\r\n # ['kater', 'EKATERINBURG'],\r\n # ['n.*novg', 'NIZHNIY NOV'],\r\n # ['novg', 'VEL.NOVGOROD'],\r\n # ['erep', 'CHEREPOVETS'],\r\n # ['rasnod', 'KRASNODAR'],\r\n # ['rasno[yj]', 'KRASNOYARSK'],\r\n # ['sama', 'SAMARA'],\r\n # ['kazan', 'KAZAN'],\r\n # ['soch[iy]', 'SOCHI'],\r\n # ['r[yj]aza', 'RYAZAN'],\r\n # ['arza', 'ARZAMAS'],\r\n # ['podol.?sk', 'PODOLSK'],\r\n # ['himki', 'KHIMKI'],\r\n # ['rostov', 'ROSTOV'], # will ovveride Rostov-Na-Don later\r\n # ['rostov.*do', 'ROSTOV-NA-DON'],\r\n # ['ufa', 'UFA'],\r\n # ['^orel|ory[oe]l', 'OREL'],\r\n # ['korol', 'KOROLEV'],\r\n # ['vkar', 'SYKTYVKAR'],\r\n # ['rozavo|rzavo', 'PETROZAVODSK'],\r\n # ['c.*abinsk', 
'CHELYABINSK'],\r\n # ['g omsk|^omsk', 'OMSK'],\r\n # ['tomsk', 'TOMSK'],\r\n # ['vorone', 'VORONEZH'],\r\n # ['[yj]arosl', 'YAROSLAVL'],\r\n # ['novoros', 'NOVOROSSIYSK'],\r\n # ['m[yie]t[yi]s', 'MYTISHCHI'],\r\n # ['kal..?ga', 'KALUGA'],\r\n # ['perm', 'PERM'],\r\n # ['volgog|volgrd', 'VOLGOGRAD'],\r\n # ['kirov[^a-z]|kirov$', 'KIROV'],\r\n # ['krasnogo', 'KRASNOGORSK'],\r\n # ['^mo\\W+$|^mo$', 'MO'],\r\n # ['irk', 'IRKUTSK'],\r\n # ['balashi', 'BALASHIKHA'],\r\n # ['kaliningrad', 'KALININGRAD'],\r\n # ['anap', 'ANAPA'],\r\n # ['surgut', 'SURGUT'],\r\n # ['odin[tc]', 'ODINTSOVO'],\r\n # ['kemer', 'KEMEROVO'],\r\n # ['t[yuio].?men', 'TYUMEN'],\r\n # ['sarat', 'SARATOV'],\r\n # ['t[uoy]u?la', 'TULA'],\r\n # ['bert', 'LYUBERTSY'],\r\n # ['kotel', 'KOTELNIKI'],\r\n # ['lipet', 'LIPETSK'],\r\n # ['leznodor', 'ZHELEZNODOROZ'],\r\n # ['domod', 'DOMODEDOVO'],\r\n # ['br[yji][a]nsk|braynsk', 'BRYANSK'],\r\n # ['saransk', 'SARANSK'],\r\n # ['znogor', 'ZHELEZNOGORSK'],\r\n # ['smol', 'SMOLENSK'],\r\n # ['sevolo', 'VSEVOLOZHSK'],\r\n # ['p[uy].*kino', 'PUSHKINO'],\r\n # ['re..?tov', 'REUTOV'],\r\n # ['kursk|koursk', 'KURSK'],\r\n # ['belgorod', 'BELGOROD'],\r\n # ['r[yj]azan', 'RYAZAN'],\r\n # ['solnechno', 'SOLNECHNOGORS'],\r\n # ['utorovsk', 'YALUTOROVSK'],\r\n # ['tver', 'TVER'],\r\n # ['barn', 'BARNAUL'],\r\n # ['to.?l..?.?tt[iy]', 'TOLYATTI'],\r\n # ['i[zjg].?evsk', 'IZHEVSK']\r\n #]\r\n #​\r\n #df_all['city'] = df_all['city'].str.lower()\r\n #df_all['city'].fillna('nan_city', inplace=True)\r\n #for city_reg, city_name in tqdm(city_replace):\r\n # df_all.loc[df_all['city'].str.contains(city_reg), 'city'] = city_name\r\n #Обрабатываем\r\n #дату\r\n #транзакции\r\n #и\r\n #категориальные\r\n #признаки\r\n #\r\n #df_all['currency'] = df_all['currency'].fillna(-1).astype(np.int32)\r\n #df_all['mcc'] = df_all['mcc'].apply(lambda x: int(x.replace(',', ''))).astype(np.int32)\r\n #df_all['city'] = df_all['city'].factorize()[0].astype(np.int32)\r\n #df_all['country'] = df_all['country'].factorize()[0].astype(np.int32)\r\n #Фичи\r\n #для\r\n #даты\r\n #\r\n ## удаляем транзакции без даты\r\n #df_all = df_all[~df_all['transaction_date'].isnull()]\r\n #df_all['transaction_date'] = pd.to_datetime(df_all['transaction_date'], format='%Y-%m-%d')\r\n #\r\n #df_all['month'] = df_all.transaction_date.dt.month\r\n #df_all['day'] = df_all.transaction_date.dt.day\r\n #df_all['dayofyear'] = df_all.transaction_date.dt.dayofyear\r\n #df_all['dayofweek'] = df_all.transaction_date.dt.dayofweek\r\n #\r\n ## праздники\r\n #holidays_df = pd.read_csv('../data/internal/all_holidays.csv', header=None)\r\n #holidays_df[0] = pd.to_datetime(holidays_df[0])\r\n #holidays_df = holidays_df[holidays_df[0].dt.year == 2017]\r\n #holidays = holidays_df[0].dt.dayofyear.values\r\n #df_all['is_weekend'] = (df_all.dayofweek >= 6).astype(np.int8)\r\n #df_all['is_state_holiday'] = df_all['dayofyear'].isin(holidays).astype(np.int8)\r\n #df_all['is_holiday'] = df_all['is_weekend'] | df_all['is_state_holiday']\r\n #Приводим\r\n #адрес\r\n #транзакции\r\n #для\r\n #pos\r\n #и\r\n #atm - транзакций\r\n #к\r\n #единообразному\r\n #виду\r\n #Просто\r\n #объединяем\r\n #в\r\n #одну\r\n #колонку\r\n #и\r\n #добавляем\r\n #фичу - это\r\n #атм\r\n #или\r\n #пос\r\n #\r\n #df_all['is_atm'] = (~df_all['atm_lat'].isnull()).astype(np.int8)\r\n #df_all['is_pos'] = (~df_all['pos_lat'].isnull()).astype(np.int8)\r\n #​\r\n #df_all['add_lat'] = df_all['atm_lat'].fillna(0) + df_all['pos_lat'].fillna(0)\r\n #df_all['add_lon'] = df_all['atm_lon'].fillna(0) + 
df_all['pos_lon'].fillna(0)\r\n #​\r\n #df_all.drop(['atm_lat', 'atm_lon', 'pos_lat', 'pos_lon'], axis=1, inplace=True)\r\n #​\r\n #df_all = df_all[~((df_all['add_lon'] == 0) & (df_all['add_lon'] == 0))]\r\n #\r\n #% % time\r\n ## грязный хак, чтобы не учить КНН на новом юзере каждый раз\r\n #df_all['fake_customer_id'] = (pd.factorize(df_all.customer_id)[0] + 1) * 100\r\n #​\r\n #points = df_all[['fake_customer_id', 'add_lat', 'add_lon']].drop_duplicates().values\r\n #neigh = NearestNeighbors(2, radius=100000)\r\n #​\r\n ## расстояние до уникальных точек\r\n ## neigh.fit(np.unique(points, axis=1))\r\n #neigh.fit(points)\r\n #​\r\n #distances, indices = neigh.kneighbors(df_all[['fake_customer_id', 'add_lat', 'add_lon']].values)\r\n #df_all['distance_to_nearest_point'] = distances[:, 1]\r\n #del df_all['fake_customer_id']\r\n #Кластерные\r\n #признаки\r\n #Сохранены\r\n #в\r\n #df_cluster\r\n #\r\n ## фичи с кластерами из тинькова\r\n #dfs = []\r\n #customers = df_all.customer_id.unique()\r\n #np_values = df_all[['customer_id', 'add_lat', 'add_lon']].values\r\n #​\r\n #for i in tqdm(range(len(customers))):\r\n # customer = customers[i]\r\n # points = np_values[np_values[:, 0] == customer][:, 1:]\r\n # # оцениваем число кластеров\r\n # # avgs = []\r\n # # max_cluster = min(10,len(points))\r\n # # for i in range(2,max_cluster):\r\n # # kmeans = KMeans(n_clusters=i, random_state=2).fit(points)\r\n # # labels = kmeans.labels_\r\n # # silhouette_avg = silhouette_score(points, labels)\r\n # # avgs.append(silhouette_avg)\r\n #\r\n # # if max_cluster == 2:\r\n # # kmeans = KMeans(n_clusters=2, random_state=2).fit(points)\r\n # # labels = kmeans.labels_\r\n # # silhouette_avg = silhouette_score(points, labels)\r\n # # avgs.append(silhouette_avg)\r\n #\r\n # # n_cluster = avgs.index(max(avgs)) + 2 # так как индексы с 0 а кластеры с 2\r\n # # получаем лучший кластер\r\n # if np.unique(points).size == 2:\r\n # dfs.append(np.zeros((len(points), 4)))\r\n # continue\r\n # n_cluster = 2\r\n # kmeans = KMeans(n_clusters=n_cluster, random_state=2).fit(points)\r\n # # kmeans = AgglomerativeClustering(n_clusters=n_cluster,linkage='average').fit(points)\r\n # labels = kmeans.labels_\r\n # centers = kmeans.cluster_centers_\r\n # silhouette_avg = silhouette_score(points, labels)\r\n # # формируем датафрейм\r\n # sample_silhouette_values = silhouette_samples(points, labels)\r\n # # cluster_df = pd.DataFrame(data=np.vstack((labels, sample_silhouette_values)).T,columns=['label','score'])\r\n # # cluster_df.label = cluster_df.label.astype(np.int32)\r\n # # cluster_df['cluster_center_lat'] = cluster_df.apply(lambda row: centers[int(row['label'])][0], axis=1)\r\n # # cluster_df['cluster_center_lon'] = cluster_df.apply(lambda row: centers[int(row['label'])][1], axis=1)\r\n # arr_label_score = np.vstack((labels, sample_silhouette_values)).T\r\n # arr_label_score = np.hstack([arr_label_score, centers[labels]])\r\n # dfs.append(arr_label_score)\r\n #\r\n #df_cluster = pd.DataFrame(np.vstack(dfs), columns=['cl_label', 'cl_score', 'cl_lat', 'cl_lon'])\r\n #df_all.reset_index(inplace=True, drop=True)\r\n #df_all = pd.concat([df_all, df_cluster], axis=1)\r\n #\r\n #df_all.to_csv('../data/df_all_1.csv', index=None)\r\n #\r\n #df_all = pd.read_csv('../data/df_all_1.csv')\r\n #df_all[['customer_id', 'add_lat', 'add_lon', 'cl_label',\r\n # 'cl_score', 'cl_lat', 'cl_lon']].to_csv('../data/df_cluster.csv', index=None)\r\n #df_all.head()\r\n #загружаем\r\n #кластерные\r\n #признаки\r\n #\r\n #df_cluster = 
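# The commented loop above builds the cl_label / cl_score / cl_lat / cl_lon features by
# clustering each customer's transaction coordinates with KMeans(n_clusters=2) and
# attaching, for every point, its cluster label, its silhouette value and the centre of
# its own cluster (customers whose points are all identical are skipped and get zeros).
# A hedged sketch for a single customer's points, with synthetic coordinates:
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples

rng = np.random.default_rng(0)
pts = np.vstack([
    rng.normal([59.86, 30.25], 0.01, size=(8, 2)),   # e.g. a "home" neighbourhood
    rng.normal([59.93, 30.32], 0.01, size=(6, 2)),   # e.g. a "work" neighbourhood
])

km = KMeans(n_clusters=2, random_state=2, n_init=10).fit(pts)
labels = km.labels_
features = np.hstack([
    labels.reshape(-1, 1),                           # cl_label
    silhouette_samples(pts, labels).reshape(-1, 1),  # cl_score
    km.cluster_centers_[labels],                     # cl_lat, cl_lon of the point's cluster
])
print(features[:3])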
pd.read_csv('../data/df_cluster.csv')\r\n #df_cluster.reset_index(drop=True, inplace=True)\r\n #df_cluster.head()\r\n #\r\n #df_all.reset_index(drop=True, inplace=True)\r\n #\r\n #df_all = pd.concat([df_all, df_cluster.iloc[:, 3:]], axis=1)\r\n #Генерируем\r\n #признаки\r\n #is_home, is_work\r\n #TODO: удалить\r\n #чуваков\r\n #у\r\n #которых\r\n #несколько\r\n #домов\r\n #\r\n #lat = df_all['home_lat'] - df_all['add_lat']\r\n #lon = df_all['home_lon'] - df_all['add_lon']\r\n #​\r\n #df_all['is_home'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n #df_all['has_home'] = (~df_all['home_lon'].isnull()).astype(np.int8)\r\n #​\r\n #lat = df_all['work_lat'] - df_all['add_lat']\r\n #lon = df_all['work_lon'] - df_all['add_lon']\r\n #df_all['is_work'] = (np.sqrt((lat ** 2) + (lon ** 2)) <= 0.02).astype(np.int8)\r\n #df_all['has_work'] = (~df_all['work_lon'].isnull()).astype(np.int8)\r\n #​\r\n ## df_all.drop(['work_lat','work_lon','home_lat','home_lon'], axis=1, inplace=True)\r\n #Генерируем\r\n #категориальный\r\n #признак\r\n #для\r\n #адреса\r\n #\r\n #df_all['address'] = df_all['add_lat'].apply(lambda x: \"%.02f\" % x) + ';' + df_all['add_lon'].apply(\r\n # lambda x: \"%.02f\" % x)\r\n #df_all['address'] = df_all['address'].factorize()[0].astype(np.int32)\r\n #Генерируем\r\n #абонентские\r\n #фичи\r\n #отвечающие\r\n #за\r\n #соотношения\r\n #между\r\n #точками\r\n #\r\n #df_all = df_all.merge(df_all.groupby('customer_id')['amount'].count().reset_index(name='cid_trans_count'), how='left')\r\n #df_all['cid_trans_count'] = df_all['cid_trans_count'].astype(np.int32)\r\n #​\r\n #df_all = df_all.merge(df_all.groupby('customer_id')['amount'].agg('sum').reset_index(name='cid_trans_sum'), how='left')\r\n #df_all['cid_trans_sum'] = df_all['cid_trans_sum'].astype(np.float32)\r\n #\r\n #\r\n #def add_count_sum_ratios(df_all, col):\r\n # col_count = 'cid_{}_trans_count'.format(col)\r\n # col_sum = 'cid_{}_trans_sum'.format(col)\r\n # df_ = df_all.groupby(['customer_id', col])['amount'].count().reset_index(name=col_count)\r\n # df_all = df_all.merge(df_, how='left')\r\n # df_all[col_count] = df_all[col_count].astype(np.int32)\r\n # df_all['ratio_{}_count'.format(col)] = df_all[col_count] / df_all['cid_trans_count']\r\n #\r\n # df_ = df_all.groupby(['customer_id', col])['amount'].agg('sum').reset_index(name=col_sum)\r\n # df_all = df_all.merge(df_, how='left')\r\n # df_all[col_sum] = df_all[col_sum].astype(np.float32)\r\n # df_all['ratio_{}_sum'.format(col)] = df_all[col_sum] / df_all['cid_trans_sum']\r\n # return df_all\r\n #\r\n #\r\n #df_all = add_count_sum_ratios(df_all, 'address')\r\n #df_all = add_count_sum_ratios(df_all, 'terminal_id')\r\n #df_all = add_count_sum_ratios(df_all, 'mcc')\r\n #df_all = add_count_sum_ratios(df_all, 'is_holiday')\r\n #Мои\r\n #фичи\r\n #\r\n ## добавим признаки после групбая\r\n #df_gb = df_all[['customer_id', 'amount', 'add_lat', 'add_lon']].groupby('customer_id')\r\n #coord_stat_df = df_gb.agg(['mean', 'max', 'min'])\r\n #coord_stat_df['transactions_per_user'] = df_gb.agg('size')\r\n #coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]\r\n #coord_stat_df.reset_index(inplace=True)\r\n #df_all = pd.merge(df_all, coord_stat_df, on='customer_id', how='left')\r\n #\r\n #cols = ['add_lat', 'add_lon']\r\n #types = ['min', 'max', 'mean']\r\n #for c in cols:\r\n # for t in types:\r\n # df_all['{}_diff_{}'.format(c, t)] = np.abs(df_all[c] - df_all['{}_{}'.format(c, t)])\r\n #\r\n ## разности\r\n #df_all['lat_diff_cluster_lat'] = 
np.abs(df_all['add_lat'] - df_all['cl_lat'])\r\n #df_all['lon_diff_cluster_lon'] = np.abs(df_all['add_lon'] - df_all['cl_lon'])\r\n #Фичи\r\n #mcc\r\n #\r\n ## категории\r\n #df_all['mcc_str'] = df_all['mcc'].astype(str).str.rjust(4, '0')\r\n #df_mcc = pd.read_csv('../data/internal/mcc.csv')\r\n #df_mcc = df_mcc.iloc[1:, :3]\r\n #df_mcc.columns = ['mcc_str', 'mcc_cat1', 'mcc_cat2']\r\n #df_mcc.drop_duplicates(subset=['mcc_str'], inplace=True)\r\n #df_mcc['mcc_cat1'] = pd.factorize(df_mcc['mcc_cat1'])[0]\r\n #df_mcc['mcc_cat2'] = pd.factorize(df_mcc['mcc_cat2'])[0]\r\n #df_mcc.fillna('none', inplace=True)\r\n #df_all = pd.merge(df_all, df_mcc, on='mcc_str', how='left')\r\n #del df_all['mcc_str']\r\n #df_mcc.head()\r\n #\r\n ### WTF???\r\n #df_all = add_count_sum_ratios(df_all, 'mcc_cat1')\r\n #df_all = add_count_sum_ratios(df_all, 'mcc_cat2')\r\n #\r\n ## частота mcc\r\n #df_mcc = df_all['mcc'].value_counts(normalize=True).reset_index()\r\n #df_mcc.columns = ['mcc', 'mcc_freq']\r\n #df_all = pd.merge(df_all, df_mcc, on='mcc', how='left')\r\n #\r\n #df_all = pd.concat([df_all, pd.get_dummies(df_all['mcc'], prefix='mcc')], axis=1)\r\n #del df_all['mcc']\r\n #\r\n ## df_all = pd.concat([df_all, pd.get_dummies(df_all['mcc_cat1'], prefix='mcc_cat1')], axis=1)\r\n ## del df_all['mcc_cat1']\r\n #​\r\n #df_all = pd.concat([df_all, pd.get_dummies(df_all['mcc_cat2'], prefix='mcc_cat2')], axis=1)\r\n #del df_all['mcc_cat2']\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n #mcc_cols = [c for c in df_all.columns if 'mcc' in c and 'cat' not in c]\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['max', 'mean'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. Это поможет понять его привычки\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_cat1' in c]\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['max', 'mean'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #\r\n ## сделаем групбай какие вообще есть mcc у посетителя. 
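# The MCC handling above one-hot encodes the merchant category code and then aggregates
# those dummy columns per customer (max = "has the customer ever used this category",
# mean = share of their transactions in it), merged back as a per-customer "habit"
# profile.  A hedged toy sketch with invented MCC codes:
import pandas as pd

toy = pd.DataFrame({'customer_id': ['a', 'a', 'a', 'b'],
                    'mcc': [5411, 5411, 6011, 5812]})

dummies = pd.get_dummies(toy['mcc'], prefix='mcc')
toy = pd.concat([toy.drop(columns='mcc'), dummies], axis=1)

habits = toy.groupby('customer_id').agg(['max', 'mean'])
habits.columns = ['_'.join(c) for c in habits.columns]   # mcc_5411_max, mcc_5411_mean, ...
toy = toy.merge(habits.reset_index(), on='customer_id', how='left')
print(toy.filter(like='mcc_5411').head())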
Это поможет понять его привычки\r\n #mcc_cols = [c for c in df_all.columns if 'mcc_cat2' in c]\r\n #df_mcc = df_all.groupby('customer_id')[mcc_cols].agg(['max', 'mean'])\r\n #df_mcc.columns = ['_'.join(col).strip() for col in df_mcc.columns.values]\r\n #df_mcc.reset_index(inplace=True)\r\n #df_mcc.head()\r\n #df_all = pd.merge(df_all, df_mcc, on='customer_id', how='left')\r\n #LightGBM\r\n #\r\n #df_all.shape\r\n #\r\n #df_all = df_all.loc[:, ~df_all.columns.duplicated()]\r\n #\r\n #from sklearn.model_selection import train_test_split\r\n #​\r\n #ys = ['is_home', 'is_work']\r\n #drop_cols = ['atm_address', 'customer_id', 'pos_address', 'terminal_id', 'transaction_date',\r\n # 'is_home', 'has_home', 'is_work', 'has_work', 'is_train']\r\n #drop_cols += ['work_lat', 'work_lon', 'home_lat', 'home_lon']\r\n #​\r\n #drop_cols += ['pred:is_home', 'pred:is_work']\r\n #y_cols = ['is_home', 'is_work']\r\n #usecols = df_all.drop(drop_cols, 1, errors='ignore').columns\r\n #\r\n #params = {\r\n # 'objective': 'binary',\r\n # 'num_leaves': 63,\r\n # 'learning_rate': 0.01,\r\n # 'metric': 'binary_logloss',\r\n # 'feature_fraction': 0.8,\r\n # 'bagging_fraction': 0.8,\r\n # 'bagging_freq': 1,\r\n # 'num_threads': 12,\r\n # 'verbose': 0,\r\n #}\r\n #​\r\n #model = {}\r\n #\r\n #y_col = 'is_home'\r\n #​\r\n #cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n #cust_train = cust_train[cust_train > 0].index\r\n #​\r\n #cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n #​\r\n #df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n #df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n #​\r\n #lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n #lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n #​\r\n #gbm_h = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=300)\r\n #​\r\n #model[y_col] = gbm_h\r\n #\r\n #y_col = 'is_work'\r\n #​\r\n #cust_train = df_all[df_all['is_train'] == 1].groupby('customer_id')[y_col.replace('is_', 'has_')].max()\r\n #cust_train = cust_train[cust_train > 0].index\r\n #​\r\n #cust_train, cust_valid = train_test_split(cust_train, test_size=0.2, shuffle=True, random_state=111)\r\n #​\r\n #​\r\n #​\r\n #df_train = pd.DataFrame(cust_train, columns=['customer_id']).merge(df_all, how='left')\r\n #df_valid = pd.DataFrame(cust_valid, columns=['customer_id']).merge(df_all, how='left')\r\n #​\r\n #lgb_train = lgb.Dataset(df_train[usecols], df_train[y_col])\r\n #lgb_valid = lgb.Dataset(df_valid[usecols], df_valid[y_col])\r\n #​\r\n #gbm_w = lgb.train(params,\r\n # lgb_train,\r\n # valid_sets=[lgb_valid],\r\n # num_boost_round=2000,\r\n # verbose_eval=30,\r\n # early_stopping_rounds=300)\r\n #​\r\n #model[y_col] = gbm_w\r\n #\r\n #lgb.plot_importance(gbm_h, max_num_features=15)\r\n #\r\n #\r\n #def _best(x):\r\n # ret = None\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # if pred in x:\r\n # i = (x[pred].idxmax())\r\n # cols = [pred, 'add_lat', 'add_lon']\r\n # if col in x:\r\n # cols.append(col)\r\n # tmp = x.loc[i, cols]\r\n # tmp.rename({\r\n # 'add_lat': '%s:add_lat' % col,\r\n # 'add_lon': '%s:add_lon' % col,\r\n # }, inplace=True)\r\n # if ret is None:\r\n # ret = tmp\r\n # else:\r\n # ret = pd.concat([ret, tmp])\r\n # return ret\r\n #\r\n #​\r\n #​\r\n #\r\n #def predict_proba(dt, 
ys=['is_home', 'is_work']):\r\n # for col in ys:\r\n # pred = ('pred:%s' % col)\r\n # dt[pred] = model[col].predict(dt[usecols])\r\n # return dt.groupby('customer_id').apply(_best).reset_index()\r\n #\r\n #​\r\n #\r\n #def score(dt, ys=['is_home', 'is_work'], return_df=False):\r\n # dt_ret = predict_proba(dt, ys)\r\n # if return_df:\r\n # return dt_ret\r\n # mean = 0.0\r\n # for col in ys:\r\n # col_mean = dt_ret[col].mean()\r\n # mean += col_mean\r\n # if len(ys) == 2:\r\n # mean = mean / len(ys)\r\n # return mean\r\n #\r\n #\r\n #print(\"Train accuracy:\", score(df_train, ys=['is_home']))\r\n #print(\"Test accuracy:\", score(df_valid, ys=['is_home']))\r\n #​\r\n #print(\"Train accuracy:\", score(df_train, ys=['is_work']))\r\n #print(\"Test accuracy:\", score(df_valid, ys=['is_work']))\r\n #Анализ\r\n #False - Negative\r\n #\r\n ## сколько вообще людей имеют хорошую точку\r\n #df_all[(df_all.is_train == 1)].groupby('customer_id')['is_work'].agg('max').mean()\r\n #\r\n #df_pred = score(df_valid, ys=['is_home'], return_df=True)\r\n #\r\n #df_pred.sample(5)\r\n #\r\n #cid = 'bf66305d0ec05abb6e6a6358acb8c2a1'\r\n #cid = df_pred[df_pred.is_home == 0].sample(1)['customer_id'].values[0]\r\n #​\r\n #df_an = df_all[df_all.customer_id == cid]\r\n #center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values\r\n #center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values\r\n #​\r\n #​\r\n #predicted_home = df_pred[df_pred.customer_id == cid][['is_home:add_lat', 'is_home:add_lon']].drop_duplicates().values\r\n #predicted_work = df_pred[df_pred.customer_id == cid][['is_work:add_lat', 'is_work:add_lon']].drop_duplicates().values\r\n #​\r\n #points_pos = df_an[df_an.is_pos == 1][['add_lat', 'add_lon']].dropna().values\r\n #points_atm = df_an[df_an.is_pos == 0][['add_lat', 'add_lon']].dropna().values\r\n #print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)\r\n #​\r\n ## синие - покупки\r\n ## красные - банкоматы\r\n #gmap = gmaps.Map()\r\n #if len(points_pos) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',\r\n # fill_color=\"blue\", stroke_color=\"blue\", scale=3))\r\n #if len(points_atm) > 0:\r\n # gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',\r\n # fill_color=\"red\", stroke_color=\"red\", scale=3))\r\n #​\r\n #if not np.isnan(center_home)[0][0]:\r\n # gmap.add_layer(gmaps.marker_layer(center_home, label='home'))\r\n #if not np.isnan(center_work)[0][0]:\r\n # gmap.add_layer(gmaps.marker_layer(center_work, label='work'))\r\n #​\r\n #gmap.add_layer(gmaps.marker_layer(predicted_home, label='predicted_home'))\r\n #gmap.add_layer(gmaps.marker_layer(predicted_work, label='predicted_work'))\r\n #\r\n #gmap\r\n #\r\n #df_an\r\n #Predict\r\n #\r\n #cust_test = df_all[df_all['is_train'] == 0]['customer_id'].unique()\r\n #df_test = pd.DataFrame(cust_test, columns=['customer_id']).merge(df_all, how='left')\r\n #df_test = predict_proba(df_test)\r\n #df_test.rename(columns={\r\n # 'customer_id': '_ID_',\r\n # 'is_home:add_lat': '_HOME_LAT_',\r\n # 'is_home:add_lon': '_HOME_LON_',\r\n # 'is_work:add_lat': '_WORK_LAT_',\r\n # 'is_work:add_lon': '_WORK_LON_'}, inplace=True)\r\n #df_test = df_test[['_ID_', '_WORK_LAT_', '_WORK_LON_', '_HOME_LAT_', '_HOME_LON_']]\r\n #​\r\n #df_test.head()\r\n #Формируем\r\n #submission - файл\r\n #\r\n ## Заполняем пропуски\r\n #df_ = pd.read_csv('../data/test_set.csv', dtype=dtypes, usecols=['customer_id'])\r\n #submission = pd.DataFrame(df_['customer_id'].unique(), columns=['_ID_'])\r\n 
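# The submission block above makes sure every test customer gets a row even when the
# model produced no prediction for them: start from the full list of test customer ids,
# left-merge the predictions and fill the gaps with 0 before writing the CSV.  A hedged
# toy sketch (the frames are invented and the output path is a placeholder, not the
# notebook's ../submissions/ file):
import pandas as pd

all_ids = pd.DataFrame({'_ID_': ['a', 'b', 'c']})           # every test customer
preds = pd.DataFrame({'_ID_': ['a', 'c'],                   # model output, 'b' missing
                      '_HOME_LAT_': [59.86, 55.75], '_HOME_LON_': [30.25, 37.61],
                      '_WORK_LAT_': [59.93, 55.76], '_WORK_LON_': [30.32, 37.62]})

submission = all_ids.merge(preds, how='left').fillna(0)
submission.to_csv('submission.csv', index=False)
print(submission)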
#​\r\n #submission = submission.merge(df_test, how='left').fillna(0)\r\n ## Пишем файл submission\r\n #submission.to_csv('../submissions/base_3_529_333.csv', index=None)\r\n #\r\n #\r\n return src\r\n\r\ndef update_last_partition(dst, from_dt, to_dt):\r\n prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)\r\n res = spark.table(dst[\"d_train\"]).checkpoint()\r\n res = res.where(res.day == to_dt)\r\n res = res.withColumn(\"period_to_dt\", f.lit(prev_day)).withColumn(\"day\", f.lit(prev_day.strftime('%Y-%m-%d')))\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_train\"], overwrite=True)\r\n\r\n\r\ndef calc_03(src, dst, from_dt, to_dt):\r\n res = algo(src, from_dt, to_dt)\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_subway_entrance\"], overwrite=True)\r\n\r\n\r\ndef sandbox_src():\r\n return {\r\n \"psg_train\": spark.table(\"sandbox_mck.train\"),\r\n \"psg_test\": spark.table(\"sandbox_mck.test\"),\r\n \"psg_dev\": spark.table(\"sandbox_mck.dev\")\r\n }\r\n\r\n\r\ndef sandbox_dst():\r\n return {\r\n \"psg_result\": \"sandbox_mck.psg_result\"\r\n }\r\n\r\n\r\ndef prod_src():\r\n return {\r\n \"psg_train\": spark.table(\"prod_data.psg_train\"),\r\n \"psg_test\": spark.table(\"prod_data.psg_test\"),\r\n \"psg_dev\": spark.table(\"prod_data.psg_dev\")\r\n }\r\n\r\n\r\ndef prod_dst():\r\n return {\r\n \"psg_result\": \"prod_data.psg_result\"\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n spark = SparkSession.builder.appName(\"calc_03_task\").enableHiveSupport().getOrCreate()\r\n spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\r\n hivecontext = HiveContext(spark.sparkContext)\r\n hivecontext.setConf(\"hive.exec.dynamic.partition\", \"true\")\r\n hivecontext.setConf(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\r\n spark.sparkContext.setCheckpointDir(\"hdfs:///user/airflow/psg/calc_03_task\")\r\n\r\n opts = {\r\n 'from_dt': sys.argv[1],\r\n \"to_dt\": \"9999-12-31\"\r\n }\r\n\r\n update_last_partition(prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n calc_03(prod_src(), prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n\r\n", "id": "6490478", "language": "Python", "matching_score": 8.638575553894043, "max_stars_count": 0, "path": "Raif/pyspark/calc_03.py" }, { "content": "import sys\r\nfrom datetime import timedelta, datetime\r\n\r\n\r\nfrom pyspark import HiveContext\r\nfrom pyspark.sql import functions as f, SparkSession\r\n\r\n\r\ndef algo(src, from_dt, to_dt):\r\n res = steps(src, from_dt, to_dt)\r\n return res\r\n\r\n\r\ndef steps(src, from_dt, to_dt):\r\n\r\n import sys\r\n MODULES_PATH = '../code/'\r\n if MODULES_PATH not in sys.path:\r\n sys.path.append(MODULES_PATH)\r\n import mfuncs\r\n\r\n import pandas as pd\r\n import numpy as np\r\n from tqdm import tqdm\r\n tqdm.pandas()\r\n pd.options.display.max_columns = 1000\r\n\r\n import lightgbm as lgb\r\n\r\n from sklearn.neighbors import NearestNeighbors\r\n\r\n # start of step 01\r\n df_train = pd.read_csv('../data/train_set.csv')\r\n df_test = pd.read_csv('../data/test_set.csv')\r\n rnm = {\r\n 'atm_address_lat': 'atm_lat',\r\n 'atm_address_lon': 'atm_lon',\r\n 'pos_adress_lat': 'pos_lat',\r\n 'pos_adress_lon': 'pos_lon',\r\n 'home_add_lat': 'home_lat',\r\n 'home_add_lon': 'home_lon',\r\n 'work_add_lat': 'work_lat',\r\n 'work_add_lon': 'work_lon',\r\n }\r\n df_train.rename(columns=rnm, inplace=True)\r\n df_test.rename(columns=rnm, inplace=True)\r\n\r\n\r\n # start of step 02\r\n df_train['target_work'] = df_train.progress_apply(mfuncs.add_poswork_target, 
axis=1)\r\n df_train['target_home'] = df_train.progress_apply(mfuncs.add_poshome_target, axis=1)\r\n\r\n\r\n # start of step 03\r\n df_train.to_csv('../data/train_1.csv', index=None)\r\n\r\n # start of step 04\r\n df_train.info()\r\n\r\n # start of step 05\r\n df_train.head()\r\n\r\n # start of step 06\r\n df_train.country.value_counts(normalize=True)[:10]\r\n print(df_train.shape, df_test.shape)\r\n df_train = df_train[df_train.country.isin(['RUS', 'RU'])]\r\n df_test = df_test[df_test.country.isin(['RUS', 'RU'])]\r\n print(df_train.shape, df_test.shape)\r\n del df_train['country'], df_test['country']\r\n\r\n # start of step 07\r\n print(df_train.shape, df_train.currency.value_counts(normalize=True))\r\n df_train = df_train[df_train.currency == 643]\r\n print(df_train.shape)\r\n del df_train['currency']\r\n\r\n # start of step 08\r\n print(df_train.shape, df_train.currency.value_counts(normalize=True))\r\n df_train = df_train[df_train.currency == 643]\r\n print(df_train.shape)\r\n del df_train['currency']\r\n\r\n # start of step 09\r\n print(df_train.shape)\r\n gb = df_train.groupby('customer_id')['work_lat'].agg('nunique')\r\n cid_incorrect = gb[gb == 2].index\r\n df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]\r\n print(df_train.shape)\r\n gb = df_train.groupby('customer_id')['home_lat'].agg('nunique')\r\n cid_incorrect = gb[gb == 2].index\r\n df_train = df_train[~df_train.customer_id.isin(cid_incorrect.values)]\r\n print(df_train.shape)\r\n\r\n # start of step 10\r\n print(df_train.shape)\r\n df_train = df_train[df_train[['atm_lat', 'pos_lat']].isnull().sum(axis=1) == 1]\r\n print(df_train.shape)\r\n df_train['type'] = 'atm'\r\n df_train.loc[~df_train['pos_lat'].isnull(), 'type'] = 'pos'\r\n df_train['type'].value_counts()\r\n\r\n # start of step 11\r\n cid = df_train.sample(1)['customer_id'].values[0]\r\n df_an = df_train[df_train.customer_id == cid]\r\n df_point_dup = df_an.groupby(['pos_lat', 'pos_lon']).agg('size').reset_index()\r\n df_point_dup.columns = ['pos_lat', 'pos_lon', 'pos_customer_freq']\r\n df_an = pd.merge(df_an, df_point_dup, on=['pos_lat', 'pos_lon'], how='left')\r\n\r\n df_an.head()\r\n\r\n # start of step 12\r\n df_train.head()\r\n df_train[df_train.type == 'pos'].drop_duplicates(['pos_lat',\r\n 'pos_lon']).groupby(['terminal_id']).agg('size').value_counts()\r\n df_train[df_train.type == 'atm'].drop_duplicates(['atm_lat',\r\n 'atm_lon']).groupby(['terminal_id']).agg('size').value_counts()\r\n df_train[df_train.terminal_id == '1e15d02895068c3a864432f0c06f5ece']['atm_address'].unique()\r\n df_train[df_train.type == 'atm'].drop_duplicates(['atm_lat',\r\n 'atm_lon']).groupby(['terminal_id']).agg('size')\r\n\r\n import gmaps\r\n API_KEY = '<KEY>'\r\n gmaps.configure(api_key=API_KEY) # Your Google API key\r\n\r\n cid = '0dc0137d280a2a82d2dc89282450ff1b'\r\n cid = df_train.sample(1)['customer_id'].values[0]\r\n df_an = df_train[df_train.customer_id == cid]\r\n center_home = df_an[['home_lat', 'home_lon']].drop_duplicates().values\r\n center_work = df_an[['work_lat', 'work_lon']].drop_duplicates().values\r\n points_pos = df_an[['pos_lat', 'pos_lon']].dropna().values\r\n points_atm = df_an[['atm_lat', 'atm_lon']].dropna().values\r\n print(center_home.shape, center_work.shape, points_pos.shape, points_atm.shape)\r\n\r\n gmap = gmaps.Map()\r\n if len(points_pos) > 0:\r\n gmap.add_layer(gmaps.symbol_layer(points_pos, hover_text='pos',\r\n fill_color=\"blue\", stroke_color=\"blue\", scale=3))\r\n if len(points_atm) > 0:\r\n 
gmap.add_layer(gmaps.symbol_layer(points_atm, hover_text='atm',\r\n fill_color=\"red\", stroke_color=\"red\", scale=3))\r\n\r\n if not np.isnan(center_home)[0][0]:\r\n gmap.add_layer(gmaps.marker_layer(center_home, label='home'))\r\n if not np.isnan(center_work)[0][0]:\r\n gmap.add_layer(gmaps.marker_layer(center_work, label='work'))\r\n\r\n gmap\r\n\r\n center_home = df_train[['home_lat', 'home_lon']].dropna().values\r\n center_work = df_train[['work_lat', 'work_lon']].dropna().values\r\n\r\n gmap = gmaps.Map()\r\n gmap.add_layer(gmaps.symbol_layer(center_home, fill_color=\"red\", stroke_color=\"red\"))\r\n gmap\r\n\r\n np.isnan(center_home)\r\n\r\n df_train.groupby(['customer_id']).agg('size').sort_values().value_counts()\r\n\r\n df_test.customer_id.drop_duplicates().isin(df_train.customer_id.unique()).mean()\r\n\r\n df_train['duplicated'] = df_train.duplicated()\r\n\r\n df_pos = df_train[df_train['type'] == 'pos']\r\n # target == pos in\r\n df_pos['target_work'] = df_pos.progress_apply(mfuncs.add_poswork_target, axis=1)\r\n df_pos['target_home'] = df_pos.progress_apply(mfuncs.add_poshome_target, axis=1)\r\n\r\n df_pos['target_work'].mean(), df_pos['target_home'].mean()\r\n\r\n df_pos.to_csv('../data/df_pos.csv', index=None)\r\n\r\n df_pos = pd.read_csv('../data/df_pos.csv')\r\n\r\n df_point_dup = df_pos.groupby(['customer_id', 'pos_lat', 'pos_lon']).agg('size').reset_index()\r\n df_point_dup.columns = ['customer_id', 'pos_lat', 'pos_lon', 'pos_customer_freq']\r\n df_pos = pd.merge(df_pos, df_point_dup, on=['customer_id', 'pos_lat', 'pos_lon'], how='left')\r\n\r\n dfs = []\r\n for cid in tqdm(df_pos.customer_id.unique()):\r\n df_an = df_pos[df_pos.customer_id == cid]\r\n df_an = mfuncs.add_dist_to_neighbours(df_an)\r\n dfs.append(df_an)\r\n\r\n df_pos['transaction_date'] = pd.to_datetime(df_pos['transaction_date'], format='%Y-%m-%d')\r\n df_pos['month'] = df_pos.transaction_date.dt.month\r\n df_pos['day'] = df_pos.transaction_date.dt.day\r\n df_pos['dayofyear'] = df_pos.transaction_date.dt.dayofyear\r\n df_pos['dayofweek'] = df_pos.transaction_date.dt.dayofweek\r\n df_pos.transaction_date.dtype\r\n\r\n df_gb = df_pos.groupby('customer_id')\r\n coord_stat_df = df_gb[['amount', 'pos_lat', 'pos_lon']].agg(['mean', 'max', 'min'])\r\n coord_stat_df['transactions_per_user'] = df_gb.agg('size')\r\n coord_stat_df.columns = ['_'.join(col).strip() for col in coord_stat_df.columns.values]\r\n coord_stat_df.reset_index(inplace=True)\r\n df_pos = pd.merge(df_pos, coord_stat_df, on='customer_id', how='left')\r\n\r\n cols = ['pos_lat', 'pos_lon']\r\n types = ['min', 'max', 'mean']\r\n for c in cols:\r\n for t in types:\r\n df_pos['{}_diff_{}'.format(c, t)] = np.abs(df_pos[c] - df_pos['{}_{}'.format(c, t)])\r\n\r\n df_pos = pd.concat([df_pos, pd.get_dummies(df_pos['mcc'], prefix='mcc')], axis=1)\r\n del df_pos['mcc']\r\n\r\n df_pos.head()\r\n\r\n drop_cols = ['customer_id', 'terminal_id', 'target_home', 'target_work', 'atm_address',\r\n 'pos_address', 'work_add_lat', 'work_add_lon', 'home_add_lat', 'home_add_lon',\r\n 'city', 'type', 'transaction_date']\r\n drop_cols += ['atm_address', 'atm_address_lat', 'atm_address_lon']\r\n df_pos.drop(drop_cols, 1, errors='ignore').head()\r\n # drop_cols = ['pos_address', 'pos_address_lat', 'pos_address_lon']\r\n\r\n from sklearn.model_selection import train_test_split, StratifiedKFold, KFold\r\n df_pos_id = df_pos.customer_id.drop_duplicates().reset_index(drop=True)\r\n skf_id = list(KFold(n_splits=5, shuffle=True, random_state=15).split(df_pos_id))\r\n skf = 
[]\r\n for train_ind, test_ind in skf_id:\r\n train_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[train_ind].values)].index.values\r\n test_ind_ = df_pos[df_pos.customer_id.isin(df_pos_id.loc[test_ind].values)].index.values\r\n skf.append([train_ind_, test_ind_])\r\n\r\n df_pos['target_work'].mean()\r\n\r\n df_pos.head()\r\n\r\n cid = '442fd7e3af4d8c3acd7807aa65bb5e85'\r\n df_an = df_pos[df_pos.customer_id == cid]\r\n\r\n df_an = mfuncs.add_dist_to_neighbours(df_an)\r\n\r\n df_pos.customer_id.unique\r\n\r\n if np.array([1]).size:\r\n print(1)\r\n\r\n lgb_train = lgb.Dataset(df_pos.drop(drop_cols, 1, errors='ignore'), df_pos['target_home'])\r\n\r\n params = {\r\n 'objective': 'binary',\r\n 'num_leaves': 511,\r\n 'learning_rate': 0.05,\r\n # 'metric' : 'error',\r\n 'feature_fraction': 0.8,\r\n 'bagging_fraction': 0.8,\r\n 'bagging_freq': 1,\r\n 'num_threads': 12,\r\n 'verbose': 0,\r\n }\r\n\r\n gbm = lgb.cv(params,\r\n lgb_train,\r\n num_boost_round=2000,\r\n folds=skf,\r\n verbose_eval=10,\r\n early_stopping_rounds=500)\r\n\r\n df_pos.loc[i2].shape\r\n\r\n i1, i2 = skf[0]\r\n df_pos[df_pos.loc[i1]]['customer_id'].unique\r\n\r\n df_pos.loc[i1]\r\n\r\n df_pos.dtypes\r\n\r\n res = df_pos\r\n return res\r\n\r\ndef update_last_partition(dst, from_dt, to_dt):\r\n prev_day = datetime.strptime(from_dt, '%Y-%m-%d') - timedelta(days=1)\r\n res = spark.table(dst[\"d_train\"]).checkpoint()\r\n res = res.where(res.day == to_dt)\r\n res = res.withColumn(\"period_to_dt\", f.lit(prev_day)).withColumn(\"day\", f.lit(prev_day.strftime('%Y-%m-%d')))\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_train\"], overwrite=True)\r\n\r\n\r\ndef calc_06(src, dst, from_dt, to_dt):\r\n res = algo(src, from_dt, to_dt)\r\n res.coalesce(8).write.format(\"orc\").insertInto(dst[\"d_subway_entrance\"], overwrite=True)\r\n\r\n\r\ndef sandbox_src():\r\n return {\r\n \"psg_train\": spark.table(\"sandbox_mck.train\"),\r\n \"psg_test\": spark.table(\"sandbox_mck.test\"),\r\n \"psg_dev\": spark.table(\"sandbox_mck.dev\")\r\n }\r\n\r\n\r\ndef sandbox_dst():\r\n return {\r\n \"psg_result\": \"sandbox_mck.psg_result\"\r\n }\r\n\r\n\r\ndef prod_src():\r\n return {\r\n \"psg_train\": spark.table(\"prod_data.psg_train\"),\r\n \"psg_test\": spark.table(\"prod_data.psg_test\"),\r\n \"psg_dev\": spark.table(\"prod_data.psg_dev\")\r\n }\r\n\r\n\r\ndef prod_dst():\r\n return {\r\n \"psg_result\": \"prod_data.psg_result\"\r\n }\r\n\r\n\r\nif __name__ == '__main__':\r\n spark = SparkSession.builder.appName(\"calc_06_task\").enableHiveSupport().getOrCreate()\r\n spark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\r\n hivecontext = HiveContext(spark.sparkContext)\r\n hivecontext.setConf(\"hive.exec.dynamic.partition\", \"true\")\r\n hivecontext.setConf(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\r\n spark.sparkContext.setCheckpointDir(\"hdfs:///user/airflow/psg/calc_06_task\")\r\n\r\n opts = {\r\n 'from_dt': sys.argv[1],\r\n \"to_dt\": \"9999-12-31\"\r\n }\r\n\r\n update_last_partition(prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n calc_06(prod_src(), prod_dst(), opts[\"from_dt\"], opts[\"to_dt\"])\r\n\r\n", "id": "2057979", "language": "Python", "matching_score": 4.157724380493164, "max_stars_count": 0, "path": "Raif/pyspark/calc_06.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\ntqdm.pandas()\nfrom numpy.linalg import norm\nfrom sklearn.neighbors import NearestNeighbors\n\n \ndef rgb(value):\n minimum, maximum = float(0), float(255)\n ratio = 2 * 
(value-minimum) / (maximum - minimum)\n b = int(max(0, 255*(1 - ratio)))\n r = int(max(0, 255*(ratio - 1)))\n g = 255 - b - r\n return (r, g, b)\n\ndef dist_latlon(lat1, lon1, lat2, lon2):\n return norm([lat1 - lat2, lon1 - lon2])\n\ndef add_poswork_target(x):\n lat1, lon1, lat2, lon2 = x[['pos_lat', 'pos_lon', 'work_lat', 'work_lon']]\n d = dist_latlon(lat1, lon1, lat2, lon2)\n return int(d < 0.02)\n\ndef add_poshome_target(x):\n lat1, lon1, lat2, lon2 = x[['pos_lat', 'pos_lon', 'home_lat', 'home_lon']]\n d = dist_latlon(lat1, lon1, lat2, lon2)\n return int(d < 0.02)\n\ndef add_dist_to_neighbours(df):\n\tdf_point_dup = df.groupby(['pos_lat', 'pos_lon']).agg('size').reset_index()\n\tdf_point_dup.columns = ['pos_lat', 'pos_lon', 'pos_customer_freq']\n\tdf = pd.merge(df, df_point_dup, on=['pos_lat', 'pos_lon'], how='left')\n\n\t# расстояния до двух ближайщих соседей\n\tpoints_pos = df[['pos_lat', 'pos_lon']].dropna().values\n\tif points_pos.size:\n\t\tneigh_pos = NearestNeighbors(2)\n\t\tneigh_pos.fit(np.unique(points_pos, axis=1)) \n\telse:\n\t\tneigh_pos = None\n\tdf_ = df.apply(lambda x: nearest_dist(x, neigh_pos, 'pos', 'pos2pos'), axis=1)\n\tdf = pd.concat([df, df_], axis=1)\n\tdf_ = df.apply(lambda x: nearest_dist(x, neigh_pos, 'atm', 'atm2pos'), axis=1)\n\tdf = pd.concat([df, df_], axis=1)\n\n\tpoints_atm = df[['atm_lat', 'atm_lon']].dropna().values\n\tif points_atm.size:\n\t\tneigh_atm = NearestNeighbors(2)\n\t\tneigh_atm.fit(np.unique(points_atm, axis=1)) \n\telse:\n\t\tneigh_atm = None\n\tdf_ = df.apply(lambda x: nearest_dist(x, neigh_atm, 'pos', 'pos2atm'), axis=1)\n\tdf = pd.concat([df, df_], axis=1)\n\tdf_ = df.apply(lambda x: nearest_dist(x, neigh_atm, 'atm', 'pos2atm'), axis=1)\n\tdf = pd.concat([df, df_], axis=1)\n\n\t# neigh_all = NearestNeighbors(2)\n\t# neigh_all.fit(np.unique(np.unique(np.vstack([points_pos, points_atm]), axis=1), axis=1))\n\n\n\treturn df\n\n\ndef nearest_dist(row, neigh, ttype, prefix):\n lat, lon = row[['{}_lat'.format(ttype), '{}_lon'.format(ttype)]].values\n if np.any(np.isnan([lat, lon])):\n distances = [-1, -1]\n elif not neigh:\n \tdistances = [-1, -1]\n else:\n distances, indices = neigh.kneighbors([[lat, lon]])\n return pd.Series(data=distances[0], index=['{}_1'.format(prefix), '{}_2'.format(prefix)])\n\ndef check_submit(path_to_csv):\n \"\"\"\n Dummy checking of submission\n \n :param path_to_csv: path to your submission file\n \"\"\"\n df = pd.read_csv(path_to_csv)\n assert df.shape == (9997, 5), u'Мало или много строк'\n # несмотря на то, что названия не имеют особого значения, правильный порядк колонок позволит не запутаться в широте-долготе\n assert list(df.columns) == ['_ID_', '_WORK_LAT_', '_WORK_LON_', '_HOME_LAT_', '_HOME_LON_'], u'Неверные названия столбцов'\n assert np.any(df['_ID_'].duplicated()) == False, u'Одному клиенту соответствует больше одной записи'\n for col_name in df.columns:\n if col_name != '_ID_':\n assert df[col_name].dtype in (np.float, np.int), u'В колонке {col_name} есть NULL'.format(col_name=col_name)\n assert df[col_name].isnull().sum() == 0, u'В колонке {col_name} есть NULL'.format(col_name=col_name)", "id": "11673595", "language": "Python", "matching_score": 0.6328924298286438, "max_stars_count": 0, "path": "Raif/code/mfuncs.py" }, { "content": "# #!/usr/bin/python\n# #\n# # HiPy - A Simple Python framework for Apache Hive\n# #\n# # Copyright 2011 Netflix, Inc.\n# #\n# # Licensed under the Apache License, Version 2.0 (the \"License\");\n# # you may not use this file except in compliance with the License.\n# # 
You may obtain a copy of the License at\n# #\n# # http://www.apache.org/licenses/LICENSE-2.0\n# #\n# # Unless required by applicable law or agreed to in writing, software\n# # distributed under the License is distributed on an \"AS IS\" BASIS,\n# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# # See the License for the specific language governing permissions and\n# # limitations under the License.\n# #\n# #\n# # See http://code.google.com/a/apache-extras.org/p/hipy/ for documentation\n# #\n# # Version 1.1\n# #\n# \n# from __future__ import with_statement # Not needed for Python 2.6+\n# \n# import sys, os, inspect, struct, zlib, datetime, shutil, subprocess, glob, tempfile, shutil, fileinput, fnmatch\n# from copy import copy\n# import simplejson as json\n# import pickle\n# from optparse import OptionParser\n# \n# HiPyPath = __file__\n# \n# #########################################\n# # Utility functions \n# #########################################\n# \n# def PopIfList( tuple ) :\n# return tuple[0] if ( len( tuple ) == 1 and isinstance( tuple[0], list ) ) else tuple\n# \n# def Bracket( string ) :\n# if string[0] == '(' and string[len(string)-1] == ')' :\n# b = 0\n# n = 0\n# for i in range(0,len(string)) :\n# c=string[i]\n# if c == '(' : b += 1\n# if c == ')' : b -= 1\n# if b <= 0 : n += 1\n# \n# if n == 1 :\n# return string\n# \n# return '(' + string + ')'\n# \n# # Stringify an object, ensuring there are quotes if it is in fact a string \n# def QuotedString( arg ) :\n# if not isinstance( arg, str ) :\n# return str( arg )\n# \n# if len( arg ) > 1 :\n# first = arg[0]\n# last = arg[len(arg)-1]\n# if ( first == last ) and ( first == '\"' or first == \"'\" ) :\n# arg = arg[1:len(arg)-1]\n# return \"'\" + arg + \"'\"\n# \n# #########################################\n# # Exceptions \n# #########################################\n# \n# class HiPyException(Exception) :\n# def __init__( self, args ) :\n# Exception.__init__( self, args )\n# \n# #########################################\n# # Hive Types \n# #########################################\n# \n# class HiveType :\n# def __init__( self, name, alt = None, var = None ) :\n# self.name = name\n# self.alt = name if alt is None else alt\n# self.var = var\n# \n# def __str__( self ) :\n# return self.name\n# \n# def __repr__( self ) :\n# return 'HiPy.'+self.var if self.var else object.__repr__( self )\n# \n# def __eq__( self, other ) :\n# return self.var == other.var\n# \n# def __ne__( self, other ) :\n# return not ( self == other )\n# \n# HiveTinyInt = HiveType( \"TINYINT\", var='HiveTinyInt' )\n# HiveSmallInt = HiveType( \"SMALLINT\", var='HiveSmallInt' )\n# HiveInt = HiveType( \"INT\", var='HiveInt' )\n# HiveBigInt = HiveType( \"BIGINT\", var='HiveBigInt' )\n# HiveBoolean = HiveType( \"BOOLEAN\", var='HiveBoolean' )\n# HiveFloat = HiveType( \"FLOAT\", var='HiveFloat' )\n# HiveDouble = HiveType( \"DOUBLE\", var='HiveDouble' )\n# HiveString = HiveType( \"STRING\", var='HiveString' )\n# HiveJson = HiveType( \"STRING\", \"JSON\", var='HiveJson' )\n# \n# class HiveArray( HiveType ) :\n# def __init__( self, datatype ) :\n# HiveType.__init__( self, \"ARRAY<\" + datatype.name + \">\" )\n# self.datatype = datatype\n# \n# def __repr__( self ) :\n# return 'HiPy.HiveArray( ' + repr( self.datatype ) + ' )'\n# \n# def __eq__( self, other ) :\n# return isinstance( other, HiveArray ) and ( self.datatype == other.datatype )\n# \n# class HiveMap( HiveType ) :\n# def __init__( self, keytype, datatype ) :\n# HiveType.__init__( self, \"MAP<\" 
+ str( keytype )+ \",\" + str( datatype ) + \">\" )\n# self.keytype = keytype\n# self.datatype = datatype\n# \n# def __repr__( self ) :\n# return 'HiPy.HiveMap( ' + repr( self.keytype ) + ', ' + repr( self.datatype ) + ' )'\n# \n# def __eq__( self, other ) :\n# return isinstance( other, HiveMap ) and self.keytype == other.keytype and self.datatype == other.datatype\n# \n# class HiveStruct( HiveType ) :\n# def __init__( self, schema ) :\n# HiveType.__init__( self, \"STRUCT<\" + \",\".join( [ key + ':' + str( type ) for ( key, type ) in schema ] ) + \">\" )\n# self.schema = schema\n# \n# def __repr__( self ) :\n# return 'HiPy.HiveStruct( ' + repr( self.schema ) + ' )'\n# # return '[' + ','.join( [ '(\"' + key + '\",' + repr( type )+ ')' for ( key, type ) in schema ] ) + ']'\n# \n# def __eq__( self, other ) :\n# return isinstance( other, HiveStruct ) and self.schema == other.schema\n# \n# \n# HiveBasicTypes = { 'TINYINT' : HiveTinyInt,\n# 'SMALLINT' : HiveSmallInt,\n# 'INT' : HiveInt,\n# 'BIGINT' : HiveBigInt,\n# 'BOOLEAN' : HiveBoolean,\n# 'FLOAT' : HiveFloat,\n# 'DOUBLE' : HiveDouble,\n# 'STRING' : HiveString,\n# 'JSON' : HiveJson }\n# \n# def HiveTypeFromString( string ) :\n# string = string.strip()\n# ustring = string.upper()\n# if ustring in HiveBasicTypes :\n# return HiveBasicTypes[ ustring ]\n# \n# if ustring.startswith( \"ARRAY<\" ) and ustring.endswith( \">\" ) :\n# return HiveArray( HiveTypeFromString( string[ 6 : len( string ) - 1 ] ) )\n# \n# if ustring.startswith( \"MAP<\" ) and ustring.endswith( \">\" ) :\n# types = string[ 4 : len( string ) - 1 ].split( ',' )\n# return HiveMap( HiveTypeFromString( types[0] ), HiveTypeFromString( types[1] ) )\n# \n# if ustring.startswith( \"STRUCT<\" ) and ustring.endswith( \">\" ) :\n# elements = string[ 7 : len( string ) - 1 ].split( ',' )\n# schema = [ ]\n# for element in elements :\n# ( key, type ) = element.split( ':' )\n# schema.append( ( key, HiveTypeFromString( type ) ) )\n# return HiveStruct( schema )\n# \n# raise HiPyException( \"Unrecognised Hive type: \" + string )\n# \n# def SchemaToSql( schema ) :\n# return \"( \" + \", \".join( [ col.name + ' ' + str( col.type ) for col in schema ] ) + \" ) \"\n# \n# def SchemaToPython( schema ) :\n# return [ ( col.name, col.type ) for col in schema ]\n# \n# def SchemaFromDescribe( describe ) :\n# schema = [ ]\n# for line in describe :\n# elements = line.split( '\\t' )\n# key = elements[ 0 ]\n# type = elements[ 1 ]\n# schema.append( ( key, HiveTypeFromString( type ) ) )\n# return schema\n# \n# # Python representation of Hive types:\n# # String gives basic type name\n# # 1-tuple indicates Hive array\n# # 2-tuple indicates Hive map\n# # List is list of 2-tuples giving hive struct\n# \n# #def PythonToHiveType( python ) :\n# # if isinstance( python, str ) :\n# # return HiveBasicTypes[ python ]\n# # elif isinstance( python, tuple ) and len( python ) == 1 :\n# # return HiveArray( PythonToHiveType( python[0] ) )\n# # elif isinstance( python, tuple ) :\n# # return HiveMap( PythonToHiveType( python[0] ), PythonToHiveType( python[1] ) )\n# # elif isinstance( python, array ) :\n# # return HiveStruct( [ ( key, PythonToHiveType( type ) ) for ( key, type ) in python ] )\n# \n# def HiveTypeFromExpression( obj ) :\n# if isinstance( obj, int ) : return HiveInt\n# elif isinstance( obj, long ) : return HiveBigInt\n# elif isinstance( obj, float ) : return HiveDouble\n# elif isinstance( obj, bool ) : return HiveBoolean\n# elif isinstance( obj, str ) : return HiveString\n# \n# return obj.type\n# \n# #def 
PythonToSchema( pythonSchema ) : \n# # return [ Column( name, PythonToHiveType( type ) ) for ( name, type ) in pythonSchema ]\n# \n# #########################################\n# # Columns\n# #\n# # A Column represents a data element with a type and optionally, a table to which it belongs and a name in that table\n# # XXX What does it mean to have a name and no table ? The name is a convenient hint for when its added to a table\n# #\n# # A Column can be\n# # - a source column in a Hive data table - uses the Column base type and has name and table defined\n# # - an entry in a Select, referring to a column in another table - uses the As subclass to carry the reference to the original column\n# # - an expression combining multiple other Columns and other information. There are several kinds\n# # - a Cast expression, changing the type of a column - uses the Cast subclass\n# # - an Arithmetic expression - currently converted to a string when first encountered (FIXME)\n# # - a Function expression - uses the Function subclass\n# # - a map expression - uses the MapEntry subclass\n# #\n# # Columns can appear in the following places\n# # - In SELECT lists as \"<full definition> AS <local name>\"\n# # - In WHERE clauses as \"<full definition>\"\n# # - In SORT BY, DISTRIBUTE BY, CLUSTER BY as \"<qualified name>\"\n# # - In GROUP BY as \"<qualified name>\"\n# # - In ON clauses as \"<qualified name>\"\n# # - In a schema as \"<local name> <type>\"\n# #\n# # <local name> refers to the name of the column in the current Select\n# # <qualified name> equals the <local name> when the column is in the current Select, or <table>.<local name> otherwise\n# # <full definition> is the full definition of the column\n# #\n# # str( column ) is shorthand for the full definition\n# #########################################\n# \n# class Column :\n# def __init__( self, name, type ) :\n# self.table = None\n# self.name = name\n# self.type = type\n# \n# # Returns the reference to the column as it appears in SORT BY, DISTRIBUTE BY, CLUSTER BY and other SELECTs\n# def __str__( self ) :\n# return self.Expression()\n# \n# # Returns the (qualified) name of the column as it should appear in WHERE, SORT BY, DISTRIBUTE BY, CLUSTER BY and other SELECTs\n# def QualifiedName( self ) :\n# if not self.table or not self.table.name or self.table.inscope :\n# return self.name\n# return self.table.name + \".\" + self.name\n# \n# # Returns the expression for the column as it appears in WHERE and GROUP BY\n# def Expression( self ) :\n# return self.QualifiedName()\n# \n# def Table( self ) :\n# return self.table\n# \n# def __getitem__( self, key ) :\n# if isinstance( self.type, HiveMap ) and self.type.keytype == HiveString :\n# return MapEntry( self.type.datatype, self, key )\n# elif isinstance( self.type, HiveArray ) :\n# return ArrayEntry( self.type.datatype, self, key )\n# \n# raise HiPyException( self.name + \" is not a Hive Map type with string keys or Hive Array.\" )\n# \n# \n# def __lt__( self, other ) : return Operator( '<', HiveBoolean, self, other )\n# def __le__( self, other ) : return Operator( '<=', HiveBoolean, self, other )\n# def __eq__( self, other ) : return Operator( '=', HiveBoolean, self, other ) if ( other != None ) else UnaryOperator( ' is null', HiveBoolean, self, True )\n# def __ne__( self, other ) : return Operator( '!=', HiveBoolean, self, other ) if ( other != None ) else UnaryOperator( ' is not null', HiveBoolean, self, True )\n# def __gt__( self, other ) : return Operator( '>', HiveBoolean, self, other )\n# def 
__ge__( self, other ) : return Operator( '>=', HiveBoolean, self, other )\n# \n# def __add__( self, other ) : return Operator( '+', self.type, self, other )\n# def __sub__( self, other ) : return Operator( '-', self.type, self, other ) \n# def __mul__( self, other ) : return Operator( '*', self.type, self, other )\n# def __div__( self, other ) : return Operator( '/', self.type, self, other )\n# def __mod__( self, other ) : return Operator( '%', self.type, self, other )\n# \n# def __and__( self, other ) : return Condition( 'AND', self, other )\n# def __rand_( self, other ) : return Condition( 'AND', other, self )\n# def __or__( self, other ) : return Condition( 'OR', self, other )\n# def __ror__( self, other ) : return Condition( 'OR', other, self )\n# \n# def __invert__( self ) : return Not( self )\n# \n# # Convert a schema ( list of ( name, type ) pairs ) to a list of Column objects \n# def Columns( *schema ) :\n# schema = PopIfList( schema )\n# if ( len( schema ) > 0 ) :\n# if not isinstance( schema[0], Column ) : schema = [ Column( name, type ) for ( name, type ) in schema ]\n# return schema\n# \n# class UnaryOperator(Column) :\n# def __init__( self, op, type, arg, postfix = False ) :\n# Column.__init__( self, None, type )\n# self.op = op\n# self.arg = arg\n# self.postfix = postfix\n# \n# def Table( self ) :\n# return self.arg.Table() if isinstance( self.arg, Column ) else None\n# \n# def Expression( self ) :\n# if self.postfix :\n# return '(' + str( self.arg ) + self.op + ')'\n# else :\n# return '(' + self.op + str( self.arg ) + ')'\n# \n# def Not( arg ) :\n# return UnaryOperator( ' NOT ', HiveBoolean, arg )\n# \n# class Operator(Column) :\n# def __init__( self, op, type, lhs, rhs ) :\n# Column.__init__( self, None, type )\n# self.op = op\n# self.lhs = lhs\n# self.rhs = rhs\n# \n# def Table( self ) :\n# if isinstance( self.lhs, Column ) :\n# if isinstance( self.rhs, Column ) :\n# if self.lhs.Table() == self.rhs.Table() :\n# return self.lhs.Table()\n# else :\n# return False\n# else :\n# return self.lhs.Table()\n# else :\n# if isinstance( self.rhs, Column ) :\n# return self.rhs.Table()\n# else :\n# return None\n# \n# def Expression( self ) :\n# return '(' + QuotedString( self.lhs ) + self.op + QuotedString( self.rhs ) + ')'\n# \n# def Like( lhs, rhs ) :\n# return Operator( \" LIKE \", HiveBoolean, lhs, rhs )\n# \n# def Rlike( lhs, rhs ) :\n# return Operator( \" RLIKE \", HiveBoolean, lhs, rhs )\n# \n# def Any( iterable ) :\n# return reduce( lambda a, b: a | b, iterable )\n# \n# def All( iterable ) :\n# return reduce( lambda a, b : a & b, iterable )\n# \n# class Condition() :\n# def __init__( self, condition, lhs, rhs ) :\n# self.condition = condition\n# self.lhs = lhs\n# self.rhs = rhs\n# \n# def __str__( self ) :\n# return self.Expression()\n# \n# def Expression( self ) :\n# return '(' + str( self.lhs ) + ' ' + self.condition + ' ' + str( self.rhs ) + ')'\n# \n# class ArrayEntry(Column) :\n# def __init__( self, type, array, index, name = None ) :\n# Column.__init__( self, name if name else (array.name+str( index )), type )\n# self.array = array\n# self.index = index\n# \n# def Table( self ) :\n# return self.array.Table()\n# \n# def Expression( self ) :\n# return str( self.array ) + \"[\" + str( self.index ) + \"]\"\n# \n# class MapEntry(Column) :\n# def __init__( self, type, map, key, name = None ) :\n# Column.__init__( self, name if name else key, type )\n# self.map = map\n# self.key = key\n# \n# def Table( self ) :\n# return self.map.Table()\n# \n# def Expression( self ) :\n# return 
str( self.map ) + \"['\" + str( self.key ) + \"']\"\n# \n# \n# #########################################\n# # Hive functions\n# #########################################\n# \n# class As(Column) :\n# def __init__( self, column, alias ) :\n# Column.__init__( self, alias, column.type )\n# self.original = column\n# \n# # This function is used only in SELECT and TRANSFORM for the definition of a column\n# def Define( self, alias = True ) :\n# expression = self.original.QualifiedName() if self.original.table else self.original.Expression()\n# if ( not alias ) or ( self.original.table and self.original.name == self.name ) or ( isinstance( self.original, MapEntry ) and self.original.key == self.name ) :\n# return expression\n# return expression + ' AS ' + self.name\n# \n# def Expression( self ) :\n# if self.table :\n# return self.QualifiedName()\n# if self.original.table :\n# return self.original.QualifiedName()\n# return self.original.Expression()\n# \n# class Function(Column) :\n# def __init__( self, type, function, name, *parameters ) :\n# Column.__init__( self, name, type )\n# self.function = function\n# self.parameters = PopIfList( parameters )\n# \n# def Table( self ) :\n# tables = frozenset( [ parameter.Table() for parameter in self.parameters if isinstance( parameter, Column ) ] )\n# if len( tables ) == 1 :\n# [ table ] = tables\n# return table\n# return None\n# \n# def Expression( self ) :\n# sql = self.function\n# if self.parameters and len( self.parameters ) > 0 :\n# sql = sql + \"( \" + \" ,\".join( [ str( parameter ) for parameter in self.parameters ] ) + \" )\"\n# return sql\n# \n# class Cast(Column) :\n# def __init__( self, expression, type, name = None ) :\n# Column.__init__( self, name if name else expression.name, type )\n# self.expression = expression\n# \n# def Table( self ) :\n# return self.expression.Table()\n# \n# def Expression( self ) :\n# return \"CAST( \" + str( self.expression ) + \" AS \" + str( self.type ) + \" )\" \n# \n# class Max(Function) :\n# def __init__( self, expression, name = None ) :\n# Function.__init__( self, HiveTypeFromExpression( expression ), \"MAX\", name, expression )\n# \n# class Sum(Function) :\n# def __init__( self, expression, name = None ) :\n# Function.__init__( self, HiveTypeFromExpression( expression ), \"SUM\", name, expression )\n# \n# class Avg(Function) :\n# def __init__( self, expression, name = None ) :\n# Function.__init__( self, HiveTypeFromExpression( expression ), \"AVG\", name, expression )\n# \n# class If(Function) :\n# def __init__( self, condition, iftrue, iffalse, name = None ) :\n# Function.__init__( self, HiveTypeFromExpression( iftrue ), \"IF\", name, condition, iftrue, iffalse )\n# \n# class Count(Function) :\n# def __init__( self, expression = None, name = None ) :\n# Function.__init__( self, HiveInt, \"COUNT\", name, expression if expression else '1' )\n# \n# class Distinct(Function) :\n# def __init__( self, expression = None, name = None ) :\n# Function.__init__( self, HiveTypeFromExpression(expression), \"DISTINCT\", name, expression)\n# \n# class CountExpr(Count) :\n# def __init__( self, expression, name = None ) :\n# Count.__init__( self, If( expression, 'true', 'null' ), name )\n# \n# class PercentileBase(Function) :\n# def __init__( self, function, expression, percentiles, name = None ) :\n# try :\n# percentiles = \"ARRAY(\" + ','.join( [ \"%0.7f\" % x for x in percentiles ] ) + \")\"\n# except :\n# pass\n# Function.__init__( self, HiveArray( HiveDouble ), function, name, expression, percentiles )\n# \n# class 
Percentile(PercentileBase) :\n# def __init__( self, expression, percentiles, name = None ) :\n# PercentileBase.__init__( self, \"PERCENTILE\", expression, percentiles, name )\n# \n# class PercentileApprox(PercentileBase) :\n# def __init__( self, expression, percentiles, name = None ) :\n# PercentileBase.__init__( self, \"PERCENTILE_APPROX\", expression, percentiles, name )\n# \n# class Cdf(Percentile) :\n# _p = [ 0.5 * pow( 0.8, i ) for i in range( 57, 1, -1 ) ]\n# Percentiles = _p + [ 0.5 ] + [ 1-x for x in reversed( _p ) ]\n# \n# def __init__( self, expression, name = None ) :\n# Percentile.__init__( self, expression, Cdf.Percentiles, name )\n# \n# \n# #########################################\n# # SQL Construction\n# #########################################\n# \n# class SqlBase : \n# def __init__( self ) :\n# self.Clear()\n# self.indent = 0\n# \n# def Add( self, string ) :\n# self.sql = self.sql + string\n# \n# def Clear( self ) :\n# self.sql = \"\"\n# \n# def AddNewlines( self, n = 2 ) :\n# self.sql = self.sql +'\\n' * n\n# \n# #########################################\n# # Row format\n# #########################################\n# \n# class RowFormat( SqlBase ) :\n# def __init__( self ) :\n# SqlBase.__init__( self )\n# \n# HiveCharMap = { '\\n' : '\\\\n', '\\t' : '\\\\t', '\\r' : '\\\\r' }\n# \n# def HiveCharRepr( char ) :\n# # Hive doesn't seem to like hexadecimal escaped control codes as Python repr returns\n# # So here we will put them in octal\n# if char in HiveCharMap : return HiveCharMap[ char ]\n# return char if ord( char ) > 31 and ord( char ) < 127 else '\\\\%03o' % ord( char )\n# \n# class DelimitedRowFormat( RowFormat ) :\n# def __init__( self, *fields ) :\n# RowFormat.__init__( self )\n# if len( fields ) == 1 : fields = fields[0]\n# ( self.fieldDelimiter, self.collectionDelimiter, self.mapKeyDelimiter, self.lineDelimiter ) = fields\n# \n# def __str__( self ) :\n# self.Clear()\n# self.Add( \"\\nROW FORMAT DELIMITED \" )\n# if self.fieldDelimiter :\n# self.Add( \"\\n\\tFIELDS TERMINATED BY '\" + HiveCharRepr( self.fieldDelimiter ) + \"' \" )\n# if self.collectionDelimiter :\n# self.Add( \"\\n\\tCOLLECTION ITEMS TERMINATED BY '\" + HiveCharRepr( self.collectionDelimiter ) + \"' \" )\n# if self.mapKeyDelimiter :\n# self.Add( \"\\n\\tMAP KEYS TERMINATED BY '\" + HiveCharRepr( self.mapKeyDelimiter ) + \"' \" )\n# if self.lineDelimiter :\n# self.Add( \"\\n\\tLINES TERMINATED BY '\" + HiveCharRepr( self.lineDelimiter ) + \"' \" )\n# \n# return self.sql\n# \n# def __repr__( self ) :\n# return repr( ( self.fieldDelimiter, self.collectionDelimiter, self.mapKeyDelimiter, self.lineDelimiter ) )\n# \n# DefaultDelimitedRowFormat = DelimitedRowFormat( '\\001', '\\002', '\\003', '\\n' )\n# \n# #########################################\n# # Tables and Selects\n# #########################################\n# \n# class Table(SqlBase) :\n# def __init__( self, name = None, schema = None ) :\n# SqlBase.__init__( self )\n# self.name = name\n# self.schema = [ ]\n# self.inscope = False\n# if schema :\n# self.AddSchema( schema )\n# \n# def AddColumn( self, column, name = None ) :\n# # print \"Adding column '\" + str( name ) + \"' to table \"+ str( self.name ) + \": name = \" + str( column.name ) + \", table = \" + str( column.table ) + \", def = \" + str( column )\n# # if column.table :\n# column = As( column, name if name else column.name )\n# # else :\n# # column = copy( column )\n# column.table = self\n# if name : column.name = name\n# self.schema.append( column )\n# setattr( self, column.name, 
column )\n# return column\n# \n# def AddSchema( self, *schema ) :\n# schema = Columns( PopIfList( schema ) )\n# for column in schema : self.AddColumn( column )\n# \n# def ClearSchema( self ) :\n# for column in self.schema :\n# delattr( self, column.name )\n# self.schema = [ ]\n# \n# def __str__( self ) :\n# return self.name\n# \n# def Declaration( self ) :\n# return \"\"\n# \n# def Files( self, recurse ) :\n# return { }\n# \n# def HasTransform( self ) :\n# return False\n# \n# def Tables( self ) :\n# return [ ]\n# \n# def SetNameIfNeeded( self, query ) :\n# pass\n# \n# def SetInFromClause( self ) :\n# pass\n# \n# class Join(Table) :\n# LEFT = \"LEFT \"\n# LEFT_OUTER = \"LEFT OUTER \"\n# LEFT_SEMI = \"LEFT SEMI \"\n# RIGHT = \"RIGHT \"\n# RIGHT_OUTER = \"RIGHT_OUTER \"\n# FULL = \"FULL \"\n# FULL_OUTER = \"FULL OUTER \"\n# \n# def __init__( self, left, right, jointype = None ) :\n# Table.__init__( self, None )\n# self.left = left\n# self.jointype = jointype\n# self.right = right\n# self.conditions = [ ]\n# self.left.SetInFromClause()\n# self.right.SetInFromClause()\n# \n# def On( self, condition ) :\n# self.conditions.append( condition )\n# return self\n# \n# def __str__( self ) :\n# self.Clear()\n# self.Add( str( self.left ) )\n# self.Add( ( self.jointype if self.jointype else \"\" ) + \"\\nJOIN \" + str( self.right ) + \" \" )\n# if len( self.conditions ) > 0 :\n# self.Add( \"\\nON \" )\n# self.Add( \" AND \".join( [ str( condition ) for condition in self.conditions ] ) )\n# self.Add( \" \" )\n# \n# return self.sql\n# \n# def Files( self, recurse ) :\n# if not recurse : return { }\n# return dict( self.left.Files( True ).items() + self.right.Files( True ).items() )\n# \n# def HasTransform( self ) :\n# return self.left.HasTransform() or self.right.HasTransform()\n# \n# def Tables( self ) :\n# return self.left.Tables() + self.right.Tables()\n# \n# def SetNameIfNeeded( self, query ) :\n# self.left.SetNameIfNeeded( query )\n# self.right.SetNameIfNeeded( query )\n# \n# class QualifiedColumn :\n# def __init__( self, col, qualifier ) :\n# self.column = col\n# self.qualifier = qualifier\n# \n# def QualifiedName( self ) :\n# return self.column.QualifiedName() + ' ' + self.qualifier\n# \n# class Ascending(QualifiedColumn) :\n# def __init__( self, col ) :\n# QualifiedColumn.__init__( self, col, 'ASC' )\n# \n# class Descending(QualifiedColumn) :\n# def __init__( self, col ) :\n# QualifiedColumn.__init__( self, col, 'DESC' ) \n# \n# class SelectBase(Table) :\n# def __init__( self, *columns ) :\n# Table.__init__( self )\n# columns = PopIfList( columns )\n# self.select = [ self.AddColumn( column ) for column in columns ]\n# self.fromClause = None\n# self.transform = None\n# self.distribute = [ ]\n# self.sort = [ ]\n# self.group = [ ]\n# self.cluster = [ ]\n# self.order = [ ]\n# self.where = [ ]\n# self.rowFormat = DefaultDelimitedRowFormat\n# self.modules = [ ]\n# self.code = [ ]\n# self.files = [ ]\n# self.limit = None\n# self.distinct = False\n# \n# tables = frozenset( [ x for x in [ col.Table() for col in columns ] if x != None ] )\n# if len( tables ) == 1 :\n# [ table ] = tables\n# self.From( table )\n# \n# def From( self, table ) :\n# self.fromClause = table\n# table.SetInFromClause()\n# return self\n# \n# def Transform( self, transform, userargs = None, numkeys = 0 ) :\n# self.transform = { 'transform' : transform,\n# 'userargs' : userargs,\n# 'numkeys' : numkeys,\n# 'input' : self.schema,\n# 'informat' : DefaultDelimitedRowFormat,\n# 'outformat' : DefaultDelimitedRowFormat }\n# 
self.ClearSchema()\n# self.AddSchema( transform.schema )\n# return self\n# \n# def AddModule( self, module, copy = True ) :\n# self.modules.append( ( module, copy ) )\n# \n# def AddCode( self, c ) :\n# self.code.append( c )\n# \n# def AddFile( self, f ) :\n# self.files.append( f )\n# \n# def Where( self, where ) :\n# if where : self.where.append( where )\n# return self\n# \n# def DistributeBy( self, *distribute ) :\n# self.distribute.extend( PopIfList( distribute ) )\n# return self\n# \n# def SortBy( self, *sort ) :\n# self.sort.extend( PopIfList( sort ) )\n# return self\n# \n# def GroupBy( self, *group ) :\n# self.group.extend( PopIfList( group ) )\n# return self\n# \n# def ClusterBy( self, *cluster ) :\n# self.cluster.extend( PopIfList( cluster ) )\n# return self\n# \n# def OrderBy( self, *order ) :\n# self.order.extend( PopIfList( order ) )\n# return self\n# \n# def Limit( self, limit ) :\n# self.limit = limit\n# return self\n# \n# def SqlFrom( self ) :\n# return ( \"\\nFROM \" + str( self.fromClause ) + \" \" ) if self.fromClause else \"\"\n# \n# def SqlSelect( self ) :\n# \n# self.inscope = True\n# if self.fromClause : self.fromClause.inscope = True\n# \n# self.Clear()\n# self.Add( \"\\nSELECT \" )\n# if self.distinct :\n# self.Add( \"DISTINCT \" )\n# \n# if self.transform :\n# if not isinstance( self.fromClause, SelectBase ) :\n# print \"HiPy: Warning: Did you mean to specify a transform on the map side of a map-reduce ?\"\n# self.Add( \"TRANSFORM( \" )\n# numkeys = self.transform[ 'numkeys' ]\n# if len( self.transform['input'] ) > 0 :\n# self.Add( \", \".join( [ col.Define( False ) for col in self.transform['input'] ] ) + \" ) \" )\n# input = self.transform[ 'input' ]\n# else :\n# self.Add( \"* ) \" )\n# input = self.fromClause.schema\n# \n# self.Add( str( self.transform[ 'informat' ] ) )\n# \n# transform = self.transform['transform']\n# if inspect.isfunction( transform ) or inspect.isclass( transform ) :\n# ( self.source, self.script ) = self.CreateTransformDriverScript( transform, self.modules, self.code,\n# self.transform['userargs'],\n# ( SchemaToPython( input[0:numkeys] ),\n# SchemaToPython( input[numkeys:] ),\n# SchemaToPython( self.schema ),\n# self.transform[ 'informat' ],\n# self.transform[ 'outformat' ] ) )\n# \n# self.Add( \"\\nUSING 'python \" + self.source + \"' \" )\n# else :\n# self.Add( \"\\nUSING '\" + function + \"' \" )\n# \n# self.Add( \"AS \" + SchemaToSql( self.schema ) )\n# self.Add( str( self.transform[ 'outformat' ] ) )\n# \n# elif self.select :\n# self.Add( \", \".join( [ col.Define() for col in self.select ] ) + \" \" )\n# \n# else :\n# self.Add( \"* \" )\n# self.AddSchema( self.fromClause.schema )\n# \n# if len( self.where ) > 0 :\n# self.Add( \"\\nWHERE \" )\n# self.Add( \" AND \".join( [ Bracket( str( where ) ) for where in self.where ] ) )\n# \n# if self.distribute : self.Add( \"\\nDISTRIBUTE BY \" + \", \".join( [ col.QualifiedName() for col in self.distribute ] ) + \" \" )\n# if self.group : self.Add( \"\\nGROUP BY \" + \", \".join( [ str( col ) for col in self.group ] ) + \" \" )\n# if self.cluster : self.Add( \"\\nCLUSTER BY \" + \", \".join( [ col.QualifiedName()for col in self.cluster ] ) + \" \" )\n# if self.order : self.Add( \"\\nORDER BY \" + \", \".join( [ col.QualifiedName()for col in self.order ] ) + \" \" )\n# if self.sort : self.Add( \"\\nSORT BY \" + \", \".join( [ col.QualifiedName() for col in self.sort ] ) + \" \" )\n# \n# if self.limit :\n# self.Add( \"\\nLIMIT \" + str( self.limit ) + \" \" )\n# \n# self.inscope = False\n# if 
self.fromClause : self.fromClause.inscope = False\n# \n# return self.sql\n# \n# def __str__( self ) :\n# \n# sql = self.SqlFrom() + self.SqlSelect()\n# \n# return ( \"( \" + sql + \") \" + self.name + \" \" ) if self.name else sql\n# \n# def CreateTransformDriverScript( self, transform, modules, code, userargs, argtuple ) :\n# transformname = transform.__name__\n# source = transformname + \".drvr.py\"\n# \n# sourcecode = \"\"\n# \n# code = [ transform ] + code\n# for object in code :\n# if inspect.isclass( object ) :\n# for cls in reversed( inspect.getmro( object ) ) :\n# sourcecode += inspect.getsource( cls )\n# modules.extend( cls.modules if hasattr( cls, 'modules' ) else [ ] )\n# else :\n# sourcecode += inspect.getsource( object )\n# modules.extend( object.modules if hasattr( object, 'modules' ) else [ ] )\n# \n# script = \"#!/usr/bin/python\\n\" \\\n# \"import sys, os\\n\" \\\n# \"sys.path.append(os.getcwd())\\n\" \\\n# \"import HiPy\\n\"\n# \n# if len( modules ) > 0 :\n# script += \"import \" + ', '.join( set( [ module.__name__ for ( module, copy ) in modules ] ) ) +'\\n'\n# \n# script += sourcecode\n# \n# script += \"if __name__ == '__main__' :\\n\"\n# \n# if inspect.isfunction( transform ) :\n# script += \" HiPy.Transform( HiPy.TransformWrapper, \" \\\n# + \"( \" + transformname + \", \" + repr( userargs ) + \" )\" \\\n# + \", \" + repr( argtuple ) + \" )\\n\"\n# else :\n# script += \" HiPy.Transform( \" + transformname + \", \" + repr( userargs ) + \", \" + repr( argtuple ) + \" )\\n\"\n# \n# return ( source, script )\n# \n# def Files( self, recurse ) :\n# files = dict( ( file, None ) for file in self.files )\n# if self.transform :\n# files[ self.source ] = self.script\n# if hasattr( self.transform['transform'], 'files' ) :\n# for file in self.transform['transform'].files :\n# files[ os.path.abspath( os.path.join( os.path.dirname( inspect.getsourcefile( self.transform['transform'] ) ), file ) ) ] = None\n# for ( module, copy ) in self.modules :\n# if copy and hasattr( module, '__file__' ) :\n# files[ module.__file__ ] = None\n# if recurse and self.fromClause :\n# files = dict( files.items() + self.fromClause.Files( True ).items() )\n# return files\n# \n# def Tables( self ) :\n# return self.fromClause.Tables() if self.fromClause else [ ]\n# \n# def HasTransform( self ) :\n# return self.transform or ( self.fromClause and self.fromClause.HasTransform() )\n# \n# class Select(SelectBase) :\n# def __init__( self, *columns ) :\n# SelectBase.__init__( self, *columns )\n# self.AddToDefaultQuery()\n# self.iterator = None\n# self.dir = None\n# self.table = None\n# \n# def Execute( self ) :\n# if not self.query : self.AddToDefaultQuery()\n# self.query.Execute()\n# \n# def __iter__( self ) : \n# self.Execute()\n# \n# for x in os.listdir( self.dir ) :\n# f = self.dir + \"/\" + x\n# if os.stat( f ).st_size == 0 or fnmatch.fnmatch( x, \"*.crc\" ) :\n# os.remove( f )\n# \n# outputfiles = glob.glob( self.dir + \"/*\" )\n# if len( outputfiles ) > 0 :\n# #rows = subprocess.Popen( [ \"cat\" ] + outputfiles, stdout=subprocess.PIPE )\n# #self.iterator = InputIterator( rows.stdout, self.rowFormat, self.schema )\n# \n# self.iterator = InputIterator( fileinput.input( outputfiles ), self.rowFormat, self.schema )\n# \n# else :\n# self.iterator = [ ].__iter__()\n# \n# return self.iterator\n# \n# def GetResults( self, targetdir, copy = False ) :\n# self.Execute()\n# \n# if not copy :\n# try:\n# os.symlink( self.dir, targetdir )\n# except:\n# copy = True\n# \n# if copy :\n# shutil.copytree( self.dir, targetdir 
)\n# \n# \n# def WriteToTable( self, table, partition = None ) :\n# self.table = table\n# self.partition = partition\n# if not self.query : self.AddToDefaultQuery()\n# \n# def Tables( self ) :\n# return SelectBase.Tables( self ) + ( [ self.table ] if self.table else [ ] )\n# \n# def SetNameIfNeeded( self, query ) :\n# if not self.name :\n# self.name = query.NextSelectName()\n# if self.fromClause :\n# self.fromClause.SetNameIfNeeded( query )\n# \n# def AddToDefaultQuery( self ) :\n# self.query = DefaultQuery\n# self.query.AddSelect( self )\n# \n# def SetInFromClause( self ) :\n# if self.query :\n# self.query.RemoveSelect( self )\n# self.query = None\n# \n# class NewTable( Table ) :\n# def __init__( self, name, schema, \\\n# ifNotExists = False, \\\n# partitions = None, \\\n# location = None, \\\n# rowFormat = None, \\\n# storedAs = None ) :\n# Table.__init__( self, name, schema )\n# self.ifNotExists = ifNotExists\n# self.partitions = Columns( partitions )\n# self.location = location\n# self.rowFormat = rowFormat\n# self.storedAs = storedAs\n# \n# def Declaration( self ) :\n# self.Clear()\n# self.Add( \"\\nCREATE \" + ( \"EXTERNAL \" if self.location else \"\" ) + \"TABLE \" )\n# self.Add( ( \"IF NOT EXISTS \" if self.ifNotExists else \"\" ) + self.name + \"\\n\" )\n# self.Add( SchemaToSql( self.schema ) )\n# if self.partitions :\n# self.Add( \"\\nPARTITIONED BY \" + SchemaToSql( self.partitions ) )\n# if self.rowFormat :\n# self.Add( str( self.rowFormat ) )\n# if self.storedAs :\n# self.Add( \"\\nSTORED AS \" + self.storedAs + \" \" )\n# if self.location :\n# self.Add( \"\\nLOCATION '\" + self.location + \"'\" )\n# self.Add( \";\" )\n# \n# return self.sql\n# \n# def Tables( self ) :\n# return [ self ]\n# \n# #########################################\n# # Query management\n# #########################################\n# \n# class QueryBase( SqlBase ) :\n# def __init__( self ) :\n# SqlBase.__init__( self )\n# self.options = [ ]\n# self.files = { }\n# self.jars = [ ]\n# self.archives = [ ]\n# self.selects = set()\n# self.dirvar = '${querydir}'\n# \n# self.nextSelectName = \"a\"\n# \n# def AddFile( self, file ) :\n# ( name, source ) = file if isinstance( file, tuple ) else ( file, None )\n# self.files[ name ] = source\n# \n# def AddFiles( self, files ) :\n# for name in files : self.files[name] = files[name]\n# \n# def AddJar( self, file ) :\n# self.jars.append( file )\n# \n# def AddArchive( self, file ) :\n# self.archives.append( file )\n# \n# def AddSelect( self, select ) :\n# self.selects.add( select )\n# select.query = self\n# \n# def RemoveSelect( self, select ) :\n# self.selects.remove( select )\n# select.query = None\n# \n# def Select( self, *columns ) :\n# select = Select( *columns )\n# self.AddSelect( select )\n# return select\n# \n# def NextSelectName( self ) :\n# result = self.nextSelectName\n# self.nextSelectName = chr( ord( self.nextSelectName ) + 1 )\n# return result\n# \n# def SetOption( self, name, value ) :\n# self.options.append( ( name, value ) )\n# \n# def SqlTables( self ) :\n# sql = \"\"\n# \n# for select in self.selects :\n# for table in select.Tables() :\n# sql = sql + table.Declaration()\n# \n# return sql\n# \n# def SqlQueries( self ) :\n# \n# # Group by common FROM tables\n# queries = { }\n# for select in self.selects :\n# frm = select.fromClause\n# if frm in queries :\n# queries[ frm ].append( select )\n# else :\n# queries[ frm ] = [ select ]\n# \n# sqls = [ ]\n# sql = \"\"\n# \n# for frm in queries :\n# \n# sql = queries[ frm ][ 0 ].SqlFrom()\n# \n# for select in 
queries[ frm ] :\n# \n# if select.dir :\n# sql = sql + \"\\nINSERT OVERWRITE LOCAL DIRECTORY '\" + select.dir + \"'\"\n# else :\n# sql = sql + \"\\nINSERT OVERWRITE TABLE \" + str( select.table )\n# if select.partition :\n# sql = sql + \" PARTITION( \" + select.partition + \")\"\n# \n# sql = sql + select.SqlSelect()\n# \n# self.AddFiles( select.Files( False ) )\n# \n# sqls.append( sql )\n# \n# self.AddFiles( frm.Files( True ) )\n# \n# return \";\\n\".join( sqls )\n# \n# def __str__( self ) :\n# \n# self.Clear()\n# \n# self.Add( ''.join( [ \"SET \" + name + \"=\" + str( value ) + \";\\n\" for ( name, value ) in self.options ] ) )\n# \n# queries = self.SqlQueries()\n# \n# self.Add( self.SqlTables() )\n# \n# if len( self.files ) > 0 : self.Add( \"\\nADD FILES \" + \" \".join( [ self.MakePath( filename ) for filename in self.files ] ) + \";\\n\" )\n# if len( self.jars ) > 0 : self.Add( \"\\nADD JARS \" + \" \".join( [ self.MakePath( jar ) for jar in self.jars ] ) + \";\\n\" )\n# if len( self.archives ) > 0 : self.Add( \"\\nADD ARCHIVES \" + \" \".join( [ self.MakePath( archive ) for archive in self.archives ] ) + \";\\n\" )\n# \n# self.Add( queries )\n# \n# return self.sql\n# \n# def MakePath( self, filename ) :\n# return os.path.join( self.dirvar, os.path.expanduser( filename ) )\n# \n# class Query(QueryBase) :\n# # The directory is used as a workspace for execution of the query \n# # hive is a function which executes a hive query, given the filename \n# def __init__( self, dir, hive = None ) :\n# QueryBase.__init__( self )\n# self.hive = hive\n# self.didExecute = False\n# self.cache = True\n# self.cwd = os.getcwd()\n# self.SetCacheDirectory( dir )\n# self.querydir = None\n# self.resultdir = None\n# \n# def SetCacheDirectory( self, dir ) :\n# if dir[0] == '/' :\n# self.dir = dir\n# elif dir[0] == '~' :\n# self.dir = os.path.expanduser( dir )\n# else :\n# self.dir = self.cwd + '/' + dir\n# \n# self.tmpdir = self.dir + '/tmp'\n# if not os.path.exists( self.tmpdir ) :\n# os.makedirs( self.tmpdir )\n# \n# def CreateQueryDirectories( self ) :\n# self.querydir = tempfile.mkdtemp( prefix = self.hashstr, dir = self.tmpdir )\n# for select in self.selects :\n# if select.dir :\n# dir = select.dir.replace( self.dirvar, self.querydir )\n# if not os.path.exists( dir ) :\n# os.mkdir( dir )\n# \n# def Hash( self ) :\n# queryscripts = \"\\n\".join( [ self.query.replace( \".pyc\", \".py\" ) ] + [ script for script in self.files.itervalues() if script ] )\n# return struct.pack( \"!I\", zlib.crc32( queryscripts ) & 0xffffffff ).encode( 'base64' )[0:6].replace( '/', '=' )\n# \n# def IsCached( self ) :\n# if not self.cache :\n# return False\n# \n# oldruns = [ name for name in os.listdir( self.dir ) if name.startswith( self.hashstr ) ]\n# if len( oldruns ) > 0 :\n# self.resultdir = self.dir + '/' + oldruns[ len( oldruns ) - 1 ]\n# return True\n# \n# def CreateQueryFiles( self ) :\n# self.queryfile = self.querydir + \"/query.q\"\n# with open( self.queryfile, \"w\" ) as f :\n# f.write( self.query )\n# \n# # Create the script files\n# for ( filename, script ) in self.files.iteritems() :\n# filepath = self.querydir + '/' + filename\n# if script :\n# with open( filepath, \"w\" ) as f :\n# f.write( script )\n# \n# def RenameQueryDir( self ) :\n# self.resultdir = self.dir + '/' + self.hashstr + \"-\" + datetime.datetime.today().strftime( \"%Y%m%d-%H%M\" )\n# os.rename( self.querydir, self.resultdir )\n# self.querydir = None\n# \n# def Execute( self ) :\n# if not self.didExecute :\n# cwd = os.getcwd()\n# \n# # Decide 
sub-directory names (if necessary) for the selects\n# for select in self.selects :\n# select.SetNameIfNeeded( self )\n# if not select.dir and not select.table :\n# select.dir = self.dirvar + '/' + select.name\n# \n# # Add a file for HiPy.py if necessary\n# if not all( [ not select.HasTransform() for select in self.selects ] ) :\n# self.AddFile( HiPyPath )\n# \n# # Generate the SQL and driver script source\n# self.query = str( self )\n# self.hashstr = self.Hash()\n# \n# print self.query\n# print \"Script hash is: \" + self.hashstr\n# \n# # If not cached, run the query and move the results to a subdir with the hash-based name\n# if not self.IsCached() :\n# # Create a tmp directory for this run\n# self.CreateQueryDirectories()\n# \n# # Create the query file\n# self.CreateQueryFiles()\n# \n# # Run the query\n# returncode = self.hive( self.queryfile, params = [ '-d', self.dirvar[2:len(self.dirvar)-1]+ '=' + self.querydir ] )\n# if returncode != 0 :\n# raise HiPyException( \"Hive error\" )\n# \n# # Rename the directory now it's no longer an attempt\n# self.RenameQueryDir()\n# \n# for select in self.selects :\n# if select.dir :\n# select.dir = select.dir.replace( self.dirvar, self.resultdir )\n# \n# # Done!\n# self.didExecute = True\n# \n# os.chdir( cwd )\n# \n# def Clean( self, all ) :\n# if self.didExecute :\n# if self.querydir :\n# shutil.rmtree( self.querydir, ignore_errors = True )\n# if self.resultdir :\n# shutil.rmtree( self.resultdir, ignore_errors = True )\n# if all :\n# shutil.rmtree( self.tmpdir, ignore_errors = True )\n# \n# def Cache( self, cache ) :\n# self.cache = cache\n# \n# #########################################\n# # Default Query\n# #########################################\n# \n# DefaultQuery = Query( '~/.hipy' )\n# \n# def SetHive( hive ) :\n# DefaultQuery.hive = hive\n# \n# def Reset() :\n# global DefaultQuery\n# DefaultQuery = Query( '~/.hipy', DefaultQuery.hive )\n# \n# def Execute() :\n# DefaultQuery.Execute()\n# \n# def Cache( cache ) :\n# DefaultQuery.Cache( cache )\n# \n# def SetOption( name, value ) :\n# DefaultQuery.SetOption( name, value )\n# \n# def Clean( all = False ) :\n# DefaultQuery.Clean( all )\n# \n# \n# #########################################\n# # Describe tables\n# ######################################### \n# \n# class DescribeHandler :\n# def __init__( self ) :\n# pass\n# \n# def __call__( self, output, error ) :\n# self.schema = SchemaFromDescribe( output )\n# \n# def Describe( tablename, hive = None ) :\n# if hive == None :\n# hive = DefaultQuery.hive\n# \n# handler = DescribeHandler()\n# returncode = hive( script = \"describe \"+tablename, handler = handler )\n# if returncode != None :\n# return None\n# \n# return Table( tablename, handler.schema )\n# \n# #########################################\n# # Data row object\n# #########################################\n# class DataRow :\n# def __init__( self, data, schema ) :\n# self.value = data\n# self.schema = schema\n# \n# def __len__( self ) :\n# return len( self.value )\n# \n# def __getitem__( self, key ) :\n# if isinstance( key, str ) :\n# for idx in range(0, len( self.schema ) ):\n# if self.schema[idx].name == key :\n# return self.value[idx]\n# error = \" \".join( [ key, \"not found in (\", \", \".join( x.name for x in self.schema ), \")\" ] )\n# raise KeyError( error )\n# \n# if isinstance( key, slice ) :\n# return DataRow( self.value[ key ], self.schema[ key ] )\n# \n# return self.value[ key ]\n# \n# def __setitem__( self, key, value ) :\n# if isinstance( key, str ) :\n# for idx in range(0, 
len( self.schema ) ):\n# if self.schema[idx].name == key :\n# self.value[idx] = value\n# return\n# raise KeyError\n# \n# self.value[ key ] = value\n# \n# def __iter__( self ) :\n# return self.value.__iter__()\n# \n# def __reversed__( self ) :\n# return self.value.__reversed__()\n# \n# def __eq__( self, other ) :\n# if isinstance( other, DataRow ) :\n# return self.value == other.value\n# return self.value == other\n# \n# def __ne__( self, other ) :\n# return not self.__eq__( other )\n# \n# def dict( self ) :\n# return dict( ( col.name, value ) for ( col, value ) in zip( self.schema, self.value ) )\n# \n# def __str__( self ) :\n# return str( self.value )\n# \n# #########################################\n# # Input/output processing\n# #########################################\n# \n# def ReadHiveType( value, type, rowFormat ) :\n# \n# if value == '\\N' : return None\n# \n# if isinstance( type, HiveArray ) :\n# out = [ ReadHiveType( element, type.datatype, rowFormat ) for element in value.split( rowFormat.collectionDelimiter ) ]\n# elif isinstance( type, HiveMap ) :\n# out = { }\n# elements = value.split( rowFormat.collectionDelimiter )\n# for element in elements :\n# keyvalue = element.split( rowFormat.mapKeyDelimiter )\n# key = ReadHiveType( keyvalue[0], type.keytype, rowFormat )\n# val = ReadHiveType( keyvalue[1], type.datatype, rowFormat )\n# out[key] = val\n# elif isinstance( type, HiveStruct ) :\n# out = [ ReadHiveType( element, elementtype, rowFormat ) \\\n# for ( element, ( key, elementtype ) ) \\\n# in zip( value.split( rowFormat.collectionDelimiter ), type.schema ) ]\n# elif type == HiveTinyInt or type == HiveSmallInt or type == HiveInt or type == HiveBigInt :\n# out = int( float( value ) )\n# elif type == HiveBigInt :\n# out = long( float( value ) )\n# elif type == HiveBoolean :\n# out = bool( value )\n# elif type == HiveFloat or type == HiveDouble :\n# out = float( value )\n# elif type == HiveString :\n# out = str( value )\n# elif type == HiveJson :\n# try :\n# out = JsonLoads( value )\n# except ValueError :\n# out = None\n# else :\n# raise TypeError\n# \n# return out\n# \n# def WriteHiveType( value, type, rowFormat ) :\n# if isinstance( type, HiveArray ) :\n# out = rowFormat.collectionDelimiter.join( [ WriteHiveType( element, type.datatype, rowFormat ) for element in value ] )\n# elif isinstance( type, HiveMap ) :\n# out = rowFormat.collectionDelimiter.join( [ ( WriteHiveType( key, type.keytype, rowFormat ) + \\\n# rowFormat.mapKeyDelimiter + \\\n# WriteHiveType( val, type.datatype, rowFormat ) ) \\\n# for ( key, val ) in value.items() ] )\n# elif isinstance( type, HiveStruct ) :\n# out = rowFormat.collectionDelimiter.join( [ WriteHiveType( element, elementtype ) \\\n# for ( element, ( name, elementtype ) ) \\\n# in zip( value, type.schema ) ] )\n# elif type == HiveJson :\n# out = JsonDumps( value )\n# elif value == None :\n# out = '\\\\N'\n# else :\n# out = str( value )\n# \n# return out\n# \n# def Deserialize( hiverow, rowFormat, schema ) : \n# value = [ ReadHiveType( field.rstrip( rowFormat.lineDelimiter ), col.type, rowFormat ) \\\n# for ( field, col ) \\\n# in zip( hiverow.split( rowFormat.fieldDelimiter ), schema ) ]\n# return DataRow( value, schema )\n# \n# def Serialize( datarow, rowFormat, schema ) :\n# return rowFormat.fieldDelimiter.join( [ WriteHiveType( value, col.type, rowFormat ) \\\n# for ( value, col ) in zip( datarow, schema ) ] ) \\\n# + rowFormat.lineDelimiter\n# \n# class InputIterator :\n# def __init__( self, iterator, rowFormat, schema ) :\n# self.iterator = 
iterator\n# self.rowFormat = rowFormat\n# self.schema = schema\n# self.nextrow = None\n# \n# def __iter__( self ) :\n# return self\n# \n# def next( self ) :\n# return Deserialize( self.iterator.next(), self.rowFormat, self.schema )\n# \n# class Output :\n# def __init__( self, rowFormat, schema ) :\n# self.rowFormat = rowFormat\n# self.schema = schema\n# \n# def __call__( self, row ) :\n# return Serialize( row, self.rowFormat, self.schema )\n# \n# \n# #########################################\n# # Transform driver\n# #########################################\n# \n# class PeekableIterator :\n# def __init__( self, iterator ) :\n# self.iterator = iterator\n# self.nextitem = None\n# \n# def __iter__( self ) :\n# return self\n# \n# def next( self ) :\n# if not self.nextitem : return self.iterator.next()\n# \n# nextitem = self.nextitem\n# self.nextitem = None\n# return nextitem\n# \n# def peek( self ) :\n# if not self.nextitem : self.nextitem = self.iterator.next()\n# return self.nextitem\n# \n# def put_back( self, item ) :\n# if self.nextitem : raise \"Can't put_back more than once!\"\n# self.nextitem = item\n# \n# class GroupIterator :\n# def __init__( self, key, input ) :\n# self.key = key\n# self.input = input\n# \n# def __iter__( self ) :\n# return self\n# \n# def next( self ) :\n# nextrow = self.input.next()\n# if nextrow[0:len(self.key)] == self.key :\n# return nextrow[len(self.key):]\n# \n# self.input.put_back( nextrow )\n# \n# raise StopIteration\n# \n# class GroupByKey :\n# def __init__( self, numkeys, input ) :\n# self.numkeys = numkeys\n# self.input = PeekableIterator( input.__iter__() )\n# \n# def __iter__( self ) :\n# return self\n# \n# def next( self ) :\n# key = self.input.peek()[0:self.numkeys]\n# return ( key, GroupIterator( key, self.input ) )\n# \n# class TransformWrapper :\n# def __init__( self, fnargs ) :\n# ( self.function, self.userargs ) = fnargs\n# \n# def __call__( self, keys, input ) :\n# return self.function( self.userargs, keys, input )\n# \n# def Transform( transform, userargs, argtuple ) :\n# \n# infile = sys.stdin\n# outfile = sys.stdout\n# \n# ( inkeyschema, inschema, outschema, informat, outformat ) = argtuple\n# \n# input = InputIterator( infile, DelimitedRowFormat( informat ), Columns( inkeyschema + inschema ) )\n# output = Output( DelimitedRowFormat( outformat ), Columns( outschema ) )\n# \n# transformobj = transform( userargs )\n# \n# if len( inkeyschema ) != 0 :\n# for ( key, values ) in GroupByKey( len(inkeyschema), input ) :\n# if len( key ) != len( inkeyschema ) :\n# sys.stderr.write( \"Key error: key=\" + repr( key ) + \", schema=\" + repr( inkeyschema ) + \"\\n\" )\n# else :\n# for out in transformobj( key, values ) :\n# outfile.write( output( out ) ) \n# \n# else :\n# for out in transformobj( None, input ) :\n# outfile.write( output( out ) ) \n# \n# ###################################\n# # Configure from file\n# ###################################\n# \n# # Configuration object contains\n# # 'tables' : Dictionary of the table objects\n# \n# class Configuration :\n# def __init__( self, dir, name, module ) :\n# self.dir = os.path.expanduser( dir )\n# self.filename = name\n# self.path = self.dir + '/' + self.filename\n# self.module = module\n# self.config = None\n# self.loading = False\n# \n# def Initialize( self ) :\n# self.config = { 'tables': { } }\n# \n# def Load( self ) :\n# if self.config :\n# return self.config\n# \n# if self.loading :\n# return None\n# \n# try :\n# configfile = open( self.path, 'r' )\n# except IOError :\n# print \"HiPy: No 
configuration found ...\"\n# return None\n# \n# try :\n# self.loading = True\n# self.config = pickle.load( configfile )\n# self.loading = False\n# except :\n# print \"HiPy: Configuration load error ...\"\n# return None\n# \n# configfile.close()\n# print \"HiPy: Configuration loaded.\"\n# \n# return self.config\n# \n# def Save( self ) :\n# if not self.config :\n# return\n# \n# if not os.path.exists( self.dir ) :\n# os.makedirs( self.dir )\n# \n# try :\n# configfile = open( self.path, 'w' )\n# except IOError :\n# print \"HiPy: Configuration save error (1) ...\"\n# return\n# \n# try :\n# pickle.dump( self.config, configfile )\n# except :\n# print \"HiPy: Configuration save error (2) ...\"\n# return\n# \n# configfile.close()\n# \n# print \"HiPy: Configuration saved.\"\n# \n# def Configure( self ) :\n# if not self.Load() :\n# self.Initialize()\n# self.Save()\n# \n# def Update( self ) :\n# if not self.Load() :\n# self.Initialize()\n# self.ReadTablesFromHive( default_hive_tables )\n# else :\n# self.ReadTablesFromHive( self.config[ 'tables' ].keys() )\n# \n# self.ReadDevicesFromDataoven()\n# self.Save()\n# self.AddTablesToModule()\n# \n# def ReadTablesFromHive( self, tablelist ) :\n# tables = self.config[ 'tables' ]\n# for tablename in tablelist :\n# print \"HiPy: Reading schema for \" + tablename + \" from Hive\"\n# tables[ tablename ] = HiPy.Describe( tablename )\n# \n# def GetTables( self ) :\n# return self.config[ 'tables' ]\n# \n# def GetTable( self, tablename ) :\n# tables = self.config[ 'tables' ]\n# if tablename in tables :\n# return tables[ tablename ]\n# \n# table = Describe( tablename )\n# if table is not None :\n# tables[ tablename ] = table\n# self.Save()\n# \n# return table\n# \n# ########################################\n# # Configuration commands\n# ########################################\n# \n# def Update() :\n# global config\n# return config.Update()\n# \n# def ShowTables() :\n# for table in GetTables().iterkeys() :\n# print table\n# \n# def ShowTable( arg ) :\n# table = GetTable( arg )\n# if table is not None :\n# for col in table.schema :\n# print col.name, str( col.type )\n# else :\n# print \"Unknown table: \", arg\n# \n# def GetTable( tablename ) :\n# global config\n# return config.GetTable( tablename )\n# \n# def GetTables() :\n# global config\n# return config.GetTables()\n# \n# ########################################\n# # Json parsing\n# ######################################## \n# \n# JsonLoads = json.loads\n# JsonDumps = json.dumps\n# \n# def SetJsonParser( loads, dumps ) :\n# global JsonLoads\n# global JsonDumps\n# \n# JsonLoads = loads\n# JsonDumps = dumps\n# \n# ########################################\n# # main() function for command line usage\n# ######################################## \n# \n# def main() :\n# import HiPy\n# \n# usage = \"usage: %prog [options] command\\n\\n\" \\\n# \"Commands:\\n\" \\\n# \" showtables - show configured tables\\n\" \\\n# \" showtable <table name> - show the schema of a table\"\n# \n# options = OptionParser( usage )\n# \n# ( options, args ) = options.parse_args()\n# \n# if len( args ) < 1 :\n# sys.stderr.write( \"Must provide a command\\n\" )\n# sys.exit( 1 )\n# \n# zeroargs = { 'showtables' : HiPy.ShowTables }\n# oneargs = { 'showtable' : HiPy.ShowTable }\n# \n# command = args[0].lower()\n# \n# if command in zeroargs :\n# zeroargs[ command ]()\n# elif command in oneargs :\n# oneargs[ command ]( args[1] )\n# else :\n# print \"Unrecognized command: \", args[0]\n# \n# if __name__ == \"__main__\" :\n# main()\n# else :\n# config = 
Configuration( \"~/.hipy\", \"config\", sys.modules[__name__] )\n# config.Configure()\n# \n", "id": "11970262", "language": "Python", "matching_score": 12.574763298034668, "max_stars_count": 0, "path": "hipy/HiPyFork.py" }, { "content": "#!/usr/bin/python\n#\n# HiPy - A Simple Python framework for Apache Hive\n#\n# Copyright 2011 Netflix, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# See http://code.google.com/a/apache-extras.org/p/hipy/ for documentation\n#\n# Version 1.1\n#\n\nimport sys\nimport inspect\nimport struct\nimport zlib\nimport datetime\nimport glob\nimport tempfile\nimport shutil\nimport fileinput\nimport json\nimport pickle\nfrom optparse import OptionParser\n\nimport os\nimport fnmatch\n\n\nHiPyPath = __file__\n\n#########################################\n# Utility functions\n#########################################\n\n\ndef PopIfList(tuple):\n return tuple[0] if (len(tuple) == 1 and isinstance(tuple[0], list)) else tuple\n\n\ndef Bracket(string):\n if string[0] == '(' and string[len(string)-1] == ')':\n b = 0\n n = 0\n for i in range(0, len(string)):\n c = string[i]\n if c == '(':\n b += 1\n if c == ')':\n b -= 1\n if b <= 0:\n n += 1\n\n if n == 1:\n return string\n\n return '(' + string + ')'\n\n\n# Stringify an object, ensuring there are quotes if it is in fact a string\ndef QuotedString(arg):\n if not isinstance(arg, str):\n return str(arg)\n\n if len(arg) > 1:\n first = arg[0]\n last = arg[len(arg)-1]\n if (first == last) and (first == '\"' or first == \"'\"):\n arg = arg[1:len(arg)-1]\n return \"'\" + arg + \"'\"\n\n#########################################\n# Exceptions\n#########################################\n\n\nclass HiPyException(Exception):\n def __init__(self, args):\n Exception.__init__(self, args)\n\n#########################################\n# Hive Types\n#########################################\n\n\nclass HiveType:\n def __init__(self, name, alt=None, var=None):\n self.name = name\n self.alt = name if alt is None else alt\n self.var = var\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return 'HiPy.'+self.var if self.var else object.__repr__(self)\n\n def __eq__(self, other):\n return self.var == other.var\n\n def __ne__(self, other):\n return not (self == other)\n\nHiveTinyInt = HiveType(\"TINYINT\", var='HiveTinyInt')\nHiveSmallInt = HiveType(\"SMALLINT\", var='HiveSmallInt')\nHiveInt = HiveType(\"INT\", var='HiveInt')\nHiveBigInt = HiveType(\"BIGINT\", var='HiveBigInt')\nHiveBoolean = HiveType(\"BOOLEAN\", var='HiveBoolean')\nHiveFloat = HiveType(\"FLOAT\", var='HiveFloat')\nHiveDouble = HiveType(\"DOUBLE\", var='HiveDouble')\nHiveString = HiveType(\"STRING\", var='HiveString')\nHiveJson = HiveType(\"STRING\", \"JSON\", var='HiveJson')\n\n\nclass HiveArray(HiveType):\n def __init__(self, datatype):\n HiveType.__init__(self, \"ARRAY<\" + datatype.name + \">\")\n self.datatype = datatype\n\n def __repr__(self):\n return 'HiPy.HiveArray(' + repr(self.datatype) + ')'\n\n def __eq__(self, other):\n return 
isinstance(other, HiveArray) and (self.datatype == other.datatype)\n\n\nclass HiveMap(HiveType):\n def __init__(self, keytype, datatype):\n HiveType.__init__(self, \"MAP<\" + str(keytype) + \",\" + str(datatype) + \">\")\n self.keytype = keytype\n self.datatype = datatype\n\n def __repr__(self):\n return 'HiPy.HiveMap(' + repr(self.keytype) + ', ' + repr(self.datatype) + ')'\n\n def __eq__(self, other):\n return isinstance(other, HiveMap) and self.keytype == other.keytype and self.datatype == other.datatype\n\n\nclass HiveStruct(HiveType):\n def __init__(self, schema):\n HiveType.__init__(self, \"STRUCT<\" + \",\".join([key + ':' + str(type) for (key, type) in schema]) + \">\")\n self.schema = schema\n\n def __repr__(self):\n return 'HiPy.HiveStruct(' + repr(self.schema) + ')'\n\n def __eq__(self, other):\n return isinstance(other, HiveStruct) and self.schema == other.schema\n\n\nHiveBasicTypes = {\n 'TINYINT': HiveTinyInt,\n 'SMALLINT': HiveSmallInt,\n 'INT': HiveInt,\n 'BIGINT': HiveBigInt,\n 'BOOLEAN': HiveBoolean,\n 'FLOAT': HiveFloat,\n 'DOUBLE': HiveDouble,\n 'STRING': HiveString,\n 'JSON': HiveJson\n}\n\n\ndef HiveTypeFromString(string):\n string = string.strip()\n ustring = string.upper()\n if ustring in HiveBasicTypes:\n return HiveBasicTypes[ustring]\n\n if ustring.startswith(\"ARRAY<\") and ustring.endswith(\">\"):\n return HiveArray(HiveTypeFromString(string[6: len(string) - 1]))\n\n if ustring.startswith(\"MAP<\") and ustring.endswith(\">\"):\n types = string[4: len(string) - 1].split(',')\n return HiveMap(HiveTypeFromString(types[0]), HiveTypeFromString(types[1]))\n\n if ustring.startswith(\"STRUCT<\") and ustring.endswith(\">\"):\n elements = string[7: len(string) - 1].split(',')\n schema = []\n for element in elements:\n (key, type) = element.split(':')\n schema.append((key, HiveTypeFromString(type)))\n return HiveStruct(schema)\n\n raise HiPyException(\"Unrecognised Hive type: \" + string)\n\n\ndef SchemaToSql(schema):\n return \"(\" + \", \".join([col.name + ' ' + str(col.type) for col in schema]) + \") \"\n\n\ndef SchemaToPython(schema):\n return [(col.name, col.type) for col in schema]\n\n\ndef SchemaFromDescribe(describe):\n schema = []\n for line in describe:\n elements = line.split('\\t')\n key = elements[0]\n type = elements[1]\n schema.append((key, HiveTypeFromString(type)))\n return schema\n\n# Python representation of Hive types:\n# String gives basic type name\n# 1-tuple indicates Hive array\n# 2-tuple indicates Hive map\n# List is list of 2-tuples giving hive struct\n\n# def PythonToHiveType(python):\n# if isinstance(python, str):\n# return HiveBasicTypes[python]\n# elif isinstance(python, tuple) and len(python) == 1:\n# return HiveArray(PythonToHiveType(python[0]))\n# elif isinstance(python, tuple):\n# return HiveMap(PythonToHiveType(python[0]), PythonToHiveType(python[1]))\n# elif isinstance(python, array):\n# return HiveStruct([(key, PythonToHiveType(type)) for (key, type) in python])\n\n\ndef HiveTypeFromExpression(obj):\n if isinstance(obj, int):\n return HiveInt\n elif isinstance(obj, long):\n return HiveBigInt\n elif isinstance(obj, float):\n return HiveDouble\n elif isinstance(obj, bool):\n return HiveBoolean\n elif isinstance(obj, str):\n return HiveString\n\n return obj.type\n\n\n# def PythonToSchema(pythonSchema):\n# return [Column(name, PythonToHiveType(type)) for (name, type) in pythonSchema]\n\n#########################################\n# Columns\n#\n# A Column represents a data element with a type and optionally, a table to which it belongs and a 
name in that table\n# XXX What does it mean to have a name and no table ? The name is a convenient hint for when its added to a table\n#\n# A Column can be\n# - a source column in a Hive data table - uses the Column base type and has name and table defined\n# - an entry in a Select, referring to a column in another table - uses the As subclass to carry the reference to the\n# original column\n# - an expression combining multiple other Columns and other information. There are several kinds\n# - a Cast expression, changing the type of a column - uses the Cast subclass\n# - an Arithmetic expression - currently converted to a string when first encountered (FIXME)\n# - a Function expression - uses the Function subclass\n# - a map expression - uses the MapEntry subclass\n#\n# Columns can appear in the following places\n# - In SELECT lists as \"<full definition> AS <local name>\"\n# - In WHERE clauses as \"<full definition>\"\n# - In SORT BY, DISTRIBUTE BY, CLUSTER BY as \"<qualified name>\"\n# - In GROUP BY as \"<qualified name>\"\n# - In ON clauses as \"<qualified name>\"\n# - In a schema as \"<local name> <type>\"\n#\n# <local name> refers to the name of the column in the current Select\n# <qualified name> equals the <local name> when the column is in the current Select, or <table>.<local name> otherwise\n# <full definition> is the full definition of the column\n#\n# str(column) is shorthand for the full definition\n#########################################\n\nclass Column:\n def __init__(self, name, type):\n self.table=None\n self.name = name\n self.type = type\n\n # Returns the reference to the column as it appears in SORT BY, DISTRIBUTE BY, CLUSTER BY and other SELECTs\n def __str__(self):\n return self.Expression()\n\n # Returns the (qualified) name of the column as it should appear in WHERE, SORT BY,\n # DISTRIBUTE BY, CLUSTER BY and other SELECTs\n def QualifiedName(self):\n if not self.table or not self.table.name or self.table.inscope:\n return self.name\n return self.table.name + \".\" + self.name\n\n # Returns the expression for the column as it appears in WHERE and GROUP BY\n def Expression(self):\n return self.QualifiedName()\n\n def Table(self):\n return self.table\n\n def __getitem__(self, key):\n if isinstance(self.type, HiveMap) and self.type.keytype == HiveString:\n return MapEntry(self.type.datatype, self, key)\n elif isinstance(self.type, HiveArray):\n return ArrayEntry(self.type.datatype, self, key)\n\n raise HiPyException(self.name + \" is not a Hive Map type with string keys or Hive Array.\")\n\n def __lt__(self, other):\n return Operator('<', HiveBoolean, self, other)\n\n def __le__(self, other):\n return Operator('<=', HiveBoolean, self, other)\n\n def __eq__(self, other):\n if other is not None:\n return Operator('=', HiveBoolean, self, other)\n else:\n UnaryOperator(' is null', HiveBoolean, self, True)\n\n def __ne__(self, other):\n if other is not None:\n return Operator('!=', HiveBoolean, self, other)\n else:\n UnaryOperator(' is not null', HiveBoolean, self, True)\n\n def __gt__(self, other):\n return Operator('>', HiveBoolean, self, other)\n\n def __ge__(self, other):\n return Operator('>=', HiveBoolean, self, other)\n\n def __add__(self, other):\n return Operator('+', self.type, self, other)\n\n def __sub__(self, other):\n return Operator('-', self.type, self, other)\n\n def __mul__(self, other):\n return Operator('*', self.type, self, other)\n\n def __div__(self, other):\n return Operator('/', self.type, self, other)\n\n def __mod__(self, other):\n return 
Operator('%', self.type, self, other)\n\n def __and__(self, other):\n return Condition('AND', self, other)\n\n def __rand_(self, other):\n return Condition('AND', other, self)\n\n def __or__(self, other):\n return Condition('OR', self, other)\n\n def __ror__(self, other):\n return Condition('OR', other, self)\n\n def __invert__(self):\n return Not(self)\n\n\n# Convert a schema (list of (name, type) pairs) to a list of Column objects\ndef Columns(*schema):\n schema = PopIfList(schema)\n if (len(schema) > 0):\n if not isinstance(schema[0], Column):\n schema = [Column(name, type) for (name, type) in schema]\n return schema\n\n\nclass UnaryOperator(Column):\n def __init__(self, op, type, arg, postfix=False):\n Column.__init__(self, None, type)\n self.op = op\n self.arg = arg\n self.postfix = postfix\n\n def Table(self):\n return self.arg.Table() if isinstance(self.arg, Column) else None\n\n def Expression(self):\n if self.postfix:\n return '(' + str(self.arg) + self.op + ')'\n else:\n return '(' + self.op + str(self.arg) + ')'\n\n\ndef Not(arg):\n return UnaryOperator(' NOT ', HiveBoolean, arg)\n\n\nclass Operator(Column):\n def __init__(self, op, type, lhs, rhs):\n Column.__init__(self, None, type)\n self.op = op\n self.lhs = lhs\n self.rhs = rhs\n\n def Table(self):\n if isinstance(self.lhs, Column):\n if isinstance(self.rhs, Column):\n if self.lhs.Table() == self.rhs.Table():\n return self.lhs.Table()\n else:\n return False\n else:\n return self.lhs.Table()\n else:\n if isinstance(self.rhs, Column):\n return self.rhs.Table()\n else:\n return None\n\n def Expression(self):\n return '(' + QuotedString(self.lhs) + self.op + QuotedString(self.rhs) + ')'\n\n\ndef Like(lhs, rhs):\n return Operator(\" LIKE \", HiveBoolean, lhs, rhs)\n\n\ndef Rlike(lhs, rhs):\n return Operator(\" RLIKE \", HiveBoolean, lhs, rhs)\n\n\ndef Any(iterable):\n return reduce(lambda a, b: a | b, iterable)\n\n\ndef All(iterable):\n return reduce(lambda a, b: a & b, iterable)\n\n\nclass Condition():\n def __init__(self, condition, lhs, rhs):\n self.condition = condition\n self.lhs = lhs\n self.rhs = rhs\n\n def __str__(self):\n return self.Expression()\n\n def Expression(self):\n return '(' + str(self.lhs) + ' ' + self.condition + ' ' + str(self.rhs) + ')'\n\n\nclass ArrayEntry(Column):\n def __init__(self, type, array, index, name=None):\n Column.__init__(self, name if name else (array.name+str(index)), type)\n self.array = array\n self.index = index\n\n def Table(self):\n return self.array.Table()\n\n def Expression(self):\n return str(self.array) + \"[\" + str(self.index) + \"]\"\n\n\nclass MapEntry(Column):\n def __init__(self, type, map, key, name=None):\n Column.__init__(self, name if name else key, type)\n self.map = map\n self.key = key\n\n def Table(self):\n return self.map.Table()\n\n def Expression(self):\n return str(self.map) + \"['\" + str(self.key) + \"']\"\n\n\n#########################################\n# Hive functions\n#########################################\n\nclass As(Column):\n def __init__(self, column, alias):\n Column.__init__(self, alias, column.type)\n self.original = column\n\n # This function is used only in SELECT and TRANSFORM for the definition of a column\n def Define(self, alias=True):\n expression = self.original.QualifiedName() if self.original.table else self.original.Expression()\n if (not alias) or (self.original.table and self.original.name == self.name) or \\\n (isinstance(self.original, MapEntry) and self.original.key == self.name):\n return expression\n return expression + ' AS ' + 
self.name\n\n def Expression(self):\n if self.table:\n return self.QualifiedName()\n if self.original.table:\n return self.original.QualifiedName()\n return self.original.Expression()\n\n\nclass Function(Column):\n def __init__(self, type, function, name, *parameters):\n Column.__init__(self, name, type)\n self.function = function\n self.parameters = PopIfList(parameters)\n\n def Table(self):\n tables = frozenset([parameter.Table() for parameter in self.parameters if isinstance(parameter, Column)])\n if len(tables) == 1:\n [table] = tables\n return table\n return None\n\n def Expression(self):\n sql = self.function\n if self.parameters and len(self.parameters) > 0:\n sql = sql + \"(\" + \" ,\".join([str(parameter) for parameter in self.parameters]) + \")\"\n return sql\n\n\nclass Cast(Column):\n def __init__(self, expression, type, name=None):\n Column.__init__(self, name if name else expression.name, type)\n self.expression = expression\n\n def Table(self):\n return self.expression.Table()\n\n def Expression(self):\n return \"CAST(\" + str(self.expression) + \" AS \" + str(self.type) + \")\"\n\n\nclass Max(Function):\n def __init__(self, expression, name=None):\n Function.__init__(self, HiveTypeFromExpression(expression), \"MAX\", name, expression)\n\nclass Sum(Function):\n def __init__(self, expression, name=None):\n Function.__init__(self, HiveTypeFromExpression(expression), \"SUM\", name, expression)\n\nclass Avg(Function):\n def __init__(self, expression, name=None):\n Function.__init__(self, HiveTypeFromExpression(expression), \"AVG\", name, expression)\n\nclass If(Function):\n def __init__(self, condition, iftrue, iffalse, name=None):\n Function.__init__(self, HiveTypeFromExpression(iftrue), \"IF\", name, condition, iftrue, iffalse)\n\nclass Count(Function):\n def __init__(self, expression=None, name=None):\n Function.__init__(self, HiveInt, \"COUNT\", name, expression if expression else '1')\n\nclass Distinct(Function):\n def __init__(self, expression=None, name=None):\n Function.__init__(self, HiveTypeFromExpression(expression), \"DISTINCT\", name, expression)\n\nclass CountExpr(Count):\n def __init__(self, expression, name=None):\n Count.__init__(self, If(expression, 'true', 'null'), name)\n\nclass PercentileBase(Function):\n def __init__(self, function, expression, percentiles, name=None):\n try:\n percentiles = \"ARRAY(\" + ','.join([\"%0.7f\" % x for x in percentiles]) + \")\"\n except:\n pass\n Function.__init__(self, HiveArray(HiveDouble), function, name, expression, percentiles)\n\nclass Percentile(PercentileBase):\n def __init__(self, expression, percentiles, name=None):\n PercentileBase.__init__(self, \"PERCENTILE\", expression, percentiles, name)\n\nclass PercentileApprox(PercentileBase):\n def __init__(self, expression, percentiles, name=None):\n PercentileBase.__init__(self, \"PERCENTILE_APPROX\", expression, percentiles, name)\n\nclass Cdf(Percentile):\n _p = [0.5 * pow(0.8, i) for i in range(57, 1, -1)]\n Percentiles = _p + [0.5]+ [1-x for x in reversed(_p)]\n\n def __init__(self, expression, name=None):\n Percentile.__init__(self, expression, Cdf.Percentiles, name)\n\n\n#########################################\n# SQL Construction\n#########################################\n\nclass SqlBase:\n def __init__(self):\n self.Clear()\n self.indent = 0\n\n def Add(self, string):\n self.sql = self.sql + string\n\n def Clear(self):\n self.sql = \"\"\n\n def AddNewlines(self, n = 2):\n self.sql = self.sql +'\\n' * n\n\n#########################################\n# Row 
format\n#########################################\n\nclass RowFormat(SqlBase):\n def __init__(self):\n SqlBase.__init__(self)\n\nHiveCharMap = { '\\n': '\\\\n', '\\t': '\\\\t', '\\r': '\\\\r'}\n\ndef HiveCharRepr(char):\n # Hive doesn't seem to like hexadecimal escaped control codes as Python repr returns\n # So here we will put them in octal\n if char in HiveCharMap: return HiveCharMap[char]\n return char if ord(char) > 31 and ord(char) < 127 else '\\\\%03o' % ord(char)\n\nclass DelimitedRowFormat(RowFormat):\n def __init__(self, *fields):\n RowFormat.__init__(self)\n if len(fields) == 1: fields = fields[0]\n (self.fieldDelimiter, self.collectionDelimiter, self.mapKeyDelimiter, self.lineDelimiter) = fields\n\n def __str__(self):\n self.Clear()\n self.Add(\"\\nROW FORMAT DELIMITED \")\n if self.fieldDelimiter:\n self.Add(\"\\n\\tFIELDS TERMINATED BY '\" + HiveCharRepr(self.fieldDelimiter) + \"' \")\n if self.collectionDelimiter:\n self.Add(\"\\n\\tCOLLECTION ITEMS TERMINATED BY '\" + HiveCharRepr(self.collectionDelimiter) + \"' \")\n if self.mapKeyDelimiter:\n self.Add(\"\\n\\tMAP KEYS TERMINATED BY '\" + HiveCharRepr(self.mapKeyDelimiter) + \"' \")\n if self.lineDelimiter:\n self.Add(\"\\n\\tLINES TERMINATED BY '\" + HiveCharRepr(self.lineDelimiter) + \"' \")\n\n return self.sql\n\n def __repr__(self):\n return repr((self.fieldDelimiter, self.collectionDelimiter, self.mapKeyDelimiter, self.lineDelimiter))\n\nDefaultDelimitedRowFormat = DelimitedRowFormat('\\001', '\\002', '\\003', '\\n')\n\n#########################################\n# Tables and Selects\n#########################################\n\nclass Table(SqlBase):\n def __init__(self, name=None, schema=None):\n SqlBase.__init__(self)\n self.name = name\n self.schema = []\n self.inscope = False\n if schema:\n self.AddSchema(schema)\n\n def AddColumn(self, column, name=None):\n# print \"Adding column '\" + str(name) + \"' to table \"+ str(self.name) + \": name = \" + str(column.name) + \", table = \" + str(column.table) + \", def = \" + str(column)\n# if column.table:\n column = As(column, name if name else column.name)\n# else:\n# column = copy(column)\n column.table = self\n if name: column.name = name\n self.schema.append(column)\n setattr(self, column.name, column)\n return column\n\n def AddSchema(self, *schema):\n schema = Columns(PopIfList(schema))\n for column in schema: self.AddColumn(column)\n\n def ClearSchema(self):\n for column in self.schema:\n delattr(self, column.name)\n self.schema = []\n\n def __str__(self):\n return self.name\n\n def Declaration(self):\n return \"\"\n\n def Files(self, recurse):\n return {}\n\n def HasTransform(self):\n return False\n\n def Tables(self):\n return []\n\n def SetNameIfNeeded(self, query):\n pass\n\n def SetInFromClause(self):\n pass\n\nclass Join(Table):\n LEFT = \"LEFT \"\n LEFT_OUTER = \"LEFT OUTER \"\n LEFT_SEMI = \"LEFT SEMI \"\n RIGHT = \"RIGHT \"\n RIGHT_OUTER = \"RIGHT_OUTER \"\n FULL = \"FULL \"\n FULL_OUTER = \"FULL OUTER \"\n\n def __init__(self, left, right, jointype=None):\n Table.__init__(self, None)\n self.left = left\n self.jointype = jointype\n self.right = right\n self.conditions = []\n self.left.SetInFromClause()\n self.right.SetInFromClause()\n\n def On(self, condition):\n self.conditions.append(condition)\n return self\n\n def __str__(self):\n self.Clear()\n self.Add(str(self.left))\n self.Add((self.jointype if self.jointype else \"\") + \"\\nJOIN \" + str(self.right) + \" \")\n if len(self.conditions) > 0:\n self.Add(\"\\nON \")\n self.Add(\" AND 
\".join([str(condition) for condition in self.conditions]))\n self.Add(\" \")\n\n return self.sql\n\n def Files(self, recurse):\n if not recurse: return {}\n return dict(self.left.Files(True).items() + self.right.Files(True).items())\n\n def HasTransform(self):\n return self.left.HasTransform() or self.right.HasTransform()\n\n def Tables(self):\n return self.left.Tables() + self.right.Tables()\n\n def SetNameIfNeeded(self, query):\n self.left.SetNameIfNeeded(query)\n self.right.SetNameIfNeeded(query)\n\nclass QualifiedColumn:\n def __init__(self, col, qualifier):\n self.column = col\n self.qualifier = qualifier\n\n def QualifiedName(self):\n return self.column.QualifiedName() + ' ' + self.qualifier\n\nclass Ascending(QualifiedColumn):\n def __init__(self, col):\n QualifiedColumn.__init__(self, col, 'ASC')\n\nclass Descending(QualifiedColumn):\n def __init__(self, col):\n QualifiedColumn.__init__(self, col, 'DESC')\n\nclass SelectBase(Table):\n def __init__(self, *columns):\n Table.__init__(self)\n columns = PopIfList(columns)\n self.select = [self.AddColumn(column) for column in columns]\n self.fromClause=None\n self.transform=None\n self.distribute = []\n self.sort = []\n self.group = []\n self.cluster = []\n self.order = []\n self.where = []\n self.rowFormat = DefaultDelimitedRowFormat\n self.modules = []\n self.code = []\n self.files = []\n self.limit=None\n self.distinct = False\n\n tables = frozenset([x for x in [col.Table() for col in columns]if x != None])\n if len(tables) == 1:\n [table]= tables\n self.From(table)\n\n def From(self, table):\n self.fromClause = table\n table.SetInFromClause()\n return self\n\n def Transform(self, transform, userargs=None, numkeys = 0):\n self.transform = { 'transform': transform,\n 'userargs' : userargs,\n 'numkeys' : numkeys,\n 'input' : self.schema,\n 'informat' : DefaultDelimitedRowFormat,\n 'outformat': DefaultDelimitedRowFormat}\n self.ClearSchema()\n self.AddSchema(transform.schema)\n return self\n\n def AddModule(self, module, copy = True):\n self.modules.append((module, copy))\n\n def AddCode(self, c):\n self.code.append(c)\n\n def AddFile(self, f):\n self.files.append(f)\n\n def Where(self, where):\n if where: self.where.append(where)\n return self\n\n def DistributeBy(self, *distribute):\n self.distribute.extend(PopIfList(distribute))\n return self\n\n def SortBy(self, *sort):\n self.sort.extend(PopIfList(sort))\n return self\n\n def GroupBy(self, *group):\n self.group.extend(PopIfList(group))\n return self\n\n def ClusterBy(self, *cluster):\n self.cluster.extend(PopIfList(cluster))\n return self\n\n def OrderBy(self, *order):\n self.order.extend(PopIfList(order))\n return self\n\n def Limit(self, limit):\n self.limit = limit\n return self\n\n def SqlFrom(self):\n return (\"\\nFROM \" + str(self.fromClause) + \" \") if self.fromClause else \"\"\n\n def SqlSelect(self):\n\n self.inscope = True\n if self.fromClause: self.fromClause.inscope = True\n\n self.Clear()\n self.Add(\"\\nSELECT \")\n if self.distinct:\n self.Add(\"DISTINCT \")\n\n if self.transform:\n if not isinstance(self.fromClause, SelectBase):\n print \"HiPy: Warning: Did you mean to specify a transform on the map side of a map-reduce ?\"\n self.Add(\"TRANSFORM(\")\n numkeys = self.transform['numkeys']\n if len(self.transform['input']) > 0:\n self.Add(\", \".join([col.Define(False) for col in self.transform['input']]) + \") \")\n input = self.transform['input']\n else:\n self.Add(\"*) \")\n input = self.fromClause.schema\n\n self.Add(str(self.transform['informat']))\n\n 
transform = self.transform['transform']\n if inspect.isfunction(transform) or inspect.isclass(transform):\n (self.source, self.script) = self.CreateTransformDriverScript(transform, self.modules, self.code,\n self.transform['userargs'],\n (SchemaToPython(input[0:numkeys]),\n SchemaToPython(input[numkeys:]),\n SchemaToPython(self.schema),\n self.transform['informat'],\n self.transform['outformat']))\n\n self.Add(\"\\nUSING 'python \" + self.source + \"' \")\n else:\n self.Add(\"\\nUSING '\" + function + \"' \")\n\n self.Add(\"AS \" + SchemaToSql(self.schema))\n self.Add(str(self.transform['outformat']))\n\n elif self.select:\n self.Add(\", \".join([col.Define() for col in self.select]) + \" \")\n\n else:\n self.Add(\"* \")\n self.AddSchema(self.fromClause.schema)\n\n if len(self.where) > 0:\n self.Add(\"\\nWHERE \")\n self.Add(\" AND \".join([Bracket(str(where)) for where in self.where]))\n\n if self.distribute : self.Add(\"\\nDISTRIBUTE BY \" + \", \".join([col.QualifiedName() for col in self.distribute]) + \" \")\n if self.group : self.Add(\"\\nGROUP BY \" + \", \".join([str(col) for col in self.group]) + \" \")\n if self.cluster : self.Add(\"\\nCLUSTER BY \" + \", \".join([col.QualifiedName()for col in self.cluster]) + \" \")\n if self.order : self.Add(\"\\nORDER BY \" + \", \".join([col.QualifiedName()for col in self.order]) + \" \")\n if self.sort : self.Add(\"\\nSORT BY \" + \", \".join([col.QualifiedName() for col in self.sort]) + \" \")\n\n if self.limit:\n self.Add(\"\\nLIMIT \" + str(self.limit) + \" \")\n\n self.inscope = False\n if self.fromClause: self.fromClause.inscope = False\n\n return self.sql\n\n def __str__(self):\n\n sql = self.SqlFrom() + self.SqlSelect()\n\n return (\"(\" + sql + \") \" + self.name + \" \") if self.name else sql\n\n def CreateTransformDriverScript(self, transform, modules, code, userargs, argtuple):\n transformname = transform.__name__\n source = transformname + \".drvr.py\"\n\n sourcecode = \"\"\n\n code = [transform]+ code\n for object in code:\n if inspect.isclass(object):\n for cls in reversed(inspect.getmro(object)):\n sourcecode += inspect.getsource(cls)\n modules.extend(cls.modules if hasattr(cls, 'modules') else [])\n else:\n sourcecode += inspect.getsource(object)\n modules.extend(object.modules if hasattr(object, 'modules') else [])\n\n script = \"#!/usr/bin/python\\n\" \\\n \"import sys, os\\n\" \\\n \"sys.path.append(os.getcwd())\\n\" \\\n \"import HiPy\\n\"\n\n if len(modules) > 0:\n script += \"import \" + ', '.join(set([module.__name__ for (module, copy) in modules])) +'\\n'\n\n script += sourcecode\n\n script += \"if __name__ == '__main__':\\n\"\n\n if inspect.isfunction(transform):\n script += \" HiPy.Transform(HiPy.TransformWrapper, \" \\\n + \"(\" + transformname + \", \" + repr(userargs) + \")\" \\\n + \", \" + repr(argtuple) + \")\\n\"\n else:\n script += \" HiPy.Transform(\" + transformname + \", \" + repr(userargs) + \", \" + repr(argtuple) + \")\\n\"\n\n return (source, script)\n\n def Files(self, recurse):\n files = dict((file, None) for file in self.files)\n if self.transform:\n files[self.source]= self.script\n if hasattr(self.transform['transform'], 'files'):\n for file in self.transform['transform'].files:\n files[os.path.abspath(os.path.join(os.path.dirname(inspect.getsourcefile(self.transform['transform'])), file))]= None\n for (module, copy) in self.modules:\n if copy and hasattr(module, '__file__'):\n files[module.__file__]= None\n if recurse and self.fromClause:\n files = dict(files.items() + 
self.fromClause.Files(True).items())\n return files\n\n def Tables(self):\n return self.fromClause.Tables() if self.fromClause else []\n\n def HasTransform(self):\n return self.transform or (self.fromClause and self.fromClause.HasTransform())\n\nclass Select(SelectBase):\n def __init__(self, *columns):\n SelectBase.__init__(self, *columns)\n self.AddToDefaultQuery()\n self.iterator=None\n self.dir=None\n self.table=None\n\n def Execute(self):\n if not self.query: self.AddToDefaultQuery()\n self.query.Execute()\n\n def __iter__(self):\n self.Execute()\n\n for x in os.listdir(self.dir):\n f = self.dir + \"/\" + x\n if os.stat(f).st_size == 0 or fnmatch.fnmatch(x, \"*.crc\"):\n os.remove(f)\n\n outputfiles = glob.glob(self.dir + \"/*\")\n if len(outputfiles) > 0:\n #rows = subprocess.Popen([\"cat\"]+ outputfiles, stdout=subprocess.PIPE)\n #self.iterator = InputIterator(rows.stdout, self.rowFormat, self.schema)\n\n self.iterator = InputIterator(fileinput.input(outputfiles), self.rowFormat, self.schema)\n\n else:\n self.iterator = [].__iter__()\n\n return self.iterator\n\n def GetResults(self, targetdir, copy = False):\n self.Execute()\n\n if not copy:\n try:\n os.symlink(self.dir, targetdir)\n except:\n copy = True\n\n if copy:\n shutil.copytree(self.dir, targetdir)\n\n\n def WriteToTable(self, table, partition=None):\n self.table = table\n self.partition = partition\n if not self.query: self.AddToDefaultQuery()\n\n def Tables(self):\n return SelectBase.Tables(self) + ([self.table]if self.table else [])\n\n def SetNameIfNeeded(self, query):\n if not self.name:\n self.name = query.NextSelectName()\n if self.fromClause:\n self.fromClause.SetNameIfNeeded(query)\n\n def AddToDefaultQuery(self):\n self.query = DefaultQuery\n self.query.AddSelect(self)\n\n def SetInFromClause(self):\n if self.query:\n self.query.RemoveSelect(self)\n self.query=None\n\nclass NewTable(Table):\n def __init__(self, name, schema, \\\n ifNotExists = False, \\\n partitions=None, \\\n location=None, \\\n rowFormat=None, \\\n storedAs=None):\n Table.__init__(self, name, schema)\n self.ifNotExists = ifNotExists\n self.partitions = Columns(partitions)\n self.location = location\n self.rowFormat = rowFormat\n self.storedAs = storedAs\n\n def Declaration(self):\n self.Clear()\n self.Add(\"\\nCREATE \" + (\"EXTERNAL \" if self.location else \"\") + \"TABLE \")\n self.Add((\"IF NOT EXISTS \" if self.ifNotExists else \"\") + self.name + \"\\n\")\n self.Add(SchemaToSql(self.schema))\n if self.partitions:\n self.Add(\"\\nPARTITIONED BY \" + SchemaToSql(self.partitions))\n if self.rowFormat:\n self.Add(str(self.rowFormat))\n if self.storedAs:\n self.Add(\"\\nSTORED AS \" + self.storedAs + \" \")\n if self.location:\n self.Add(\"\\nLOCATION '\" + self.location + \"'\")\n self.Add(\";\")\n\n return self.sql\n\n def Tables(self):\n return [self]\n\n#########################################\n# Query management\n#########################################\n\n\nclass QueryBase(SqlBase):\n def __init__(self):\n SqlBase.__init__(self)\n self.options = []\n self.files = {}\n self.jars = []\n self.archives = []\n self.selects = set()\n self.dirvar = '${querydir}'\n\n self.nextSelectName = \"a\"\n\n def AddFile(self, file):\n (name, source) = file if isinstance(file, tuple) else (file, None)\n self.files[name]= source\n\n def AddFiles(self, files):\n for name in files: self.files[name] = files[name]\n\n def AddJar(self, file):\n self.jars.append(file)\n\n def AddArchive(self, file):\n self.archives.append(file)\n\n def AddSelect(self, select):\n 
self.selects.add(select)\n select.query = self\n\n def RemoveSelect(self, select):\n self.selects.remove(select)\n select.query=None\n\n def Select(self, *columns):\n select = Select(*columns)\n self.AddSelect(select)\n return select\n\n def NextSelectName(self):\n result = self.nextSelectName\n self.nextSelectName = chr(ord(self.nextSelectName) + 1)\n return result\n\n def SetOption(self, name, value):\n self.options.append((name, value))\n\n def SqlTables(self):\n sql = \"\"\n\n for select in self.selects:\n for table in select.Tables():\n sql = sql + table.Declaration()\n\n return sql\n\n def SqlQueries(self):\n\n # Group by common FROM tables\n queries = {}\n for select in self.selects:\n frm = select.fromClause\n if frm in queries:\n queries[frm].append(select)\n else:\n queries[frm] = [select]\n\n sqls = []\n sql = \"\"\n\n for frm in queries:\n\n sql = queries[frm][0].SqlFrom()\n\n for select in queries[frm]:\n\n if select.dir:\n sql = sql + \"\\nINSERT OVERWRITE LOCAL DIRECTORY '\" + select.dir + \"'\"\n else:\n sql = sql + \"\\nINSERT OVERWRITE TABLE \" + str(select.table)\n if select.partition:\n sql = sql + \" PARTITION(\" + select.partition + \")\"\n\n sql = sql + select.SqlSelect()\n\n self.AddFiles(select.Files(False))\n\n sqls.append(sql)\n\n self.AddFiles(frm.Files(True))\n\n return \";\\n\".join(sqls)\n\n def __str__(self):\n\n self.Clear()\n\n self.Add(''.join([\"SET \" + name + \"=\" + str(value) + \";\\n\" for (name, value) in self.options]))\n\n queries = self.SqlQueries()\n\n self.Add(self.SqlTables())\n\n if len(self.files) > 0:\n self.Add(\"\\nADD FILES \" + \" \".join([self.MakePath(filename) for filename in self.files]) + \";\\n\")\n if len(self.jars) > 0:\n self.Add(\"\\nADD JARS \" + \" \".join([self.MakePath(jar) for jar in self.jars]) + \";\\n\")\n if len(self.archives) > 0:\n self.Add(\"\\nADD ARCHIVES \" + \" \".join([self.MakePath(archive) for archive in self.archives]) + \";\\n\")\n\n self.Add(queries)\n\n return self.sql\n\n def MakePath(self, filename):\n return os.path.join(self.dirvar, os.path.expanduser(filename))\n\n\nclass Query(QueryBase):\n # The directory is used as a workspace for execution of the query\n # hive is a function which executes a hive query, given the filename\n def __init__(self, dir, hive=None):\n QueryBase.__init__(self)\n self.hive = hive\n self.didExecute = False\n self.cache = True\n self.cwd = os.getcwd()\n self.SetCacheDirectory(dir)\n self.querydir=None\n self.resultdir=None\n\n def SetCacheDirectory(self, dir):\n if dir[0] == '/':\n self.dir = dir\n elif dir[0] == '~':\n self.dir = os.path.expanduser(dir)\n else:\n self.dir = self.cwd + '/' + dir\n\n self.tmpdir = self.dir + '/tmp'\n if not os.path.exists(self.tmpdir):\n os.makedirs(self.tmpdir)\n\n\n def CreateQueryDirectories(self):\n self.querydir = tempfile.mkdtemp(prefix = self.hashstr, dir = self.tmpdir)\n for select in self.selects:\n if select.dir:\n dir = select.dir.replace(self.dirvar, self.querydir)\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n def Hash(self):\n queryscripts = \"\\n\".join([self.query.replace(\".pyc\", \".py\")]+ [script for script in self.files.itervalues() if script])\n return struct.pack(\"!I\", zlib.crc32(queryscripts) & 0xffffffff).encode('base64')[0:6].replace('/', '=')\n\n def IsCached(self):\n if not self.cache:\n return False\n\n oldruns = [name for name in os.listdir(self.dir) if name.startswith(self.hashstr)]\n if len(oldruns) > 0:\n self.resultdir = self.dir + '/' + oldruns[len(oldruns) - 1]\n return True\n\n def 
CreateQueryFiles(self):\n self.queryfile = self.querydir + \"/query.q\"\n with open(self.queryfile, \"w\") as f:\n f.write(self.query)\n\n # Create the script files\n for (filename, script) in self.files.iteritems():\n filepath = self.querydir + '/' + filename\n if script:\n with open(filepath, \"w\") as f:\n f.write(script)\n\n def RenameQueryDir(self):\n self.resultdir = self.dir + '/' + self.hashstr + \"-\" + datetime.datetime.today().strftime(\"%Y%m%d-%H%M\")\n os.rename(self.querydir, self.resultdir)\n self.querydir=None\n\n def Execute(self):\n if not self.didExecute:\n cwd = os.getcwd()\n\n # Decide sub-directory names (if necessary) for the selects\n for select in self.selects:\n select.SetNameIfNeeded(self)\n if not select.dir and not select.table:\n select.dir = self.dirvar + '/' + select.name\n\n # Add a file for HiPy.py if necessary\n if not all([not select.HasTransform() for select in self.selects]):\n self.AddFile(HiPyPath)\n\n # Generate the SQL and driver script source\n self.query = str(self)\n self.hashstr = self.Hash()\n\n print self.query\n print \"Script hash is: \" + self.hashstr\n\n # If not cached, run the query and move the results to a subdir with the hash-based name\n if not self.IsCached():\n # Create a tmp directory for this run\n self.CreateQueryDirectories()\n\n # Create the query file\n self.CreateQueryFiles()\n\n # Run the query\n returncode = self.hive(self.queryfile, params = ['-d', self.dirvar[2:len(self.dirvar)-1]+ '=' + self.querydir])\n if returncode != 0:\n raise HiPyException(\"Hive error\")\n\n # Rename the directory now it's no longer an attempt\n self.RenameQueryDir()\n\n for select in self.selects:\n if select.dir:\n select.dir = select.dir.replace(self.dirvar, self.resultdir)\n\n # Done!\n self.didExecute = True\n\n os.chdir(cwd)\n\n def Clean(self, all):\n if self.didExecute:\n if self.querydir:\n shutil.rmtree(self.querydir, ignore_errors = True)\n if self.resultdir:\n shutil.rmtree(self.resultdir, ignore_errors = True)\n if all:\n shutil.rmtree(self.tmpdir, ignore_errors = True)\n\n def Cache(self, cache):\n self.cache = cache\n\n#########################################\n# Default Query\n#########################################\n\nDefaultQuery = Query('~/.hipy')\n\ndef SetHive(hive):\n DefaultQuery.hive = hive\n\ndef Reset():\n global DefaultQuery\n DefaultQuery = Query('~/.hipy', DefaultQuery.hive)\n\ndef Execute():\n DefaultQuery.Execute()\n\ndef Cache(cache):\n DefaultQuery.Cache(cache)\n\ndef SetOption(name, value):\n DefaultQuery.SetOption(name, value)\n\ndef Clean(all = False):\n DefaultQuery.Clean(all)\n\n\n#########################################\n# Describe tables\n#########################################\n\nclass DescribeHandler:\n def __init__(self):\n pass\n\n def __call__(self, output, error):\n self.schema = SchemaFromDescribe(output)\n\ndef Describe(tablename, hive=None):\n if hive == None:\n hive = DefaultQuery.hive\n\n handler = DescribeHandler()\n returncode = hive(script = \"describe \"+tablename, handler = handler)\n if returncode != None:\n return None\n\n return Table(tablename, handler.schema)\n\n#########################################\n# Data row object\n#########################################\nclass DataRow:\n def __init__(self, data, schema):\n self.value = data\n self.schema = schema\n\n def __len__(self):\n return len(self.value)\n\n def __getitem__(self, key):\n if isinstance(key, str):\n for idx in range(0, len(self.schema)):\n if self.schema[idx].name == key:\n return self.value[idx]\n error = \" 
\".join([key, \"not found in (\", \", \".join(x.name for x in self.schema), \")\"])\n raise KeyError(error)\n\n if isinstance(key, slice):\n return DataRow(self.value[key], self.schema[key])\n\n return self.value[key]\n\n def __setitem__(self, key, value):\n if isinstance(key, str):\n for idx in range(0, len(self.schema)):\n if self.schema[idx].name == key:\n self.value[idx] = value\n return\n raise KeyError\n\n self.value[key]= value\n\n def __iter__(self):\n return self.value.__iter__()\n\n def __reversed__(self):\n return self.value.__reversed__()\n\n def __eq__(self, other):\n if isinstance(other, DataRow):\n return self.value == other.value\n return self.value == other\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def dict(self):\n return dict((col.name, value) for (col, value) in zip(self.schema, self.value))\n\n def __str__(self):\n return str(self.value)\n\n#########################################\n# Input/output processing\n#########################################\n\ndef ReadHiveType(value, type, rowFormat):\n\n if value == '\\N': return None\n\n if isinstance(type, HiveArray):\n out = [ReadHiveType(element, type.datatype, rowFormat) for element in value.split(rowFormat.collectionDelimiter)]\n elif isinstance(type, HiveMap):\n out = {}\n elements = value.split(rowFormat.collectionDelimiter)\n for element in elements:\n keyvalue = element.split(rowFormat.mapKeyDelimiter)\n key = ReadHiveType(keyvalue[0], type.keytype, rowFormat)\n val = ReadHiveType(keyvalue[1], type.datatype, rowFormat)\n out[key] = val\n elif isinstance(type, HiveStruct):\n out = [ReadHiveType(element, elementtype, rowFormat) \\\n for (element, (key, elementtype)) \\\n in zip(value.split(rowFormat.collectionDelimiter), type.schema)]\n elif type == HiveTinyInt or type == HiveSmallInt or type == HiveInt or type == HiveBigInt:\n out = int(float(value))\n elif type == HiveBigInt:\n out = long(float(value))\n elif type == HiveBoolean:\n out = bool(value)\n elif type == HiveFloat or type == HiveDouble:\n out = float(value)\n elif type == HiveString:\n out = str(value)\n elif type == HiveJson:\n try:\n out = JsonLoads(value)\n except ValueError:\n out=None\n else:\n raise TypeError\n\n return out\n\ndef WriteHiveType(value, type, rowFormat):\n if isinstance(type, HiveArray):\n out = rowFormat.collectionDelimiter.join([WriteHiveType(element, type.datatype, rowFormat) for element in value])\n elif isinstance(type, HiveMap):\n out = rowFormat.collectionDelimiter.join([(WriteHiveType(key, type.keytype, rowFormat) + \\\n rowFormat.mapKeyDelimiter + \\\n WriteHiveType(val, type.datatype, rowFormat)) \\\n for (key, val) in value.items()])\n elif isinstance(type, HiveStruct):\n out = rowFormat.collectionDelimiter.join([WriteHiveType(element, elementtype) \\\n for (element, (name, elementtype)) \\\n in zip(value, type.schema)])\n elif type == HiveJson:\n out = JsonDumps(value)\n elif value == None:\n out = '\\\\N'\n else:\n out = str(value)\n\n return out\n\ndef Deserialize(hiverow, rowFormat, schema):\n value = [ReadHiveType(field.rstrip(rowFormat.lineDelimiter), col.type, rowFormat) \\\n for (field, col) \\\n in zip(hiverow.split(rowFormat.fieldDelimiter), schema)]\n return DataRow(value, schema)\n\ndef Serialize(datarow, rowFormat, schema):\n return rowFormat.fieldDelimiter.join([WriteHiveType(value, col.type, rowFormat) \\\n for (value, col) in zip(datarow, schema)]) \\\n + rowFormat.lineDelimiter\n\nclass InputIterator:\n def __init__(self, iterator, rowFormat, schema):\n self.iterator = iterator\n 
self.rowFormat = rowFormat\n self.schema = schema\n self.nextrow=None\n\n def __iter__(self):\n return self\n\n def next(self):\n return Deserialize(self.iterator.next(), self.rowFormat, self.schema)\n\nclass Output:\n def __init__(self, rowFormat, schema):\n self.rowFormat = rowFormat\n self.schema = schema\n\n def __call__(self, row):\n return Serialize(row, self.rowFormat, self.schema)\n\n\n#########################################\n# Transform driver\n#########################################\n\nclass PeekableIterator:\n def __init__(self, iterator):\n self.iterator = iterator\n self.nextitem=None\n\n def __iter__(self):\n return self\n\n def next(self):\n if not self.nextitem: return self.iterator.next()\n\n nextitem = self.nextitem\n self.nextitem=None\n return nextitem\n\n def peek(self):\n if not self.nextitem: self.nextitem = self.iterator.next()\n return self.nextitem\n\n def put_back(self, item):\n if self.nextitem: raise \"Can't put_back more than once!\"\n self.nextitem = item\n\nclass GroupIterator:\n def __init__(self, key, input):\n self.key = key\n self.input = input\n\n def __iter__(self):\n return self\n\n def next(self):\n nextrow = self.input.next()\n if nextrow[0:len(self.key)] == self.key:\n return nextrow[len(self.key):]\n\n self.input.put_back(nextrow)\n\n raise StopIteration\n\nclass GroupByKey:\n def __init__(self, numkeys, input):\n self.numkeys = numkeys\n self.input = PeekableIterator(input.__iter__())\n\n def __iter__(self):\n return self\n\n def next(self):\n key = self.input.peek()[0:self.numkeys]\n return (key, GroupIterator(key, self.input))\n\nclass TransformWrapper:\n def __init__(self, fnargs):\n (self.function, self.userargs) = fnargs\n\n def __call__(self, keys, input):\n return self.function(self.userargs, keys, input)\n\ndef Transform(transform, userargs, argtuple):\n\n infile = sys.stdin\n outfile = sys.stdout\n\n (inkeyschema, inschema, outschema, informat, outformat) = argtuple\n\n input = InputIterator(infile, DelimitedRowFormat(informat), Columns(inkeyschema + inschema))\n output = Output(DelimitedRowFormat(outformat), Columns(outschema))\n\n transformobj = transform(userargs)\n\n if len(inkeyschema) != 0:\n for (key, values) in GroupByKey(len(inkeyschema), input):\n if len(key) != len(inkeyschema):\n sys.stderr.write(\"Key error: key=\" + repr(key) + \", schema=\" + repr(inkeyschema) + \"\\n\")\n else:\n for out in transformobj(key, values):\n outfile.write(output(out))\n\n else:\n for out in transformobj(None, input):\n outfile.write(output(out))\n\n###################################\n# Configure from file\n###################################\n\n# Configuration object contains\n# 'tables': Dictionary of the table objects\n\nclass Configuration:\n def __init__(self, dir, name, module):\n self.dir = os.path.expanduser(dir)\n self.filename = name\n self.path = self.dir + '/' + self.filename\n self.module = module\n self.config=None\n self.loading = False\n\n def Initialize(self):\n self.config = { 'tables': {}}\n\n def Load(self):\n if self.config:\n return self.config\n\n if self.loading:\n return None\n\n try:\n configfile = open(self.path, 'r')\n except IOError:\n print \"HiPy: No configuration found ...\"\n return None\n\n try:\n self.loading = True\n self.config = pickle.load(configfile)\n self.loading = False\n except:\n print \"HiPy: Configuration load error ...\"\n return None\n\n configfile.close()\n print \"HiPy: Configuration loaded.\"\n\n return self.config\n\n def Save(self):\n if not self.config:\n return\n\n if not 
os.path.exists(self.dir):\n os.makedirs(self.dir)\n\n try:\n configfile = open(self.path, 'w')\n except IOError:\n print \"HiPy: Configuration save error (1) ...\"\n return\n\n try:\n pickle.dump(self.config, configfile)\n except:\n print \"HiPy: Configuration save error (2) ...\"\n return\n\n configfile.close()\n\n print \"HiPy: Configuration saved.\"\n\n def Configure(self):\n if not self.Load():\n self.Initialize()\n self.Save()\n\n def Update(self):\n if not self.Load():\n self.Initialize()\n self.ReadTablesFromHive(default_hive_tables)\n else:\n self.ReadTablesFromHive(self.config['tables'].keys())\n\n self.ReadDevicesFromDataoven()\n self.Save()\n self.AddTablesToModule()\n\n def ReadTablesFromHive(self, tablelist):\n tables = self.config['tables']\n for tablename in tablelist:\n print \"HiPy: Reading schema for \" + tablename + \" from Hive\"\n tables[tablename]= HiPy.Describe(tablename)\n\n def GetTables(self):\n return self.config['tables']\n\n def GetTable(self, tablename):\n tables = self.config['tables']\n if tablename in tables:\n return tables[tablename]\n\n table = Describe(tablename)\n if table is not None:\n tables[tablename]= table\n self.Save()\n\n return table\n\n########################################\n# Configuration commands\n########################################\n\ndef Update():\n global config\n return config.Update()\n\ndef ShowTables():\n for table in GetTables().iterkeys():\n print table\n\ndef ShowTable(arg):\n table = GetTable(arg)\n if table is not None:\n for col in table.schema:\n print col.name, str(col.type)\n else:\n print \"Unknown table: \", arg\n\ndef GetTable(tablename):\n global config\n return config.GetTable(tablename)\n\ndef GetTables():\n global config\n return config.GetTables()\n\n########################################\n# Json parsing\n########################################\n\nJsonLoads = json.loads\nJsonDumps = json.dumps\n\ndef SetJsonParser(loads, dumps):\n global JsonLoads\n global JsonDumps\n\n JsonLoads = loads\n JsonDumps = dumps\n\n########################################\n# Custom parsing\n########################################\n\nimport json\nimport re\n\nclass HiPyFork:\n @staticmethod\n def repair2one(dct, rkey, okey, what):\n DBG = ''\n if len(DBG): print(\"\\nREPAIR \" + str(what))\n r = what[rkey]\n del what[rkey]\n dct[rkey] = r\n keys = list(what[okey].keys())\n for k in keys:\n what[k] = what[okey][k]\n del what[okey]\n if len(DBG): print(\"\\nDONE REPAIR\" + str(dct) + \"\\n\")\n return dct\n\n @staticmethod\n def parse2one2word(dct, rkey, okey, ekey, key, word, mandatory, fescape):\n DBG = ''\n res = {}\n if len(DBG): print(\"\\nBEGIN \" + key + \" \" + word + \" \" + str(mandatory))\n ddl_text = re.sub('^[ \\n]*', r'', dct[rkey]).lower()\n if len(DBG): print(\"ddl_text = \" + ddl_text)\n if ddl_text.startswith(word.lower()):\n if len(DBG): print(\"STARTS\")\n dct[okey][key] = [word if fescape is None else fescape(word)]\n res = {rkey: ddl_text[len(word):], okey: dct[okey]}\n else:\n if len(DBG): print(\"NOT STARTS\")\n if mandatory:\n if len(DBG): print(\"ERROR\")\n res = {rkey: dct[rkey], ekey: dct[okey]}\n else:\n res = {rkey: dct[rkey], okey: dct[okey]}\n if okey not in res.keys(): raise Exception(str(res))\n if len(DBG): print(str(res))\n return res\n\n @staticmethod\n def parse2one2beforeholder(dct, rkey, okey, ekey, key, goodholders, badholders, mandatory, fescape):\n DBG = ''\n res = {}\n if len(DBG): print(\"\\nBEGIN \" + key + \" \" + str(goodholders) + \" \" + str(badholders) + \" \" + str(mandatory))\n 
ddl_text = re.sub('^[ \\n]*', r'', dct[rkey]).lower()\n if len(DBG): print(\"ddl_text = \" + ddl_text)\n min_good = -1\n for goodholder in goodholders:\n good = ddl_text.find(goodholder.lower())\n min_good = good if good < min_good or min_good == -1 else min_good\n min_bad = -1\n for badholder in badholders:\n bad = ddl_text.find(badholder.lower())\n min_bad = bad if bad < min_bad or min_bad == -1 else min_bad\n if (min_good != -1 and min_bad == -1) or (min_good < min_bad and min_bad != -1 and min_good != -1):\n if len(DBG): print(\"FOUND\")\n what = ddl_text[0:min_good]\n dct[okey][key] = [what if fescape is None else fescape(what)]\n res = {rkey: ddl_text[len(what):], okey: dct[okey]}\n else:\n if len(DBG): print(\"NOT FOUND\")\n if mandatory:\n if len(DBG): print(\"ERROR\")\n res = {rkey: dct[rkey], ekey: dct[okey]}\n else:\n res = {rkey: dct[rkey], okey: dct[okey]}\n if okey not in res.keys(): raise Exception(str(res))\n if len(DBG): print(str(res))\n return res\n\n @staticmethod\n def parse2one(ddl_text):\n # https://www.cloudera.com/documentation/enterprise/5-8-x/topics/impala_create_table.html\n # https://github.com/quux00/hive-json-schema/blob/master/src/main/java/net/thornydev/JsonHiveSchema.java\n R = \"R1\"\n O = \"O1\"\n E = \"E1\"\n T = \"_1\"\n js = {R: ddl_text, O: {}}\n try:\n js = parse2one2word(js, R, O, E, \"create_a\", \"CREATE\", True, None)\n js = parse2one2word(js, R, O, E, \"external_b\", \"EXTERNAL\", False, None)\n js = parse2one2word(js, R, O, E, \"table_c\", \"TABLE\", True, None)\n js = parse2one2word(js, R, O, E, \"if_not_exists_d\", \"IF NOT EXISTS\", False, None)\n js = parse2one2beforeholder(js, R, O, E, \"db_name_e\", [\".\"], [\"(\"], False, lambda x: x.replace('`', ''))\n js = parse2one2word(js, R, O, E, \"dot_f\", \".\", False, None)\n js = parse2one2beforeholder(js, R, O, E, \"table_name_g\", [\"(\"], [\")\"], True, lambda x: x.replace('`', '').replace(' ', ''))\n js[O][\"h\"] = {R: js[R], O: {}}\n js[O][\"h\"] = parse2one2word(js[O][\"h\"], R, O, E, \"open_ha\", \"(\", True, None)\n js = repair2one(js, R, O, js[O][\"h\"])\n js[O][\"h\"][\"hb\"] = [{R: js[R], O: {}}]\n i_hb = 0\n while True:\n js[O][\"h\"][\"hb\"][i_hb] = parse2one2beforeholder(js[O][\"h\"][\"hb\"][i_hb]\n , R, O, E, \"col_name_hba\", [\" \"], [\")\"], True, lambda x: x.replace('`', '').replace(' ', ''))\n js[O][\"h\"][\"hb\"][i_hb] = parse2one2beforeholder(js[O][\"h\"][\"hb\"][i_hb]\n , R, O, E, \"data_type_hbb\", [\",\", \")\"], [], True, None)\n #js = repair2one(js, R, O, js[O][\"h\"][\"hb\"][i_hb])\n #js[O][\"h\"][\"hb\"][i_hb] = {R: js[R], O: {}}\n js[O][\"h\"][\"hb\"][i_hb] = parse2one2word(js[O][\"h\"][\"hb\"][i_hb]\n , R, O, E, \"comment_hbc\", \"COMMENT\", False, None)\n js[O][\"h\"][\"hb\"][i_hb] = parse2one2beforeholder(js[O][\"h\"][\"hb\"][i_hb]\n , R, O, E, \"col_comment_hbd\", [\",\", \")\"], [], False, None)\n #js[O][\"h\"][\"hb\"][i_hb] = {R: js[R], O: {}}\n js[O][\"h\"][\"hb\"][i_hb] = parse2one2word(js[O][\"h\"][\"hb\"][i_hb]\n , R, O, E, \"comma_hbe\", \",\", False, None)\n js = repair2one(js, R, O, js[O][\"h\"][\"hb\"][i_hb])\n if \"comma_hbe\" in js[O][\"h\"][\"hb\"][i_hb].keys():\n js[O][\"h\"][\"hb\"].append({R: js[R], O: {}})\n i_hb = i_hb + 1\n else:\n break\n #js[O][\"h\"][R] = js[R]\n #js[O][\"h\"] = parse2one2word(js[O][\"h\"], R, O, E, \"close_hc\", \")\", True, None)\n js = parse2one2word(js, R, O, E, \"close_hc\", \")\", True, None) # FIXME\n js[O][\"i\"] = {R: js[R], O: {}}\n js[O][\"i\"] = parse2one2word(js[O][\"i\"], R, O, E, \"partitioned_by_ia\", 
\"PARTITIONED BY\", False, None)\n js[O][\"i\"] = parse2one2word(js[O][\"i\"], R, O, E, \"open_ib\", \"(\", False, None)\n js = repair2one(js, R, O, js[O][\"i\"])\n js[O][\"i\"][\"ic\"] = [{R: js[R], O: {}}]\n i_ic = 0\n while True:\n js[O][\"i\"][\"ic\"][i_ic] = parse2one2beforeholder(js[O][\"i\"][\"ic\"][i_ic]\n , R, O, E, \"col_name_ica\", [\" \"], [\")\"], False, lambda x: x.replace('`', '').replace(' ', ''))\n js[O][\"i\"][\"ic\"][i_ic] = parse2one2beforeholder(js[O][\"i\"][\"ic\"][i_ic]\n , R, O, E, \"data_type_icb\", [\",\", \")\"], [], False, None)\n #js = repair2one(js, R, O, js[O][\"i\"][\"ic\"][i_ic])\n #js[O][\"i\"][\"ic\"][i_ic] = {R: js[R], O: {}}\n js[O][\"i\"][\"ic\"][i_ic] = parse2one2word(js[O][\"i\"][\"ic\"][i_ic]\n , R, O, E, \"comment_icc\", \"COMMENT\", False, None)\n js[O][\"i\"][\"ic\"][i_ic] = parse2one2beforeholder(js[O][\"i\"][\"ic\"][i_ic]\n , R, O, E, \"col_comment_icd\", [\",\", \")\"], [], False, None)\n #js[O][\"i\"][\"ic\"][i_ic] = {R: js[R], O: {}}\n js[O][\"i\"][\"ic\"][i_ic] = parse2one2word(js[O][\"i\"][\"ic\"][i_ic]\n , R, O, E, \"comma_ice\", \",\", False, None)\n js = repair2one(js, R, O, js[O][\"i\"][\"ic\"][i_ic])\n if \"comma_ice\" in js[O][\"i\"][\"ic\"][i_ic].keys():\n js[O][\"i\"][\"ic\"].append({R: js[R], O: {}})\n i_ic = i_ic + 1\n else:\n break\n #js[O][\"h\"][R] = js[R]\n #js[O][\"h\"] = parse2one2word(js[O][\"h\"], R, O, E, \"close_hc\", \")\", True, None)\n js = parse2one2word(js, R, O, E, \"close_id\", \")\", False, None) # FIXME\n except:\n import traceback\n return {\"error\": str(traceback.format_exc()), \"js\": js}\n js[T] = js[O]\n del js[O]\n return js\n\n @staticmethod\n def parse2two(ddl_text):\n return {}\n\n @staticmethod\n def parse2three(ddl_text):\n return {}\n\n @staticmethod\n def parse2json(ddl_text):\n # https://www.cloudera.com/documentation/enterprise/5-8-x/topics/impala_create_table.html\n # https://github.com/quux00/hive-json-schema/blob/master/src/main/java/net/thornydev/JsonHiveSchema.java\n res1 = parse2one(ddl_text)\n if \"_1\" in res1.keys(): return res1\n print(\"FAIL #1: \" + str(res1) + \"\\n\")\n res2 = parse2two(ddl_text)\n if \"_2\" in res2.keys(): return res2\n print(\"FAIL #2: \" + str(res2) + \"\\n\")\n res3 = parse2three(ddl_text)\n if \"_3\" in res3.keys(): return res3\n print(\"FAIL #3: \" + str(res3) + \"\\n\")\n return {}\n\n########################################\n# main() function for command line usage\n########################################\n\ndef main():\n import HiPy\n\n usage = \"usage: %prog [options] command\\n\\n\" \\\n \"Commands:\\n\" \\\n \" showtables - show configured tables\\n\" \\\n \" showtable <table name> - show the schema of a table\"\n\n options = OptionParser(usage)\n\n (options, args) = options.parse_args()\n\n if len(args) < 1:\n sys.stderr.write(\"Must provide a command\\n\")\n sys.exit(1)\n\n zeroargs = {'showtables': HiPy.ShowTables}\n oneargs = {'showtable': HiPy.ShowTable}\n\n command = args[0].lower()\n\n if command in zeroargs:\n zeroargs[command]()\n elif command in oneargs:\n oneargs[command](args[1])\n else:\n print \"Unrecognized command: \", args[0]\n\nif __name__ == \"__main__\":\n main()\nelse:\n config = Configuration(\"~/.hipy\", \"config\", sys.modules[__name__])\n config.Configure()\n", "id": "7958891", "language": "Python", "matching_score": 2.0980021953582764, "max_stars_count": 0, "path": "hipy/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\r\nimport os\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom airflow import DAG\r\nfrom 
airflow.models import Variable\r\nfrom airflow.operators.bash_operator import BashOperator\r\nfrom airflow.operators.dummy_operator import DummyOperator\r\n\r\nu\"\"\"\r\nAirflow script for calc_05\r\n\"\"\"\r\n\r\nALERT_MAILS = Variable.get(\"gv_ic_admin_lst\")\r\nDAG_NAME = str(os.path.basename(__file__).split('.')[0])\r\nOWNER = 'User Airflow'\r\nDEPENDS_ON_PAST = True\r\nEMAIL_ON_FAILURE = True\r\nEMAIL_ON_RETRY = False\r\nRETRIES = int(Variable.get('gv_dag_retries'))\r\nPOOL = 'data_pool'\r\nMAIN_VAR_NAME = 'gv_' + DAG_NAME\r\n\r\nSRV_LIST = Variable.get('gv_psg_kafka_srv_list')\r\nQUEUE_NAME = Variable.get('gv_psg_kafka_queue_name')\r\nPARTITIONS = Variable.get('gv_psg_kafka_partitions')\r\nLOADDTTM=str(datetime.now()).replace(\" \",\"_\")\r\nWAIT_HRS = 1\r\n\r\nstart_dt = datetime(2018, 11, 15)\r\n\r\n# setting default arguments of dag\r\ndefault_args = {\r\n 'owner': OWNER,\r\n 'depends_on_past': DEPENDS_ON_PAST,\r\n 'start_date': start_dt,\r\n 'email': ALERT_MAILS,\r\n 'email_on_failure': EMAIL_ON_FAILURE,\r\n 'email_on_retry': EMAIL_ON_RETRY,\r\n 'retries': RETRIES,\r\n 'pool': POOL\r\n}\r\n\r\n# Creating DAG with parameters\r\ndag = DAG(DAG_NAME, default_args=default_args, schedule_interval=\"0 */4 * * *\")\r\ndag.doc_md = __doc__\r\n\r\ndag_start = DummyOperator(\r\n task_id='dag_start',\r\n dag=dag\r\n)\r\n\r\ndag_end = DummyOperator(\r\n task_id='dag_end',\r\n dag=dag\r\n)\r\n\r\nalgo_bash_cmd = \"\"\"\r\nkinit airflow/airflow@HOME.LOCAL -kt /opt/airflow/airflow_home/kt/airflow.keytab\r\nspark-submit --master yarn \\\r\n--num-executors {{ params.partitions }} \\\r\n--executor-cores 3 \\\r\n--executor-memory 6G \\\r\n--driver-cores 5 \\\r\n--driver-memory 10G \\\r\n--conf 'spark.driver.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \\\r\n--conf 'spark.executor.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \\\r\n--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.1.1 \\\r\n--jars \"\"\"+\"/opt/airflow/airflow-home/utils/HiveHomeUDF-0.0.1.jar\"+\"\"\" \\\r\n{{ params.home }}/dags/pyspark/prod_data/calc_05.py {{ params.srv_list }} {{ params.queue_name }} {{ params.partitions }} {{ params.loaddttm }}\r\n\"\"\"\r\n\r\nalgo_bash_load = BashOperator(\r\n task_id='prod_data_algo_calc_05',\r\n bash_command=algo_bash_cmd,\r\n execution_timeout=timedelta(hours=WAIT_HRS),\r\n params={\r\n 'home': '/opt/airflow/airflow_home',\r\n 'srv_list': SRV_LIST,\r\n 'queue_name': QUEUE_NAME,\r\n 'partitions': PARTITIONS,\r\n 'loaddttm': LOADDTTM\r\n },\r\n wait_for_downstream=True,\r\n dag=dag\r\n)\r\n\r\ndag_start.set_downstream(algo_bash_load)\r\nalgo_bash_load.set_downstream(dag_end)\r\n", "id": "6628597", "language": "Python", "matching_score": 1.2141754627227783, "max_stars_count": 0, "path": "Raif/pyspark/run_calc_05.py" }, { "content": "import os\nfrom setuptools import setup\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\ndef requirements(fname):\n for line in open(os.path.join(os.path.dirname(__file__), fname)):\n yield line.strip()\n\n\nsetup(\n name=\"hipy\",\n version='1.1',\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n description=(\"Python Framework for Apache Hive\"),\n license=\"Apache License 2.0\",\n url=\"https://code.google.com/a/apache-extras.org/p/hipy/\",\n py_modules=['HiPy'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Libraries 
:: Python Modules\"\n ],\n)\n", "id": "3504007", "language": "Python", "matching_score": 0.6207243204116821, "max_stars_count": 0, "path": "setup.py" } ]
1.564067
aseefahmed
[ { "content": "import contextlib\nfrom io import BytesIO\nfrom zipfile import ZipFile\n\nfrom lxml import etree\n\nfrom .http import HTTP\n\n\nclass EpubAccessor(object):\n\n CONTAINER_FILE = \"META-INF/container.xml\"\n IDPF_NAMESPACE = \"http://www.idpf.org/2007/opf\"\n\n @classmethod\n @contextlib.contextmanager\n def open_epub(cls, url, content=None):\n \"\"\"Cracks open an EPUB to expose its contents\n\n :param url: A url representing the EPUB, only used for errors and in\n the absence of the `content` parameter\n :param content: A string representing the compressed EPUB\n\n :return: A tuple containing a ZipFile of the EPUB and the path to its\n package\n \"\"\"\n if not (url or content):\n raise ValueError(\"Cannot open epub without url or content\")\n if url and not content:\n # Get the epub from the url if no content has been made available.\n content = HTTP.get_with_timeout(url).content\n content = BytesIO(content)\n\n with ZipFile(content) as zip_file:\n if not cls.CONTAINER_FILE in zip_file.namelist():\n raise ValueError(\"Invalid EPUB file, not modifying: %s\" % url)\n\n with zip_file.open(cls.CONTAINER_FILE) as container_file:\n container = container_file.read()\n rootfiles_element = etree.fromstring(container).find(\n \"{urn:oasis:names:tc:opendocument:xmlns:container}rootfiles\"\n )\n\n if rootfiles_element is None:\n raise ValueError(\"Invalid EPUB file, not modifying: %s\" % url)\n\n rootfile_element = rootfiles_element.find(\n \"{urn:oasis:names:tc:opendocument:xmlns:container}rootfile\"\n )\n if rootfile_element is None:\n raise ValueError(\"Invalid EPUB file, not modifying: %s\" % url)\n\n package_document_path = rootfile_element.get(\"full-path\")\n yield zip_file, package_document_path\n\n @classmethod\n def get_element_from_package(cls, zip_file, package_document_path, element_tag):\n \"\"\"Pulls one or more elements from the package_document\"\"\"\n [element] = cls.get_elements_from_package(\n zip_file, package_document_path, [element_tag]\n )\n return element\n\n @classmethod\n def get_elements_from_package(cls, zip_file, package_document_path, element_tags):\n \"\"\"Pulls one or more elements from the package_document\"\"\"\n if not isinstance(element_tags, list):\n element_tags = [element_tags]\n elements = list()\n with zip_file.open(package_document_path) as package_file:\n package = package_file.read()\n for element_tag in element_tags:\n element = etree.fromstring(package).find(\n \"{%s}%s\" % (cls.IDPF_NAMESPACE, element_tag)\n )\n if element is None:\n raise ValueError(\n \"Invalid EPUB file: '%s' could not be found\" % element_tag\n )\n elements.append(element)\n return elements\n", "id": "12553327", "language": "Python", "matching_score": 0.9681457877159119, "max_stars_count": 0, "path": "core/util/epub.py" }, { "content": "import os\n\n\ndef sample_data(filename, sample_data_dir, mode=\"rb\"):\n base_path = os.path.split(__file__)[0]\n resource_path = os.path.join(base_path, \"files\", sample_data_dir)\n path = os.path.join(resource_path, filename)\n\n with open(path, mode) as f:\n return f.read()\n", "id": "3671543", "language": "Python", "matching_score": 0.5662626028060913, "max_stars_count": 16, "path": "tests/api/__init__.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Adds InCommon SAML federation metadata to `samlfederations` table.\"\"\"\n\nimport os\nimport sys\n\nfrom contextlib2 import closing\n\nbin_dir = os.path.split(__file__)[0]\npackage_dir = os.path.join(bin_dir, \"..\")\nsys.path.append(os.path.abspath(package_dir))\n\nfrom 
api.saml.metadata.federations import incommon\nfrom api.saml.metadata.federations.model import SAMLFederation\nfrom core.model import production_session\n\nwith closing(production_session()) as db:\n incommon_federation = (\n db.query(SAMLFederation)\n .filter(SAMLFederation.type == incommon.FEDERATION_TYPE)\n .one_or_none()\n )\n\n if not incommon_federation:\n incommon_federation = SAMLFederation(\n incommon.FEDERATION_TYPE,\n incommon.IDP_METADATA_SERVICE_URL,\n incommon.CERTIFICATE,\n )\n\n db.add(incommon_federation)\n db.commit()\n", "id": "5034261", "language": "Python", "matching_score": 2.5129003524780273, "max_stars_count": 0, "path": "migration/20201112-add-incommon-saml-federation-metadata.py" }, { "content": "#!/usr/bin/env python\n\"\"\"Remove `random` sort options from Circulation Manager.\"\"\"\n\nimport os\nimport sys\n\nfrom contextlib2 import closing\n\nbin_dir = os.path.split(__file__)[0]\npackage_dir = os.path.join(bin_dir, \"..\")\nsys.path.append(os.path.abspath(package_dir))\n\nfrom core.model import production_session\nfrom migartion_scripts import RandomSortOptionRemover\n\nwith closing(production_session()) as db:\n remover = RandomSortOptionRemover()\n remover.run(db)\n", "id": "5798474", "language": "Python", "matching_score": 0.0012861669529229403, "max_stars_count": 0, "path": "migration/20211009-remove-random-sort-option.py" }, { "content": "\"\"\"Test logic surrounding classification schemes.\"\"\"\n\nfrom collections import Counter\n\nfrom psycopg2.extras import NumericRange\n\nfrom core import classifier\nfrom core.classifier import (\n Classifier,\n FreeformAudienceClassifier,\n GenreData,\n Lowercased,\n WorkClassifier,\n fiction_genres,\n nonfiction_genres,\n)\nfrom core.classifier.age import (\n AgeClassifier,\n GradeLevelClassifier,\n InterestLevelClassifier,\n)\nfrom core.classifier.ddc import DeweyDecimalClassifier as DDC\nfrom core.classifier.keyword import FASTClassifier as FAST\nfrom core.classifier.keyword import LCSHClassifier as LCSH\nfrom core.classifier.lcc import LCCClassifier as LCC\nfrom core.classifier.simplified import SimplifiedGenreClassifier\nfrom core.model import DataSource, Genre, Subject\nfrom core.testing import DatabaseTest\n\ngenres = dict()\nGenreData.populate(globals(), genres, fiction_genres, nonfiction_genres)\n\n\nclass TestLowercased(object):\n def test_constructor(self):\n\n l = Lowercased(\"A string\")\n\n # A string is lowercased.\n assert \"a string\" == l\n\n # A Lowercased object is returned rather than creating a new\n # object.\n assert Lowercased(l) is l\n\n # A number such as a Dewey Decimal number is converted to a string.\n assert \"301\" == Lowercased(301)\n\n # A trailing period is removed.\n l = Lowercased(\"A string.\")\n assert \"a string\" == l\n\n # The original value is still available.\n assert \"A string.\" == l.original\n\n\nclass TestGenreData(object):\n def test_fiction_default(self):\n # In general, genres are restricted to either fiction or\n # nonfiction.\n assert True == Science_Fiction.is_fiction\n assert False == Science.is_fiction\n\n\nclass TestClassifier(object):\n def test_default_target_age_for_audience(self):\n\n assert (None, None) == Classifier.default_target_age_for_audience(\n Classifier.AUDIENCE_CHILDREN\n )\n assert (14, 17) == Classifier.default_target_age_for_audience(\n Classifier.AUDIENCE_YOUNG_ADULT\n )\n assert (18, None) == Classifier.default_target_age_for_audience(\n Classifier.AUDIENCE_ADULT\n )\n assert (18, None) == Classifier.default_target_age_for_audience(\n 
Classifier.AUDIENCE_ADULTS_ONLY\n )\n\n def test_default_audience_for_target_age(self):\n def aud(low, high, expect):\n assert expect == Classifier.default_audience_for_target_age((low, high))\n\n assert None == Classifier.default_audience_for_target_age(None)\n aud(None, None, None)\n aud(None, 17, Classifier.AUDIENCE_YOUNG_ADULT)\n aud(None, 4, Classifier.AUDIENCE_CHILDREN)\n aud(None, 44, Classifier.AUDIENCE_ADULT)\n aud(18, 44, Classifier.AUDIENCE_ADULT)\n aud(14, 14, Classifier.AUDIENCE_YOUNG_ADULT)\n aud(14, 19, Classifier.AUDIENCE_YOUNG_ADULT)\n aud(2, 14, Classifier.AUDIENCE_CHILDREN)\n aud(2, 8, Classifier.AUDIENCE_CHILDREN)\n\n # We treat this as YA because its target age range overlaps\n # our YA age range, and many external sources consider books\n # for twelve-year-olds to be \"YA\".\n aud(12, 15, Classifier.AUDIENCE_YOUNG_ADULT)\n\n # Whereas this is unambiguously 'Children' as far as we're concerned.\n aud(12, 13, Classifier.AUDIENCE_CHILDREN)\n\n # All ages for audiences that are younger than the \"all ages\n # age cutoff\" and older than the \"adult age cutoff\".\n aud(5, 18, Classifier.AUDIENCE_ALL_AGES)\n aud(5, 25, Classifier.AUDIENCE_ALL_AGES)\n\n def test_and_up(self):\n \"\"\"Test the code that determines what \"x and up\" actually means.\"\"\"\n\n def u(young, keyword):\n return Classifier.and_up(young, keyword)\n\n assert None == u(None, None)\n assert None == u(6, \"6 years old only\")\n assert 5 == u(3, \"3 and up\")\n assert 8 == u(6, \"6+\")\n assert 12 == u(8, \"8+\")\n assert 14 == u(10, \"10+\")\n assert 17 == u(12, \"12 and up\")\n assert 17 == u(14, \"14+.\")\n assert 18 == u(18, \"18+\")\n\n def test_scrub_identifier_can_override_name(self):\n \"\"\"Test the ability of scrub_identifier to override the name\n of the subject for classification purposes.\n\n This is used e.g. 
in the BISACClassifier to ensure that a known BISAC\n code is always mapped to its canonical name.\n \"\"\"\n\n class SetsNameForOneIdentifier(Classifier):\n \"A Classifier that insists on a certain name for one specific identifier\"\n\n @classmethod\n def scrub_identifier(self, identifier):\n if identifier == \"A\":\n return (\"A\", \"Use this name!\")\n else:\n return identifier\n\n @classmethod\n def scrub_name(self, name):\n \"\"\"This verifies that the override name still gets passed\n into scrub_name.\n \"\"\"\n return name.upper()\n\n m = SetsNameForOneIdentifier.scrub_identifier_and_name\n assert (\"A\", \"USE THIS NAME!\") == m(\"A\", \"name a\")\n assert (\"B\", \"NAME B\") == m(\"B\", \"name b\")\n\n def test_scrub_identifier(self):\n m = Classifier.scrub_identifier\n assert None == m(None)\n assert Lowercased(\"Foo\") == m(\"Foo\")\n\n def test_scrub_name(self):\n m = Classifier.scrub_name\n assert None == m(None)\n assert Lowercased(\"Foo\") == m(\"Foo\")\n\n\nclass TestClassifierLookup(object):\n def test_lookup(self):\n assert DDC == Classifier.lookup(Classifier.DDC)\n assert LCC == Classifier.lookup(Classifier.LCC)\n assert LCSH == Classifier.lookup(Classifier.LCSH)\n assert FAST == Classifier.lookup(Classifier.FAST)\n assert GradeLevelClassifier == Classifier.lookup(Classifier.GRADE_LEVEL)\n assert AgeClassifier == Classifier.lookup(Classifier.AGE_RANGE)\n assert InterestLevelClassifier == Classifier.lookup(Classifier.INTEREST_LEVEL)\n assert None == Classifier.lookup(\"no-such-key\")\n\n\nclass TestNestedSubgenres(object):\n def test_parents(self):\n assert [classifier.Romance] == list(classifier.Romantic_Suspense.parents)\n\n # eq_([classifier.Crime_Thrillers_Mystery, classifier.Mystery],\n # list(classifier.Police_Procedurals.parents))\n\n def test_self_and_subgenres(self):\n # Fantasy\n # - Epic Fantasy\n # - Historical Fantasy\n # - Urban Fantasy\n assert (\n set(\n [\n classifier.Fantasy,\n classifier.Epic_Fantasy,\n classifier.Historical_Fantasy,\n classifier.Urban_Fantasy,\n ]\n )\n == set(list(classifier.Fantasy.self_and_subgenres))\n )\n\n\nclass TestConsolidateWeights(object):\n def test_consolidate(self):\n # Asian History is a subcategory of the top-level category History.\n weights = dict()\n weights[classifier.History] = 10\n weights[classifier.Asian_History] = 4\n weights[classifier.Middle_East_History] = 1\n w2 = WorkClassifier.consolidate_genre_weights(weights)\n assert 14 == w2[classifier.Asian_History]\n assert 1 == w2[classifier.Middle_East_History]\n assert classifier.History not in w2\n\n # Paranormal Romance is a subcategory of Romance, which is itself\n # a subcategory.\n weights = dict()\n weights[classifier.Romance] = 100\n weights[classifier.Paranormal_Romance] = 4\n w2 = WorkClassifier.consolidate_genre_weights(weights)\n assert 104 == w2[classifier.Paranormal_Romance]\n assert classifier.Romance not in w2\n\n def test_consolidate_through_multiple_levels(self):\n # Romance is the parent of the parent of Paranormal\n # Romance, but its weight successfully flows down into\n # Paranormal Romance.\n weights = dict()\n weights[classifier.Romance] = 100\n weights[classifier.Paranormal_Romance] = 4\n w2 = WorkClassifier.consolidate_genre_weights(weights)\n assert 104 == w2[classifier.Paranormal_Romance]\n assert classifier.Romance not in w2\n\n def test_consolidate_consolidates_multiple_subgenres(self):\n # This work is classified under two different sets of\n # genres/subgenres. 
Both of the genre/subgenre pairs get\n # rolled up properly, not just the heavier one.\n weights = dict()\n weights[classifier.Women_Detectives] = 150\n weights[classifier.Mystery] = 10\n weights[classifier.Historical_Romance] = 200\n weights[classifier.Romance] = 10\n w2 = WorkClassifier.consolidate_genre_weights(weights)\n assert 210 == w2[classifier.Historical_Romance]\n assert 160 == w2[classifier.Women_Detectives]\n\n def test_consolidate_through_multiple_levels_from_multiple_sources(self):\n # This test can't work anymore because we no longer have a\n # triply-nested category like Romance/Erotica -> Romance ->\n # Paranormal Romance.\n #\n # weights = dict()\n # weights[classifier.Romance_Erotica] = 50\n # weights[classifier.Romance] = 50\n # weights[classifier.Paranormal_Romance] = 4\n # w2 = WorkClassifier.consolidate_genre_weights(weights)\n # eq_(104, w2[classifier.Paranormal_Romance])\n # assert classifier.Romance not in w2\n pass\n\n def test_consolidate_fails_when_threshold_not_met(self):\n weights = dict()\n weights[classifier.History] = 100\n weights[classifier.Middle_East_History] = 1\n w2 = WorkClassifier.consolidate_genre_weights(weights)\n assert 100 == w2[classifier.History]\n assert 1 == w2[classifier.Middle_East_History]\n\n\nclass TestFreeformAudienceClassifier(DatabaseTest):\n def test_audience(self):\n def audience(aud):\n # The second param, `name`, is not used in the audience method\n return FreeformAudienceClassifier.audience(aud, None)\n\n for val in [\"children\", \"pre-adolescent\", \"beginning reader\"]:\n assert Classifier.AUDIENCE_CHILDREN == audience(val)\n\n for val in [\n \"young adult\",\n \"ya\",\n \"teenagers\",\n \"adolescent\",\n \"early adolescents\",\n ]:\n assert Classifier.AUDIENCE_YOUNG_ADULT == audience(val)\n\n assert audience(\"adult\") == Classifier.AUDIENCE_ADULT\n assert audience(\"adults only\") == Classifier.AUDIENCE_ADULTS_ONLY\n assert audience(\"all ages\") == Classifier.AUDIENCE_ALL_AGES\n assert audience(\"research\") == Classifier.AUDIENCE_RESEARCH\n\n assert audience(\"books for all ages\") == None\n\n def test_target_age(self):\n def target_age(age):\n return FreeformAudienceClassifier.target_age(age, None)\n\n assert target_age(\"beginning reader\") == (5, 8)\n assert target_age(\"pre-adolescent\") == (9, 12)\n assert target_age(\"all ages\") == (Classifier.ALL_AGES_AGE_CUTOFF, None)\n\n assert target_age(\"babies\") == (None, None)\n\n\nclass TestWorkClassifier(DatabaseTest):\n def setup_method(self):\n super(TestWorkClassifier, self).setup_method()\n self.work = self._work(with_license_pool=True)\n self.identifier = self.work.presentation_edition.primary_identifier\n self.classifier = WorkClassifier(self.work, test_session=self._db)\n\n def _genre(self, genre_data):\n expected_genre, ignore = Genre.lookup(self._db, genre_data.name)\n return expected_genre\n\n def test_no_assumptions(self):\n \"\"\"If we have no data whatsoever, we make no assumptions\n about a work's classification.\n \"\"\"\n self.classifier.weigh_metadata()\n assert None == self.classifier.fiction()\n assert None == self.classifier.audience()\n assert {} == self.classifier.genres(None)\n assert (None, None) == self.classifier.target_age(None)\n\n def test_weight_metadata_title(self):\n self.work.presentation_edition.title = \"Star Trek: The Book\"\n expected_genre = self._genre(classifier.Media_Tie_in_SF)\n self.classifier.weigh_metadata()\n assert 100 == self.classifier.genre_weights[expected_genre]\n\n def test_weight_metadata_publisher(self):\n # Genre 
publisher and imprint\n self.work.presentation_edition.publisher = \"Harlequin\"\n expected_genre = self._genre(classifier.Romance)\n self.classifier.weigh_metadata()\n assert 100 == self.classifier.genre_weights[expected_genre]\n\n def test_weight_metadata_imprint(self):\n # Imprint is more specific than publisher, so it takes precedence.\n self.work.presentation_edition.publisher = \"Harlequin\"\n self.work.presentation_edition.imprint = \"<NAME>\"\n expected_genre = self._genre(classifier.Romantic_Suspense)\n general_romance = self._genre(classifier.Romance)\n\n self.classifier.weigh_metadata()\n assert general_romance not in self.classifier.genre_weights\n assert 100 == self.classifier.genre_weights[expected_genre]\n\n def test_metadata_implies_audience_and_genre(self):\n # Genre and audience publisher\n self.work.presentation_edition.publisher = \"Harlequin\"\n self.work.presentation_edition.imprint = \"Harlequin Teen\"\n expected_genre = self._genre(classifier.Romance)\n\n self.classifier.weigh_metadata()\n assert 100 == self.classifier.genre_weights[expected_genre]\n assert 100 == self.classifier.audience_weights[Classifier.AUDIENCE_YOUNG_ADULT]\n\n def test_metadata_implies_fiction_status(self):\n self.work.presentation_edition.publisher = \"Harlequin\"\n self.work.presentation_edition.imprint = \"Harlequin Nonfiction\"\n self.classifier.weigh_metadata()\n\n assert 100 == self.classifier.fiction_weights[False]\n assert True not in self.classifier.fiction_weights\n\n def test_publisher_excludes_adult_audience(self):\n # We don't know if this is a children's book or a young adult\n # book, but we're confident it's not a book for adults.\n self.work.presentation_edition.publisher = \"Scholastic Inc.\"\n\n self.classifier.weigh_metadata()\n assert -100 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULT]\n assert -100 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULTS_ONLY]\n\n def test_imprint_excludes_adult_audience(self):\n self.work.presentation_edition.imprint = \"Delacorte Books for Young Readers\"\n\n self.classifier.weigh_metadata()\n assert -100 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULT]\n assert -100 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULTS_ONLY]\n\n def test_no_children_or_ya_signal_from_distributor_implies_book_is_for_adults(self):\n # Create some classifications that end up in\n # direct_from_license_source, but don't imply that the book is\n # from children or\n # YA. 
classifier.audience_weights[AUDIENCE_ADULT] will be set\n # to 500.\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n for subject in (\"Nonfiction\", \"Science Fiction\", \"History\"):\n c = i.classify(source, Subject.OVERDRIVE, subject, weight=1000)\n self.classifier.add(c)\n\n # There's a little bit of evidence that it's a children's book,\n # but not enough to outweight the distributor's silence.\n c2 = self.identifier.classify(source, Subject.TAG, \"Children's books\", weight=1)\n self.classifier.add(c2)\n self.classifier.prepare_to_classify()\n # Overdrive classifications are regarded as 50 times more reliable\n # than their actual weight, as per Classification.scaled_weight\n assert 50000 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULT]\n\n def test_adults_only_indication_from_distributor_has_no_implication_for_audience(\n self,\n ):\n # Create some classifications that end up in\n # direct_from_license_source, one of which implies the book is\n # for adults only.\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n for subject in (\"Erotic Literature\", \"Science Fiction\", \"History\"):\n c = i.classify(source, Subject.OVERDRIVE, subject, weight=1)\n self.classifier.add(c)\n\n self.classifier.prepare_to_classify()\n\n # Again, Overdrive classifications are regarded as 50 times\n # more reliable than their actual weight, as per\n # Classification.scaled_weight\n assert 50 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULTS_ONLY]\n\n # No boost was given to AUDIENCE_ADULT, because a distributor\n # classification implied AUDIENCE_ADULTS_ONLY.\n assert 0 == self.classifier.audience_weights[Classifier.AUDIENCE_ADULT]\n\n def test_no_signal_from_distributor_has_no_implication_for_audience(self):\n # This work has no classifications that end up in\n # direct_from_license_source. 
In the absence of any such\n # classifications we cannot determine whether the\n # distributor's silence about the audience is because it's a\n # book for adults or because there's just no data from the\n # distributor.\n assert {} == self.classifier.audience_weights\n\n def test_children_or_ya_signal_from_distributor_has_no_immediate_implication_for_audience(\n self,\n ):\n # This work has a classification direct from the distributor\n # that implies the book is for children, so no conclusions are\n # drawn in the prepare_to_classify() step.\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c = self.identifier.classify(\n source, Subject.OVERDRIVE, \"Picture Books\", weight=1000\n )\n self.classifier.prepare_to_classify()\n assert {} == self.classifier.audience_weights\n\n self.classifier.add(c)\n assert 50000 == self.classifier.audience_weights[Classifier.AUDIENCE_CHILDREN]\n\n def test_juvenile_classification_is_split_between_children_and_ya(self):\n\n # LCC files both children's and YA works under 'PZ'.\n # Here's how we deal with that.\n #\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OCLC)\n c = i.classify(source, Subject.LCC, \"PZ\", weight=100)\n self.classifier.add(c)\n\n # (This classification has no bearing on audience and its\n # weight will be ignored.)\n c2 = i.classify(source, Subject.TAG, \"Pets\", weight=1000)\n self.classifier.add(c2)\n self.classifier.prepare_to_classify\n genres, fiction, audience, target_age = self.classifier.classify()\n\n # Young Adult wins because we err on the side of showing books\n # to kids who are too old, rather than too young.\n assert Classifier.AUDIENCE_YOUNG_ADULT == audience\n\n # But behind the scenes, more is going on. The weight of the\n # classifier has been split 60/40 between YA and children.\n weights = self.classifier.audience_weights\n assert 60 == weights[Classifier.AUDIENCE_YOUNG_ADULT]\n assert 40 == weights[Classifier.AUDIENCE_CHILDREN]\n # If this is in fact a children's book, this will make it\n # relatively easy for data from some other source to come in\n # and tip the balance.\n\n # The adult audiences have been reduced, to reduce the chance\n # that splitting up the weight between YA and Children will\n # cause the work to be mistakenly classified as Adult.\n assert -50 == weights[Classifier.AUDIENCE_ADULT]\n assert -50 == weights[Classifier.AUDIENCE_ADULTS_ONLY]\n # The juvenile classification doesn't make the all ages less likely.\n assert 0 == weights[Classifier.AUDIENCE_ALL_AGES]\n\n def test_childrens_book_when_evidence_is_overwhelming(self):\n # There is some evidence in the 'adult' and 'adults only'\n # bucket, but there's a lot more evidence that it's a\n # children's book, so we go with childrens or YA.\n\n # The evidence that this is a children's book is strong but\n # not overwhelming.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 10,\n Classifier.AUDIENCE_ADULTS_ONLY: 1,\n Classifier.AUDIENCE_CHILDREN: 22,\n }\n assert Classifier.AUDIENCE_ADULT == self.classifier.audience()\n\n # Now it's overwhelming. 
(the 'children' weight is more than twice\n # the combined 'adult' + 'adults only' weight.\n self.classifier.audience_weights[Classifier.AUDIENCE_CHILDREN] = 23\n assert Classifier.AUDIENCE_CHILDREN == self.classifier.audience()\n\n # Now it's overwhelmingly likely to be a YA book.\n del self.classifier.audience_weights[Classifier.AUDIENCE_CHILDREN]\n self.classifier.audience_weights[Classifier.AUDIENCE_YOUNG_ADULT] = 23\n assert Classifier.AUDIENCE_YOUNG_ADULT == self.classifier.audience()\n\n def test_ya_book_when_childrens_and_ya_combined_beat_adult(self):\n # Individually, the 'children' and 'ya' buckets don't beat the\n # combined 'adult' + 'adults only' bucket by the appropriate\n # factor, but combined they do. In this case\n # we should classify the book as YA. It might be inaccurate,\n # but it's more accurate than 'adult' and less likely to be\n # a costly mistake than 'children'.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 9,\n Classifier.AUDIENCE_ADULTS_ONLY: 0,\n Classifier.AUDIENCE_CHILDREN: 10,\n Classifier.AUDIENCE_YOUNG_ADULT: 9,\n }\n assert Classifier.AUDIENCE_YOUNG_ADULT == self.classifier.audience()\n\n def test_genre_may_restrict_audience(self):\n\n # The audience info says this is a YA book.\n self.classifier.audience_weights = {Classifier.AUDIENCE_YOUNG_ADULT: 1000}\n\n # Without any genre information, it's classified as YA.\n assert Classifier.AUDIENCE_YOUNG_ADULT == self.classifier.audience()\n\n # But if it's Erotica, it is always classified as Adults Only.\n genres = {classifier.Erotica: 50, classifier.Science_Fiction: 50}\n assert Classifier.AUDIENCE_ADULTS_ONLY == self.classifier.audience(genres)\n\n def test_all_ages_audience(self):\n # If the All Ages weight is more than the total adult weight and\n # the total juvenile weight, then assign all ages as the audience.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 50,\n Classifier.AUDIENCE_ADULTS_ONLY: 30,\n Classifier.AUDIENCE_ALL_AGES: 100,\n Classifier.AUDIENCE_CHILDREN: 30,\n Classifier.AUDIENCE_YOUNG_ADULT: 40,\n }\n assert Classifier.AUDIENCE_ALL_AGES == self.classifier.audience()\n\n # This works even if 'Children' looks much better than 'Adult'.\n # 'All Ages' looks even better than that, so it wins.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 1,\n Classifier.AUDIENCE_ADULTS_ONLY: 0,\n Classifier.AUDIENCE_ALL_AGES: 1000,\n Classifier.AUDIENCE_CHILDREN: 30,\n Classifier.AUDIENCE_YOUNG_ADULT: 29,\n }\n assert Classifier.AUDIENCE_ALL_AGES == self.classifier.audience()\n\n # If the All Ages weight is smaller than the total adult weight,\n # the audience is adults.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 70,\n Classifier.AUDIENCE_ADULTS_ONLY: 10,\n Classifier.AUDIENCE_ALL_AGES: 79,\n Classifier.AUDIENCE_CHILDREN: 30,\n Classifier.AUDIENCE_YOUNG_ADULT: 40,\n }\n assert Classifier.AUDIENCE_ADULT == self.classifier.audience()\n\n def test_research_audience(self):\n # If the research weight is larger than the total adult weight +\n # all ages weight and larger than the total juvenile weight +\n # all ages weight, then assign research as the audience\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 50,\n Classifier.AUDIENCE_ADULTS_ONLY: 30,\n Classifier.AUDIENCE_ALL_AGES: 10,\n Classifier.AUDIENCE_CHILDREN: 30,\n Classifier.AUDIENCE_YOUNG_ADULT: 150,\n Classifier.AUDIENCE_RESEARCH: 200,\n }\n assert Classifier.AUDIENCE_RESEARCH == self.classifier.audience()\n\n # If the research weight is not larger 
than either total adults weight\n # and all ages weight or total juvenile weight and all ages weight,\n # then we get those audience values instead.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 80,\n Classifier.AUDIENCE_ADULTS_ONLY: 10,\n Classifier.AUDIENCE_ALL_AGES: 20,\n Classifier.AUDIENCE_CHILDREN: 35,\n Classifier.AUDIENCE_YOUNG_ADULT: 40,\n Classifier.AUDIENCE_RESEARCH: 100,\n }\n assert Classifier.AUDIENCE_ADULT == self.classifier.audience()\n\n def test_format_classification_from_license_source_is_used(self):\n # This book will be classified as a comic book, because\n # the \"comic books\" classification comes from its license source.\n source = self.work.license_pools[0].data_source\n self.identifier.classify(source, Subject.TAG, \"Comic Books\", weight=100)\n self.classifier.add(self.identifier.classifications[0])\n genres = self.classifier.genres(fiction=True)\n assert [(classifier.Comics_Graphic_Novels, 100)] == list(genres.items())\n\n def test_format_classification_not_from_license_source_is_ignored(self):\n # This book will be not classified as a comic book, because\n # the \"comic books\" classification does not come from its\n # license source.\n source = self.work.license_pools[0].data_source\n oclc = DataSource.lookup(self._db, DataSource.OCLC)\n self.identifier.classify(oclc, Subject.TAG, \"Comic Books\", weight=100)\n self.classifier.add(self.identifier.classifications[0])\n genres = self.classifier.genres(fiction=True)\n assert [] == list(genres.items())\n\n def test_childrens_book_when_no_evidence_for_adult_book(self):\n # There is no evidence in the 'adult' or 'adults only'\n # buckets, so minimal evidence in the 'children' bucket is\n # sufficient to be confident.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 0,\n Classifier.AUDIENCE_ADULTS_ONLY: 0,\n Classifier.AUDIENCE_CHILDREN: 1,\n Classifier.AUDIENCE_RESEARCH: 0,\n Classifier.AUDIENCE_ALL_AGES: 0,\n }\n assert Classifier.AUDIENCE_CHILDREN == self.classifier.audience()\n\n def test_adults_only_threshold(self):\n # The 'adults only' weight here is not even close to a\n # majority, but it's high enough that we classify this work as\n # 'adults only' to be safe.\n self.classifier.audience_weights = {\n Classifier.AUDIENCE_ADULT: 4,\n Classifier.AUDIENCE_ADULTS_ONLY: 2,\n Classifier.AUDIENCE_CHILDREN: 4,\n Classifier.AUDIENCE_RESEARCH: 0,\n Classifier.AUDIENCE_ALL_AGES: 0,\n }\n assert Classifier.AUDIENCE_ADULTS_ONLY == self.classifier.audience()\n\n def test_target_age_is_default_for_adult_books(self):\n # Target age data can't override an independently determined\n # audience.\n overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c1 = self.identifier.classify(\n overdrive, Subject.OVERDRIVE, \"Picture Books\", weight=10000\n )\n self.classifier.add(c1)\n\n target_age = self.classifier.target_age(Classifier.AUDIENCE_ADULT)\n assert (18, None) == target_age\n\n def test_target_age_weight_scaling(self):\n # We have a weak but reliable signal that this is a book for\n # ages 5 to 7.\n overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c1 = self.identifier.classify(\n overdrive, Subject.OVERDRIVE, \"Beginning Readers\", weight=2\n )\n self.classifier.add(c1)\n\n # We have a louder but less reliable signal that this is a\n # book for eleven-year-olds.\n oclc = DataSource.lookup(self._db, DataSource.OCLC)\n c2 = self.identifier.classify(oclc, Subject.TAG, \"Grade 6\", weight=3)\n self.classifier.add(c2)\n\n # Both signals make it into the dataset, 
but they are weighted\n # differently, and the more reliable signal becomes stronger.\n lower = self.classifier.target_age_lower_weights\n upper = self.classifier.target_age_upper_weights\n assert lower[5] > lower[11]\n assert upper[8] > upper[11]\n assert lower[11] == upper[11]\n assert lower[5] == upper[8]\n\n # And this affects the target age we choose.\n a = self.classifier.target_age(Classifier.AUDIENCE_CHILDREN)\n assert (5, 8) == self.classifier.target_age(Classifier.AUDIENCE_CHILDREN)\n\n def test_target_age_errs_towards_wider_span(self):\n i = self._identifier()\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c1 = i.classify(source, Subject.AGE_RANGE, \"8-9\", weight=1)\n c2 = i.classify(source, Subject.AGE_RANGE, \"6-7\", weight=1)\n\n overdrive_edition, lp = self._edition(\n data_source_name=source.name,\n with_license_pool=True,\n identifier_id=i.identifier,\n )\n self.classifier.work = self._work(presentation_edition=overdrive_edition)\n for classification in i.classifications:\n self.classifier.add(classification)\n genres, fiction, audience, target_age = self.classifier.classify()\n\n assert Classifier.AUDIENCE_CHILDREN == audience\n assert (6, 9) == target_age\n\n def test_fiction_status_restricts_genre(self):\n # Classify a book to imply that it's 50% science fiction and\n # 50% history. Then call .genres() twice. With fiction=True,\n # it's 100% science fiction. With fiction=False, it's 100% history.\n\n # This book is classified as 50% science fiction and 50% history.\n fiction_genre = self._genre(classifier.Science_Fiction)\n nonfiction_genre = self._genre(classifier.History)\n self.classifier.genre_weights[fiction_genre] = 100\n self.classifier.genre_weights[nonfiction_genre] = 100\n\n # But any given book is either fiction or nonfiction. If we say this\n # book is fiction, it's classified as 100% SF.\n genres = self.classifier.genres(True)\n assert [(fiction_genre.genredata, 100)] == list(genres.items())\n\n # If we say it's nonfiction, it ends up 100% history.\n genres = self.classifier.genres(False)\n assert [(nonfiction_genre.genredata, 100)] == list(genres.items())\n\n def test_genres_consolidated_before_classification(self):\n # A book with Romance=100, Historical Romance=5, Romantic\n # Suspense=4 will be classified by .genres() as 100%\n # Historical Romance.\n historical_romance = self._genre(classifier.Historical_Romance)\n romance = self._genre(classifier.Romance)\n romantic_suspense = self._genre(classifier.Romantic_Suspense)\n nonfiction_genre = self._genre(classifier.History)\n\n self.classifier.genre_weights[romance] = 100\n\n # Give Historical Romance enough weight to 'swallow' its\n # parent genre. (5% of the weight of its parent.)\n self.classifier.genre_weights[historical_romance] = 5\n\n # Romantic Suspense does pretty well but it doesn't have\n # enough weight to swallow the parent genre, and it's\n # eliminated by the low-pass filter.\n self.classifier.genre_weights[romantic_suspense] = 4\n\n [genre] = list(self.classifier.genres(True).items())\n assert (historical_romance.genredata, 105) == genre\n\n # TODO: This behavior is a little random. 
As in, it's\n # random which genre comes out on top.\n #\n # self.classifier.genre_weights[romantic_suspense] = 5\n # [genre] = self.classifier.genres(True).items()\n # eq_((historical_romance.genredata, 105), genre)\n\n def test_overdrive_juvenile_implicit_target_age(self):\n # An Overdrive book that is classified under \"Juvenile\" but\n # not under any more specific category is believed to have a\n # target age range of 9-12.\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c = i.classify(source, Subject.OVERDRIVE, \"Juvenile Fiction\", weight=1)\n self.classifier.add(c)\n self.classifier.prepare_to_classify()\n assert [9] == list(self.classifier.target_age_lower_weights.keys())\n assert [12] == list(self.classifier.target_age_upper_weights.keys())\n\n def test_overdrive_juvenile_explicit_target_age(self):\n # An Overdrive book that is classified under \"Juvenile\" and\n # also under some more specific category is believed to have\n # the target age range associated with that more specific\n # category.\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n for subject in (\"Juvenile Fiction\", \"Picture Books\"):\n c = i.classify(source, Subject.OVERDRIVE, subject, weight=1)\n self.classifier.add(c)\n self.classifier.prepare_to_classify()\n assert [0] == list(self.classifier.target_age_lower_weights.keys())\n assert [4] == list(self.classifier.target_age_upper_weights.keys())\n\n def test_genre_low_pass_filter(self):\n\n romance = self._genre(classifier.Romance)\n self.classifier.genre_weights[romance] = 100\n\n sf = self._genre(classifier.Science_Fiction)\n self.classifier.genre_weights[sf] = 15\n\n # The default cutoff value of 0.15 requires that a genre have\n # a weight of at least the total weight * 0.15 to qualify. 
In\n # this case, the total weight is 115 and the cutoff weight is\n # 17.25.\n [[genre, weight]] = list(self.classifier.genres(True).items())\n assert romance.genredata == genre\n\n # Increase SF's weight past the cutoff and we get both genres.\n self.classifier.genre_weights[sf] = 18\n\n [[g1, weight], [g2, weight]] = list(self.classifier.genres(True).items())\n assert set([g1, g2]) == set([romance.genredata, sf.genredata])\n\n def test_classify_sets_minimum_age_high_if_minimum_lower_than_maximum(self):\n\n # We somehow end up in a situation where the proposed low end\n # of the target age is higher than the proposed high end.\n self.classifier.audience_weights[Classifier.AUDIENCE_CHILDREN] = 1\n self.classifier.target_age_lower_weights[10] = 1\n self.classifier.target_age_upper_weights[4] = 1\n\n # We set the low end equal to the high end, erring on the side\n # of making the book available to fewer people.\n genres, fiction, audience, target_age = self.classifier.classify()\n assert 10 == target_age[0]\n assert 10 == target_age[1]\n\n def test_classify_uses_default_fiction_status(self):\n genres, fiction, audience, target_age = self.classifier.classify(\n default_fiction=True\n )\n assert True == fiction\n genres, fiction, audience, target_age = self.classifier.classify(\n default_fiction=False\n )\n assert False == fiction\n genres, fiction, audience, target_age = self.classifier.classify(\n default_fiction=None\n )\n assert None == fiction\n\n # The default isn't used if there's any information about the fiction status.\n self.classifier.fiction_weights[False] = 1\n genres, fiction, audience, target_age = self.classifier.classify(\n default_fiction=None\n )\n assert False == fiction\n\n def test_classify_uses_default_audience(self):\n genres, fiction, audience, target_age = self.classifier.classify()\n assert None == audience\n genres, fiction, audience, target_age = self.classifier.classify(\n default_audience=Classifier.AUDIENCE_ADULT\n )\n assert Classifier.AUDIENCE_ADULT == audience\n genres, fiction, audience, target_age = self.classifier.classify(\n default_audience=Classifier.AUDIENCE_CHILDREN\n )\n assert Classifier.AUDIENCE_CHILDREN == audience\n\n # The default isn't used if there's any information about the audience.\n self.classifier.audience_weights[Classifier.AUDIENCE_ADULT] = 1\n genres, fiction, audience, target_age = self.classifier.classify(\n default_audience=None\n )\n assert Classifier.AUDIENCE_ADULT == audience\n\n def test_classify(self):\n # At this point we've tested all the components of classify, so just\n # do an overall test to verify that classify() returns a 4-tuple\n # (genres, fiction, audience, target_age)\n\n self.work.presentation_edition.title = (\n \"Science Fiction: A Comprehensive History\"\n )\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n c1 = i.classify(source, Subject.OVERDRIVE, \"History\", weight=10)\n c2 = i.classify(source, Subject.OVERDRIVE, \"Science Fiction\", weight=100)\n c3 = i.classify(source, Subject.OVERDRIVE, \"Young Adult Nonfiction\", weight=100)\n for classification in i.classifications:\n self.classifier.add(classification)\n self.classifier.prepare_to_classify()\n\n genres, fiction, audience, target_age = self.classifier.classify()\n\n # This work really looks like science fiction (w=100), but it\n # looks *even more* like nonfiction (w=100+10), and science\n # fiction is not a genre of nonfiction. So this book can't be\n # science fiction. 
It must be history.\n assert \"History\" == list(genres.keys())[0].name\n assert False == fiction\n assert Classifier.AUDIENCE_YOUNG_ADULT == audience\n assert (12, 17) == target_age\n\n def test_top_tier_values(self):\n c = Counter()\n assert set() == WorkClassifier.top_tier_values(c)\n\n c = Counter([\"a\"])\n assert set([\"a\"]) == WorkClassifier.top_tier_values(c)\n\n c = Counter([1, 1, 1, 2, 2, 3, 4, 4, 4])\n assert set([1, 4]) == WorkClassifier.top_tier_values(c)\n c = Counter([1, 1, 1, 2])\n assert set([1]) == WorkClassifier.top_tier_values(c)\n\n def test_duplicate_classification_ignored(self):\n \"\"\"A given classification is only used once from\n a given data source.\n \"\"\"\n history = self._genre(classifier.History)\n i = self.identifier\n source = DataSource.lookup(self._db, DataSource.AMAZON)\n c1 = i.classify(source, Subject.TAG, \"History\", weight=1)\n assert [] == self.classifier.classifications\n\n self.classifier.add(c1)\n old_weight = self.classifier.genre_weights[history]\n\n c2 = i.classify(source, Subject.TAG, \"History\", weight=100)\n self.classifier.add(c2)\n # No effect -- the weights are the same as before.\n assert old_weight == self.classifier.genre_weights[history]\n\n # The same classification can come in from another data source and\n # it will be taken into consideration.\n source2 = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)\n c3 = i.classify(source2, Subject.TAG, \"History\", weight=1)\n self.classifier.add(c3)\n assert self.classifier.genre_weights[history] > old_weight\n\n def test_staff_genre_overrides_others(self):\n genre1, is_new = Genre.lookup(self._db, \"Psychology\")\n genre2, is_new = Genre.lookup(self._db, \"Cooking\")\n subject1 = self._subject(type=\"type1\", identifier=\"subject1\")\n subject1.genre = genre1\n subject2 = self._subject(type=\"type2\", identifier=\"subject2\")\n subject2.genre = genre2\n source = DataSource.lookup(self._db, DataSource.AXIS_360)\n staff_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n classification1 = self._classification(\n identifier=self.identifier, subject=subject1, data_source=source, weight=10\n )\n classification2 = self._classification(\n identifier=self.identifier,\n subject=subject2,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification1)\n self.classifier.add(classification2)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert [genre2.name] == [genre.name for genre in list(genre_weights.keys())]\n\n def test_staff_none_genre_overrides_others(self):\n source = DataSource.lookup(self._db, DataSource.AXIS_360)\n staff_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n genre1, is_new = Genre.lookup(self._db, \"Poetry\")\n subject1 = self._subject(type=\"type1\", identifier=\"subject1\")\n subject1.genre = genre1\n subject2 = self._subject(\n type=Subject.SIMPLIFIED_GENRE, identifier=SimplifiedGenreClassifier.NONE\n )\n classification1 = self._classification(\n identifier=self.identifier, subject=subject1, data_source=source, weight=10\n )\n classification2 = self._classification(\n identifier=self.identifier,\n subject=subject2,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification1)\n self.classifier.add(classification2)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert 0 == len(list(genre_weights.keys()))\n\n def test_staff_fiction_overrides_others(self):\n source = DataSource.lookup(self._db, DataSource.AXIS_360)\n staff_source = 
DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n subject1 = self._subject(type=\"type1\", identifier=\"Cooking\")\n subject1.fiction = False\n subject2 = self._subject(type=\"type2\", identifier=\"Psychology\")\n subject2.fiction = False\n subject3 = self._subject(\n type=Subject.SIMPLIFIED_FICTION_STATUS, identifier=\"Fiction\"\n )\n classification1 = self._classification(\n identifier=self.identifier, subject=subject1, data_source=source, weight=10\n )\n classification2 = self._classification(\n identifier=self.identifier, subject=subject2, data_source=source, weight=10\n )\n classification3 = self._classification(\n identifier=self.identifier,\n subject=subject3,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification1)\n self.classifier.add(classification2)\n self.classifier.add(classification3)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert True == fiction\n\n def test_staff_audience_overrides_others(self):\n pool = self._licensepool(None, data_source_name=DataSource.AXIS_360)\n license_source = pool.data_source\n staff_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n subject1 = self._subject(type=\"type1\", identifier=\"subject1\")\n subject1.audience = \"Adult\"\n subject2 = self._subject(type=\"type2\", identifier=\"subject2\")\n subject2.audience = \"Adult\"\n subject3 = self._subject(type=Subject.FREEFORM_AUDIENCE, identifier=\"Children\")\n classification1 = self._classification(\n identifier=pool.identifier,\n subject=subject1,\n data_source=license_source,\n weight=10,\n )\n classification2 = self._classification(\n identifier=pool.identifier,\n subject=subject2,\n data_source=license_source,\n weight=10,\n )\n classification3 = self._classification(\n identifier=pool.identifier,\n subject=subject3,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification1)\n self.classifier.add(classification2)\n self.classifier.add(classification3)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert \"Children\" == audience\n\n def test_staff_target_age_overrides_others(self):\n source = DataSource.lookup(self._db, DataSource.AXIS_360)\n staff_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n subject1 = self._subject(type=\"type1\", identifier=\"subject1\")\n subject1.target_age = NumericRange(6, 8, \"[)\")\n subject1.weight_as_indicator_of_target_age = 1\n subject2 = self._subject(type=\"type2\", identifier=\"subject2\")\n subject2.target_age = NumericRange(6, 8, \"[)\")\n subject2.weight_as_indicator_of_target_age = 1\n subject3 = self._subject(type=Subject.AGE_RANGE, identifier=\"10-13\")\n classification1 = self._classification(\n identifier=self.identifier, subject=subject1, data_source=source, weight=10\n )\n classification2 = self._classification(\n identifier=self.identifier, subject=subject2, data_source=source, weight=10\n )\n classification3 = self._classification(\n identifier=self.identifier,\n subject=subject3,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification1)\n self.classifier.add(classification2)\n self.classifier.add(classification3)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert (10, 13) == target_age\n\n def test_not_inclusive_target_age(self):\n staff_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n subject = self._subject(type=Subject.AGE_RANGE, identifier=\"10-12\")\n subject.target_age = NumericRange(9, 13, \"()\")\n 
classification = self._classification(\n identifier=self.identifier,\n subject=subject,\n data_source=staff_source,\n weight=1,\n )\n self.classifier.add(classification)\n (genre_weights, fiction, audience, target_age) = self.classifier.classify()\n assert (10, 12) == target_age\n", "id": "7850302", "language": "Python", "matching_score": 4.046891689300537, "max_stars_count": 0, "path": "tests/core/classifiers/test_classifier.py" }, { "content": "# encoding: utf-8\nimport csv\nimport os\nimport re\n\nfrom . import *\nfrom .keyword import KeywordBasedClassifier\n\n\nclass CustomMatchToken(object):\n \"\"\"A custom token used in matching rules.\"\"\"\n\n def matches(self, subject_token):\n \"\"\"Does the given token match this one?\"\"\"\n raise NotImplementedError()\n\n\nclass Something(CustomMatchToken):\n \"\"\"A CustomMatchToken that will match any single token.\"\"\"\n\n def matches(self, subject_token):\n return True\n\n\nclass RE(CustomMatchToken):\n \"\"\"A CustomMatchToken that performs a regular expression search.\"\"\"\n\n def __init__(self, pattern):\n self.re = re.compile(pattern, re.I)\n\n def matches(self, subject_token):\n return self.re.search(subject_token)\n\n\nclass Interchangeable(CustomMatchToken):\n \"\"\"A token that matches a list of strings.\"\"\"\n\n def __init__(self, *choices):\n \"\"\"All of these strings are interchangeable for matching purposes.\"\"\"\n self.choices = set([Lowercased(x) for x in choices])\n\n def matches(self, subject_token):\n return Lowercased(subject_token) in self.choices\n\n\n# Special tokens for use in matching rules.\nsomething = Something()\nfiction = Interchangeable(\"Juvenile Fiction\", \"Young Adult Fiction\", \"Fiction\")\njuvenile = Interchangeable(\"Juvenile Fiction\", \"Juvenile Nonfiction\")\nya = Interchangeable(\"Young Adult Fiction\", \"Young Adult Nonfiction\")\n\n# These need special code because they can modify the token stack.\nanything = object()\nnonfiction = object()\n\n# These are BISAC categories that changed their names. We want to treat both\n# names as equivalent. In most cases, the name change is cosmetic.\nbody_mind_spirit = Interchangeable(\"Body, Mind & Spirit\", \"Mind & Spirit\")\npsychology = Interchangeable(\"Psychology\", \"Psychology & Psychiatry\")\ntechnology = Interchangeable(\"Technology & Engineering\", \"Technology\")\nsocial_topics = Interchangeable(\"Social Situations\", \"Social Topics\")\n\n# This name change is _not_ cosmetic. 
The category was split into\n# two, and we're putting everything that was in the old category into\n# one of the two.\nliterary_criticism = Interchangeable(\n \"Literary Criticism\", \"Literary Criticism & Collections\"\n)\n\n# If these variables are used in a rule, they must be the first token in\n# that rule.\nspecial_variables = {\n nonfiction: \"nonfiction\",\n fiction: \"fiction\",\n juvenile: \"juvenile\",\n ya: \"ya\",\n}\n\n\nclass MatchingRule(object):\n \"\"\"A rule that takes a list of subject parts and returns\n an appropriate classification.\n \"\"\"\n\n def __init__(self, result, *ruleset):\n if result is None:\n raise ValueError(\n \"MatchingRule returns None on a non-match, it can't also return None on a match.\"\n )\n\n self.result = result\n self.ruleset = []\n\n # Track the subjects that were 'caught' by this rule,\n # for debugging purposes.\n self.caught = []\n\n for i, rule in enumerate(ruleset):\n if i > 0 and rule in special_variables:\n raise ValueError(\n \"Special token '%s' must be the first in a ruleset.\"\n % special_variables[rule]\n )\n\n if isinstance(rule, (bytes, str)):\n # It's a string. We do case-insensitive comparisons,\n # so lowercase it.\n self.ruleset.append(Lowercased(rule))\n else:\n # It's a special object. Add it to the ruleset as-is.\n self.ruleset.append(rule)\n\n def match(self, *subject):\n \"\"\"If `subject` matches this ruleset, return the appropriate\n result. Otherwise, return None.\n \"\"\"\n # Create parallel lists of the subject and the things it has to\n # match.\n must_match = list(self.ruleset)\n remaining_subject = list(subject)\n\n # Consume tokens from both lists until we've confirmed no\n # match or there is nothing left to match.\n match_so_far = True\n while match_so_far and must_match:\n match_so_far, must_match, remaining_subject = self._consume(\n must_match, remaining_subject\n )\n\n if match_so_far:\n # Everything that had to match, did.\n self.caught.append(subject)\n return self.result\n\n # Something that had to match, didn't.\n return None\n\n def _consume(self, rules, subject):\n \"\"\"The first token (and possibly more) of the rules must match the\n first token (and possibly more) of the subject.\n\n All matched rule and subject tokens are consumed.\n\n :return: A 3-tuple (could_match, new_rules, new_subject)\n\n could_match is a boolean that is False if we now know that the\n subject does not match the rule, and True if it might still\n match the rule.\n\n new_rules contains the tokens in the ruleset that have yet to\n be activated.\n\n new_subject contains the tokens in the subject that have yet\n to be checked.\n \"\"\"\n if not rules:\n # An empty ruleset matches everything.\n return True, rules, subject\n\n if not subject and rules != [anything]:\n # Apart from [anything], no non-empty ruleset matches an\n # empty subject.\n return False, rules, subject\n\n # Figure out which rule we'll be applying. 
We won't need it\n # again, so we can remove it from the ruleset.\n rule_token = rules.pop(0)\n if rule_token == anything:\n # This is the complicated one.\n\n if not rules:\n # If the final rule is 'anything', then that's redundant,\n # but we can declare success and stop.\n return True, rules, subject\n\n # At this point we know that 'anything' is followed by some\n # other rule token.\n next_rule = rules.pop(0)\n\n # We can consume as many subject tokens as necessary, but\n # eventually a subject token must match this subsequent\n # rule token.\n while subject:\n subject_token = subject.pop(0)\n submatch, ignore1, ignore2 = self._consume([next_rule], [subject_token])\n if submatch:\n # We had to remove some number of subject tokens,\n # but we found one that matches the next rule.\n return True, rules, subject\n else:\n # That token didn't match, but maybe the next one will.\n pass\n\n # We went through the entire remaining subject and didn't\n # find a match for the rule token that follows 'anything'.\n return False, rules, subject\n\n # We're comparing two individual tokens.\n subject_token = subject.pop(0)\n if isinstance(rule_token, CustomMatchToken):\n match = rule_token.matches(subject_token)\n elif rule_token == nonfiction:\n # This is too complex to be a CustomMatchToken because\n # we may be modifying the subject token list.\n match = subject_token not in (\n \"juvenile fiction\",\n \"young adult fiction\",\n \"fiction\",\n )\n if match and subject_token not in (\n \"juvenile nonfiction\",\n \"young adult nonfiction\",\n ):\n # The implicit top-level lane is 'nonfiction',\n # which means we popped a token like 'History' that\n # needs to go back on the stack.\n subject.insert(0, subject_token)\n else:\n # The strings must match exactly.\n match = rule_token == subject_token\n return match, rules, subject\n\n\ndef m(result, *ruleset):\n \"\"\"Alias for the MatchingRule constructor with a short name.\"\"\"\n return MatchingRule(result, *ruleset)\n\n\nclass BISACClassifier(Classifier):\n \"\"\"Handle real, genuine, according-to-Hoyle BISAC classifications.\n\n Subclasses of this method can use the same basic classification logic\n to classify classifications that are based on BISAC but have cosmetic\n differences.\n\n First, a BISAC code is mapped to its human-readable name.\n\n Second, the name is split into parts (e.g. [\"Fiction\", \"War &\n Military\"]).\n\n To determine fiction status, audience, target age, or genre, the\n list of name parts is compared against each of a list of matching\n rules.\n \"\"\"\n\n # Map identifiers to human-readable names.\n NAMES = dict(\n [i.strip() for i in l]\n for l in csv.reader(open(os.path.join(resource_dir, \"bisac.csv\")))\n )\n\n # Indicates that even though this rule doesn't match a subject, no\n # further rules in the same category should be run on it, because they\n # will lead to inaccurate information.\n stop = object()\n\n # If none of these rules match, a lane's fiction status depends on the\n # genre assigned to it.\n FICTION = [\n m(True, \"Fiction\"),\n m(True, \"Juvenile Fiction\"),\n m(False, \"Juvenile Nonfiction\"),\n m(True, \"Young Adult Fiction\"),\n m(False, \"Young Adult Nonfiction\"),\n m(False, anything, \"Essays\"),\n m(False, anything, \"Letters\"),\n m(True, \"Literary Collections\"),\n m(stop, \"Humor\"),\n m(stop, \"Drama\"),\n m(stop, \"Poetry\"),\n m(False, anything),\n ]\n\n # In BISAC, juvenile fiction and YA fiction are kept in separate\n # spaces. 
Nearly everything outside that space can be presumed to\n # have AUDIENCE_ADULT.\n AUDIENCE = [\n m(Classifier.AUDIENCE_CHILDREN, \"Bibles\", anything, \"Children\"),\n m(Classifier.AUDIENCE_CHILDREN, juvenile, anything),\n m(Classifier.AUDIENCE_YOUNG_ADULT, ya, anything),\n m(Classifier.AUDIENCE_YOUNG_ADULT, \"Bibles\", anything, \"Youth & Teen\"),\n m(Classifier.AUDIENCE_ADULTS_ONLY, anything, \"Erotica\"),\n m(Classifier.AUDIENCE_ADULTS_ONLY, \"Humor\", \"Topic\", \"Adult\"),\n m(Classifier.AUDIENCE_ADULT, anything),\n ]\n\n TARGET_AGE = [\n m((0, 4), juvenile, anything, \"Readers\", \"Beginner\"),\n m((5, 7), juvenile, anything, \"Readers\", \"Intermediate\"),\n m((5, 7), juvenile, anything, \"Early Readers\"),\n m((8, 13), juvenile, anything, \"Chapter Books\"),\n ]\n\n GENRE = [\n # Put all erotica in Erotica, to keep the other lanes at\n # \"Adult\" level or lower.\n m(Erotica, anything, \"Erotica\"),\n # Put all non-erotica comics into the same bucket, regardless\n # of their content.\n m(Comics_Graphic_Novels, \"Comics & Graphic Novels\"),\n m(Comics_Graphic_Novels, nonfiction, \"Comics & Graphic Novels\"),\n m(Comics_Graphic_Novels, fiction, \"Comics & Graphic Novels\"),\n # \"Literary Criticism / Foo\" implies Literary Criticism, not Foo.\n m(Literary_Criticism, anything, literary_criticism),\n # \"Fiction / Christian / Foo\" implies Religious Fiction\n # more strongly than it implies Foo.\n m(Religious_Fiction, fiction, anything, \"Christian\"),\n # \"Fiction / Foo / Short Stories\" implies Short Stories more\n # strongly than it implies Foo. This assumes that a short\n # story collection within a genre will also be classified\n # separately under that genre. This could definitely be\n # improved but would require a Subject to map to multiple\n # Genres.\n m(Short_Stories, fiction, anything, RE(\"^Anthologies\")),\n m(Short_Stories, fiction, anything, RE(\"^Short Stories\")),\n m(Short_Stories, \"Literary Collections\"),\n m(Short_Stories, fiction, anything, \"Collections & Anthologies\"),\n # Classify top-level fiction categories into fiction genres.\n #\n # First, handle large overarching genres that have subgenres\n # and adjacent genres.\n #\n # Fantasy\n m(Epic_Fantasy, fiction, \"Fantasy\", \"Epic\"),\n m(Historical_Fantasy, fiction, \"Fantasy\", \"Historical\"),\n m(Urban_Fantasy, fiction, \"Fantasy\", \"Urban\"),\n m(Fantasy, fiction, \"Fantasy\"),\n m(Fantasy, fiction, \"Romance\", \"Fantasy\"),\n m(Fantasy, fiction, \"Sagas\"),\n # Mystery\n # n.b. no BISAC for Paranormal_Mystery\n m(\n Crime_Detective_Stories,\n fiction,\n \"Mystery & Detective\",\n \"Private Investigators\",\n ),\n m(Crime_Detective_Stories, fiction, \"Crime\"),\n m(Crime_Detective_Stories, fiction, \"Thrillers\", \"Crime\"),\n m(Hard_Boiled_Mystery, fiction, \"Mystery & Detective\", \"Hard-Boiled\"),\n m(Police_Procedural, fiction, \"Mystery & Detective\", \"Police Procedural\"),\n m(Cozy_Mystery, fiction, \"Mystery & Detective\", \"Cozy\"),\n m(Historical_Mystery, fiction, \"Mystery & Detective\", \"Historical\"),\n m(Women_Detectives, fiction, \"Mystery & Detective\", \"Women Sleuths\"),\n m(Mystery, fiction, anything, \"Mystery & Detective\"),\n # Horror\n m(Ghost_Stories, fiction, \"Ghost\"),\n m(Occult_Horror, fiction, \"Occult & Supernatural\"),\n m(Gothic_Horror, fiction, \"Gothic\"),\n m(Horror, fiction, \"Horror\"),\n # Romance\n # n.b. 
no BISAC for Gothic Romance\n m(Contemporary_Romance, fiction, \"Romance\", \"Contemporary\"),\n m(Historical_Romance, fiction, \"Romance\", \"Historical\"),\n m(Paranormal_Romance, fiction, \"Romance\", \"Paranormal\"),\n m(Western_Romance, fiction, \"Romance\", \"Western\"),\n m(Romantic_Suspense, fiction, \"Romance\", \"Suspense\"),\n m(Romantic_SF, fiction, \"Romance\", \"Time Travel\"),\n m(Romantic_SF, fiction, \"Romance\", \"Science Fiction\"),\n m(Romance, fiction, \"Romance\"),\n # Science fiction\n # n.b. no BISAC for Cyberpunk\n m(Dystopian_SF, fiction, \"Dystopian\"),\n m(Space_Opera, fiction, \"Science Fiction\", \"Space Opera\"),\n m(Military_SF, fiction, \"Science Fiction\", \"Military\"),\n m(Alternative_History, fiction, \"Alternative History\"),\n # Juvenile steampunk is classified directly beneath 'fiction'.\n m(Steampunk, fiction, anything, \"Steampunk\"),\n m(Science_Fiction, fiction, \"Science Fiction\"),\n # Thrillers\n # n.b. no BISAC for Supernatural_Thriller\n m(Historical_Thriller, fiction, \"Thrillers\", \"Historical\"),\n m(Espionage, fiction, \"Thrillers\", \"Espionage\"),\n m(Medical_Thriller, fiction, \"Thrillers\", \"Medical\"),\n m(Political_Thriller, fiction, \"Thrillers\", \"Political\"),\n m(Legal_Thriller, fiction, \"Thrillers\", \"Legal\"),\n m(Technothriller, fiction, \"Thrillers\", \"Technological\"),\n m(Military_Thriller, fiction, \"Thrillers\", \"Military\"),\n m(Suspense_Thriller, fiction, \"Thrillers\"),\n # Then handle the less complicated genres of fiction.\n m(Adventure, fiction, \"Action & Adventure\"),\n m(Adventure, fiction, \"Sea Stories\"),\n m(Adventure, fiction, \"War & Military\"),\n m(Classics, fiction, \"Classics\"),\n m(Folklore, fiction, \"Fairy Tales, Folk Tales, Legends & Mythology\"),\n m(Historical_Fiction, anything, \"Historical\"),\n m(Humorous_Fiction, fiction, \"Humorous\"),\n m(Humorous_Fiction, fiction, \"Satire\"),\n m(Literary_Fiction, fiction, \"Literary\"),\n m(LGBTQ_Fiction, fiction, \"Gay\"),\n m(LGBTQ_Fiction, fiction, \"Lesbian\"),\n m(LGBTQ_Fiction, fiction, \"Gay & Lesbian\"),\n m(Religious_Fiction, fiction, \"Religious\"),\n m(Religious_Fiction, fiction, \"Jewish\"),\n m(Religious_Fiction, fiction, \"Visionary & Metaphysical\"),\n m(Womens_Fiction, fiction, anything, \"Contemporary Women\"),\n m(Westerns, fiction, \"Westerns\"),\n # n.b. BISAC \"Fiction / Urban\" is distinct from \"Fiction /\n # African-American / Urban\", and does not map to any of our\n # genres.\n m(Urban_Fiction, fiction, \"African American\", \"Urban\"),\n # BISAC classifies these genres at the top level, which we\n # treat as 'nonfiction', but we classify them as fiction. 
It\n # doesn't matter because they're neither, really.\n m(Drama, nonfiction, \"Drama\"),\n m(Poetry, nonfiction, \"Poetry\"),\n # Now on to nonfiction.\n # Classify top-level nonfiction categories into fiction genres.\n #\n # First, handle large overarching genres that have subgenres\n # and adjacent genres.\n #\n # Art & Design\n m(Architecture, nonfiction, \"Architecture\"),\n m(Art_Criticism_Theory, nonfiction, \"Art\", \"Criticism & Theory\"),\n m(Art_History, nonfiction, \"Art\", \"History\"),\n m(Fashion, nonfiction, \"Design\", \"Fashion\"),\n m(Design, nonfiction, \"Design\"),\n m(Art_Design, nonfiction, \"Art\"),\n m(Photography, nonfiction, \"Photography\"),\n # Personal Finance & Business\n m(Business, nonfiction, \"Business & Economics\", RE(\"^Business.*\")),\n m(Business, nonfiction, \"Business & Economics\", \"Accounting\"),\n m(Economics, nonfiction, \"Business & Economics\", \"Economics\"),\n m(Economics, nonfiction, \"Business & Economics\", \"Environmental Economics\"),\n m(Economics, nonfiction, \"Business & Economics\", RE(\"^Econo.*\")),\n m(Management_Leadership, nonfiction, \"Business & Economics\", \"Management\"),\n m(\n Management_Leadership,\n nonfiction,\n \"Business & Economics\",\n \"Management Science\",\n ),\n m(Management_Leadership, nonfiction, \"Business & Economics\", \"Leadership\"),\n m(\n Personal_Finance_Investing,\n nonfiction,\n \"Business & Economics\",\n \"Personal Finance\",\n ),\n m(\n Personal_Finance_Investing,\n nonfiction,\n \"Business & Economics\",\n \"Personal Success\",\n ),\n m(\n Personal_Finance_Investing,\n nonfiction,\n \"Business & Economics\",\n \"Investments & Securities\",\n ),\n m(Real_Estate, nonfiction, \"Business & Economics\", \"Real Estate\"),\n m(Personal_Finance_Business, nonfiction, \"Business & Economics\"),\n # Parenting & Family\n m(Parenting, nonfiction, \"Family & Relationships\", \"Parenting\"),\n m(Family_Relationships, nonfiction, \"Family & Relationships\"),\n # Food & Health\n m(Bartending_Cocktails, nonfiction, \"Cooking\", \"Beverages\"),\n m(Health_Diet, nonfiction, \"Cooking\", \"Health & Healing\"),\n m(Health_Diet, nonfiction, \"Health & Fitness\"),\n m(Vegetarian_Vegan, nonfiction, \"Cooking\", \"Vegetarian & Vegan\"),\n m(Cooking, nonfiction, \"Cooking\"),\n # History\n m(African_History, nonfiction, \"History\", \"Africa\"),\n m(Ancient_History, nonfiction, \"History\", \"Ancient\"),\n m(Asian_History, nonfiction, \"History\", \"Asia\"),\n m(Civil_War_History, nonfiction, \"History\", \"United States\", RE(\"^Civil War\")),\n m(European_History, nonfiction, \"History\", \"Europe\"),\n m(Latin_American_History, nonfiction, \"History\", \"Latin America\"),\n m(Medieval_History, nonfiction, \"History\", \"Medieval\"),\n m(Military_History, nonfiction, \"History\", \"Military\"),\n m(Renaissance_Early_Modern_History, nonfiction, \"History\", \"Renaissance\"),\n m(\n Renaissance_Early_Modern_History,\n nonfiction,\n \"History\",\n \"Modern\",\n RE(\"^1[678]th Century\"),\n ),\n m(Modern_History, nonfiction, \"History\", \"Modern\"),\n m(United_States_History, nonfiction, \"History\", \"Native American\"),\n m(United_States_History, nonfiction, \"History\", \"United States\"),\n m(World_History, nonfiction, \"History\", \"World\"),\n m(World_History, nonfiction, \"History\", \"Civilization\"),\n m(History, nonfiction, \"History\"),\n # Hobbies & Home\n m(Antiques_Collectibles, nonfiction, \"Antiques & Collectibles\"),\n m(Crafts_Hobbies, nonfiction, \"Crafts & Hobbies\"),\n m(Gardening, nonfiction, 
\"Gardening\"),\n m(Games, nonfiction, \"Games\"),\n m(House_Home, nonfiction, \"House & Home\"),\n m(Pets, nonfiction, \"Pets\"),\n # Entertainment\n m(Film_TV, nonfiction, \"Performing Arts\", \"Film & Video\"),\n m(Film_TV, nonfiction, \"Performing Arts\", \"Television\"),\n m(Music, nonfiction, \"Music\"),\n m(Performing_Arts, nonfiction, \"Performing Arts\"),\n # Reference & Study Aids\n m(Dictionaries, nonfiction, \"Reference\", \"Dictionaries\"),\n m(Foreign_Language_Study, nonfiction, \"Foreign Language Study\"),\n m(Law, nonfiction, \"Law\"),\n m(Study_Aids, nonfiction, \"Study Aids\"),\n m(Reference_Study_Aids, nonfiction, \"Reference\"),\n m(Reference_Study_Aids, nonfiction, \"Language Arts & Disciplines\"),\n # Religion & Spirituality\n m(Body_Mind_Spirit, nonfiction, body_mind_spirit),\n m(Buddhism, nonfiction, \"Religion\", \"Buddhism\"),\n m(Christianity, nonfiction, \"Religion\", RE(\"^Biblical\")),\n m(Christianity, nonfiction, \"Religion\", RE(\"^Christian\")),\n m(Christianity, nonfiction, \"Bibles\"),\n m(Hinduism, nonfiction, \"Religion\", \"Hinduism\"),\n m(Islam, nonfiction, \"Religion\", \"Islam\"),\n m(Judaism, nonfiction, \"Religion\", \"Judaism\"),\n m(Religion_Spirituality, nonfiction, \"Religion\"),\n # Science & Technology\n m(Computers, nonfiction, \"Computers\"),\n m(Mathematics, nonfiction, \"Mathematics\"),\n m(Medical, nonfiction, \"Medical\"),\n m(Nature, nonfiction, \"Nature\"),\n m(Psychology, nonfiction, psychology),\n m(Political_Science, nonfiction, \"Social Science\", \"Politics & Government\"),\n m(Social_Sciences, nonfiction, \"Social Science\"),\n m(Technology, nonfiction, technology),\n m(Technology, nonfiction, \"Transportation\"),\n m(Science, nonfiction, \"Science\"),\n # Then handle the less complicated genres of nonfiction.\n # n.b. no BISAC for Periodicals.\n # n.b. 
no BISAC for Humorous Nonfiction per se.\n m(Music, nonfiction, \"Biography & Autobiography\", \"Composers & Musicians\"),\n m(\n Entertainment,\n nonfiction,\n \"Biography & Autobiography\",\n \"Entertainment & Performing Arts\",\n ),\n m(Biography_Memoir, nonfiction, \"Biography & Autobiography\"),\n m(Education, nonfiction, \"Education\"),\n m(Philosophy, nonfiction, \"Philosophy\"),\n m(Political_Science, nonfiction, \"Political Science\"),\n m(Self_Help, nonfiction, \"Self-Help\"),\n m(Sports, nonfiction, \"Sports & Recreation\"),\n m(Travel, nonfiction, \"Travel\"),\n m(True_Crime, nonfiction, \"True Crime\"),\n # Handle cases where Juvenile/YA uses different terms than\n # would be used for the same books for adults.\n m(Business, nonfiction, \"Careers\"),\n m(Christianity, nonfiction, \"Religious\", \"Christian\"),\n m(Cooking, nonfiction, \"Cooking & Food\"),\n m(Education, nonfiction, \"School & Education\"),\n m(Family_Relationships, nonfiction, \"Family\"),\n m(Fantasy, fiction, \"Fantasy & Magic\"),\n m(Ghost_Stories, fiction, \"Ghost Stories\"),\n m(Fantasy, fiction, \"Magical Realism\"),\n m(Fantasy, fiction, \"Mermaids\"),\n m(Fashion, nonfiction, \"Fashion\"),\n m(Folklore, fiction, \"Fairy Tales & Folklore\"),\n m(Folklore, fiction, \"Legends, Myths, Fables\"),\n m(Games, nonfiction, \"Games & Activities\"),\n m(Health_Diet, nonfiction, \"Health & Daily Living\"),\n m(Horror, fiction, \"Horror & Ghost Stories\"),\n m(Horror, fiction, \"Monsters\"),\n m(Horror, fiction, \"Paranormal\"),\n m(Horror, fiction, \"Paranormal, Occult & Supernatural\"),\n m(Horror, fiction, \"Vampires\"),\n m(Horror, fiction, \"Werewolves & Shifters\"),\n m(Horror, fiction, \"Zombies\"),\n m(Humorous_Fiction, fiction, \"Humorous Stories\"),\n m(Humorous_Nonfiction, \"Young Adult Nonfiction\", \"Humor\"),\n m(LGBTQ_Fiction, fiction, \"LGBT\"),\n m(Law, nonfiction, \"Law & Crime\"),\n m(Mystery, fiction, \"Mysteries & Detective Stories\"),\n m(Nature, nonfiction, \"Animals\"),\n m(Personal_Finance_Investing, nonfiction, \"Personal Finance\"),\n m(Poetry, fiction, \"Nursery Rhymes\"),\n m(Poetry, fiction, \"Stories in Verse\"),\n m(Poetry, fiction, \"Novels in Verse\"),\n m(Poetry, fiction, \"Poetry\"),\n m(Reference_Study_Aids, nonfiction, \"Language Arts\"),\n m(Romance, fiction, \"Love & Romance\"),\n m(Science_Fiction, fiction, \"Robots\"),\n m(Science_Fiction, fiction, \"Time Travel\"),\n m(Social_Sciences, nonfiction, \"Media Studies\"),\n m(Suspense_Thriller, fiction, \"Superheroes\"),\n m(Suspense_Thriller, fiction, \"Thrillers & Suspense\"),\n # Most of the subcategories of 'Science & Nature' go into Nature,\n # but these go into Science.\n m(Science, nonfiction, \"Science & Nature\", \"Discoveries\"),\n m(Science, nonfiction, \"Science & Nature\", \"Experiments & Projects\"),\n m(Science, nonfiction, \"Science & Nature\", \"History of Science\"),\n m(Science, nonfiction, \"Science & Nature\", \"Physics\"),\n m(Science, nonfiction, \"Science & Nature\", \"Weights & Measures\"),\n m(Science, nonfiction, \"Science & Nature\", \"General\"),\n # Any other subcategory of 'Science & Nature' goes under Nature\n m(Nature, nonfiction, \"Science & Nature\", something),\n # Life Strategies is juvenile/YA-specific, and contains both\n # fiction and nonfiction. It's called \"Social Issues\" for\n # juvenile fiction/nonfiction, and \"Social Topics\" for YA\n # nonfiction. 
\"Social Themes\" in YA fiction is _not_\n # classified as Life Strategies.\n m(Life_Strategies, fiction, \"social issues\"),\n m(Life_Strategies, nonfiction, \"social issues\"),\n m(Life_Strategies, nonfiction, social_topics),\n ]\n\n @classmethod\n def is_fiction(cls, identifier, name):\n for ruleset in cls.FICTION:\n fiction = ruleset.match(*name)\n if fiction is cls.stop:\n return None\n if fiction is not None:\n return fiction\n keyword = \"/\".join(name)\n return KeywordBasedClassifier.is_fiction(identifier, keyword)\n\n @classmethod\n def audience(cls, identifier, name):\n for ruleset in cls.AUDIENCE:\n audience = ruleset.match(*name)\n if audience is cls.stop:\n return None\n if audience is not None:\n return audience\n keyword = \"/\".join(name)\n return KeywordBasedClassifier.audience(identifier, keyword)\n\n @classmethod\n def target_age(cls, identifier, name):\n for ruleset in cls.TARGET_AGE:\n target_age = ruleset.match(*name)\n if target_age is cls.stop:\n return None\n if target_age is not None:\n return target_age\n\n # If all else fails, try the keyword-based classifier.\n keyword = \"/\".join(name)\n return KeywordBasedClassifier.target_age(identifier, keyword)\n\n @classmethod\n def genre(cls, identifier, name, fiction, audience):\n for ruleset in cls.GENRE:\n genre = ruleset.match(*name)\n if genre is cls.stop:\n return None\n if genre is not None:\n return genre\n\n # If all else fails, try a keyword-based classifier.\n keyword = \"/\".join(name)\n return KeywordBasedClassifier.genre(identifier, keyword, fiction, audience)\n\n # A BISAC name copied from the BISAC website may end with this\n # human-readable note, which is not part of the official name.\n see_also = re.compile(r\"\\(see also .*\")\n\n @classmethod\n def scrub_identifier(cls, identifier):\n if not identifier:\n return identifier\n if identifier.startswith(\"FB\"):\n identifier = identifier[2:]\n if identifier in cls.NAMES:\n # We know the canonical name for this BISAC identifier,\n # and we are better equipped to classify the canonical\n # names, so use the canonical name in preference to\n # whatever name the distributor provided.\n return (identifier, cls.NAMES[identifier])\n return identifier\n\n @classmethod\n def scrub_name(cls, name):\n \"\"\"Split the name into a list of lowercase keywords.\"\"\"\n\n # All of our comparisons are case-insensitive.\n name = Lowercased(name)\n\n # Take corrective action to finame a number of common problems\n # seen in the wild.\n #\n\n # A comma may have been replaced with a space.\n name = name.replace(\" \", \", \")\n\n # The name may be enclosed in an extra set of quotes.\n for quote in \"'\\\"\":\n if name.startswith(quote):\n name = name[1:]\n if name.endswith(quote):\n name = name[:-1]\n\n # The name may end with an extraneous marker character or\n # (if it was copied from the BISAC website) an asterisk.\n for separator in \"|/*\":\n if name.endswith(separator):\n name = name[:-1]\n\n # A name copied from the BISAC website may end with a\n # human-readable cross-reference.\n name = cls.see_also.sub(\"\", name)\n\n # The canonical separator character is a slash, but a pipe\n # has also been used.\n for separator in \"|/\":\n if separator in name:\n parts = [name.strip() for name in name.split(separator) if name.strip()]\n break\n else:\n parts = [name]\n return parts\n\n\nClassifier.classifiers[Classifier.BISAC] = BISACClassifier\n", "id": "9072811", "language": "Python", "matching_score": 3.697061061859131, "max_stars_count": 0, "path": 
"core/classifier/bisac.py" }, { "content": "from core import classifier\nfrom core.classifier import *\nfrom core.classifier.bic import BICClassifier as BIC\n\n\nclass TestBIC(object):\n def test_is_fiction(self):\n def fic(bic):\n return BIC.is_fiction(BIC.scrub_identifier(bic), None)\n\n assert True == fic(\"FCA\")\n assert True == fic(\"YFL\")\n assert False == fic(\"YWR\")\n assert False == fic(\"HB\")\n\n def test_audience(self):\n young_adult = Classifier.AUDIENCE_YOUNG_ADULT\n adult = Classifier.AUDIENCE_ADULT\n\n def aud(bic):\n return BIC.audience(BIC.scrub_identifier(bic), None)\n\n assert adult == aud(\"DD\")\n assert young_adult == aud(\"YFA\")\n\n def test_genre(self):\n def gen(bic):\n return BIC.genre(BIC.scrub_identifier(bic), None)\n\n assert classifier.Art_Design == gen(\"A\")\n assert classifier.Art_Design == gen(\"AB\")\n assert classifier.Music == gen(\"AV\")\n assert classifier.Fantasy == gen(\"FM\")\n assert classifier.Economics == gen(\"KC\")\n assert classifier.Short_Stories == gen(\"FYB\")\n assert classifier.Music == gen(\"YNC\")\n assert classifier.European_History == gen(\"HBJD\")\n", "id": "2077882", "language": "Python", "matching_score": 0.49892547726631165, "max_stars_count": 0, "path": "tests/core/classifiers/test_bic.py" }, { "content": "import logging\n\nimport core.classifier as genres\nfrom core import classifier\nfrom core.classifier import Classifier, GenreData, fiction_genres, nonfiction_genres\nfrom core.lane import (\n DatabaseBackedWorkList,\n DefaultSortOrderFacets,\n Facets,\n Lane,\n WorkList,\n)\nfrom core.model import (\n CachedFeed,\n Contributor,\n DataSource,\n Edition,\n ExternalIntegration,\n Library,\n Session,\n create,\n get_one,\n)\nfrom core.util import LanguageCodes\n\nfrom .config import CannotLoadConfiguration, Configuration\nfrom .novelist import NoveListAPI\n\n\ndef load_lanes(_db, library):\n \"\"\"Return a WorkList that reflects the current lane structure of the\n Library.\n\n If no top-level visible lanes are configured, the WorkList will be\n configured to show every book in the collection.\n\n If a single top-level Lane is configured, it will returned as the\n WorkList.\n\n Otherwise, a WorkList containing the visible top-level lanes is\n returned.\n \"\"\"\n top_level = WorkList.top_level_for_library(_db, library)\n\n # It's likely this WorkList will be used across sessions, so\n # expunge any data model objects from the database session.\n #\n # TODO: This is the cause of a lot of problems in the cached OPDS\n # feed generator. There, these Lanes are used in a normal database\n # session and we end up needing hacks to merge them back into the\n # session.\n if isinstance(top_level, Lane):\n to_expunge = [top_level]\n else:\n to_expunge = [x for x in top_level.children if isinstance(x, Lane)]\n list(map(_db.expunge, to_expunge))\n return top_level\n\n\ndef _lane_configuration_from_collection_sizes(estimates):\n \"\"\"Sort a library's collections into 'large', 'small', and 'tiny'\n subcollections based on language.\n\n :param estimates: A Counter.\n\n :return: A 3-tuple (large, small, tiny). 'large' will contain the\n collection with the largest language, and any languages with a\n collection more than 10% the size of the largest\n collection. 'small' will contain any languages with a collection\n more than 1% the size of the largest collection, and 'tiny' will\n contain all other languages represented in `estimates`.\n \"\"\"\n if not estimates:\n # There are no holdings. 
Assume we have a large English\n # collection and nothing else.\n return [\"eng\"], [], []\n\n large = []\n small = []\n tiny = []\n\n [(ignore, largest)] = estimates.most_common(1)\n for language, count in estimates.most_common():\n if count > largest * 0.1:\n large.append(language)\n elif count > largest * 0.01:\n small.append(language)\n else:\n tiny.append(language)\n return large, small, tiny\n\n\ndef create_default_lanes(_db, library):\n \"\"\"Reset the lanes for the given library to the default.\n\n The database will have the following top-level lanes for\n each large-collection:\n 'Adult Fiction', 'Adult Nonfiction', 'Young Adult Fiction',\n 'Young Adult Nonfiction', and 'Children'.\n Each lane contains additional sublanes.\n If an NYT integration is configured, there will also be a\n 'Best Sellers' top-level lane.\n\n If there are any small- or tiny-collection languages, the database\n will also have a top-level lane called 'World Languages'. The\n 'World Languages' lane will have a sublane for every small- and\n tiny-collection languages. The small-collection languages will\n have \"Adult Fiction\", \"Adult Nonfiction\", and \"Children/YA\"\n sublanes; the tiny-collection languages will not have any sublanes.\n\n If run on a Library that already has Lane configuration, this can\n be an extremely destructive method. All new Lanes will be visible\n and all Lanes based on CustomLists (but not the CustomLists\n themselves) will be destroyed.\n\n \"\"\"\n # Delete existing lanes.\n for lane in _db.query(Lane).filter(Lane.library_id == library.id):\n _db.delete(lane)\n\n top_level_lanes = []\n\n # Hopefully this library is configured with explicit guidance as\n # to how the languages should be set up.\n large = Configuration.large_collection_languages(library) or []\n small = Configuration.small_collection_languages(library) or []\n tiny = Configuration.tiny_collection_languages(library) or []\n\n # If there are no language configuration settings, we can estimate\n # the current collection size to determine the lanes.\n if not large and not small and not tiny:\n estimates = library.estimated_holdings_by_language()\n large, small, tiny = _lane_configuration_from_collection_sizes(estimates)\n priority = 0\n for language in large:\n priority = create_lanes_for_large_collection(\n _db, library, language, priority=priority\n )\n\n create_world_languages_lane(_db, library, small, tiny, priority)\n\n\ndef lane_from_genres(\n _db,\n library,\n genres,\n display_name=None,\n exclude_genres=None,\n priority=0,\n audiences=None,\n **extra_args\n):\n \"\"\"Turn genre info into a Lane object.\"\"\"\n\n genre_lane_instructions = {\n \"Dystopian SF\": dict(display_name=\"Dystopian\"),\n \"Erotica\": dict(audiences=[Classifier.AUDIENCE_ADULTS_ONLY]),\n \"Humorous Fiction\": dict(display_name=\"Humor\"),\n \"Media Tie-in SF\": dict(display_name=\"Movie and TV Novelizations\"),\n \"Suspense/Thriller\": dict(display_name=\"Thriller\"),\n \"Humorous Nonfiction\": dict(display_name=\"Humor\"),\n \"Political Science\": dict(display_name=\"Politics & Current Events\"),\n \"Periodicals\": dict(visible=False),\n }\n\n # Create sublanes first.\n sublanes = []\n for genre in genres:\n if isinstance(genre, dict):\n sublane_priority = 0\n for subgenre in genre.get(\"subgenres\", []):\n sublanes.append(\n lane_from_genres(\n _db,\n library,\n [subgenre],\n priority=sublane_priority,\n **extra_args\n )\n )\n sublane_priority += 1\n\n # Now that we have sublanes we don't care about subgenres anymore.\n genres = [\n 
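# --- Illustrative sketch (not part of the original module) ---
# _lane_configuration_from_collection_sizes above buckets languages against
# the single largest collection: more than 10% of the largest is "large",
# more than 1% is "small", everything else is "tiny" (and with no holdings
# at all it assumes a large English collection). A quick standalone
# demonstration of those thresholds with assumed numbers:

from collections import Counter

estimates = Counter(eng=20000, spa=3000, fre=250, ger=12)

[(_, largest)] = estimates.most_common(1)
large, small, tiny = [], [], []
for language, count in estimates.most_common():
    if count > largest * 0.1:
        large.append(language)
    elif count > largest * 0.01:
        small.append(language)
    else:
        tiny.append(language)

# 3000 > 2000 so Spanish joins English in "large"; 250 > 200 makes French
# "small"; German's 12 holdings leave it "tiny".
assert (large, small, tiny) == (["eng", "spa"], ["fre"], ["ger"])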
genre.get(\"name\")\n if isinstance(genre, dict)\n else genre.name\n if isinstance(genre, GenreData)\n else genre\n for genre in genres\n ]\n\n exclude_genres = [\n genre.get(\"name\")\n if isinstance(genre, dict)\n else genre.name\n if isinstance(genre, GenreData)\n else genre\n for genre in exclude_genres or []\n ]\n\n fiction = None\n visible = True\n if len(genres) == 1:\n if classifier.genres.get(genres[0]):\n genredata = classifier.genres[genres[0]]\n else:\n genredata = GenreData(genres[0], False)\n fiction = genredata.is_fiction\n\n if genres[0] in list(genre_lane_instructions.keys()):\n instructions = genre_lane_instructions[genres[0]]\n if not display_name and \"display_name\" in instructions:\n display_name = instructions.get(\"display_name\")\n if \"audiences\" in instructions:\n audiences = instructions.get(\"audiences\")\n if \"visible\" in instructions:\n visible = instructions.get(\"visible\")\n\n if not display_name:\n display_name = \", \".join(sorted(genres))\n\n lane, ignore = create(\n _db,\n Lane,\n library_id=library.id,\n display_name=display_name,\n fiction=fiction,\n audiences=audiences,\n sublanes=sublanes,\n priority=priority,\n **extra_args\n )\n lane.visible = visible\n for genre in genres:\n lane.add_genre(genre)\n for genre in exclude_genres:\n lane.add_genre(genre, inclusive=False)\n return lane\n\n\ndef create_lanes_for_large_collection(_db, library, languages, priority=0):\n \"\"\"Ensure that the lanes appropriate to a large collection are all\n present.\n\n This means:\n\n * A \"%(language)s Adult Fiction\" lane containing sublanes for each fiction\n genre.\n * A \"%(language)s Adult Nonfiction\" lane containing sublanes for\n each nonfiction genre.\n * A \"%(language)s YA Fiction\" lane containing sublanes for the\n most popular YA fiction genres.\n * A \"%(language)s YA Nonfiction\" lane containing sublanes for the\n most popular YA fiction genres.\n * A \"%(language)s Children and Middle Grade\" lane containing\n sublanes for childrens' books at different age levels.\n\n :param library: Newly created lanes will be associated with this\n library.\n :param languages: Newly created lanes will contain only books\n in these languages.\n :return: A list of top-level Lane objects.\n\n TODO: If there are multiple large collections, their top-level lanes do\n not have distinct display names.\n \"\"\"\n if isinstance(languages, str):\n languages = [languages]\n\n ADULT = Classifier.AUDIENCES_ADULT\n YA = [Classifier.AUDIENCE_YOUNG_ADULT]\n CHILDREN = [Classifier.AUDIENCE_CHILDREN]\n\n common_args = dict(languages=languages, media=None)\n adult_common_args = dict(common_args)\n adult_common_args[\"audiences\"] = ADULT\n\n include_best_sellers = False\n nyt_data_source = DataSource.lookup(_db, DataSource.NYT)\n nyt_integration = get_one(\n _db,\n ExternalIntegration,\n goal=ExternalIntegration.METADATA_GOAL,\n protocol=ExternalIntegration.NYT,\n )\n if nyt_integration:\n include_best_sellers = True\n\n sublanes = []\n if include_best_sellers:\n best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"<NAME>ers\",\n priority=priority,\n **common_args\n )\n priority += 1\n best_sellers.list_datasource = nyt_data_source\n sublanes.append(best_sellers)\n\n adult_fiction_sublanes = []\n adult_fiction_priority = 0\n if include_best_sellers:\n adult_fiction_best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"<NAME>ers\",\n fiction=True,\n priority=adult_fiction_priority,\n **adult_common_args\n )\n 
adult_fiction_priority += 1\n adult_fiction_best_sellers.list_datasource = nyt_data_source\n adult_fiction_sublanes.append(adult_fiction_best_sellers)\n\n for genre in fiction_genres:\n if isinstance(genre, str):\n genre_name = genre\n else:\n genre_name = genre.get(\"name\")\n genre_lane = lane_from_genres(\n _db, library, [genre], priority=adult_fiction_priority, **adult_common_args\n )\n adult_fiction_priority += 1\n adult_fiction_sublanes.append(genre_lane)\n\n adult_fiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Fiction\",\n genres=[],\n sublanes=adult_fiction_sublanes,\n fiction=True,\n priority=priority,\n **adult_common_args\n )\n priority += 1\n sublanes.append(adult_fiction)\n\n adult_nonfiction_sublanes = []\n adult_nonfiction_priority = 0\n if include_best_sellers:\n adult_nonfiction_best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Best Sellers\",\n fiction=False,\n priority=adult_nonfiction_priority,\n **adult_common_args\n )\n adult_nonfiction_priority += 1\n adult_nonfiction_best_sellers.list_datasource = nyt_data_source\n adult_nonfiction_sublanes.append(adult_nonfiction_best_sellers)\n\n for genre in nonfiction_genres:\n # \"Life Strategies\" is a YA-specific genre that should not be\n # included in the Adult Nonfiction lane.\n if genre != genres.Life_Strategies:\n if isinstance(genre, str):\n genre_name = genre\n else:\n genre_name = genre.get(\"name\")\n genre_lane = lane_from_genres(\n _db,\n library,\n [genre],\n priority=adult_nonfiction_priority,\n **adult_common_args\n )\n adult_nonfiction_priority += 1\n adult_nonfiction_sublanes.append(genre_lane)\n\n adult_nonfiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Nonfiction\",\n genres=[],\n sublanes=adult_nonfiction_sublanes,\n fiction=False,\n priority=priority,\n **adult_common_args\n )\n priority += 1\n sublanes.append(adult_nonfiction)\n\n ya_common_args = dict(common_args)\n ya_common_args[\"audiences\"] = YA\n\n ya_fiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Young Adult Fiction\",\n genres=[],\n fiction=True,\n sublanes=[],\n priority=priority,\n **ya_common_args\n )\n priority += 1\n sublanes.append(ya_fiction)\n\n ya_fiction_priority = 0\n if include_best_sellers:\n ya_fiction_best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Best Sellers\",\n fiction=True,\n priority=ya_fiction_priority,\n **ya_common_args\n )\n ya_fiction_priority += 1\n ya_fiction_best_sellers.list_datasource = nyt_data_source\n ya_fiction.sublanes.append(ya_fiction_best_sellers)\n\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Dystopian_SF],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Fantasy],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Comics_Graphic_Novels],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Literary_Fiction],\n display_name=\"Contemporary Fiction\",\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.LGBTQ_Fiction],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n 
ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Suspense_Thriller, genres.Mystery],\n display_name=\"Mystery & Thriller\",\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Romance],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Science_Fiction],\n exclude_genres=[genres.Dystopian_SF, genres.Steampunk],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n ya_fiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Steampunk],\n priority=ya_fiction_priority,\n **ya_common_args\n )\n )\n ya_fiction_priority += 1\n\n ya_nonfiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Young Adult Nonfiction\",\n genres=[],\n fiction=False,\n sublanes=[],\n priority=priority,\n **ya_common_args\n )\n priority += 1\n sublanes.append(ya_nonfiction)\n\n ya_nonfiction_priority = 0\n if include_best_sellers:\n ya_nonfiction_best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Best Sellers\",\n fiction=False,\n priority=ya_nonfiction_priority,\n **ya_common_args\n )\n ya_nonfiction_priority += 1\n ya_nonfiction_best_sellers.list_datasource = nyt_data_source\n ya_nonfiction.sublanes.append(ya_nonfiction_best_sellers)\n\n ya_nonfiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Biography_Memoir],\n display_name=\"Biography\",\n priority=ya_nonfiction_priority,\n **ya_common_args\n )\n )\n ya_nonfiction_priority += 1\n ya_nonfiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.History, genres.Social_Sciences],\n display_name=\"History & Sociology\",\n priority=ya_nonfiction_priority,\n **ya_common_args\n )\n )\n ya_nonfiction_priority += 1\n ya_nonfiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Life_Strategies],\n priority=ya_nonfiction_priority,\n **ya_common_args\n )\n )\n ya_nonfiction_priority += 1\n ya_nonfiction.sublanes.append(\n lane_from_genres(\n _db,\n library,\n [genres.Religion_Spirituality],\n priority=ya_nonfiction_priority,\n **ya_common_args\n )\n )\n ya_nonfiction_priority += 1\n\n children_common_args = dict(common_args)\n children_common_args[\"audiences\"] = CHILDREN\n\n children, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Children and Middle Grade\",\n genres=[],\n fiction=None,\n sublanes=[],\n priority=priority,\n **children_common_args\n )\n priority += 1\n sublanes.append(children)\n\n children_priority = 0\n if include_best_sellers:\n children_best_sellers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Best Sellers\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_best_sellers.list_datasource = nyt_data_source\n children.sublanes.append(children_best_sellers)\n\n picture_books, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Picture Books\",\n target_age=(0, 4),\n genres=[],\n fiction=None,\n priority=children_priority,\n languages=languages,\n )\n children_priority += 1\n children.sublanes.append(picture_books)\n\n easy_readers, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Easy Readers\",\n target_age=(5, 8),\n genres=[],\n fiction=None,\n priority=children_priority,\n languages=languages,\n )\n 
children_priority += 1\n children.sublanes.append(easy_readers)\n\n chapter_books, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Chapter Books\",\n target_age=(9, 12),\n genres=[],\n fiction=None,\n priority=children_priority,\n languages=languages,\n )\n children_priority += 1\n children.sublanes.append(chapter_books)\n\n children_poetry, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Poetry Books\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_poetry.add_genre(genres.Poetry.name)\n children.sublanes.append(children_poetry)\n\n children_folklore, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Folklore\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_folklore.add_genre(genres.Folklore.name)\n children.sublanes.append(children_folklore)\n\n children_fantasy, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Fantasy\",\n fiction=True,\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_fantasy.add_genre(genres.Fantasy.name)\n children.sublanes.append(children_fantasy)\n\n children_sf, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Science Fiction\",\n fiction=True,\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_sf.add_genre(genres.Science_Fiction.name)\n children.sublanes.append(children_sf)\n\n realistic_fiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Realistic Fiction\",\n fiction=True,\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n realistic_fiction.add_genre(genres.Literary_Fiction.name)\n children.sublanes.append(realistic_fiction)\n\n children_graphic_novels, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Comics & Graphic Novels\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_graphic_novels.add_genre(genres.Comics_Graphic_Novels.name)\n children.sublanes.append(children_graphic_novels)\n\n children_biography, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Biography\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_biography.add_genre(genres.Biography_Memoir.name)\n children.sublanes.append(children_biography)\n\n children_historical_fiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Historical Fiction\",\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n children_historical_fiction.add_genre(genres.Historical_Fiction.name)\n children.sublanes.append(children_historical_fiction)\n\n informational, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Informational Books\",\n fiction=False,\n genres=[],\n priority=children_priority,\n **children_common_args\n )\n children_priority += 1\n informational.add_genre(genres.Biography_Memoir.name, inclusive=False)\n children.sublanes.append(informational)\n\n return priority\n\n\ndef create_world_languages_lane(\n _db,\n library,\n small_languages,\n tiny_languages,\n priority=0,\n):\n \"\"\"Create a lane called 'World Languages' whose sublanes represent\n the non-large language collections available to this library.\n \"\"\"\n if not small_languages and not tiny_languages:\n # All the languages on this system have large collections, so\n # there is no need for a 'World 
Languages' lane.\n return priority\n\n complete_language_set = set()\n for list in (small_languages, tiny_languages):\n for languageset in list:\n if isinstance(languageset, str):\n complete_language_set.add(languageset)\n else:\n complete_language_set.update(languageset)\n\n world_languages, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"World Languages\",\n fiction=None,\n priority=priority,\n languages=complete_language_set,\n media=[Edition.BOOK_MEDIUM],\n genres=[],\n )\n priority += 1\n\n language_priority = 0\n for small in small_languages:\n # Create a lane (with sublanes) for each small collection.\n language_priority = create_lane_for_small_collection(\n _db, library, world_languages, small, language_priority\n )\n for tiny in tiny_languages:\n # Create a lane (no sublanes) for each tiny collection.\n language_priority = create_lane_for_tiny_collection(\n _db, library, world_languages, tiny, language_priority\n )\n return priority\n\n\ndef create_lane_for_small_collection(_db, library, parent, languages, priority=0):\n \"\"\"Create a lane (with sublanes) for a small collection based on language,\n if the language exists in the lookup table.\n\n :param parent: The parent of the new lane.\n \"\"\"\n if isinstance(languages, str):\n languages = [languages]\n\n ADULT = Classifier.AUDIENCES_ADULT\n YA_CHILDREN = [Classifier.AUDIENCE_YOUNG_ADULT, Classifier.AUDIENCE_CHILDREN]\n\n common_args = dict(\n languages=languages,\n media=[Edition.BOOK_MEDIUM],\n genres=[],\n )\n\n try:\n language_identifier = LanguageCodes.name_for_languageset(languages)\n except ValueError as e:\n logging.getLogger().warning(\n \"Could not create a lane for small collection with languages %s\", languages\n )\n return 0\n\n sublane_priority = 0\n\n adult_fiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Fiction\",\n fiction=True,\n audiences=ADULT,\n priority=sublane_priority,\n **common_args\n )\n sublane_priority += 1\n\n adult_nonfiction, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Nonfiction\",\n fiction=False,\n audiences=ADULT,\n priority=sublane_priority,\n **common_args\n )\n sublane_priority += 1\n\n ya_children, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=\"Children & Young Adult\",\n fiction=None,\n audiences=YA_CHILDREN,\n priority=sublane_priority,\n **common_args\n )\n sublane_priority += 1\n\n lane, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=language_identifier,\n parent=parent,\n sublanes=[adult_fiction, adult_nonfiction, ya_children],\n priority=priority,\n **common_args\n )\n priority += 1\n return priority\n\n\ndef create_lane_for_tiny_collection(_db, library, parent, languages, priority=0):\n \"\"\"Create a single lane for a tiny collection based on language,\n if the language exists in the lookup table.\n\n :param parent: The parent of the new lane.\n \"\"\"\n if not languages:\n return None\n\n if isinstance(languages, str):\n languages = [languages]\n\n try:\n name = LanguageCodes.name_for_languageset(languages)\n except ValueError as e:\n logging.getLogger().warning(\n \"Could not create a lane for tiny collection with languages %s\", languages\n )\n return 0\n\n language_lane, ignore = create(\n _db,\n Lane,\n library=library,\n display_name=name,\n parent=parent,\n genres=[],\n media=[Edition.BOOK_MEDIUM],\n fiction=None,\n priority=priority,\n languages=languages,\n )\n return priority + 1\n\n\nclass DynamicLane(WorkList):\n \"\"\"A WorkList that's used to from an OPDS 
lane, but isn't a Lane\n in the database.\"\"\"\n\n\nclass DatabaseExclusiveWorkList(DatabaseBackedWorkList):\n \"\"\"A DatabaseBackedWorkList that can _only_ get Works through the database.\"\"\"\n\n def works(self, *args, **kwargs):\n return self.works_from_database(*args, **kwargs)\n\n\nclass WorkBasedLane(DynamicLane):\n \"\"\"A lane that shows works related to one particular Work.\"\"\"\n\n DISPLAY_NAME = None\n ROUTE = None\n\n def __init__(self, library, work, display_name=None, children=None, **kwargs):\n self.work = work\n self.edition = work.presentation_edition\n\n # To avoid showing the same book in other languages, the value\n # of this lane's .languages is always derived from the\n # language of the work. All children of this lane will be put\n # under a similar restriction.\n self.source_language = self.edition.language\n kwargs[\"languages\"] = [self.source_language]\n\n # To avoid showing inappropriate material, the value of this\n # lane's .audiences setting is always derived from the\n # audience of the work. All children of this lane will be\n # under a similar restriction.\n self.source_audience = self.work.audience\n kwargs[\"audiences\"] = self.audiences_list_from_source()\n\n display_name = display_name or self.DISPLAY_NAME\n\n children = children or list()\n\n super(WorkBasedLane, self).initialize(\n library, display_name=display_name, children=children, **kwargs\n )\n\n @property\n def url_arguments(self):\n if not self.ROUTE:\n raise NotImplementedError()\n identifier = self.edition.primary_identifier\n kwargs = dict(identifier_type=identifier.type, identifier=identifier.identifier)\n return self.ROUTE, kwargs\n\n def audiences_list_from_source(self):\n if (\n not self.source_audience\n or self.source_audience in Classifier.AUDIENCES_ADULT\n ):\n return Classifier.AUDIENCES\n if self.source_audience == Classifier.AUDIENCE_YOUNG_ADULT:\n return Classifier.AUDIENCES_JUVENILE\n else:\n return [Classifier.AUDIENCE_CHILDREN]\n\n def append_child(self, worklist):\n \"\"\"Add another Worklist as a child of this one and change its\n configuration to make sure its results fit in with this lane.\n \"\"\"\n super(WorkBasedLane, self).append_child(worklist)\n worklist.languages = self.languages\n worklist.audiences = self.audiences\n\n def accessible_to(self, patron):\n \"\"\"In addition to the restrictions imposed by the superclass, a lane\n based on a specific Work is accessible to a Patron only if the\n Work itself is age-appropriate for the patron.\n\n :param patron: A Patron\n :return: A boolean\n \"\"\"\n superclass_ok = super(WorkBasedLane, self).accessible_to(patron)\n return superclass_ok and (\n not self.work or self.work.age_appropriate_for_patron(patron)\n )\n\n\nclass RecommendationLane(WorkBasedLane):\n \"\"\"A lane of recommended Works based on a particular Work\"\"\"\n\n DISPLAY_NAME = \"Titles recommended by NoveList\"\n ROUTE = \"recommendations\"\n\n # Cache for 24 hours -- would ideally be much longer but availability\n # information goes stale.\n MAX_CACHE_AGE = 24 * 60 * 60\n CACHED_FEED_TYPE = CachedFeed.RECOMMENDATIONS_TYPE\n\n def __init__(\n self, library, work, display_name=None, novelist_api=None, parent=None\n ):\n \"\"\"Constructor.\n\n :raises: CannotLoadConfiguration if `novelist_api` is not provided\n and no Novelist integration is configured for this library.\n \"\"\"\n super(RecommendationLane, self).__init__(\n library,\n work,\n display_name=display_name,\n )\n self.novelist_api = novelist_api or NoveListAPI.from_config(library)\n if 
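# --- Illustrative sketch (not part of the original module) ---
# WorkBasedLane.audiences_list_from_source above derives the lane's allowed
# audiences from the source work: an adult (or unknown) audience permits
# every audience, a young-adult work permits only juvenile audiences, and a
# children's work permits only children. A standalone sketch of that
# mapping, using plain strings in place of the Classifier constants:

ADULT_AUDIENCES = {"Adult", "Adults Only"}
ALL_AUDIENCES = ["Adult", "Adults Only", "Young Adult", "Children"]
JUVENILE_AUDIENCES = ["Young Adult", "Children"]


def audiences_for_source(source_audience):
    if not source_audience or source_audience in ADULT_AUDIENCES:
        return ALL_AUDIENCES
    if source_audience == "Young Adult":
        return JUVENILE_AUDIENCES
    return ["Children"]


assert audiences_for_source(None) == ALL_AUDIENCES
assert audiences_for_source("Young Adult") == JUVENILE_AUDIENCES
assert audiences_for_source("Children") == ["Children"]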
parent:\n parent.append_child(self)\n _db = Session.object_session(library)\n self.recommendations = self.fetch_recommendations(_db)\n\n def fetch_recommendations(self, _db):\n \"\"\"Get identifiers of recommendations for this LicensePool\"\"\"\n metadata = self.novelist_api.lookup(self.edition.primary_identifier)\n if metadata:\n metadata.filter_recommendations(_db)\n return metadata.recommendations\n return []\n\n def overview_facets(self, _db, facets):\n \"\"\"Convert a generic FeaturedFacets to some other faceting object,\n suitable for showing an overview of this WorkList in a grouped\n feed.\n \"\"\"\n # TODO: Since the purpose of the recommendation feed is to\n # suggest books that can be borrowed immediately, it would be\n # better to set availability=AVAILABLE_NOW. However, this feed\n # is cached for so long that we can't rely on the availability\n # information staying accurate. It would be especially bad if\n # people borrowed all of the recommendations that were\n # available at the time this feed was generated, and then\n # recommendations that were unavailable when the feed was\n # generated became available.\n #\n # For now, it's better to show all books and let people put\n # the unavailable ones on hold if they want.\n #\n # TODO: It would be better to order works in the same order\n # they come from the recommendation engine, since presumably\n # the best recommendations are in the front.\n return Facets.default(\n self.get_library(_db),\n collection=facets.COLLECTION_FULL,\n availability=facets.AVAILABLE_ALL,\n entrypoint=facets.entrypoint,\n )\n\n def modify_search_filter_hook(self, filter):\n \"\"\"Find Works whose Identifiers include the ISBNs returned\n by an external recommendation engine.\n\n :param filter: A Filter object.\n \"\"\"\n if not self.recommendations:\n # There are no recommendations. The search should not even\n # be executed.\n filter.match_nothing = True\n else:\n filter.identifiers = self.recommendations\n return filter\n\n\nclass SeriesFacets(DefaultSortOrderFacets):\n \"\"\"A list with a series restriction is ordered by series position by\n default.\n \"\"\"\n\n DEFAULT_SORT_ORDER = Facets.ORDER_SERIES_POSITION\n\n\nclass SeriesLane(DynamicLane):\n \"\"\"A lane of Works in a particular series.\"\"\"\n\n ROUTE = \"series\"\n # Cache for 24 hours -- would ideally be longer but availability\n # information goes stale.\n MAX_CACHE_AGE = 24 * 60 * 60\n CACHED_FEED_TYPE = CachedFeed.SERIES_TYPE\n\n def __init__(self, library, series_name, parent=None, **kwargs):\n if not series_name:\n raise ValueError(\"SeriesLane can't be created without series\")\n super(SeriesLane, self).initialize(library, display_name=series_name, **kwargs)\n self.series = series_name\n if parent:\n parent.append_child(self)\n if isinstance(parent, WorkBasedLane) and parent.source_audience:\n # WorkBasedLane forces self.audiences to values\n # compatible with the work in the WorkBasedLane, but\n # that's not enough for us. We want to force\n # self.audiences to *the specific audience* of the\n # work in the WorkBasedLane. 
If we're looking at a YA\n # series, we don't want to see books in a children's\n # series with the same name, even if it would be\n # appropriate to show those books.\n self.audiences = [parent.source_audience]\n\n @property\n def url_arguments(self):\n kwargs = dict(series_name=self.series)\n if self.language_key:\n kwargs[\"languages\"] = self.language_key\n if self.audience_key:\n kwargs[\"audiences\"] = self.audience_key\n return self.ROUTE, kwargs\n\n def overview_facets(self, _db, facets):\n \"\"\"Convert a FeaturedFacets to a SeriesFacets suitable for\n use in a grouped feed. Our contribution to a grouped feed will\n be ordered by series position.\n \"\"\"\n return SeriesFacets.default(\n self.get_library(_db),\n collection=facets.COLLECTION_FULL,\n availability=facets.AVAILABLE_ALL,\n entrypoint=facets.entrypoint,\n )\n\n def modify_search_filter_hook(self, filter):\n filter.series = self.series\n return filter\n\n\nclass ContributorFacets(DefaultSortOrderFacets):\n \"\"\"A list with a contributor restriction is, by default, sorted by\n title.\n \"\"\"\n\n DEFAULT_SORT_ORDER = Facets.ORDER_TITLE\n\n\nclass ContributorLane(DynamicLane):\n \"\"\"A lane of Works written by a particular contributor\"\"\"\n\n ROUTE = \"contributor\"\n # Cache for 24 hours -- would ideally be longer but availability\n # information goes stale.\n MAX_CACHE_AGE = 24 * 60 * 60\n CACHED_FEED_TYPE = CachedFeed.CONTRIBUTOR_TYPE\n\n def __init__(\n self, library, contributor, parent=None, languages=None, audiences=None\n ):\n \"\"\"Constructor.\n\n :param library: A Library.\n :param contributor: A Contributor or ContributorData object.\n :param parent: A WorkList.\n :param languages: An extra restriction on the languages of Works.\n :param audiences: An extra restriction on the audience for Works.\n \"\"\"\n if not contributor:\n raise ValueError(\"ContributorLane can't be created without contributor\")\n\n self.contributor = contributor\n self.contributor_key = (\n self.contributor.display_name or self.contributor.sort_name\n )\n super(ContributorLane, self).initialize(\n library,\n display_name=self.contributor_key,\n audiences=audiences,\n languages=languages,\n )\n if parent:\n parent.append_child(self)\n\n @property\n def url_arguments(self):\n kwargs = dict(\n contributor_name=self.contributor_key,\n languages=self.language_key,\n audiences=self.audience_key,\n )\n return self.ROUTE, kwargs\n\n def overview_facets(self, _db, facets):\n \"\"\"Convert a FeaturedFacets to a ContributorFacets suitable for\n use in a grouped feed.\n \"\"\"\n return ContributorFacets.default(\n self.get_library(_db),\n collection=facets.COLLECTION_FULL,\n availability=facets.AVAILABLE_ALL,\n entrypoint=facets.entrypoint,\n )\n\n def modify_search_filter_hook(self, filter):\n filter.author = self.contributor\n return filter\n\n\nclass RelatedBooksLane(WorkBasedLane):\n \"\"\"A lane of Works all related to a given Work by various criteria.\n\n Each criterion is represented by another WorkBaseLane class:\n\n * ContributorLane: Works by one of the contributors to this work.\n * SeriesLane: Works in the same series.\n * RecommendationLane: Works provided by a third-party recommendation\n service.\n \"\"\"\n\n CACHED_FEED_TYPE = CachedFeed.RELATED_TYPE\n DISPLAY_NAME = \"Related Books\"\n ROUTE = \"related_books\"\n\n # Cache this lane for the shortest amount of time any of its\n # component lane should be cached.\n MAX_CACHE_AGE = min(\n ContributorLane.MAX_CACHE_AGE,\n SeriesLane.MAX_CACHE_AGE,\n 
RecommendationLane.MAX_CACHE_AGE,\n )\n\n def __init__(self, library, work, display_name=None, novelist_api=None):\n super(RelatedBooksLane, self).__init__(\n library,\n work,\n display_name=display_name,\n )\n _db = Session.object_session(library)\n sublanes = self._get_sublanes(_db, novelist_api)\n if not sublanes:\n raise ValueError(\n \"No related books for %s by %s\" % (self.work.title, self.work.author)\n )\n self.children = sublanes\n\n def works(self, _db, *args, **kwargs):\n \"\"\"This lane never has works of its own.\n\n Only its sublanes have works.\n \"\"\"\n return []\n\n def _get_sublanes(self, _db, novelist_api):\n sublanes = list()\n\n for contributor_lane in self._contributor_sublanes(_db):\n sublanes.append(contributor_lane)\n\n for recommendation_lane in self._recommendation_sublane(_db, novelist_api):\n sublanes.append(recommendation_lane)\n\n # Create a series sublane.\n series_name = self.edition.series\n if series_name:\n sublanes.append(\n SeriesLane(\n self.get_library(_db),\n series_name,\n parent=self,\n languages=self.languages,\n )\n )\n\n return sublanes\n\n def _contributor_sublanes(self, _db):\n \"\"\"Create contributor sublanes\"\"\"\n viable_contributors = list()\n roles_by_priority = list(Contributor.author_contributor_tiers())[1:]\n\n while roles_by_priority and not viable_contributors:\n author_roles = roles_by_priority.pop(0)\n viable_contributors = [\n c.contributor\n for c in self.edition.contributions\n if c.role in author_roles\n ]\n\n library = self.get_library(_db)\n for contributor in viable_contributors:\n contributor_lane = ContributorLane(\n library,\n contributor,\n parent=self,\n languages=self.languages,\n audiences=self.audiences,\n )\n yield contributor_lane\n\n def _recommendation_sublane(self, _db, novelist_api):\n \"\"\"Create a recommendations sublane.\"\"\"\n lane_name = \"Similar titles recommended by NoveList\"\n try:\n recommendation_lane = RecommendationLane(\n library=self.get_library(_db),\n work=self.work,\n display_name=lane_name,\n novelist_api=novelist_api,\n parent=self,\n )\n if recommendation_lane.recommendations:\n yield recommendation_lane\n except CannotLoadConfiguration as e:\n # NoveList isn't configured. 
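# --- Illustrative sketch (not part of the original module) ---
# RelatedBooksLane._contributor_sublanes above walks contributor role tiers
# in priority order and stops at the first tier that yields any
# contributors, so a book with a primary author never gets lanes for its
# narrator or illustrator. A standalone sketch of that selection; the tier
# contents and sample contributions are assumptions for illustration only.

ROLE_TIERS = [
    {"Primary Author", "Author"},
    {"Narrator", "Editor"},
    {"Illustrator"},
]

contributions = [("Jane Doe", "Narrator"), ("John Roe", "Illustrator")]


def viable_contributors(contributions, tiers=ROLE_TIERS):
    for tier in tiers:
        found = [name for name, role in contributions if role in tier]
        if found:
            return found
    return []


# No author-tier contributor exists, so the narrator tier wins.
assert viable_contributors(contributions) == ["Jane Doe"]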
This isn't fatal -- we just won't\n # use this sublane.\n pass\n\n\nclass CrawlableFacets(Facets):\n \"\"\"A special Facets class for crawlable feeds.\"\"\"\n\n CACHED_FEED_TYPE = CachedFeed.CRAWLABLE_TYPE\n\n # These facet settings are definitive of a crawlable feed.\n # Library configuration settings don't matter.\n SETTINGS = {\n Facets.ORDER_FACET_GROUP_NAME: Facets.ORDER_LAST_UPDATE,\n Facets.AVAILABILITY_FACET_GROUP_NAME: Facets.AVAILABLE_ALL,\n Facets.COLLECTION_FACET_GROUP_NAME: Facets.COLLECTION_FULL,\n }\n\n @classmethod\n def available_facets(cls, config, facet_group_name):\n return [cls.SETTINGS[facet_group_name]]\n\n @classmethod\n def default_facet(cls, config, facet_group_name):\n return cls.SETTINGS[facet_group_name]\n\n\nclass CrawlableLane(DynamicLane):\n\n # By default, crawlable feeds are cached for 12 hours.\n MAX_CACHE_AGE = 12 * 60 * 60\n\n\nclass CrawlableCollectionBasedLane(CrawlableLane):\n\n # Since these collections may be shared collections, for which\n # recent information is very important, these feeds are only\n # cached for 2 hours.\n MAX_CACHE_AGE = 2 * 60 * 60\n\n LIBRARY_ROUTE = \"crawlable_library_feed\"\n COLLECTION_ROUTE = \"crawlable_collection_feed\"\n\n def initialize(self, library_or_collections):\n\n self.collection_feed = False\n\n if isinstance(library_or_collections, Library):\n # We're looking at all the collections in a given library.\n library = library_or_collections\n collections = library.collections\n identifier = library.name\n else:\n # We're looking at collections directly, without respect\n # to the libraries that might use them.\n library = None\n collections = library_or_collections\n identifier = \" / \".join(sorted([x.name for x in collections]))\n if len(collections) == 1:\n self.collection_feed = True\n self.collection_name = collections[0].name\n\n super(CrawlableCollectionBasedLane, self).initialize(\n library,\n \"Crawlable feed: %s\" % identifier,\n )\n if collections is not None:\n # initialize() set the collection IDs to all collections\n # associated with the library. 
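# --- Illustrative sketch (not part of the original module) ---
# CrawlableFacets above pins every facet group to a single value through a
# SETTINGS dict, ignoring per-library configuration. The same pattern in
# miniature; the group names and values here are hypothetical stand-ins,
# not the real Facets constants.

class PinnedFacets:
    SETTINGS = {
        "order": "last_update",
        "availability": "all",
        "collection": "full",
    }

    @classmethod
    def available_facets(cls, config, group):
        # Only the pinned value is ever offered for a group.
        return [cls.SETTINGS[group]]

    @classmethod
    def default_facet(cls, config, group):
        return cls.SETTINGS[group]


assert PinnedFacets.available_facets(None, "order") == ["last_update"]
assert PinnedFacets.default_facet(None, "availability") == "all"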
We may want to restrict that\n # further.\n self.collection_ids = [x.id for x in collections]\n\n @property\n def url_arguments(self):\n if not self.collection_feed:\n return self.LIBRARY_ROUTE, dict()\n else:\n kwargs = dict(\n collection_name=self.collection_name,\n )\n return self.COLLECTION_ROUTE, kwargs\n\n\nclass CrawlableCustomListBasedLane(CrawlableLane):\n \"\"\"A lane that consists of all works in a single CustomList.\"\"\"\n\n ROUTE = \"crawlable_list_feed\"\n\n uses_customlists = True\n\n def initialize(self, library, customlist):\n self.customlist_name = customlist.name\n super(CrawlableCustomListBasedLane, self).initialize(\n library,\n \"Crawlable feed: %s\" % self.customlist_name,\n customlists=[customlist],\n )\n\n @property\n def url_arguments(self):\n kwargs = dict(list_name=self.customlist_name)\n return self.ROUTE, kwargs\n\n\nclass KnownOverviewFacetsWorkList(WorkList):\n \"\"\"A WorkList whose defining feature is that the Facets object\n to be used when generating a grouped feed is known in advance.\n \"\"\"\n\n def __init__(self, facets, *args, **kwargs):\n \"\"\"Constructor.\n\n :param facets: A Facets object to be used when generating a grouped\n feed.\n \"\"\"\n super(KnownOverviewFacetsWorkList, self).__init__(*args, **kwargs)\n self.facets = facets\n\n def overview_facets(self, _db, facets):\n \"\"\"Return the faceting object to be used when generating a grouped\n feed.\n\n :param _db: Ignored -- only present for API compatibility.\n :param facets: Ignored -- only present for API compatibility.\n \"\"\"\n return self.facets\n\n\nclass JackpotFacets(Facets):\n \"\"\"A faceting object for a jackpot feed.\n\n Unlike other faceting objects, AVAILABLE_NOT_NOW is an acceptable\n option for the availability facet.\n \"\"\"\n\n @classmethod\n def default_facet(cls, config, facet_group_name):\n if facet_group_name != cls.AVAILABILITY_FACET_GROUP_NAME:\n return super(JackpotFacets, cls).default_facet(config, facet_group_name)\n return cls.AVAILABLE_NOW\n\n @classmethod\n def available_facets(cls, config, facet_group_name):\n if facet_group_name != cls.AVAILABILITY_FACET_GROUP_NAME:\n return super(JackpotFacets, cls).available_facets(config, facet_group_name)\n\n return [\n cls.AVAILABLE_NOW,\n cls.AVAILABLE_NOT_NOW,\n cls.AVAILABLE_ALL,\n cls.AVAILABLE_OPEN_ACCESS,\n ]\n\n\nclass HasSeriesFacets(Facets):\n \"\"\"A faceting object for a feed containg books guaranteed\n to belong to _some_ series.\n \"\"\"\n\n def modify_search_filter(self, filter):\n filter.series = True\n\n\nclass JackpotWorkList(WorkList):\n \"\"\"A WorkList guaranteed to, so far as possible, contain the exact\n selection of books necessary to perform common QA tasks.\n\n This makes it easy to write integration tests that work on real\n circulation managers and real books.\n \"\"\"\n\n def __init__(self, library, facets):\n \"\"\"Constructor.\n\n :param library: A Library\n :param facets: A Facets object.\n \"\"\"\n super(JackpotWorkList, self).initialize(library)\n\n # Initialize a list of child Worklists; one for each test that\n # a client might need to run.\n self.children = []\n\n # Add one or more WorkLists for every collection in the\n # system, so that a client can test borrowing a book from\n # every collection.\n for collection in sorted(library.collections, key=lambda x: x.name):\n for medium in Edition.FULFILLABLE_MEDIA:\n # Give each Worklist a name that is distinctive\n # and easy for a client to parse.\n if collection.data_source:\n data_source_name = collection.data_source.name\n else:\n 
data_source_name = \"[Unknown]\"\n display_name = (\n \"License source {%s} - Medium {%s} - Collection name {%s}\"\n % (data_source_name, medium, collection.name)\n )\n child = KnownOverviewFacetsWorkList(facets)\n child.initialize(library, media=[medium], display_name=display_name)\n child.collection_ids = [collection.id]\n self.children.append(child)\n\n def works(self, _db, *args, **kwargs):\n \"\"\"This worklist never has works of its own.\n\n Only its children have works.\n \"\"\"\n return []\n", "id": "5095338", "language": "Python", "matching_score": 8.131508827209473, "max_stars_count": 0, "path": "api/lanes.py" }, { "content": "# encoding: utf-8\nimport datetime\nimport logging\nimport time\nfrom collections import defaultdict\nfrom urllib.parse import quote_plus\n\nimport elasticsearch\nfrom flask_babel import lazy_gettext as _\nfrom sqlalchemy import (\n Boolean,\n Column,\n ForeignKey,\n Integer,\n Table,\n Unicode,\n UniqueConstraint,\n and_,\n event,\n not_,\n or_,\n)\nfrom sqlalchemy.dialects.postgresql import ARRAY, INT4RANGE, JSON\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import (\n aliased,\n backref,\n contains_eager,\n defer,\n joinedload,\n relationship,\n)\nfrom sqlalchemy.sql import select\nfrom sqlalchemy.sql.expression import Select\n\nfrom .classifier import Classifier\nfrom .config import Configuration\nfrom .entrypoint import EntryPoint, EverythingEntryPoint\nfrom .facets import FacetConstants\nfrom .model import (\n Base,\n CachedFeed,\n Collection,\n CustomList,\n CustomListEntry,\n DataSource,\n Edition,\n Genre,\n Library,\n LicensePool,\n Session,\n Work,\n WorkGenre,\n directly_modified,\n get_one_or_create,\n site_configuration_has_changed,\n tuple_to_numericrange,\n)\nfrom .model.constants import EditionConstants\nfrom .problem_details import *\nfrom .util import LanguageCodes\nfrom .util.accept_language import parse_accept_language\nfrom .util.datetime_helpers import utc_now\nfrom .util.opds_writer import OPDSFeed\nfrom .util.problem_detail import ProblemDetail\n\n\nclass BaseFacets(FacetConstants):\n \"\"\"Basic faceting class that doesn't modify a search filter at all.\n\n This is intended solely for use as a base class.\n \"\"\"\n\n # If the use of a certain faceting object has implications for the\n # type of feed (the way FeaturedFacets always implies a 'groups' feed),\n # set the type of feed here. 
This will override any CACHED_FEED_TYPE\n # associated with the WorkList.\n CACHED_FEED_TYPE = None\n\n # By default, faceting objects have no opinion on how long the feeds\n # generated using them should be cached.\n max_cache_age = None\n\n def items(self):\n \"\"\"Yields a 2-tuple for every active facet setting.\n\n These tuples are used to generate URLs that can identify\n specific facet settings, and to distinguish between CachedFeed\n objects that represent the same feed with different facet\n settings.\n \"\"\"\n return []\n\n @property\n def cached(self):\n \"\"\"This faceting object's opinion on whether feeds should be cached.\n\n :return: A boolean, or None for 'no opinion'.\n \"\"\"\n if self.max_cache_age is None:\n return None\n return self.max_cache_age != 0\n\n @property\n def query_string(self):\n \"\"\"A query string fragment that propagates all active facet\n settings.\n \"\"\"\n return \"&\".join(\"=\".join(x) for x in sorted(self.items()))\n\n @property\n def facet_groups(self):\n \"\"\"Yield a list of 4-tuples\n (facet group, facet value, new Facets object, selected)\n for use in building OPDS facets.\n\n This does not include the 'entry point' facet group,\n which must be handled separately.\n \"\"\"\n return []\n\n @classmethod\n def selectable_entrypoints(cls, worklist):\n \"\"\"Ignore all entry points, even if the WorkList supports them.\"\"\"\n return []\n\n def modify_search_filter(self, filter):\n \"\"\"Modify an external_search.Filter object to filter out works\n excluded by the business logic of this faceting class.\n \"\"\"\n return filter\n\n def modify_database_query(cls, _db, qu):\n \"\"\"If necessary, modify a database query so that resulting\n items conform the constraints of this faceting object.\n\n The default behavior is to not modify the query.\n \"\"\"\n return qu\n\n def scoring_functions(self, filter):\n \"\"\"Create a list of ScoringFunction objects that modify how\n works in the given WorkList should be ordered.\n\n Most subclasses will not use this because they order\n works using the 'order' feature.\n \"\"\"\n return []\n\n\nclass FacetsWithEntryPoint(BaseFacets):\n \"\"\"Basic Facets class that knows how to filter a query based on a\n selected EntryPoint.\n \"\"\"\n\n def __init__(\n self, entrypoint=None, entrypoint_is_default=False, max_cache_age=None, **kwargs\n ):\n \"\"\"Constructor.\n\n :param entrypoint: An EntryPoint (optional).\n :param entrypoint_is_default: If this is True, then `entrypoint`\n is a default value and was not determined by a user's\n explicit choice.\n :param max_cache_age: Any feeds generated by this faceting object\n will be cached for this amount of time. 
The default is to have\n no opinion and let the Worklist manage this.\n :param kwargs: Other arguments may be supplied based on user\n input, but the default implementation is to ignore them.\n \"\"\"\n self.entrypoint = entrypoint\n self.entrypoint_is_default = entrypoint_is_default\n self.max_cache_age = max_cache_age\n self.constructor_kwargs = kwargs\n\n @classmethod\n def selectable_entrypoints(cls, worklist):\n \"\"\"Which EntryPoints can be selected for these facets on this\n WorkList?\n\n In most cases, there are no selectable EntryPoints; this generally\n happens only at the top level.\n\n By default, this is completely determined by the WorkList.\n See SearchFacets for an example that changes this.\n \"\"\"\n if not worklist:\n return []\n return worklist.entrypoints\n\n def navigate(self, entrypoint):\n \"\"\"Create a very similar FacetsWithEntryPoint that points to\n a different EntryPoint.\n \"\"\"\n return self.__class__(\n entrypoint=entrypoint,\n entrypoint_is_default=False,\n max_cache_age=self.max_cache_age,\n **self.constructor_kwargs\n )\n\n @classmethod\n def from_request(\n cls,\n library,\n facet_config,\n get_argument,\n get_header,\n worklist,\n default_entrypoint=None,\n **extra_kwargs\n ):\n \"\"\"Load a faceting object from an HTTP request.\n\n :param facet_config: A Library (or mock of one) that knows\n which subset of the available facets are configured.\n\n :param get_argument: A callable that takes one argument and\n retrieves (or pretends to retrieve) a query string\n parameter of that name from an incoming HTTP request.\n\n :param get_header: A callable that takes one argument and\n retrieves (or pretends to retrieve) an HTTP header\n of that name from an incoming HTTP request.\n\n :param worklist: A WorkList associated with the current request,\n if any.\n\n :param default_entrypoint: Select this EntryPoint if the\n incoming request does not specify an enabled EntryPoint.\n If this is None, the first enabled EntryPoint will be used\n as the default.\n\n :param extra_kwargs: A dictionary of keyword arguments to pass\n into the constructor when a faceting object is instantiated.\n\n :return: A FacetsWithEntryPoint, or a ProblemDetail if there's\n a problem with the input from the request.\n \"\"\"\n return cls._from_request(\n facet_config,\n get_argument,\n get_header,\n worklist,\n default_entrypoint,\n **extra_kwargs\n )\n\n @classmethod\n def _from_request(\n cls,\n facet_config,\n get_argument,\n get_header,\n worklist,\n default_entrypoint=None,\n **extra_kwargs\n ):\n \"\"\"Load a faceting object from an HTTP request.\n\n Subclasses of FacetsWithEntryPoint can override `from_request`,\n but call this method to load the EntryPoint and actually\n instantiate the faceting class.\n \"\"\"\n entrypoint_name = get_argument(Facets.ENTRY_POINT_FACET_GROUP_NAME, None)\n valid_entrypoints = list(cls.selectable_entrypoints(facet_config))\n entrypoint = cls.load_entrypoint(\n entrypoint_name, valid_entrypoints, default=default_entrypoint\n )\n if isinstance(entrypoint, ProblemDetail):\n return entrypoint\n entrypoint, is_default = entrypoint\n\n max_cache_age = get_argument(Facets.MAX_CACHE_AGE_NAME, None)\n max_cache_age = cls.load_max_cache_age(max_cache_age)\n if isinstance(max_cache_age, ProblemDetail):\n return max_cache_age\n\n return cls(\n entrypoint=entrypoint,\n entrypoint_is_default=is_default,\n max_cache_age=max_cache_age,\n **extra_kwargs\n )\n\n @classmethod\n def load_entrypoint(cls, name, valid_entrypoints, default=None):\n \"\"\"Look up an 
EntryPoint by name, assuming it's valid in the\n given WorkList.\n\n :param valid_entrypoints: The EntryPoints that might be\n valid. This is probably not the value of\n WorkList.selectable_entrypoints, because an EntryPoint\n selected in a WorkList remains valid (but not selectable) for\n all of its children.\n\n :param default: A class to use as the default EntryPoint if\n none is specified. If no default is specified, the first\n enabled EntryPoint will be used.\n\n :return: A 2-tuple (EntryPoint class, is_default).\n \"\"\"\n if not valid_entrypoints:\n return None, True\n if default is None:\n default = valid_entrypoints[0]\n ep = EntryPoint.BY_INTERNAL_NAME.get(name)\n if not ep or ep not in valid_entrypoints:\n return default, True\n return ep, False\n\n @classmethod\n def load_max_cache_age(cls, value):\n \"\"\"Convert a value for the MAX_CACHE_AGE_NAME parameter to a value\n that CachedFeed will understand.\n\n :param value: A string.\n :return: For now, either CachedFeed.IGNORE_CACHE or None.\n \"\"\"\n if value is None:\n return value\n\n try:\n value = int(value)\n except ValueError as e:\n value = None\n\n # At the moment, the only acceptable value that can be set\n # through the web is zero -- i.e. don't use the cache at\n # all. We can't give web clients fine-grained control over\n # the internal workings of our cache; the most we can do\n # is give them the opportunity to opt out.\n #\n # Thus, any nonzero value will be ignored.\n if value == 0:\n value = CachedFeed.IGNORE_CACHE\n else:\n value = None\n return value\n\n def items(self):\n \"\"\"Yields a 2-tuple for every active facet setting.\n\n In this class that just means the entrypoint and any max_cache_age.\n \"\"\"\n if self.entrypoint:\n yield (self.ENTRY_POINT_FACET_GROUP_NAME, self.entrypoint.INTERNAL_NAME)\n if self.max_cache_age not in (None, CachedFeed.CACHE_FOREVER):\n if self.max_cache_age == CachedFeed.IGNORE_CACHE:\n value = 0\n else:\n value = self.max_cache_age\n yield (self.MAX_CACHE_AGE_NAME, str(value))\n\n def modify_search_filter(self, filter):\n \"\"\"Modify the given external_search.Filter object\n so that it reflects this set of facets.\n \"\"\"\n if self.entrypoint:\n self.entrypoint.modify_search_filter(filter)\n return filter\n\n def modify_database_query(self, _db, qu):\n \"\"\"Modify the given database query so that it reflects this set of\n facets.\n \"\"\"\n if self.entrypoint:\n qu = self.entrypoint.modify_database_query(_db, qu)\n return qu\n\n\nclass Facets(FacetsWithEntryPoint):\n \"\"\"A full-fledged facet class that supports complex navigation between\n multiple facet groups.\n\n Despite the generic name, this is only used in 'page' type OPDS\n feeds that list all the works in some WorkList.\n \"\"\"\n\n ORDER_BY_RELEVANCE = \"relevance\"\n\n @classmethod\n def default(\n cls, library, collection=None, availability=None, order=None, entrypoint=None\n ):\n return cls(\n library,\n collection=collection,\n availability=availability,\n order=order,\n entrypoint=entrypoint,\n )\n\n @classmethod\n def available_facets(cls, config, facet_group_name):\n \"\"\"Which facets are enabled for the given facet group?\n\n You can override this to forcible enable or disable facets\n that might not be enabled in library configuration, but you\n can't make up totally new facets.\n\n TODO: This sytem would make more sense if you _could_ make up\n totally new facets, maybe because each facet was represented\n as a policy object rather than a key to code implemented\n elsewhere in this class. 
Right now this method implies more\n flexibility than actually exists.\n \"\"\"\n available = config.enabled_facets(facet_group_name)\n\n # \"The default facet isn't available\" makes no sense. If the\n # default facet is not in the available list for any reason,\n # add it to the beginning of the list. This makes other code\n # elsewhere easier to write.\n default = cls.default_facet(config, facet_group_name)\n if default not in available:\n available = [default] + available\n return available\n\n @classmethod\n def default_facet(cls, config, facet_group_name):\n \"\"\"The default value for the given facet group.\n\n The default value must be one of the values returned by available_facets() above.\n \"\"\"\n return config.default_facet(facet_group_name)\n\n @classmethod\n def _values_from_request(cls, config, get_argument, get_header):\n g = Facets.ORDER_FACET_GROUP_NAME\n order = get_argument(g, cls.default_facet(config, g))\n order_facets = cls.available_facets(config, g)\n if order and not order in order_facets:\n return INVALID_INPUT.detailed(\n _(\"I don't know how to order a feed by '%(order)s'\", order=order), 400\n )\n\n g = Facets.AVAILABILITY_FACET_GROUP_NAME\n availability = get_argument(g, cls.default_facet(config, g))\n availability_facets = cls.available_facets(config, g)\n if availability and not availability in availability_facets:\n return INVALID_INPUT.detailed(\n _(\n \"I don't understand the availability term '%(availability)s'\",\n availability=availability,\n ),\n 400,\n )\n\n g = Facets.COLLECTION_FACET_GROUP_NAME\n collection = get_argument(g, cls.default_facet(config, g))\n collection_facets = cls.available_facets(config, g)\n if collection and not collection in collection_facets:\n return INVALID_INPUT.detailed(\n _(\n \"I don't understand what '%(collection)s' refers to.\",\n collection=collection,\n ),\n 400,\n )\n\n enabled = {\n Facets.ORDER_FACET_GROUP_NAME: order_facets,\n Facets.AVAILABILITY_FACET_GROUP_NAME: availability_facets,\n Facets.COLLECTION_FACET_GROUP_NAME: collection_facets,\n }\n\n return dict(\n order=order,\n availability=availability,\n collection=collection,\n enabled_facets=enabled,\n )\n\n @classmethod\n def from_request(\n cls,\n library,\n config,\n get_argument,\n get_header,\n worklist,\n default_entrypoint=None,\n **extra\n ):\n \"\"\"Load a faceting object from an HTTP request.\"\"\"\n\n values = cls._values_from_request(config, get_argument, get_header)\n if isinstance(values, ProblemDetail):\n return values\n extra.update(values)\n extra[\"library\"] = library\n\n return cls._from_request(\n config, get_argument, get_header, worklist, default_entrypoint, **extra\n )\n\n def __init__(\n self,\n library,\n collection,\n availability,\n order,\n order_ascending=None,\n enabled_facets=None,\n entrypoint=None,\n entrypoint_is_default=False,\n **constructor_kwargs\n ):\n \"\"\"Constructor.\n\n :param collection: This is not a Collection object; it's a value for\n the 'collection' facet, e.g. 'main' or 'featured'.\n\n :param entrypoint: An EntryPoint class. 
The 'entry point'\n facet group is configured on a per-WorkList basis rather than\n a per-library basis.\n \"\"\"\n super(Facets, self).__init__(\n entrypoint, entrypoint_is_default, **constructor_kwargs\n )\n collection = collection or self.default_facet(\n library, self.COLLECTION_FACET_GROUP_NAME\n )\n availability = availability or self.default_facet(\n library, self.AVAILABILITY_FACET_GROUP_NAME\n )\n order = order or self.default_facet(library, self.ORDER_FACET_GROUP_NAME)\n if order_ascending is None:\n if order in Facets.ORDER_DESCENDING_BY_DEFAULT:\n order_ascending = self.ORDER_DESCENDING\n else:\n order_ascending = self.ORDER_ASCENDING\n\n if (\n availability == self.AVAILABLE_ALL\n and (library and not library.allow_holds)\n and (\n self.AVAILABLE_NOW\n in self.available_facets(library, self.AVAILABILITY_FACET_GROUP_NAME)\n )\n ):\n # Under normal circumstances we would show all works, but\n # library configuration says to hide books that aren't\n # available.\n availability = self.AVAILABLE_NOW\n\n self.library = library\n self.collection = collection\n self.availability = availability\n self.order = order\n if order_ascending == self.ORDER_ASCENDING:\n order_ascending = True\n elif order_ascending == self.ORDER_DESCENDING:\n order_ascending = False\n self.order_ascending = order_ascending\n self.facets_enabled_at_init = enabled_facets\n\n def navigate(self, collection=None, availability=None, order=None, entrypoint=None):\n \"\"\"Create a slightly different Facets object from this one.\"\"\"\n return self.__class__(\n library=self.library,\n collection=collection or self.collection,\n availability=availability or self.availability,\n order=order or self.order,\n enabled_facets=self.facets_enabled_at_init,\n entrypoint=(entrypoint or self.entrypoint),\n entrypoint_is_default=False,\n max_cache_age=self.max_cache_age,\n )\n\n def items(self):\n for k, v in list(super(Facets, self).items()):\n yield k, v\n if self.order:\n yield (self.ORDER_FACET_GROUP_NAME, self.order)\n if self.availability:\n yield (self.AVAILABILITY_FACET_GROUP_NAME, self.availability)\n if self.collection:\n yield (self.COLLECTION_FACET_GROUP_NAME, self.collection)\n\n @property\n def enabled_facets(self):\n \"\"\"Yield a 3-tuple of lists (order, availability, collection)\n representing facet values enabled via initialization or configuration\n\n The 'entry point' facet group is handled separately, since it\n is not always used.\n \"\"\"\n if self.facets_enabled_at_init:\n # When this Facets object was initialized, a list of enabled\n # facets was passed. 
We'll only work with those facets.\n facet_types = [\n self.ORDER_FACET_GROUP_NAME,\n self.AVAILABILITY_FACET_GROUP_NAME,\n self.COLLECTION_FACET_GROUP_NAME,\n ]\n for facet_type in facet_types:\n yield self.facets_enabled_at_init.get(facet_type, [])\n else:\n library = self.library\n for group_name in (\n Facets.ORDER_FACET_GROUP_NAME,\n Facets.AVAILABILITY_FACET_GROUP_NAME,\n Facets.COLLECTION_FACET_GROUP_NAME,\n ):\n yield self.available_facets(self.library, group_name)\n\n @property\n def facet_groups(self):\n \"\"\"Yield a list of 4-tuples\n (facet group, facet value, new Facets object, selected)\n for use in building OPDS facets.\n\n This does not yield anything for the 'entry point' facet group,\n which must be handled separately.\n \"\"\"\n\n order_facets, availability_facets, collection_facets = self.enabled_facets\n\n def dy(new_value):\n group = self.ORDER_FACET_GROUP_NAME\n current_value = self.order\n facets = self.navigate(order=new_value)\n return (group, new_value, facets, current_value == new_value)\n\n # First, the order facets.\n if len(order_facets) > 1:\n for facet in order_facets:\n yield dy(facet)\n\n # Next, the availability facets.\n def dy(new_value):\n group = self.AVAILABILITY_FACET_GROUP_NAME\n current_value = self.availability\n facets = self.navigate(availability=new_value)\n return (group, new_value, facets, new_value == current_value)\n\n if len(availability_facets) > 1:\n for facet in availability_facets:\n yield dy(facet)\n\n # Next, the collection facets.\n def dy(new_value):\n group = self.COLLECTION_FACET_GROUP_NAME\n current_value = self.collection\n facets = self.navigate(collection=new_value)\n return (group, new_value, facets, new_value == current_value)\n\n if len(collection_facets) > 1:\n for facet in collection_facets:\n yield dy(facet)\n\n def modify_search_filter(self, filter):\n \"\"\"Modify the given external_search.Filter object\n so that it reflects the settings of this Facets object.\n\n This is the Elasticsearch equivalent of apply(). However, the\n Elasticsearch implementation of (e.g.) 
the meaning of the\n different availabilty statuses is kept in Filter.build().\n \"\"\"\n super(Facets, self).modify_search_filter(filter)\n\n if self.library:\n filter.minimum_featured_quality = self.library.minimum_featured_quality\n\n filter.availability = self.availability\n filter.subcollection = self.collection\n\n # No order and relevance order both signify the default and,\n # thus, either should leave `filter.order` unset.\n if self.order and self.order != self.ORDER_BY_RELEVANCE:\n order = self.SORT_ORDER_TO_ELASTICSEARCH_FIELD_NAME.get(self.order)\n if order:\n filter.order = order\n filter.order_ascending = self.order_ascending\n else:\n logging.error(\"Unrecognized sort order: %s\", self.order)\n\n def modify_database_query(self, _db, qu):\n \"\"\"Restrict a query against Work+LicensePool+Edition so that it\n matches only works that fit the criteria of this Faceting object.\n\n Sort order facet cannot be handled in this method, but can be\n handled in subclasses that override this method.\n \"\"\"\n\n # Apply any superclass criteria\n qu = super(Facets, self).modify_database_query(_db, qu)\n\n available_now = or_(\n LicensePool.open_access == True,\n LicensePool.self_hosted == True,\n LicensePool.unlimited_access,\n LicensePool.licenses_available > 0,\n )\n\n if self.availability == self.AVAILABLE_NOW:\n availability_clause = available_now\n elif self.availability == self.AVAILABLE_ALL:\n availability_clause = or_(\n LicensePool.open_access == True,\n LicensePool.self_hosted == True,\n LicensePool.licenses_owned > 0,\n LicensePool.unlimited_access,\n )\n elif self.availability == self.AVAILABLE_OPEN_ACCESS:\n # TODO: self-hosted content could be allowed here\n # depending on what exactly the wording is.\n availability_clause = LicensePool.open_access == True\n elif self.availability == self.AVAILABLE_NOT_NOW:\n # The book must be licensed but currently unavailable.\n availability_clause = and_(\n not_(available_now), LicensePool.licenses_owned > 0\n )\n\n qu = qu.filter(availability_clause)\n\n if self.collection == self.COLLECTION_FULL:\n # Include everything.\n pass\n elif self.collection == self.COLLECTION_FEATURED:\n # Exclude books with a quality of less than the library's\n # minimum featured quality.\n qu = qu.filter(Work.quality >= self.library.minimum_featured_quality)\n\n return qu\n\n\nclass DefaultSortOrderFacets(Facets):\n \"\"\"A faceting object that changes the default sort order.\n\n Subclasses must set DEFAULT_SORT_ORDER\n \"\"\"\n\n @classmethod\n def available_facets(cls, config, facet_group_name):\n \"\"\"Make sure the default sort order is the first item\n in the list of available sort orders.\n \"\"\"\n if facet_group_name != cls.ORDER_FACET_GROUP_NAME:\n return super(DefaultSortOrderFacets, cls).available_facets(\n config, facet_group_name\n )\n default = config.enabled_facets(facet_group_name)\n\n # Promote the default sort order to the front of the list,\n # adding it if necessary.\n order = cls.DEFAULT_SORT_ORDER\n if order in default:\n default = [x for x in default if x != order]\n return [order] + default\n\n @classmethod\n def default_facet(cls, config, facet_group_name):\n if facet_group_name == cls.ORDER_FACET_GROUP_NAME:\n return cls.DEFAULT_SORT_ORDER\n return super(DefaultSortOrderFacets, cls).default_facet(\n config, facet_group_name\n )\n\n\nclass DatabaseBackedFacets(Facets):\n \"\"\"A generic faceting object designed for managing queries against the\n database. 
(Other faceting objects are designed for managing\n Elasticsearch searches.)\n \"\"\"\n\n # Of the sort orders in Facets, these are the only available ones\n # -- they map directly onto a field of one of the tables we're\n # querying.\n ORDER_FACET_TO_DATABASE_FIELD = {\n FacetConstants.ORDER_WORK_ID: Work.id,\n FacetConstants.ORDER_TITLE: Edition.sort_title,\n FacetConstants.ORDER_AUTHOR: Edition.sort_author,\n FacetConstants.ORDER_LAST_UPDATE: Work.last_update_time,\n }\n\n @classmethod\n def available_facets(cls, config, facet_group_name):\n \"\"\"Exclude search orders not available through database queries.\"\"\"\n standard = config.enabled_facets(facet_group_name)\n if facet_group_name != cls.ORDER_FACET_GROUP_NAME:\n return standard\n return [\n order for order in standard if order in cls.ORDER_FACET_TO_DATABASE_FIELD\n ]\n\n @classmethod\n def default_facet(cls, config, facet_group_name):\n \"\"\"Exclude search orders not available through database queries.\"\"\"\n standard_default = super(DatabaseBackedFacets, cls).default_facet(\n config, facet_group_name\n )\n if facet_group_name != cls.ORDER_FACET_GROUP_NAME:\n return standard_default\n if standard_default in cls.ORDER_FACET_TO_DATABASE_FIELD:\n # This default sort order is supported.\n return standard_default\n\n # The default sort order is not supported. Just pick the first\n # enabled sort order.\n enabled = config.enabled_facets(facet_group_name)\n for i in enabled:\n if i in cls.ORDER_FACET_TO_DATABASE_FIELD:\n return i\n\n # None of the enabled sort orders are usable. Order by work ID.\n return cls.ORDER_WORK_ID\n\n def order_by(self):\n \"\"\"Given these Facets, create a complete ORDER BY clause for queries\n against WorkModelWithGenre.\n \"\"\"\n default_sort_order = [Edition.sort_author, Edition.sort_title, Work.id]\n\n primary_order_by = self.ORDER_FACET_TO_DATABASE_FIELD.get(self.order)\n if primary_order_by is not None:\n # Promote the field designated by the sort facet to the top of\n # the order-by list.\n order_by = [primary_order_by]\n\n for i in default_sort_order:\n if i not in order_by:\n order_by.append(i)\n else:\n # Use the default sort order\n order_by = default_sort_order\n\n # order_ascending applies only to the first field in the sort order.\n # Everything else is ordered ascending.\n if self.order_ascending:\n order_by_sorted = [x.asc() for x in order_by]\n else:\n order_by_sorted = [order_by[0].desc()] + [x.asc() for x in order_by[1:]]\n return order_by_sorted, order_by\n\n def modify_database_query(self, _db, qu):\n \"\"\"Restrict a query so that it matches only works\n that fit the criteria of this faceting object. 
Ensure\n query is appropriately ordered and made distinct.\n \"\"\"\n\n # Filter by facet criteria\n qu = super(DatabaseBackedFacets, self).modify_database_query(_db, qu)\n\n # Set the ORDER BY clause.\n order_by, order_distinct = self.order_by()\n qu = qu.order_by(*order_by)\n qu = qu.distinct(*order_distinct)\n return qu\n\n\nclass FeaturedFacets(FacetsWithEntryPoint):\n\n \"\"\"A simple faceting object that configures a query so that the 'most\n featurable' items are at the front.\n\n This is mainly a convenient thing to pass into\n AcquisitionFeed.groups().\n \"\"\"\n\n # This Facets class is used exclusively for grouped feeds.\n CACHED_FEED_TYPE = CachedFeed.GROUPS_TYPE\n\n def __init__(\n self, minimum_featured_quality, entrypoint=None, random_seed=None, **kwargs\n ):\n \"\"\"Set up an object that finds featured books in a given\n WorkList.\n\n :param kwargs: Other arguments may be supplied based on user\n input, but the default implementation is to ignore them.\n \"\"\"\n super(FeaturedFacets, self).__init__(entrypoint=entrypoint, **kwargs)\n self.minimum_featured_quality = minimum_featured_quality\n self.random_seed = random_seed\n\n @classmethod\n def default(cls, lane, **kwargs):\n library = None\n if lane:\n if isinstance(lane, Library):\n library = lane\n else:\n library = lane.library\n\n if library:\n quality = library.minimum_featured_quality\n else:\n quality = Configuration.DEFAULT_MINIMUM_FEATURED_QUALITY\n return cls(quality, **kwargs)\n\n def navigate(self, minimum_featured_quality=None, entrypoint=None):\n \"\"\"Create a slightly different FeaturedFacets object based on this\n one.\n \"\"\"\n minimum_featured_quality = (\n minimum_featured_quality or self.minimum_featured_quality\n )\n entrypoint = entrypoint or self.entrypoint\n return self.__class__(\n minimum_featured_quality, entrypoint, max_cache_age=self.max_cache_age\n )\n\n def modify_search_filter(self, filter):\n super(FeaturedFacets, self).modify_search_filter(filter)\n filter.minimum_featured_quality = self.minimum_featured_quality\n\n def scoring_functions(self, filter):\n \"\"\"Generate scoring functions that weight works randomly, but\n with 'more featurable' works tending to be at the top.\n \"\"\"\n return filter.featurability_scoring_functions(self.random_seed)\n\n\nclass SearchFacets(Facets):\n \"\"\"A Facets object designed to filter search results.\n\n Most search result filtering is handled by WorkList, but this\n allows someone to, e.g., search a multi-lingual WorkList in their\n preferred language.\n \"\"\"\n\n # If search results are to be ordered by some field other than\n # score, we need a cutoff point so that marginal matches don't get\n # top billing just because they're first alphabetically. This is\n # the default cutoff point, determined empirically.\n DEFAULT_MIN_SCORE = 500\n\n def __init__(self, **kwargs):\n languages = kwargs.pop(\"languages\", None)\n media = kwargs.pop(\"media\", None)\n\n # Our default_facets implementation will fill in values for\n # the facet groups defined by the Facets class. This\n # eliminates the need to explicitly specify a library, since\n # the library is mainly used to determine these defaults --\n # SearchFacets itself doesn't need one. 
However, in real\n # usage, a Library will be provided via\n # SearchFacets.from_request.\n kwargs.setdefault(\"library\", None)\n kwargs.setdefault(\"collection\", None)\n kwargs.setdefault(\"availability\", None)\n order = kwargs.setdefault(\"order\", None)\n\n if order in (None, self.ORDER_BY_RELEVANCE):\n # Search results are ordered by score, so there is no\n # need for a score cutoff.\n default_min_score = None\n else:\n default_min_score = self.DEFAULT_MIN_SCORE\n self.min_score = kwargs.pop(\"min_score\", default_min_score)\n\n super(SearchFacets, self).__init__(**kwargs)\n if media == Edition.ALL_MEDIUM:\n self.media = media\n else:\n self.media = self._ensure_list(media)\n self.media_argument = media\n\n self.languages = self._ensure_list(languages)\n\n @classmethod\n def default_facet(cls, ignore, group_name):\n \"\"\"The default facet settings for SearchFacets are hard-coded.\n\n By default, we will search the full collection and all\n availabilities, and order by match quality rather than any\n bibliographic field.\n \"\"\"\n if group_name == cls.COLLECTION_FACET_GROUP_NAME:\n return cls.COLLECTION_FULL\n\n if group_name == cls.AVAILABILITY_FACET_GROUP_NAME:\n return cls.AVAILABLE_ALL\n\n if group_name == cls.ORDER_FACET_GROUP_NAME:\n return cls.ORDER_BY_RELEVANCE\n return None\n\n def _ensure_list(self, x):\n \"\"\"Make sure x is a list of values, if there is a value at all.\"\"\"\n if x is None:\n return None\n if isinstance(x, list):\n return x\n return [x]\n\n @classmethod\n def from_request(\n cls,\n library,\n config,\n get_argument,\n get_header,\n worklist,\n default_entrypoint=EverythingEntryPoint,\n **extra\n ):\n\n values = cls._values_from_request(config, get_argument, get_header)\n if isinstance(values, ProblemDetail):\n return values\n extra.update(values)\n extra[\"library\"] = library\n # Searches against a WorkList will use the union of the\n # languages allowed by the WorkList and the languages found in\n # the client's Accept-Language header.\n language_header = get_header(\"Accept-Language\")\n languages = get_argument(\"language\") or None\n if not languages:\n if language_header:\n languages = parse_accept_language(language_header)\n languages = [l[0] for l in languages]\n languages = list(map(LanguageCodes.iso_639_2_for_locale, languages))\n languages = [l for l in languages if l]\n languages = languages or None\n\n # The client can request a minimum score for search results.\n min_score = get_argument(\"min_score\", None)\n if min_score is not None:\n try:\n min_score = int(min_score)\n except ValueError as e:\n min_score = None\n if min_score is not None:\n extra[\"min_score\"] = min_score\n\n # The client can request an additional restriction on\n # the media types to be returned by searches.\n\n media = get_argument(\"media\", None)\n if media not in EditionConstants.KNOWN_MEDIA:\n media = None\n extra[\"media\"] = media\n languageQuery = get_argument(\"language\", None)\n # Currently, the only value passed to the language query from the client is\n # `all`. 
This will remove the default browser's Accept-Language header value\n # in the search request.\n if languageQuery != \"all\":\n extra[\"languages\"] = languages\n\n return cls._from_request(\n config, get_argument, get_header, worklist, default_entrypoint, **extra\n )\n\n @classmethod\n def selectable_entrypoints(cls, worklist):\n \"\"\"If the WorkList has more than one facet, an 'everything' facet\n is added for search purposes.\n \"\"\"\n if not worklist:\n return []\n entrypoints = list(worklist.entrypoints)\n if len(entrypoints) < 2:\n return entrypoints\n if EverythingEntryPoint not in entrypoints:\n entrypoints.insert(0, EverythingEntryPoint)\n return entrypoints\n\n def modify_search_filter(self, filter):\n \"\"\"Modify the given external_search.Filter object\n so that it reflects this SearchFacets object.\n \"\"\"\n super(SearchFacets, self).modify_search_filter(filter)\n\n if filter.order is not None and filter.min_score is None:\n # The user wants search results to be ordered by one of\n # the data fields, not the match score; and no overriding\n # score cutoff has been provided yet. Use ours.\n filter.min_score = self.min_score\n\n # The incoming 'media' argument takes precedence over any\n # media restriction defined by the WorkList or the EntryPoint.\n if self.media == Edition.ALL_MEDIUM:\n # Clear any preexisting media restrictions.\n filter.media = None\n elif self.media:\n filter.media = self.media\n\n # The languages matched by the filter are the union of the\n # languages allowed by the WorkList (which were set to\n # filter.languages upon instantiation) and the languages\n # mentioned in the the user's Accept-Language header (which\n # were stuck into the SearchFacets object when _it_ was\n # instantiated).\n #\n # We don't rely solely on the WorkList languages because at\n # the moment it's hard for people who don't read the dominant\n # language of the circulation manager to find the right place\n # to search.\n #\n # We don't rely solely on the SearchFacets languages because a\n # lot of people read in languages other than the one they've\n # set for their device UI.\n all_languages = set()\n for language_list in (self.languages, filter.languages):\n for language in self._ensure_list(language_list) or []:\n all_languages.add(language)\n filter.languages = sorted(all_languages) or None\n\n def items(self):\n \"\"\"Yields a 2-tuple for every active facet setting.\n\n This means the EntryPoint (handled by the superclass)\n as well as possible settings for 'media' and \"min_score\".\n \"\"\"\n for k, v in list(super(SearchFacets, self).items()):\n yield k, v\n if self.media_argument:\n yield (\"media\", self.media_argument)\n\n if self.min_score is not None:\n yield (\"min_score\", str(self.min_score))\n\n def navigate(self, **kwargs):\n min_score = kwargs.pop(\"min_score\", self.min_score)\n new_facets = super(SearchFacets, self).navigate(**kwargs)\n new_facets.min_score = min_score\n return new_facets\n\n\nclass Pagination(object):\n\n DEFAULT_SIZE = 50\n DEFAULT_SEARCH_SIZE = 10\n DEFAULT_FEATURED_SIZE = 10\n DEFAULT_CRAWLABLE_SIZE = 100\n MAX_SIZE = 100\n\n @classmethod\n def default(cls):\n return Pagination(0, cls.DEFAULT_SIZE)\n\n def __init__(self, offset=0, size=DEFAULT_SIZE):\n \"\"\"Constructor.\n\n :param offset: Start pulling entries from the query at this index.\n :param size: Pull no more than this number of entries from the query.\n \"\"\"\n self.offset = offset\n self.size = size\n self.total_size = None\n self.this_page_size = None\n self.page_has_loaded 
= False\n self.max_size = self.MAX_SIZE\n\n @classmethod\n def _int_from_request(cls, key, get_arg, make_detail, default):\n \"\"\"Helper method to get and parse an integer value from\n a URL query argument in a Flask request.\n\n :param key: Name of the argument.\n :param get_arg: A function which when called with (key, default)\n returns the value of the query argument.\n :pass make_detail: A function, called with the value\n obtained from the request, which returns the detail\n information that should be included in a problem detail\n document if the input isn't convertable to an integer.\n :param default: Use this value if none is specified.\n \"\"\"\n raw = get_arg(key, default)\n try:\n as_int = int(raw)\n except ValueError:\n return INVALID_INPUT.detailed(make_detail(raw))\n return as_int\n\n @classmethod\n def size_from_request(cls, get_arg, default):\n make_detail = lambda size: (_(\"Invalid page size: %(size)s\", size=size))\n size = cls._int_from_request(\n \"size\", get_arg, make_detail, default or cls.DEFAULT_SIZE\n )\n if isinstance(size, ProblemDetail):\n return size\n return min(size, cls.MAX_SIZE)\n\n @classmethod\n def from_request(cls, get_arg, default_size=None):\n \"\"\"Instantiate a Pagination object from a Flask request.\"\"\"\n default_size = default_size or cls.DEFAULT_SIZE\n size = cls.size_from_request(get_arg, default_size)\n if isinstance(size, ProblemDetail):\n return size\n offset = cls._int_from_request(\n \"after\",\n get_arg,\n lambda offset: _(\"Invalid offset: %(offset)s\", offset=offset),\n 0,\n )\n if isinstance(offset, ProblemDetail):\n return offset\n return cls(offset, size)\n\n def items(self):\n yield (\"after\", self.offset)\n yield (\"size\", self.size)\n\n @property\n def query_string(self):\n return \"&\".join(\"=\".join(map(str, x)) for x in list(self.items()))\n\n @property\n def first_page(self):\n return Pagination(0, self.size)\n\n @property\n def next_page(self):\n if not self.has_next_page:\n return None\n return Pagination(self.offset + self.size, self.size)\n\n @property\n def previous_page(self):\n if self.offset <= 0:\n return None\n previous_offset = self.offset - self.size\n previous_offset = max(0, previous_offset)\n return Pagination(previous_offset, self.size)\n\n @property\n def has_next_page(self):\n \"\"\"Returns boolean reporting whether pagination is done for a query\n\n Either `total_size` or `this_page_size` must be set for this\n method to be accurate.\n \"\"\"\n if self.total_size is not None:\n # We know the total size of the result set, so we know\n # whether or not there are more results.\n return self.offset + self.size < self.total_size\n if self.this_page_size is not None:\n # We know the number of items on the current page. If this\n # page was empty, we can assume there is no next page; if\n # not, we can assume there is a next page. 
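(The cost is at most one extra\n            # request that returns an empty page at the very end of the\n            # result set.) 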
This is a little\n # more conservative than checking whether we have a 'full'\n # page.\n return self.this_page_size > 0\n\n # We don't know anything about this result set, so assume there is\n # a next page.\n return True\n\n def modify_database_query(self, _db, qu):\n \"\"\"Modify the given database query with OFFSET and LIMIT.\"\"\"\n return qu.offset(self.offset).limit(self.size)\n\n def modify_search_query(self, search):\n \"\"\"Modify a Search object so that it retrieves only a single 'page'\n of results.\n\n :return: A Search object.\n \"\"\"\n return search[self.offset : self.offset + self.size]\n\n def page_loaded(self, page):\n \"\"\"An actual page of results has been fetched. Keep any internal state\n that would be useful to know when reasoning about earlier or\n later pages.\n \"\"\"\n self.this_page_size = len(page)\n self.page_has_loaded = True\n\n\nclass WorkList(object):\n \"\"\"An object that can obtain a list of Work objects for use\n in generating an OPDS feed.\n\n By default, these Work objects come from a search index.\n \"\"\"\n\n # The default maximum cache time of a feed derived from a WorkList\n # is the default cache time for any OPDS feed.\n MAX_CACHE_AGE = OPDSFeed.DEFAULT_MAX_AGE\n\n # If a certain type of Worklist should always have its OPDS feeds\n # cached under a specific type, define that type as\n # CACHED_FEED_TYPE.\n CACHED_FEED_TYPE = None\n\n # By default, a WorkList is always visible.\n visible = True\n\n # By default, a WorkList does not draw from CustomLists\n uses_customlists = False\n\n def max_cache_age(self, type):\n \"\"\"Determine how long a feed for this WorkList should be cached\n internally.\n \"\"\"\n return self.MAX_CACHE_AGE\n\n @classmethod\n def top_level_for_library(self, _db, library):\n \"\"\"Create a WorkList representing this library's collection\n as a whole.\n\n If no top-level visible lanes are configured, the WorkList\n will be configured to show every book in the collection.\n\n If a single top-level Lane is configured, it will returned as\n the WorkList.\n\n Otherwise, a WorkList containing the visible top-level lanes\n is returned.\n \"\"\"\n # Load all of this Library's visible top-level Lane objects\n # from the database.\n top_level_lanes = (\n _db.query(Lane)\n .filter(Lane.library == library)\n .filter(Lane.parent == None)\n .filter(Lane._visible == True)\n .order_by(Lane.priority)\n .all()\n )\n\n if len(top_level_lanes) == 1:\n # The site configuration includes a single top-level lane;\n # this can stand in for the library on its own.\n return top_level_lanes[0]\n\n # This WorkList contains every title available to this library\n # in one of the media supported by the default client.\n wl = TopLevelWorkList()\n\n wl.initialize(\n library,\n display_name=library.name,\n children=top_level_lanes,\n media=Edition.FULFILLABLE_MEDIA,\n entrypoints=library.entrypoints,\n )\n return wl\n\n def initialize(\n self,\n library,\n display_name=None,\n genres=None,\n audiences=None,\n languages=None,\n media=None,\n customlists=None,\n list_datasource=None,\n list_seen_in_previous_days=None,\n children=None,\n priority=None,\n entrypoints=None,\n fiction=None,\n license_datasource=None,\n target_age=None,\n ):\n \"\"\"Initialize with basic data.\n\n This is not a constructor, to avoid conflicts with `Lane`, an\n ORM object that subclasses this object but does not use this\n initialization code.\n\n :param library: Only Works available in this Library will be\n included in lists.\n\n :param display_name: Name to display for this 
WorkList in the\n user interface.\n\n :param genres: Only Works classified under one of these Genres\n will be included in lists.\n\n :param audiences: Only Works classified under one of these audiences\n will be included in lists.\n\n :param languages: Only Works in one of these languages will be\n included in lists.\n\n :param media: Only Works in one of these media will be included\n in lists.\n\n :param fiction: Only Works with this fiction status will be included\n in lists.\n\n :param target_age: Only Works targeted at readers in this age range\n will be included in lists.\n\n :param license_datasource: Only Works with a LicensePool from this\n DataSource will be included in lists.\n\n :param customlists: Only Works included on one of these CustomLists\n will be included in lists.\n\n :param list_datasource: Only Works included on a CustomList\n associated with this DataSource will be included in\n lists. This overrides any specific CustomLists provided in\n `customlists`.\n\n :param list_seen_in_previous_days: Only Works that were added\n to a matching CustomList within this number of days will be\n included in lists.\n\n :param children: This WorkList has children, which are also\n WorkLists.\n\n :param priority: A number indicating where this WorkList should\n show up in relation to its siblings when it is the child of\n some other WorkList.\n\n :param entrypoints: A list of EntryPoint classes representing\n different ways of slicing up this WorkList.\n\n \"\"\"\n self.library_id = None\n self.collection_ids = None\n if library:\n self.library_id = library.id\n self.collection_ids = [\n collection.id for collection in library.all_collections\n ]\n self.display_name = display_name\n if genres:\n self.genre_ids = [x.id for x in genres]\n else:\n self.genre_ids = None\n self.audiences = audiences\n self.languages = languages\n self.media = media\n self.fiction = fiction\n\n if license_datasource:\n self.license_datasource_id = license_datasource.id\n else:\n self.license_datasource_id = None\n\n # If a specific set of CustomLists was passed in, store their IDs.\n #\n # If a custom list DataSource was passed in, gather the IDs for\n # every CustomList associated with that DataSource, and store\n # those IDs.\n #\n # Either way, WorkList starts out with a specific list of IDs,\n # which simplifies the WorkList code in a way that isn't\n # available to Lane.\n self._customlist_ids = None\n self.list_datasource_id = None\n if list_datasource:\n customlists = list_datasource.custom_lists\n\n # We do also store the CustomList ID, which is used as an\n # optimization in customlist_filter_clauses().\n self.list_datasource_id = list_datasource.id\n\n # The custom list IDs are stored in _customlist_ids, for\n # compatibility with Lane.\n if customlists:\n self._customlist_ids = [x.id for x in customlists]\n self.list_seen_in_previous_days = list_seen_in_previous_days\n\n self.fiction = fiction\n self.target_age = target_age\n\n self.children = []\n if children:\n for child in children:\n self.append_child(child)\n self.priority = priority or 0\n\n if entrypoints:\n self.entrypoints = list(entrypoints)\n else:\n self.entrypoints = []\n\n def append_child(self, child):\n \"\"\"Add one child to the list of children in this WorkList.\n\n This hook method can be overridden to modify the child's\n configuration so as to make it fit with what the parent is\n offering.\n \"\"\"\n self.children.append(child)\n\n @property\n def customlist_ids(self):\n \"\"\"Return the custom list IDs.\"\"\"\n return 
self._customlist_ids\n\n @property\n def uses_customlists(self):\n \"\"\"Does the works() implementation for this WorkList look for works on\n CustomLists?\n \"\"\"\n if self._customlist_ids or self.list_datasource_id:\n return True\n return False\n\n def get_library(self, _db):\n \"\"\"Find the Library object associated with this WorkList.\"\"\"\n return Library.by_id(_db, self.library_id)\n\n def get_customlists(self, _db):\n \"\"\"Get customlists associated with the Worklist.\"\"\"\n if hasattr(self, \"_customlist_ids\") and self._customlist_ids is not None:\n return (\n _db.query(CustomList)\n .filter(CustomList.id.in_(self._customlist_ids))\n .all()\n )\n return []\n\n @property\n def display_name_for_all(self):\n \"\"\"The display name to use when referring to the set of all books in\n this WorkList, as opposed to the WorkList itself.\n \"\"\"\n return _(\"All %(worklist)s\", worklist=self.display_name)\n\n @property\n def visible_children(self):\n \"\"\"A WorkList's children can be used to create a grouped acquisition\n feed for that WorkList.\n \"\"\"\n return sorted(\n [x for x in self.children if x.visible],\n key=lambda x: (x.priority, x.display_name or \"\"),\n )\n\n @property\n def has_visible_children(self):\n for lane in self.visible_children:\n if lane:\n return True\n return False\n\n @property\n def parent(self):\n \"\"\"A WorkList has no parent. This method is defined for compatibility\n with Lane.\n \"\"\"\n return None\n\n @property\n def parentage(self):\n \"\"\"WorkLists have no parentage. This method is defined for compatibility\n with Lane.\n \"\"\"\n return []\n\n def is_self_or_descendant(self, ancestor):\n \"\"\"Is this WorkList the given WorkList or one of its descendants?\n\n :param ancestor: A WorkList.\n :return: A boolean.\n \"\"\"\n for candidate in [self] + list(self.parentage):\n if candidate == ancestor:\n return True\n return False\n\n @property\n def inherit_parent_restrictions(self):\n \"\"\"Since a WorkList has no parent, it cannot inherit any restrictions\n from its parent. This method is defined for compatibility\n with Lane.\n \"\"\"\n return False\n\n @property\n def hierarchy(self):\n \"\"\"The portion of the WorkList hierarchy that culminates in this\n WorkList.\n \"\"\"\n return list(reversed(list(self.parentage))) + [self]\n\n def inherited_value(self, k):\n \"\"\"Try to find this WorkList's value for the given key (e.g. 'fiction'\n or 'audiences').\n\n If it's not set, try to inherit a value from the WorkList's\n parent. This only works if this WorkList has a parent and is\n configured to inherit values from its parent.\n\n Note that inheritance works differently for genre_ids and\n customlist_ids -- use inherited_values() for that.\n \"\"\"\n value = getattr(self, k)\n if value not in (None, []):\n return value\n else:\n if not self.parent or not self.inherit_parent_restrictions:\n return None\n parent = self.parent\n return parent.inherited_value(k)\n\n def inherited_values(self, k):\n \"\"\"Find the values for the given key (e.g. 
'genre_ids' or\n 'customlist_ids') imposed by this WorkList and its parentage.\n\n This is for values like .genre_ids and .customlist_ids, where\n each member of the WorkList hierarchy can impose a restriction\n on query results, and the effects of the restrictions are\n additive.\n \"\"\"\n values = []\n if not self.inherit_parent_restrictions:\n hierarchy = [self]\n else:\n hierarchy = self.hierarchy\n for wl in hierarchy:\n value = getattr(wl, k)\n if value not in (None, []):\n values.append(value)\n return values\n\n @property\n def full_identifier(self):\n \"\"\"A human-readable identifier for this WorkList that\n captures its position within the heirarchy.\n \"\"\"\n full_parentage = [str(x.display_name) for x in self.hierarchy]\n if getattr(self, \"library\", None):\n # This WorkList is associated with a specific library.\n # incorporate the library's name to distinguish between it\n # and other lanes in the same position in another library.\n full_parentage.insert(0, self.library.short_name)\n return \" / \".join(full_parentage)\n\n @property\n def language_key(self):\n \"\"\"Return a string identifying the languages used in this WorkList.\n This will usually be in the form of 'eng,spa' (English and Spanish).\n \"\"\"\n key = \"\"\n if self.languages:\n key += \",\".join(sorted(self.languages))\n return key\n\n @property\n def audience_key(self):\n \"\"\"Translates audiences list into url-safe string\"\"\"\n key = \"\"\n if self.audiences and Classifier.AUDIENCES.difference(self.audiences):\n # There are audiences and they're not the default\n # \"any audience\", so add them to the URL.\n audiences = [quote_plus(a) for a in sorted(self.audiences)]\n key += \",\".join(audiences)\n return key\n\n @property\n def unique_key(self):\n \"\"\"A string key that uniquely describes this WorkList within\n its Library.\n\n This is used when caching feeds for this WorkList. 
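For example, a\n        WorkList named \"Science Fiction\" that is restricted to English\n        and young adults would have the key\n        \"Science Fiction-eng-Young+Adult\". 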
For Lanes,\n the lane_id is used instead.\n \"\"\"\n return \"%s-%s-%s\" % (self.display_name, self.language_key, self.audience_key)\n\n def accessible_to(self, patron):\n \"\"\"As a matter of library policy, is the given `Patron` allowed\n to access this `WorkList`?\n \"\"\"\n if not patron:\n # We have no lanes that are private, per se, so if there\n # is no active patron, every lane is accessible.\n return True\n\n _db = Session.object_session(patron)\n if patron.library != self.get_library(_db):\n # You can't access a WorkList from another library.\n return False\n\n if not patron.library.has_root_lanes:\n # The patron's library has no root lanes, so it's not necessary\n # to run the somewhat expensive check for a patron's root lane.\n # All lanes are accessible to all patrons.\n return True\n\n # Get the patron's root lane, if any.\n root = patron.root_lane\n if not root:\n # A patron with no root lane can access every one of the\n # library's WorkLists.\n return True\n\n # A WorkList is only accessible if the audiences and target age\n # of the WorkList are fully compatible with that of the\n # patron's root lane.\n if self.audiences:\n for work_audience in self.audiences:\n # work_audience represents a type of book that _might_\n # show up in this WorkList.\n if not patron.work_is_age_appropriate(work_audience, self.target_age):\n # Books of this type would not be appropriate to show to\n # this patron, so the lane itself is not accessible.\n return False\n\n return True\n\n def overview_facets(self, _db, facets):\n \"\"\"Convert a generic FeaturedFacets to some other faceting object,\n suitable for showing an overview of this WorkList in a grouped\n feed.\n \"\"\"\n return facets\n\n def groups(\n self,\n _db,\n include_sublanes=True,\n pagination=None,\n facets=None,\n search_engine=None,\n debug=False,\n ):\n \"\"\"Extract a list of samples from each child of this WorkList. This\n can be used to create a grouped acquisition feed for the WorkList.\n\n :param pagination: A Pagination object which may affect how many\n works each child of this WorkList may contribute.\n :param facets: A FeaturedFacets object that may restrict the works on view.\n :param search_engine: An ExternalSearchIndex to use when\n asking for the featured works in a given WorkList.\n :param debug: A debug argument passed into `search_engine` when\n running the search.\n :yield: A sequence of (Work, WorkList) 2-tuples, with each\n WorkList representing the child WorkList in which the Work is\n found.\n \"\"\"\n if not include_sublanes:\n # We only need to find featured works for this lane,\n # not this lane plus its sublanes.\n adapted = self.overview_facets(_db, facets)\n for work in self.works(_db, pagination=pagination, facets=adapted):\n yield work, self\n return\n\n # This is a list rather than a dict because we want to\n # preserve the ordering of the children.\n relevant_lanes = []\n relevant_children = []\n\n # We use an explicit check for Lane.visible here, instead of\n # iterating over self.visible_children, because Lane.visible only\n # works when the Lane is merged into a database session.\n for child in self.children:\n if isinstance(child, Lane):\n child = _db.merge(child)\n\n if not child.visible:\n continue\n\n if isinstance(child, Lane):\n # Children that turn out to be Lanes go into\n # relevant_lanes. 
Their Works will be obtained from\n # the search index.\n relevant_lanes.append(child)\n # Both Lanes and WorkLists go into relevant_children.\n # This controls the yield order for Works.\n relevant_children.append(child)\n\n # _groups_for_lanes will run a query to pull featured works\n # for any children that are Lanes, and call groups()\n # recursively for any children that are not.\n for work, worklist in self._groups_for_lanes(\n _db,\n relevant_children,\n relevant_lanes,\n pagination=pagination,\n facets=facets,\n search_engine=search_engine,\n debug=debug,\n ):\n yield work, worklist\n\n def works(\n self,\n _db,\n facets=None,\n pagination=None,\n search_engine=None,\n debug=False,\n **kwargs\n ):\n\n \"\"\"Use a search engine to obtain Work or Work-like objects that belong\n in this WorkList.\n\n Compare DatabaseBackedWorkList.works_from_database, which uses\n a database query to obtain the same Work objects.\n\n :param _db: A database connection.\n :param facets: A Facets object which may put additional\n constraints on WorkList membership.\n :param pagination: A Pagination object indicating which part of\n the WorkList the caller is looking at, and/or a limit on the\n number of works to fetch.\n :param kwargs: Different implementations may fetch the\n list of works from different sources and may need different\n keyword arguments.\n :return: A list of Work or Work-like objects, or a database query\n that generates such a list when executed.\n\n \"\"\"\n from .external_search import ExternalSearchIndex\n\n search_engine = search_engine or ExternalSearchIndex.load(_db)\n filter = self.filter(_db, facets)\n hits = search_engine.query_works(\n query_string=None, filter=filter, pagination=pagination, debug=debug\n )\n return self.works_for_hits(_db, hits, facets=facets)\n\n def filter(self, _db, facets):\n \"\"\"Helper method to instantiate a Filter object for this WorkList.\n\n Using this ensures that modify_search_filter_hook() is always\n called.\n \"\"\"\n from .external_search import Filter\n\n filter = Filter.from_worklist(_db, self, facets)\n modified = self.modify_search_filter_hook(filter)\n if modified is None:\n # The Filter was modified in place, rather than a new\n # Filter being returned.\n modified = filter\n return modified\n\n def modify_search_filter_hook(self, filter):\n \"\"\"A hook method allowing subclasses to modify a Filter\n object that's about to find all the works in this WorkList.\n\n This can avoid the need for complex subclasses of Facets.\n \"\"\"\n return filter\n\n def works_for_hits(self, _db, hits, facets=None):\n \"\"\"Convert a list of search results into Work objects.\n\n This works by calling works_for_resultsets() on a list\n containing a single list of search results.\n\n :param _db: A database connection\n :param hits: A list of Hit objects from ElasticSearch.\n :return: A list of Work or (if the search results include\n script fields), WorkSearchResult objects.\n \"\"\"\n\n [results] = self.works_for_resultsets(_db, [hits], facets=facets)\n return results\n\n def works_for_resultsets(self, _db, resultsets, facets=None):\n \"\"\"Convert a list of lists of Hit objects into a list\n of lists of Work objects.\n \"\"\"\n from .external_search import Filter, WorkSearchResult\n\n has_script_fields = None\n work_ids = set()\n for resultset in resultsets:\n for result in resultset:\n work_ids.add(result.work_id)\n if has_script_fields is None:\n # We don't know whether any script fields were\n # included, and now we're in a position to find\n # 
out.\n has_script_fields = any(\n x in result for x in Filter.KNOWN_SCRIPT_FIELDS\n )\n\n if has_script_fields is None:\n # This can only happen when there are no results. The code\n # will work even if has_script_fields is None, but just to\n # be safe.\n has_script_fields = False\n\n # The simplest way to turn Hits into Works is to create a\n # DatabaseBackedWorkList that fetches those specific Works\n # while applying the general availability filters.\n #\n # If facets were passed in, then they are used to further\n # filter the list.\n #\n # TODO: There's a lot of room for improvement here, but\n # performance isn't a big concern -- it's just ugly.\n wl = SpecificWorkList(work_ids)\n wl.initialize(self.get_library(_db))\n qu = wl.works_from_database(_db, facets=facets)\n a = time.time()\n all_works = qu.all()\n\n # Create a list of lists with the same membership as the original\n # `resultsets`, but with Hit objects replaced with Work objects.\n work_by_id = dict()\n for w in all_works:\n work_by_id[w.id] = w\n\n work_lists = []\n for resultset in resultsets:\n works = []\n work_lists.append(works)\n for hit in resultset:\n if hit.work_id in work_by_id:\n work = work_by_id[hit.work_id]\n if has_script_fields:\n # Wrap the Work objects in WorkSearchResult so the\n # data from script fields isn't lost.\n work = WorkSearchResult(work, hit)\n works.append(work)\n\n b = time.time()\n logging.info(\"Obtained %sxWork in %.2fsec\", len(all_works), b - a)\n return work_lists\n\n @property\n def search_target(self):\n \"\"\"By default, a WorkList is searchable.\"\"\"\n return self\n\n def search(\n self, _db, query, search_client, pagination=None, facets=None, debug=False\n ):\n \"\"\"Find works in this WorkList that match a search query.\n\n :param _db: A database connection.\n :param query: Search for this string.\n :param search_client: An ExternalSearchIndex object.\n :param pagination: A Pagination object.\n :param facets: A faceting object, probably a SearchFacets.\n :param debug: Pass in True to see a summary of results returned\n from the search index.\n \"\"\"\n results = []\n hits = None\n if not search_client:\n # We have no way of actually doing a search. Return nothing.\n return results\n\n if not pagination:\n pagination = Pagination(offset=0, size=Pagination.DEFAULT_SEARCH_SIZE)\n\n filter = self.filter(_db, facets)\n try:\n hits = search_client.query_works(query, filter, pagination, debug)\n except elasticsearch.exceptions.ElasticsearchException as e:\n logging.error(\n \"Problem communicating with ElasticSearch. Returning empty list of search results.\",\n exc_info=e,\n )\n if hits:\n results = self.works_for_hits(_db, hits)\n\n return results\n\n def _groups_for_lanes(\n self,\n _db,\n relevant_lanes,\n queryable_lanes,\n pagination,\n facets,\n search_engine=None,\n debug=False,\n ):\n \"\"\"Ask the search engine for groups of featurable works in the\n given lanes. Fill in gaps as necessary.\n\n :param pagination: An optional Pagination object which will be\n used to paginate each group individually. 
Note that this\n means Pagination.page_loaded() method will be called once\n for each group.\n :param facets: A FeaturedFacets object.\n\n :param search_engine: An ExternalSearchIndex to use when\n asking for the featured works in a given WorkList.\n :param debug: A debug argument passed into `search_engine` when\n running the search.\n :yield: A sequence of (Work, WorkList) 2-tuples, with each\n WorkList representing the child WorkList in which the Work is\n found.\n\n \"\"\"\n library = self.get_library(_db)\n if pagination is None:\n # No pagination object was provided. Our target size is\n # the featured lane size, but we'll ask for a few extra\n # works for each lane, to reduce the risk that we end up\n # reusing a book in two different lanes.\n target_size = library.featured_lane_size\n\n # We ask for a few extra works for each lane, to reduce the\n # risk that we'll end up reusing a book in two different\n # lanes.\n ask_for_size = max(target_size + 1, int(target_size * 1.10))\n pagination = Pagination(size=ask_for_size)\n else:\n target_size = pagination.size\n\n from .external_search import ExternalSearchIndex\n\n search_engine = search_engine or ExternalSearchIndex.load(_db)\n\n if isinstance(self, Lane):\n parent_lane = self\n else:\n parent_lane = None\n\n queryable_lane_set = set(queryable_lanes)\n works_and_lanes = list(\n self._featured_works_with_lanes(\n _db,\n queryable_lanes,\n pagination=pagination,\n facets=facets,\n search_engine=search_engine,\n debug=debug,\n )\n )\n\n def _done_with_lane(lane):\n \"\"\"Called when we're done with a Lane, either because\n the lane changes or we've reached the end of the list.\n \"\"\"\n # Did we get enough items?\n num_missing = target_size - len(by_lane[lane])\n if num_missing > 0 and might_need_to_reuse:\n # No, we need to use some works we used in a\n # previous lane to fill out this lane. Stick\n # them at the end.\n by_lane[lane].extend(list(might_need_to_reuse.values())[:num_missing])\n\n used_works = set()\n by_lane = defaultdict(list)\n working_lane = None\n might_need_to_reuse = dict()\n for work, lane in works_and_lanes:\n if lane != working_lane:\n # Either we're done with the old lane, or we're just\n # starting and there was no old lane.\n if working_lane:\n _done_with_lane(working_lane)\n working_lane = lane\n used_works_this_lane = set()\n might_need_to_reuse = dict()\n if len(by_lane[lane]) >= target_size:\n # We've already filled this lane.\n continue\n\n if work.id in used_works:\n if work.id not in used_works_this_lane:\n # We already used this work in another lane, but we\n # might need to use it again to fill out this lane.\n might_need_to_reuse[work.id] = work\n else:\n by_lane[lane].append(work)\n used_works.add(work.id)\n used_works_this_lane.add(work.id)\n\n # Close out the last lane encountered.\n _done_with_lane(working_lane)\n for lane in relevant_lanes:\n if lane in queryable_lane_set:\n # We found results for this lane through the main query.\n # Yield those results.\n for work in by_lane.get(lane, []):\n yield (work, lane)\n else:\n # We didn't try to use the main query to find results\n # for this lane because we knew the results, if there\n # were any, wouldn't be representative. This is most\n # likely because this 'lane' is a WorkList and not a\n # Lane at all. 
Do a whole separate query and plug it\n # in at this point.\n for x in lane.groups(\n _db,\n include_sublanes=False,\n pagination=pagination,\n facets=facets,\n ):\n yield x\n\n def _featured_works_with_lanes(\n self, _db, lanes, pagination, facets, search_engine, debug=False\n ):\n \"\"\"Find a sequence of works that can be used to\n populate this lane's grouped acquisition feed.\n\n :param lanes: Classify Work objects\n as belonging to one of these WorkLists (presumably sublanes\n of `self`).\n :param facets: A faceting object, presumably a FeaturedFacets\n :param pagination: A Pagination object explaining how many\n items to ask for. In most cases this should be slightly more than\n the number of items you actually want, so that you have some\n slack to remove duplicates across multiple lanes.\n :param search_engine: An ExternalSearchIndex to use when\n asking for the featured works in a given WorkList.\n :param debug: A debug argument passed into `search_engine` when\n running the search.\n\n :yield: A sequence of (Work, Lane) 2-tuples.\n \"\"\"\n if not lanes:\n # We can't run this query at all.\n return\n\n # Ask the search engine for works from every lane we're given.\n\n # NOTE: At the moment, every WorkList in the system can be\n # generated using an Elasticsearch query. That is, there are\n # no subclasses of the DatabaseExclusiveWorkList class defined\n # in circulation/api/lanes.py. If that ever changes, we'll\n # need to change this code.\n #\n # The simplest change would probably be to return a dictionary\n # mapping WorkList to Works and let the caller figure out the\n # ordering. In fact, we could start doing that now.\n queries = []\n for lane in lanes:\n overview_facets = lane.overview_facets(_db, facets)\n from .external_search import Filter\n\n filter = Filter.from_worklist(_db, lane, overview_facets)\n queries.append((None, filter, pagination))\n resultsets = list(search_engine.query_works_multi(queries))\n works = self.works_for_resultsets(_db, resultsets, facets=facets)\n\n for i, lane in enumerate(lanes):\n results = works[i]\n for work in results:\n yield work, lane\n\n\nclass HierarchyWorkList(WorkList):\n \"\"\"A WorkList representing part of a hierarchical view of a a\n library's collection. (As opposed to a non-hierarchical view such\n as search results or \"books by author X\".)\n \"\"\"\n\n def accessible_to(self, patron):\n \"\"\"As a matter of library policy, is the given `Patron` allowed\n to access this `WorkList`?\n\n Most of the logic is inherited from `WorkList`, but there's also\n a restriction based on the site hierarchy.\n\n :param patron: A Patron\n :return: A boolean\n \"\"\"\n\n # All the rules of WorkList apply.\n if not super(HierarchyWorkList, self).accessible_to(patron):\n return False\n\n if patron is None:\n return True\n\n root_lane = patron.root_lane\n if root_lane and not self.is_self_or_descendant(root_lane):\n # In addition, a HierarchyWorkList that's not in\n # scope of the patron's root lane is not accessible,\n # period. 
Even if all of the books in the WorkList are\n # age-appropriate, it's in a different part of the\n # navigational structure and navigating to it is not\n # allowed.\n return False\n\n return True\n\n\nclass TopLevelWorkList(HierarchyWorkList):\n \"\"\"A special WorkList representing the top-level view of\n a library's collection.\n \"\"\"\n\n\nclass DatabaseBackedWorkList(WorkList):\n \"\"\"A WorkList that can get its works from the database in addition to\n (or possibly instead of) the search index.\n\n Even when works _are_ obtained through the search index, a\n DatabaseBackedWorkList is then created to look up the Work objects\n for use in an OPDS feed.\n \"\"\"\n\n def works_from_database(self, _db, facets=None, pagination=None, **kwargs):\n \"\"\"Create a query against the `works` table that finds Work objects\n corresponding to all the Works that belong in this WorkList.\n\n The apply_filters() implementation defines which Works qualify\n for membership in a WorkList of this type.\n\n This tends to be slower than WorkList.works, but not all\n lanes can be generated through search engine queries.\n\n :param _db: A database connection.\n :param facets: A faceting object, which may place additional\n constraints on WorkList membership.\n :param pagination: A Pagination object indicating which part of\n the WorkList the caller is looking at.\n :param kwargs: Ignored -- only included for compatibility with works().\n :return: A Query.\n \"\"\"\n\n qu = self.base_query(_db)\n\n # In general, we only show books that are present in one of\n # the WorkList's collections and ready to be delivered to\n # patrons.\n qu = self.only_show_ready_deliverable_works(_db, qu)\n\n # Apply to the database the bibliographic restrictions with\n # which this WorkList was initialized -- genre, audience, and\n # whatnot.\n qu, bibliographic_clauses = self.bibliographic_filter_clauses(_db, qu)\n if bibliographic_clauses:\n bibliographic_clause = and_(*bibliographic_clauses)\n qu = qu.filter(bibliographic_clause)\n\n # Allow the faceting object to modify the database query.\n if facets is not None:\n qu = facets.modify_database_query(_db, qu)\n\n # Allow a subclass to modify the database query.\n qu = self.modify_database_query_hook(_db, qu)\n\n if qu._distinct is False:\n # This query must always be made distinct, since a Work\n # can have more than one LicensePool. If no one else has\n # taken the opportunity to make it distinct (e.g. 
the\n # faceting object, while setting sort order), we'll make\n # it distinct based on work ID.\n qu = qu.distinct(Work.id)\n\n # Allow the pagination object to modify the database query.\n if pagination is not None:\n qu = pagination.modify_database_query(_db, qu)\n\n return qu\n\n @classmethod\n def base_query(cls, _db):\n \"\"\"Return a query that contains the joins set up as necessary to\n create OPDS feeds.\n \"\"\"\n qu = (\n _db.query(Work)\n .join(Work.license_pools)\n .join(Work.presentation_edition)\n .filter(LicensePool.superceded == False)\n )\n\n # Apply optimizations.\n qu = cls._modify_loading(qu)\n qu = cls._defer_unused_fields(qu)\n return qu\n\n @classmethod\n def _modify_loading(cls, qu):\n \"\"\"Optimize a query for use in generating OPDS feeds, by modifying\n which related objects get pulled from the database.\n \"\"\"\n # Avoid eager loading of objects that are already being loaded.\n qu = qu.options(\n contains_eager(Work.presentation_edition),\n contains_eager(Work.license_pools),\n )\n license_pool_name = \"license_pools\"\n\n # Load some objects that wouldn't normally be loaded, but\n # which are necessary when generating OPDS feeds.\n\n # TODO: Strictly speaking, these joinedload calls are\n # only needed by the circulation manager. This code could\n # be moved to circulation and everyone else who uses this\n # would be a little faster. (But right now there is no one\n # else who uses this.)\n qu = qu.options(\n # These speed up the process of generating acquisition links.\n joinedload(license_pool_name, \"delivery_mechanisms\"),\n joinedload(license_pool_name, \"delivery_mechanisms\", \"delivery_mechanism\"),\n joinedload(license_pool_name, \"identifier\"),\n # These speed up the process of generating the open-access link\n # for open-access works.\n joinedload(license_pool_name, \"delivery_mechanisms\", \"resource\"),\n joinedload(\n license_pool_name, \"delivery_mechanisms\", \"resource\", \"representation\"\n ),\n )\n return qu\n\n def only_show_ready_deliverable_works(self, _db, query, show_suppressed=False):\n \"\"\"Restrict a query to show only presentation-ready works present in\n an appropriate collection which the default client can\n fulfill.\n\n Note that this assumes the query has an active join against\n LicensePool.\n \"\"\"\n return Collection.restrict_to_ready_deliverable_works(\n query, show_suppressed=show_suppressed, collection_ids=self.collection_ids\n )\n\n @classmethod\n def _defer_unused_fields(cls, query):\n \"\"\"Some applications use the simple OPDS entry and some\n applications use the verbose. Whichever one we don't need,\n we can stop from even being sent over from the\n database.\n \"\"\"\n if Configuration.DEFAULT_OPDS_FORMAT == \"simple_opds_entry\":\n return query.options(defer(Work.verbose_opds_entry))\n else:\n return query.options(defer(Work.simple_opds_entry))\n\n def bibliographic_filter_clauses(self, _db, qu):\n \"\"\"Create a SQLAlchemy filter that excludes books whose bibliographic\n metadata doesn't match what we're looking for.\n\n query is either `qu`, or a new query that has been modified to\n join against additional tables.\n\n :return: A 2-tuple (query, clauses).\n\n \"\"\"\n # Audience language, and genre restrictions are allowed on all\n # WorkLists. 
(So are collection restrictions, but those are\n # applied by only_show_ready_deliverable_works().)\n clauses = self.audience_filter_clauses(_db, qu)\n if self.languages:\n clauses.append(Edition.language.in_(self.languages))\n if self.media:\n clauses.append(Edition.medium.in_(self.media))\n if self.fiction is not None:\n clauses.append(Work.fiction == self.fiction)\n if self.license_datasource_id:\n clauses.append(LicensePool.data_source_id == self.license_datasource_id)\n\n if self.genre_ids:\n qu, clause = self.genre_filter_clause(qu)\n if clause is not None:\n clauses.append(clause)\n\n if self.customlist_ids:\n qu, customlist_clauses = self.customlist_filter_clauses(qu)\n clauses.extend(customlist_clauses)\n\n clauses.extend(self.age_range_filter_clauses())\n\n if self.parent and self.inherit_parent_restrictions:\n # In addition to any other restrictions, books\n # will show up here only if they would also show up in the\n # parent WorkList.\n qu, parent_clauses = self.parent.bibliographic_filter_clauses(_db, qu)\n if parent_clauses:\n clauses.extend(parent_clauses)\n\n return qu, clauses\n\n def audience_filter_clauses(self, _db, qu):\n \"\"\"Create a SQLAlchemy filter that excludes books whose intended\n audience doesn't match what we're looking for.\n \"\"\"\n if not self.audiences:\n return []\n return [Work.audience.in_(self.audiences)]\n\n def customlist_filter_clauses(self, qu):\n \"\"\"Create a filter clause that only allows books that are on one of the\n CustomLists allowed by Lane configuration.\n\n :return: A 2-tuple (query, clauses).\n\n `query` is the same query as `qu`, possibly extended with\n additional table joins.\n\n `clauses` is a list of SQLAlchemy statements for use in a\n filter() or case() statement.\n \"\"\"\n if not self.uses_customlists:\n # This lane does not require that books be on any particular\n # CustomList.\n return qu, []\n\n # We will be joining against CustomListEntry at least\n # once. For a lane derived from the intersection of two or\n # more custom lists, we may be joining CustomListEntry\n # multiple times. To avoid confusion, we make a new alias for\n # the table every time.\n a_entry = aliased(CustomListEntry)\n\n clause = a_entry.work_id == Work.id\n qu = qu.join(a_entry, clause)\n\n # Actually build the restriction clauses.\n clauses = []\n customlist_ids = None\n if self.list_datasource_id:\n # Use a subquery to obtain the CustomList IDs of all\n # CustomLists from this DataSource. This is significantly\n # simpler than adding a join against CustomList.\n customlist_ids = Select(\n [CustomList.id], CustomList.data_source_id == self.list_datasource_id\n )\n else:\n customlist_ids = self.customlist_ids\n if customlist_ids is not None:\n clauses.append(a_entry.list_id.in_(customlist_ids))\n if self.list_seen_in_previous_days:\n cutoff = utc_now() - datetime.timedelta(self.list_seen_in_previous_days)\n clauses.append(a_entry.most_recent_appearance >= cutoff)\n\n return qu, clauses\n\n def genre_filter_clause(self, qu):\n wg = aliased(WorkGenre)\n qu = qu.join(wg, wg.work_id == Work.id)\n return qu, wg.genre_id.in_(self.genre_ids)\n\n def age_range_filter_clauses(self):\n \"\"\"Create a clause that filters out all books not classified as\n suitable for this DatabaseBackedWorkList's age range.\n \"\"\"\n if self.target_age is None:\n return []\n\n # self.target_age will be a NumericRange for Lanes and a tuple for\n # most other WorkLists. 
Make sure it's always a NumericRange.\n target_age = self.target_age\n if isinstance(target_age, tuple):\n target_age = tuple_to_numericrange(target_age)\n\n audiences = self.audiences or []\n adult_audiences = [Classifier.AUDIENCE_ADULT, Classifier.AUDIENCE_ADULTS_ONLY]\n if target_age.upper >= 18 or (any(x in audiences for x in adult_audiences)):\n # Books for adults don't have target ages. If we're\n # including books for adults, either due to the audience\n # setting or the target age setting, allow the target age\n # to be empty.\n audience_has_no_target_age = Work.target_age == None\n else:\n audience_has_no_target_age = False\n\n # The lane's target age is an inclusive NumericRange --\n # set_target_age makes sure of that. The work's target age\n # must overlap that of the lane.\n\n return [or_(Work.target_age.overlaps(target_age), audience_has_no_target_age)]\n\n def modify_database_query_hook(self, _db, qu):\n \"\"\"A hook method allowing subclasses to modify a database query\n that's about to find all the works in this WorkList.\n\n This can avoid the need for complex subclasses of\n DatabaseBackedFacets.\n \"\"\"\n return qu\n\n\nclass SpecificWorkList(DatabaseBackedWorkList):\n \"\"\"A WorkList that only finds specific works, identified by ID.\"\"\"\n\n def __init__(self, work_ids):\n super(SpecificWorkList, self).__init__()\n self.work_ids = work_ids\n\n def modify_database_query_hook(self, _db, qu):\n qu = qu.filter(\n Work.id.in_(self.work_ids),\n LicensePool.work_id.in_(self.work_ids), # Query optimization\n )\n return qu\n\n\nclass LaneGenre(Base):\n \"\"\"Relationship object between Lane and Genre.\"\"\"\n\n __tablename__ = \"lanes_genres\"\n id = Column(Integer, primary_key=True)\n lane_id = Column(Integer, ForeignKey(\"lanes.id\"), index=True, nullable=False)\n genre_id = Column(Integer, ForeignKey(\"genres.id\"), index=True, nullable=False)\n\n # An inclusive relationship means that books classified under the\n # genre are included in the lane. An exclusive relationship means\n # that books classified under the genre are excluded, even if they\n # would otherwise be included.\n inclusive = Column(Boolean, default=True, nullable=False)\n\n # By default, this relationship applies not only to the genre\n # itself but to all of its subgenres. Setting recursive=false\n # means that only the genre itself is affected.\n recursive = Column(Boolean, default=True, nullable=False)\n\n __table_args__ = (UniqueConstraint(\"lane_id\", \"genre_id\"),)\n\n @classmethod\n def from_genre(cls, genre):\n \"\"\"Used in the Lane.genres association proxy.\"\"\"\n lg = LaneGenre()\n lg.genre = genre\n return lg\n\n\nGenre.lane_genres = relationship(\n \"LaneGenre\", foreign_keys=LaneGenre.genre_id, backref=\"genre\"\n)\n\n\nclass Lane(Base, DatabaseBackedWorkList, HierarchyWorkList):\n \"\"\"A WorkList that draws its search criteria from a row in a\n database table.\n\n A Lane corresponds roughly to a section in a branch library or\n bookstore. Lanes are the primary means by which patrons discover\n books.\n \"\"\"\n\n # The set of Works in a standard Lane is cacheable for twenty\n # minutes. 
Note that this only applies to paginated feeds --\n # grouped feeds are cached indefinitely.\n MAX_CACHE_AGE = 20 * 60\n\n __tablename__ = \"lanes\"\n id = Column(Integer, primary_key=True)\n library_id = Column(Integer, ForeignKey(\"libraries.id\"), index=True, nullable=False)\n parent_id = Column(Integer, ForeignKey(\"lanes.id\"), index=True, nullable=True)\n priority = Column(Integer, index=True, nullable=False, default=0)\n\n # How many titles are in this lane? This is periodically\n # calculated and cached.\n size = Column(Integer, nullable=False, default=0)\n\n # How many titles are in this lane when viewed through a specific\n # entry point? This is periodically calculated and cached.\n size_by_entrypoint = Column(JSON, nullable=True)\n\n # A lane may have one parent lane and many sublanes.\n sublanes = relationship(\n \"Lane\",\n backref=backref(\"parent\", remote_side=[id]),\n )\n\n # A lane may have multiple associated LaneGenres. For most lanes,\n # this is how the contents of the lanes are defined.\n genres = association_proxy(\"lane_genres\", \"genre\", creator=LaneGenre.from_genre)\n lane_genres = relationship(\n \"LaneGenre\",\n foreign_keys=\"LaneGenre.lane_id\",\n backref=\"lane\",\n cascade=\"all, delete-orphan\",\n )\n\n # display_name is the name of the lane as shown to patrons. It's\n # okay for this to be duplicated within a library, but it's not\n # okay to have two lanes with the same parent and the same display\n # name -- that would be confusing.\n display_name = Column(Unicode)\n\n # True = Fiction only\n # False = Nonfiction only\n # null = Both fiction and nonfiction\n #\n # This may interact with lane_genres, for genres such as Humor\n # which can apply to either fiction or nonfiction.\n fiction = Column(Boolean, index=True, nullable=True)\n\n # A lane may be restricted to works classified for specific audiences\n # (e.g. only Young Adult works).\n _audiences = Column(ARRAY(Unicode), name=\"audiences\")\n\n # A lane may further be restricted to works classified as suitable\n # for a specific age range.\n _target_age = Column(INT4RANGE, name=\"target_age\", index=True)\n\n # A lane may be restricted to works available in certain languages.\n languages = Column(ARRAY(Unicode))\n\n # A lane may be restricted to works in certain media (e.g. only\n # audiobooks).\n media = Column(ARRAY(Unicode))\n\n # TODO: At some point it may be possible to restrict a lane to certain\n # formats (e.g. only electronic materials or only codices).\n\n # Only books licensed through this DataSource will be shown.\n license_datasource_id = Column(\n Integer, ForeignKey(\"datasources.id\"), index=True, nullable=True\n )\n\n # Only books on one or more CustomLists obtained from this\n # DataSource will be shown.\n _list_datasource_id = Column(\n Integer, ForeignKey(\"datasources.id\"), index=True, nullable=True\n )\n\n # Only the books on these specific CustomLists will be shown.\n customlists = relationship(\n \"CustomList\", secondary=lambda: lanes_customlists, backref=\"lane\"\n )\n\n # This has no effect unless list_datasource_id or\n # list_identifier_id is also set. If this is set, then a book will\n # only be shown if it has a CustomListEntry on an appropriate list\n # where `most_recent_appearance` is within this number of days. 
If\n # the number is zero, then the lane contains _every_ book with a\n # CustomListEntry associated with an appropriate list.\n list_seen_in_previous_days = Column(Integer, nullable=True)\n\n # If this is set to True, then a book will show up in a lane only\n # if it would _also_ show up in its parent lane.\n inherit_parent_restrictions = Column(Boolean, default=True, nullable=False)\n\n # Patrons whose external type is in this list will be sent to this\n # lane when they ask for the root lane.\n #\n # This is almost never necessary.\n root_for_patron_type = Column(ARRAY(Unicode), nullable=True)\n\n # A grouped feed for a Lane contains a swim lane from each\n # sublane, plus a swim lane at the bottom for the Lane itself. In\n # some cases that final swim lane should not be shown. This\n # generally happens because a) the sublanes are so varied that no\n # one would want to see a big list containing everything, and b)\n # the sublanes are exhaustive of the Lane's content, so there's\n # nothing new to be seen by going into that big list.\n include_self_in_grouped_feed = Column(Boolean, default=True, nullable=False)\n\n # Only a visible lane will show up in the user interface. The\n # admin interface can see all the lanes, visible or not.\n _visible = Column(Boolean, default=True, nullable=False, name=\"visible\")\n\n # A Lane may have many CachedFeeds.\n cachedfeeds = relationship(\n \"CachedFeed\",\n backref=\"lane\",\n cascade=\"all, delete-orphan\",\n )\n\n # A Lane may have many CachedMARCFiles.\n cachedmarcfiles = relationship(\n \"CachedMARCFile\",\n backref=\"lane\",\n cascade=\"all, delete-orphan\",\n )\n\n __table_args__ = (UniqueConstraint(\"parent_id\", \"display_name\"),)\n\n def get_library(self, _db):\n \"\"\"For compatibility with WorkList.get_library().\"\"\"\n return self.library\n\n @property\n def list_datasource_id(self):\n return self._list_datasource_id\n\n @property\n def collection_ids(self):\n return [x.id for x in self.library.collections]\n\n @property\n def children(self):\n return self.sublanes\n\n @property\n def visible_children(self):\n children = [lane for lane in self.sublanes if lane.visible]\n return sorted(children, key=lambda x: (x.priority, x.display_name or \"\"))\n\n @property\n def parentage(self):\n \"\"\"Yield the parent, grandparent, etc. 
of this Lane.\n\n The Lane may be inside one or more non-Lane WorkLists, but those\n WorkLists are not counted in the parentage.\n \"\"\"\n if not self.parent:\n return\n parent = self.parent\n if Session.object_session(parent) is None:\n # This lane's parent was disconnected from its database session,\n # probably when an app server started up.\n # Reattach it to the database session used by this lane.\n parent = Session.object_session(self).merge(parent)\n\n yield parent\n seen = set([self, parent])\n for grandparent in parent.parentage:\n if grandparent in seen:\n raise ValueError(\"Lane parentage loop detected\")\n seen.add(grandparent)\n yield grandparent\n\n def is_self_or_descendant(self, ancestor):\n \"\"\"Is this WorkList the given WorkList or one of its descendants?\n\n :param ancestor: A WorkList.\n :return: A boolean.\n \"\"\"\n if super(Lane, self).is_self_or_descendant(ancestor):\n return True\n\n # A TopLevelWorkList won't show up in a Lane's parentage,\n # because it's not a Lane, but if they share the same library\n # it can be presumed to be the lane's ultimate ancestor.\n if (\n isinstance(ancestor, TopLevelWorkList)\n and self.library_id == ancestor.library_id\n ):\n return True\n return False\n\n @property\n def depth(self):\n \"\"\"How deep is this lane in this site's hierarchy?\n i.e. how many times do we have to follow .parent before we get None?\n \"\"\"\n return len(list(self.parentage))\n\n @property\n def entrypoints(self):\n \"\"\"Lanes cannot currently have EntryPoints.\"\"\"\n return []\n\n @hybrid_property\n def visible(self):\n return self._visible and (not self.parent or self.parent.visible)\n\n @visible.setter\n def visible(self, value):\n self._visible = value\n\n @property\n def url_name(self):\n \"\"\"Return the name of this lane to be used in URLs.\n\n Since most aspects of the lane can change through administrative\n action, we use the internal database ID of the lane in URLs.\n \"\"\"\n return self.id\n\n @hybrid_property\n def audiences(self):\n return self._audiences or []\n\n @audiences.setter\n def audiences(self, value):\n \"\"\"The `audiences` field cannot be set to a value that\n contradicts the current value to the `target_age` field.\n \"\"\"\n if self._audiences and self._target_age and value != self._audiences:\n raise ValueError(\n \"Cannot modify Lane.audiences when Lane.target_age is set!\"\n )\n if isinstance(value, (bytes, str)):\n value = [value]\n self._audiences = value\n\n @hybrid_property\n def target_age(self):\n return self._target_age\n\n @target_age.setter\n def target_age(self, value):\n \"\"\"Setting .target_age will lock .audiences to appropriate values.\n\n If you set target_age to 16-18, you're saying that the audiences\n are [Young Adult, Adult].\n\n If you set target_age 12-15, you're saying that the audiences are\n [Young Adult, Children].\n\n If you set target age 0-2, you're saying that the audiences are\n [Children].\n\n In no case is the \"Adults Only\" audience allowed, since target\n age only makes sense in lanes intended for minors.\n \"\"\"\n if value is None:\n self._target_age = None\n return\n audiences = []\n if isinstance(value, int):\n value = (value, value)\n if isinstance(value, tuple):\n value = tuple_to_numericrange(value)\n if value.lower >= Classifier.ADULT_AGE_CUTOFF:\n # Adults are adults and there's no point in tracking\n # precise age gradations for them.\n value = tuple_to_numericrange((Classifier.ADULT_AGE_CUTOFF, value.upper))\n if value.upper >= Classifier.ADULT_AGE_CUTOFF:\n value = 
tuple_to_numericrange((value.lower, Classifier.ADULT_AGE_CUTOFF))\n self._target_age = value\n\n if value.upper >= Classifier.ADULT_AGE_CUTOFF:\n audiences.append(Classifier.AUDIENCE_ADULT)\n if value.lower < Classifier.YOUNG_ADULT_AGE_CUTOFF:\n audiences.append(Classifier.AUDIENCE_CHILDREN)\n if value.upper >= Classifier.YOUNG_ADULT_AGE_CUTOFF:\n audiences.append(Classifier.AUDIENCE_YOUNG_ADULT)\n self._audiences = audiences\n\n @hybrid_property\n def list_datasource(self):\n return self._list_datasource\n\n @list_datasource.setter\n def list_datasource(self, value):\n \"\"\"Setting .list_datasource to a non-null value wipes out any specific\n CustomLists previously associated with this Lane.\n \"\"\"\n if value:\n self.customlists = []\n if hasattr(self, \"_customlist_ids\"):\n # The next time someone asks for .customlist_ids,\n # the list will be refreshed.\n del self._customlist_ids\n\n # TODO: It's not clear to me why it's necessary to set these two\n # values separately.\n self._list_datasource = value\n self._list_datasource_id = value.id\n\n @property\n def list_datasource_id(self):\n if self._list_datasource_id:\n return self._list_datasource_id\n return None\n\n @property\n def uses_customlists(self):\n \"\"\"Does the works() implementation for this Lane look for works on\n CustomLists?\n \"\"\"\n if self.customlists or self.list_datasource:\n return True\n if (\n self.parent\n and self.inherit_parent_restrictions\n and self.parent.uses_customlists\n ):\n return True\n return False\n\n def max_cache_age(self, type):\n \"\"\"Determine how long a feed for this WorkList should be cached\n internally.\n\n :param type: The type of feed.\n \"\"\"\n if type == CachedFeed.GROUPS_TYPE:\n # Generating grouped feeds on the fly for Lanes is not incredibly\n # expensive, but it's slow enough that we prefer to regenerate\n # them in the background (using force_refresh=True) rather\n # than while someone is waiting for an HTTP response.\n return CachedFeed.CACHE_FOREVER\n\n # Other than that, we have no opinion -- use the default.\n return super(Lane, self).max_cache_age(type)\n\n def update_size(self, _db, search_engine=None):\n \"\"\"Update the stored estimate of the number of Works in this Lane.\"\"\"\n library = self.get_library(_db)\n from .external_search import ExternalSearchIndex\n\n search_engine = search_engine or ExternalSearchIndex.load(_db)\n\n # Do the estimate for every known entry point.\n by_entrypoint = dict()\n for entrypoint in EntryPoint.ENTRY_POINTS:\n facets = DatabaseBackedFacets(\n library,\n FacetConstants.COLLECTION_FULL,\n FacetConstants.AVAILABLE_ALL,\n order=FacetConstants.ORDER_WORK_ID,\n entrypoint=entrypoint,\n )\n filter = self.filter(_db, facets)\n by_entrypoint[entrypoint.URI] = search_engine.count_works(filter)\n self.size_by_entrypoint = by_entrypoint\n self.size = by_entrypoint[EverythingEntryPoint.URI]\n\n @property\n def genre_ids(self):\n \"\"\"Find the database ID of every Genre such that a Work classified in\n that Genre should be in this Lane.\n\n :return: A list of genre IDs, or None if this Lane does not\n consider genres at all.\n \"\"\"\n if not hasattr(self, \"_genre_ids\"):\n self._genre_ids = self._gather_genre_ids()\n return self._genre_ids\n\n def _gather_genre_ids(self):\n \"\"\"Method that does the work of `genre_ids`.\"\"\"\n if not self.lane_genres:\n return None\n\n included_ids = set()\n excluded_ids = set()\n for lanegenre in self.lane_genres:\n genre = lanegenre.genre\n if lanegenre.inclusive:\n bucket = included_ids\n else:\n bucket 
= excluded_ids\n if (\n self.fiction != None\n and genre.default_fiction != None\n and self.fiction != genre.default_fiction\n ):\n logging.error(\n \"Lane %s has a genre %s that does not match its fiction restriction.\",\n self.full_identifier,\n genre.name,\n )\n bucket.add(genre.id)\n if lanegenre.recursive:\n for subgenre in genre.subgenres:\n bucket.add(subgenre.id)\n if not included_ids:\n # No genres have been explicitly included, so this lane\n # includes all genres that aren't excluded.\n _db = Session.object_session(self)\n included_ids = set([genre.id for genre in _db.query(Genre)])\n genre_ids = included_ids - excluded_ids\n if not genre_ids:\n # This can happen if you create a lane where 'Epic\n # Fantasy' is included but 'Fantasy' and its subgenres are\n # excluded.\n logging.error(\n \"Lane %s has a self-negating set of genre IDs.\", self.full_identifier\n )\n return genre_ids\n\n @property\n def customlist_ids(self):\n \"\"\"Find the database ID of every CustomList such that a Work filed\n in that List should be in this Lane.\n\n :return: A list of CustomList IDs, possibly empty.\n \"\"\"\n if not hasattr(self, \"_customlist_ids\"):\n self._customlist_ids = self._gather_customlist_ids()\n return self._customlist_ids\n\n def _gather_customlist_ids(self):\n \"\"\"Method that does the work of `customlist_ids`.\"\"\"\n if self.list_datasource:\n # Find the ID of every CustomList from a certain\n # DataSource.\n _db = Session.object_session(self)\n query = select(\n [CustomList.id], CustomList.data_source_id == self.list_datasource.id\n )\n ids = [x[0] for x in _db.execute(query)]\n else:\n # Find the IDs of some specific CustomLists.\n ids = [x.id for x in self.customlists]\n if len(ids) == 0:\n if self.list_datasource:\n # We are restricted to all lists from a given data\n # source, and there are no such lists, so we want to\n # exclude everything.\n return []\n else:\n # There is no custom list restriction at all.\n return None\n return ids\n\n @classmethod\n def affected_by_customlist(cls, customlist):\n \"\"\"Find all Lanes whose membership is partially derived\n from the membership of the given CustomList.\n \"\"\"\n _db = Session.object_session(customlist)\n\n # Either the data source must match, or there must be a specific link\n # between the Lane and the CustomList.\n data_source_matches = Lane._list_datasource_id == customlist.data_source_id\n specific_link = CustomList.id == customlist.id\n\n return (\n _db.query(Lane)\n .outerjoin(Lane.customlists)\n .filter(or_(data_source_matches, specific_link))\n )\n\n def add_genre(self, genre, inclusive=True, recursive=True):\n \"\"\"Create a new LaneGenre for the given genre and\n associate it with this Lane.\n\n Mainly used in tests.\n \"\"\"\n _db = Session.object_session(self)\n if isinstance(genre, (bytes, str)):\n genre, ignore = Genre.lookup(_db, genre)\n lanegenre, is_new = get_one_or_create(_db, LaneGenre, lane=self, genre=genre)\n lanegenre.inclusive = inclusive\n lanegenre.recursive = recursive\n self._genre_ids = self._gather_genre_ids()\n return lanegenre, is_new\n\n @property\n def search_target(self):\n \"\"\"Obtain the WorkList that should be searched when someone\n initiates a search from this Lane.\"\"\"\n\n # See if this Lane is the root lane for a patron type, or has an\n # ancestor that's the root lane for a patron type. 
If so, search\n # that Lane.\n if self.root_for_patron_type:\n return self\n\n for parent in self.parentage:\n if parent.root_for_patron_type:\n return parent\n\n # Otherwise, we want to use the lane's languages, media, and\n # juvenile audiences in search.\n languages = self.languages\n media = self.media\n audiences = None\n if (\n Classifier.AUDIENCE_YOUNG_ADULT in self.audiences\n or Classifier.AUDIENCE_CHILDREN in self.audiences\n ):\n audiences = self.audiences\n\n # If there are too many languages or audiences, the description\n # could get too long to be useful, so we'll leave them out.\n # Media isn't part of the description yet.\n\n display_name_parts = []\n if languages and len(languages) <= 2:\n display_name_parts.append(LanguageCodes.name_for_languageset(languages))\n\n if audiences:\n if len(audiences) <= 2:\n display_name_parts.append(\" and \".join(audiences))\n\n display_name = \" \".join(display_name_parts)\n\n wl = WorkList()\n wl.initialize(\n self.library,\n display_name=display_name,\n languages=languages,\n media=media,\n audiences=audiences,\n )\n return wl\n\n def _size_for_facets(self, facets):\n \"\"\"How big is this lane under the given `Facets` object?\n\n :param facets: A Facets object.\n :return: An int.\n \"\"\"\n # Default to the total size of the lane.\n size = self.size\n\n entrypoint_name = EverythingEntryPoint.URI\n if facets and facets.entrypoint:\n entrypoint_name = facets.entrypoint.URI\n\n if self.size_by_entrypoint and entrypoint_name in self.size_by_entrypoint:\n size = self.size_by_entrypoint[entrypoint_name]\n return size\n\n def groups(\n self,\n _db,\n include_sublanes=True,\n pagination=None,\n facets=None,\n search_engine=None,\n debug=False,\n ):\n \"\"\"Return a list of (Work, Lane) 2-tuples\n describing a sequence of featured items for this lane and\n (optionally) its children.\n\n :param pagination: A Pagination object which may affect how many\n works each child of this WorkList may contribute.\n :param facets: A FeaturedFacets object.\n \"\"\"\n clauses = []\n library = self.get_library(_db)\n target_size = library.featured_lane_size\n\n if self.include_self_in_grouped_feed:\n relevant_lanes = [self]\n else:\n relevant_lanes = []\n if include_sublanes:\n # The child lanes go first.\n relevant_lanes = list(self.visible_children) + relevant_lanes\n\n # We can use a single query to build the featured feeds for\n # this lane, as well as any of its sublanes that inherit this\n # lane's restrictions. 
Lanes that don't inherit this lane's\n # restrictions will need to be handled in a separate call to\n # groups().\n queryable_lanes = [\n x for x in relevant_lanes if x == self or x.inherit_parent_restrictions\n ]\n return self._groups_for_lanes(\n _db,\n relevant_lanes,\n queryable_lanes,\n pagination=pagination,\n facets=facets,\n search_engine=search_engine,\n debug=debug,\n )\n\n def search(self, _db, query_string, search_client, pagination=None, facets=None):\n \"\"\"Find works in this lane that also match a search query.\n\n :param _db: A database connection.\n :param query_string: Search for this string.\n :param search_client: An ExternalSearchIndex object.\n :param pagination: A Pagination object.\n :param facets: A faceting object, probably a SearchFacets.\n \"\"\"\n search_target = self.search_target\n\n if search_target == self:\n # The actual implementation happens in WorkList.\n m = super(Lane, self).search\n else:\n # Searches in this Lane actually go against some other WorkList.\n # Tell that object to run the search.\n m = search_target.search\n\n return m(_db, query_string, search_client, pagination, facets=facets)\n\n def explain(self):\n \"\"\"Create a series of human-readable strings to explain a lane's settings.\"\"\"\n lines = []\n lines.append(\"ID: %s\" % self.id)\n lines.append(\"Library: %s\" % self.library.short_name)\n if self.parent:\n lines.append(\n \"Parent ID: %s (%s)\" % (self.parent.id, self.parent.display_name)\n )\n lines.append(\"Priority: %s\" % self.priority)\n lines.append(\"Display name: %s\" % self.display_name)\n return lines\n\n\nLibrary.lanes = relationship(\n \"Lane\",\n backref=\"library\",\n foreign_keys=Lane.library_id,\n cascade=\"all, delete-orphan\",\n)\nDataSource.list_lanes = relationship(\n \"Lane\", backref=\"_list_datasource\", foreign_keys=Lane._list_datasource_id\n)\nDataSource.license_lanes = relationship(\n \"Lane\", backref=\"license_datasource\", foreign_keys=Lane.license_datasource_id\n)\n\n\nlanes_customlists = Table(\n \"lanes_customlists\",\n Base.metadata,\n Column(\"lane_id\", Integer, ForeignKey(\"lanes.id\"), index=True, nullable=False),\n Column(\n \"customlist_id\",\n Integer,\n ForeignKey(\"customlists.id\"),\n index=True,\n nullable=False,\n ),\n UniqueConstraint(\"lane_id\", \"customlist_id\"),\n)\n\n\n@event.listens_for(Lane, \"after_insert\")\n@event.listens_for(Lane, \"after_delete\")\n@event.listens_for(LaneGenre, \"after_insert\")\n@event.listens_for(LaneGenre, \"after_delete\")\ndef configuration_relevant_lifecycle_event(mapper, connection, target):\n site_configuration_has_changed(target)\n\n\n@event.listens_for(Lane, \"after_update\")\n@event.listens_for(LaneGenre, \"after_update\")\ndef configuration_relevant_update(mapper, connection, target):\n if directly_modified(target):\n site_configuration_has_changed(target)\n\n\n@event.listens_for(Lane.library_id, \"set\")\n@event.listens_for(Lane.root_for_patron_type, \"set\")\ndef receive_modified(target, value, oldvalue, initiator):\n # Some elements of Lane configuration are stored in the\n # corresponding Library objects for performance reasons.\n\n # Remove this information whenever the Lane configuration\n # changes. 
This will force it to be recalculated.\n Library._has_root_lane_cache.clear()\n", "id": "9016313", "language": "Python", "matching_score": 8.413565635681152, "max_stars_count": 0, "path": "core/lane.py" }, { "content": "# encoding: utf-8\n# Library\nimport logging\nfrom collections import Counter\n\nfrom expiringdict import ExpiringDict\nfrom sqlalchemy import (\n Boolean,\n Column,\n ForeignKey,\n Integer,\n Table,\n Unicode,\n UniqueConstraint,\n)\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.functions import func\n\nfrom ..config import Configuration\nfrom ..entrypoint import EntryPoint\nfrom ..facets import FacetConstants\nfrom . import Base, get_one\nfrom .edition import Edition\nfrom .hassessioncache import HasSessionCache\nfrom .licensing import LicensePool\nfrom .work import Work\n\n\nclass Library(Base, HasSessionCache):\n \"\"\"A library that uses this circulation manager to authenticate\n its patrons and manage access to its content.\n A circulation manager may serve many libraries.\n \"\"\"\n\n __tablename__ = \"libraries\"\n\n id = Column(Integer, primary_key=True)\n\n # The human-readable name of this library. Used in the library's\n # Authentication for OPDS document.\n name = Column(Unicode, unique=True)\n\n # A short name of this library, to use when identifying it in\n # scripts. e.g. \"NYPL\" for NYPL.\n short_name = Column(Unicode, unique=True, nullable=False)\n\n # A UUID that uniquely identifies the library among all libraries\n # in the world. This is used to serve the library's Authentication\n # for OPDS document, and it also goes to the library registry.\n uuid = Column(Unicode, unique=True)\n\n # One, and only one, library may be the default. The default\n # library is the one chosen when an incoming request does not\n # designate a library.\n _is_default = Column(Boolean, index=True, default=False, name=\"is_default\")\n\n # The name of this library to use when signing short client tokens\n # for consumption by the library registry. e.g. 
\"NYNYPL\" for NYPL.\n # This name must be unique across the library registry.\n _library_registry_short_name = Column(\n Unicode, unique=True, name=\"library_registry_short_name\"\n )\n\n # The shared secret to use when signing short client tokens for\n # consumption by the library registry.\n library_registry_shared_secret = Column(Unicode, unique=True)\n\n # A library may have many Patrons.\n patrons = relationship(\"Patron\", backref=\"library\", cascade=\"all, delete-orphan\")\n\n # An Library may have many admin roles.\n adminroles = relationship(\n \"AdminRole\", backref=\"library\", cascade=\"all, delete-orphan\"\n )\n\n # A Library may have many CachedFeeds.\n cachedfeeds = relationship(\n \"CachedFeed\",\n backref=\"library\",\n cascade=\"all, delete-orphan\",\n )\n\n # A Library may have many CachedMARCFiles.\n cachedmarcfiles = relationship(\n \"CachedMARCFile\",\n backref=\"library\",\n cascade=\"all, delete-orphan\",\n )\n\n # A Library may have many CustomLists.\n custom_lists = relationship(\n \"CustomList\",\n backref=\"library\",\n lazy=\"joined\",\n )\n\n # A Library may have many ExternalIntegrations.\n integrations = relationship(\n \"ExternalIntegration\",\n secondary=lambda: externalintegrations_libraries,\n backref=\"libraries\",\n )\n\n # Any additional configuration information is stored as\n # ConfigurationSettings.\n settings = relationship(\n \"ConfigurationSetting\",\n backref=\"library\",\n lazy=\"joined\",\n cascade=\"all, delete\",\n )\n\n # A Library may have many CirculationEvents\n circulation_events = relationship(\n \"CirculationEvent\", backref=\"library\", cascade=\"all, delete-orphan\"\n )\n\n # A class-wide cache mapping library ID to the calculated value\n # used for Library.has_root_lane. This is invalidated whenever\n # Lane configuration changes, and it will also expire on its own.\n _has_root_lane_cache = ExpiringDict(max_len=1000, max_age_seconds=3600)\n\n def __repr__(self):\n return (\n '<Library: name=\"%s\", short name=\"%s\", uuid=\"%s\", library registry short name=\"%s\">'\n % (self.name, self.short_name, self.uuid, self.library_registry_short_name)\n )\n\n def cache_key(self):\n return self.short_name\n\n @classmethod\n def lookup(cls, _db, short_name):\n \"\"\"Look up a library by short name.\"\"\"\n\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library\n\n @classmethod\n def default(cls, _db):\n \"\"\"Find the default Library.\"\"\"\n # If for some reason there are multiple default libraries in\n # the database, they're not actually interchangeable, but\n # raising an error here might make it impossible to fix the\n # problem.\n defaults = (\n _db.query(Library)\n .filter(Library._is_default == True)\n .order_by(Library.id.asc())\n .all()\n )\n if len(defaults) == 1:\n # This is the normal case.\n return defaults[0]\n\n default_library = None\n if not defaults:\n # There is no current default. Find the library with the\n # lowest ID and make it the default.\n libraries = _db.query(Library).order_by(Library.id.asc()).limit(1)\n if not libraries.count():\n # There are no libraries in the system, so no default.\n return None\n [default_library] = libraries\n logging.warning(\n \"No default library, setting %s as default.\"\n % (default_library.short_name)\n )\n else:\n # There is more than one default, probably caused by a\n # race condition. 
Fix it by arbitrarily designating one\n # of the libraries as the default.\n default_library = defaults[0]\n logging.warning(\n \"Multiple default libraries, setting %s as default.\"\n % (default_library.short_name)\n )\n default_library.is_default = True\n return default_library\n\n @hybrid_property\n def library_registry_short_name(self):\n \"\"\"Gets library_registry_short_name from database\"\"\"\n return self._library_registry_short_name\n\n @library_registry_short_name.setter\n def library_registry_short_name(self, value):\n \"\"\"Uppercase the library registry short name on the way in.\"\"\"\n if value:\n value = value.upper()\n if \"|\" in value:\n raise ValueError(\n \"Library registry short name cannot contain the pipe character.\"\n )\n value = str(value)\n self._library_registry_short_name = value\n\n def setting(self, key):\n \"\"\"Find or create a ConfigurationSetting on this Library.\n :param key: Name of the setting.\n :return: A ConfigurationSetting\n \"\"\"\n from .configuration import ConfigurationSetting\n\n return ConfigurationSetting.for_library(key, self)\n\n @property\n def all_collections(self):\n for collection in self.collections:\n yield collection\n for parent in collection.parents:\n yield parent\n\n # Some specific per-library configuration settings.\n\n # The name of the per-library regular expression used to derive a patron's\n # external_type from their authorization_identifier.\n EXTERNAL_TYPE_REGULAR_EXPRESSION = \"external_type_regular_expression\"\n\n # The name of the per-library configuration policy that controls whether\n # books may be put on hold.\n ALLOW_HOLDS = Configuration.ALLOW_HOLDS\n\n # Each facet group has two associated per-library keys: one\n # configuring which facets are enabled for that facet group, and\n # one configuring which facet is the default.\n ENABLED_FACETS_KEY_PREFIX = Configuration.ENABLED_FACETS_KEY_PREFIX\n DEFAULT_FACET_KEY_PREFIX = Configuration.DEFAULT_FACET_KEY_PREFIX\n\n # Each library may set a minimum quality for the books that show\n # up in the 'featured' lanes that show up on the front page.\n MINIMUM_FEATURED_QUALITY = Configuration.MINIMUM_FEATURED_QUALITY\n\n # Each library may configure the maximum number of books in the\n # 'featured' lanes.\n FEATURED_LANE_SIZE = Configuration.FEATURED_LANE_SIZE\n\n @property\n def allow_holds(self):\n \"\"\"Does this library allow patrons to put items on hold?\"\"\"\n value = self.setting(self.ALLOW_HOLDS).bool_value\n if value is None:\n # If the library has not set a value for this setting,\n # holds are allowed.\n value = True\n return value\n\n @property\n def minimum_featured_quality(self):\n \"\"\"The minimum quality a book must have to be 'featured'.\"\"\"\n value = self.setting(self.MINIMUM_FEATURED_QUALITY).float_value\n if value is None:\n value = 0.65\n return value\n\n @property\n def featured_lane_size(self):\n \"\"\"The maximum number of books to show in the 'featured' lanes.\"\"\"\n value = self.setting(self.FEATURED_LANE_SIZE).int_value\n if value is None:\n value = 15\n return value\n\n @property\n def entrypoints(self):\n \"\"\"The EntryPoints enabled for this library.\"\"\"\n values = self.setting(EntryPoint.ENABLED_SETTING).json_value\n if values is None:\n # No decision has been made about enabled EntryPoints.\n for cls in EntryPoint.DEFAULT_ENABLED:\n yield cls\n else:\n # It's okay for `values` to be an empty list--that means\n # the library wants to only use lanes, no entry points.\n for v in values:\n cls = EntryPoint.BY_INTERNAL_NAME.get(v)\n if 
cls:\n yield cls\n\n def enabled_facets(self, group_name):\n \"\"\"Look up the enabled facets for a given facet group.\"\"\"\n setting = self.enabled_facets_setting(group_name)\n value = None\n\n try:\n value = setting.json_value\n except ValueError as e:\n logging.error(\n \"Invalid list of enabled facets for %s: %s\", group_name, setting.value\n )\n if value is None:\n value = list(FacetConstants.DEFAULT_ENABLED_FACETS.get(group_name, []))\n return value\n\n def enabled_facets_setting(self, group_name):\n key = self.ENABLED_FACETS_KEY_PREFIX + group_name\n return self.setting(key)\n\n @property\n def has_root_lanes(self):\n \"\"\"Does this library have any lanes that act as the root\n lane for a certain patron type?\n\n :return: A boolean\n \"\"\"\n\n # NOTE: Although this fact is derived from the Lanes, not the\n # Library, the result is stored in the Library object for\n # performance reasons.\n #\n # This makes it important to clear the cache of Library\n # objects whenever the Lane configuration changes. Otherwise a\n # library that went from not having root lanes, to having them\n # (or vice versa) might not see the change take effect without\n # a server restart.\n value = Library._has_root_lane_cache.get(self.id, None)\n if value is None:\n from ..lane import Lane\n\n _db = Session.object_session(self)\n root_lanes = (\n _db.query(Lane)\n .filter(Lane.library == self)\n .filter(Lane.root_for_patron_type != None)\n )\n value = root_lanes.count() > 0\n Library._has_root_lane_cache[self.id] = value\n return value\n\n def restrict_to_ready_deliverable_works(\n self,\n query,\n collection_ids=None,\n show_suppressed=False,\n ):\n \"\"\"Restrict a query to show only presentation-ready works present in\n an appropriate collection which the default client can\n fulfill.\n Note that this assumes the query has an active join against\n LicensePool.\n :param query: The query to restrict.\n :param collection_ids: Only include titles in the given\n collections.\n :param show_suppressed: Include titles that have nothing but\n suppressed LicensePools.\n \"\"\"\n from .collection import Collection\n\n collection_ids = collection_ids or [x.id for x in self.all_collections]\n return Collection.restrict_to_ready_deliverable_works(\n query,\n collection_ids=collection_ids,\n show_suppressed=show_suppressed,\n allow_holds=self.allow_holds,\n )\n\n def estimated_holdings_by_language(self, include_open_access=True):\n \"\"\"Estimate how many titles this library has in various languages.\n The estimate is pretty good but should not be relied upon as\n exact.\n :return: A Counter mapping languages to the estimated number\n of titles in that language.\n \"\"\"\n _db = Session.object_session(self)\n qu = (\n _db.query(Edition.language, func.count(Work.id).label(\"work_count\"))\n .select_from(Work)\n .join(Work.license_pools)\n .join(Work.presentation_edition)\n .filter(Edition.language != None)\n .group_by(Edition.language)\n )\n qu = self.restrict_to_ready_deliverable_works(qu)\n if not include_open_access:\n qu = qu.filter(LicensePool.open_access == False)\n counter = Counter()\n for language, count in qu:\n counter[language] = count\n return counter\n\n def default_facet(self, group_name):\n \"\"\"Look up the default facet for a given facet group.\"\"\"\n value = self.default_facet_setting(group_name).value\n if not value:\n value = FacetConstants.DEFAULT_FACET.get(group_name)\n return value\n\n def default_facet_setting(self, group_name):\n key = self.DEFAULT_FACET_KEY_PREFIX + group_name\n return 
self.setting(key)\n\n def explain(self, include_secrets=False):\n \"\"\"Create a series of human-readable strings to explain a library's\n settings.\n\n :param include_secrets: For security reasons, secrets are not\n displayed by default.\n :return: A list of explanatory strings.\n \"\"\"\n lines = []\n if self.uuid:\n lines.append('Library UUID: \"%s\"' % self.uuid)\n if self.name:\n lines.append('Name: \"%s\"' % self.name)\n if self.short_name:\n lines.append('Short name: \"%s\"' % self.short_name)\n\n if self.library_registry_short_name:\n lines.append(\n 'Short name (for library registry): \"%s\"'\n % self.library_registry_short_name\n )\n if self.library_registry_shared_secret and include_secrets:\n lines.append(\n 'Shared secret (for library registry): \"%s\"'\n % self.library_registry_shared_secret\n )\n\n # Find all ConfigurationSettings that are set on the library\n # itself and are not on the library + an external integration.\n settings = [x for x in self.settings if not x.external_integration]\n if settings:\n lines.append(\"\")\n lines.append(\"Configuration settings:\")\n lines.append(\"-----------------------\")\n for setting in settings:\n if (include_secrets or not setting.is_secret) and setting.value is not None:\n lines.append(\"%s='%s'\" % (setting.key, setting.value))\n\n integrations = list(self.integrations)\n if integrations:\n lines.append(\"\")\n lines.append(\"External integrations:\")\n lines.append(\"----------------------\")\n for integration in integrations:\n lines.extend(integration.explain(self, include_secrets=include_secrets))\n lines.append(\"\")\n return lines\n\n @property\n def is_default(self):\n return self._is_default\n\n @is_default.setter\n def is_default(self, new_is_default):\n \"\"\"Set this library, and only this library, as the default.\"\"\"\n if self._is_default and not new_is_default:\n raise ValueError(\n \"You cannot stop a library from being the default library; you must designate a different library as the default.\"\n )\n\n _db = Session.object_session(self)\n for library in _db.query(Library):\n if library == self:\n library._is_default = True\n else:\n library._is_default = False\n\n\nexternalintegrations_libraries = Table(\n \"externalintegrations_libraries\",\n Base.metadata,\n Column(\n \"externalintegration_id\",\n Integer,\n ForeignKey(\"externalintegrations.id\"),\n index=True,\n nullable=False,\n ),\n Column(\n \"library_id\", Integer, ForeignKey(\"libraries.id\"), index=True, nullable=False\n ),\n UniqueConstraint(\"externalintegration_id\", \"library_id\"),\n)\n", "id": "7924939", "language": "Python", "matching_score": 3.8303797245025635, "max_stars_count": 0, "path": "core/model/library.py" }, { "content": "# encoding: utf-8\n# CachedFeed, WillNotGenerateExpensiveFeed\n\nimport datetime\nimport logging\nfrom collections import namedtuple\n\nfrom sqlalchemy import Column, DateTime, ForeignKey, Index, Integer, Unicode\nfrom sqlalchemy.sql.expression import and_\n\nfrom ..util.datetime_helpers import utc_now\nfrom ..util.flask_util import OPDSFeedResponse\nfrom . import Base, flush, get_one, get_one_or_create\n\n\nclass CachedFeed(Base):\n\n __tablename__ = \"cachedfeeds\"\n id = Column(Integer, primary_key=True)\n\n # Every feed is associated with a lane. If null, this is a feed\n # for a WorkList. 
If work_id is also null, it's a feed for the\n # top-level.\n lane_id = Column(Integer, ForeignKey(\"lanes.id\"), nullable=True, index=True)\n\n # Every feed has a timestamp reflecting when it was created.\n timestamp = Column(DateTime(timezone=True), nullable=True, index=True)\n\n # A feed is of a certain type--such as 'page' or 'groups'.\n type = Column(Unicode, nullable=False)\n\n # A feed associated with a WorkList can have a unique key.\n # This should be null if the feed is associated with a Lane.\n unique_key = Column(Unicode, nullable=True)\n\n # A 'page' feed is associated with a set of values for the facet\n # groups.\n facets = Column(Unicode, nullable=True)\n\n # A 'page' feed is associated with a set of values for pagination.\n pagination = Column(Unicode, nullable=False)\n\n # The content of the feed.\n content = Column(Unicode, nullable=True)\n\n # Every feed is associated with a Library.\n library_id = Column(Integer, ForeignKey(\"libraries.id\"), index=True)\n\n # A feed may be associated with a Work.\n work_id = Column(Integer, ForeignKey(\"works.id\"), nullable=True, index=True)\n\n # Distinct types of feeds that might be cached.\n GROUPS_TYPE = \"groups\"\n PAGE_TYPE = \"page\"\n NAVIGATION_TYPE = \"navigation\"\n CRAWLABLE_TYPE = \"crawlable\"\n RELATED_TYPE = \"related\"\n RECOMMENDATIONS_TYPE = \"recommendations\"\n SERIES_TYPE = \"series\"\n CONTRIBUTOR_TYPE = \"contributor\"\n\n # Special constants for cache durations.\n CACHE_FOREVER = object()\n IGNORE_CACHE = object()\n\n log = logging.getLogger(\"CachedFeed\")\n\n @classmethod\n def fetch(\n cls,\n _db,\n worklist,\n facets,\n pagination,\n refresher_method,\n max_age=None,\n raw=False,\n **response_kwargs\n ):\n \"\"\"Retrieve a cached feed from the database if possible.\n\n Generate it from scratch and store it in the database if\n necessary.\n\n Return it in the most useful form to the caller.\n\n :param _db: A database connection.\n :param worklist: The WorkList associated with this feed.\n :param facets: A Facets object that distinguishes this feed from\n others (for instance, by its sort order).\n :param pagination: A Pagination object that explains which\n page of a larger feed is being cached.\n :param refresher_method: A function to call if it turns out\n the contents of the feed need to be regenerated. This\n function must take no arguments and return an object that\n implements __unicode__. (A Unicode string or an OPDSFeed is fine.)\n :param max_age: If a cached feed is older than this, it will\n be considered stale and regenerated. This may be either a\n number of seconds or a timedelta. If no value is\n specified, a default value will be calculated based on\n WorkList and Facets configuration. Setting this value to\n zero will force a refresh.\n :param raw: If this is False (the default), a Response ready to be\n converted into a Flask Response object will be returned. If this\n is True, the CachedFeed object itself will be returned. 
In most\n non-test situations the default is better.\n\n :return: A Response or CachedFeed containing up-to-date content.\n \"\"\"\n\n # Gather the information necessary to uniquely identify this\n # page of this feed.\n keys = cls._prepare_keys(_db, worklist, facets, pagination)\n\n # Calculate the maximum cache age, converting from timedelta\n # to seconds if necessary.\n max_age = cls.max_cache_age(worklist, keys.feed_type, facets, max_age)\n\n # These arguments will probably be passed into get_one, and\n # will be passed into get_one_or_create in the event of a cache\n # miss.\n\n # TODO: this constraint_clause might not be necessary anymore.\n # ISTR it was an attempt to avoid race conditions, and we do a\n # better job of that now.\n constraint_clause = and_(cls.content != None, cls.timestamp != None)\n kwargs = dict(\n on_multiple=\"interchangeable\",\n constraint=constraint_clause,\n type=keys.feed_type,\n library=keys.library,\n work=keys.work,\n lane_id=keys.lane_id,\n unique_key=keys.unique_key,\n facets=keys.facets_key,\n pagination=keys.pagination_key,\n )\n feed_data = None\n if max_age is cls.IGNORE_CACHE or isinstance(max_age, int) and max_age <= 0:\n # Don't even bother checking for a CachedFeed: we're\n # just going to replace it.\n feed_obj = None\n else:\n feed_obj = get_one(_db, cls, **kwargs)\n\n should_refresh = cls._should_refresh(feed_obj, max_age)\n if should_refresh:\n # This is a cache miss. Either feed_obj is None or\n # it's no good. We need to generate a new feed.\n feed_data = str(refresher_method())\n generation_time = utc_now()\n\n if max_age is not cls.IGNORE_CACHE:\n # Having gone through all the trouble of generating\n # the feed, we want to cache it in the database.\n\n # Since it can take a while to generate a feed, and we know\n # that the feed in the database is stale, it's possible that\n # another thread _also_ noticed that feed was stale, and\n # generated a similar feed while we were working.\n #\n # To avoid a database error, fetch the feed _again_ from the\n # database rather than assuming we have the up-to-date\n # object.\n feed_obj, is_new = get_one_or_create(_db, cls, **kwargs)\n if feed_obj.timestamp is None or feed_obj.timestamp < generation_time:\n # Either there was no contention for this object, or there\n # was contention but our feed is more up-to-date than\n # the other thread(s). Our feed takes priority.\n feed_obj.content = feed_data\n feed_obj.timestamp = generation_time\n elif feed_obj:\n feed_data = feed_obj.content\n\n if raw and feed_obj:\n return feed_obj\n\n # We have the information necessary to create a useful\n # response-type object.\n #\n # Set some defaults in case the caller didn't pass them in.\n if isinstance(max_age, int):\n response_kwargs.setdefault(\"max_age\", max_age)\n\n if max_age == cls.IGNORE_CACHE:\n # If we were asked to ignore our internal cache, we should\n # also tell the client not to store this document in _its_\n # internal cache.\n response_kwargs[\"max_age\"] = 0\n\n if keys.library and keys.library.has_root_lanes:\n # If this feed is associated with a Library that guides\n # patrons to different lanes based on their patron type,\n # all CachedFeeds need to be treated as private (but\n # cacheable) on the client side. 
Otherwise, a change of\n # client credentials might cause a cached representation\n # to be reused when it should have been discarded.\n #\n # TODO: it might be possible to make this decision in a\n # more fine-grained way, which would allow intermediaries\n # to cache these feeds.\n response_kwargs[\"private\"] = True\n\n return OPDSFeedResponse(response=feed_data, **response_kwargs)\n\n @classmethod\n def feed_type(cls, worklist, facets):\n \"\"\"Determine the 'type' of the feed.\n\n This may be defined either by `worklist` or by `facets`, with\n `facets` taking priority.\n\n :return: A string that can go into cachedfeeds.type.\n \"\"\"\n type = CachedFeed.PAGE_TYPE\n if worklist:\n type = worklist.CACHED_FEED_TYPE or type\n if facets:\n type = facets.CACHED_FEED_TYPE or type\n return type\n\n @classmethod\n def max_cache_age(cls, worklist, type, facets, override=None):\n \"\"\"Determine the number of seconds that a cached feed\n of a given type can remain fresh.\n\n Order of precedence: `override`, `facets`, `worklist`.\n\n :param worklist: A WorkList which may have an opinion on this\n topic.\n :param type: The type of feed being generated.\n :param facets: A faceting object that may have an opinion on this\n topic.\n :param override: A specific value passed in by the caller. This\n may either be a number of seconds or a timedelta.\n\n :return: A number of seconds, or CACHE_FOREVER or IGNORE_CACHE\n \"\"\"\n value = override\n if value is None and facets is not None:\n value = facets.max_cache_age\n if value is None and worklist is not None:\n value = worklist.max_cache_age(type)\n\n if value in (cls.CACHE_FOREVER, cls.IGNORE_CACHE):\n # Special caching rules apply.\n return value\n\n if value is None:\n # Assume the feed should not be cached at all.\n value = 0\n\n if isinstance(value, datetime.timedelta):\n value = value.total_seconds()\n return value\n\n @classmethod\n def _should_refresh(cls, feed_obj, max_age):\n \"\"\"Should we try to get a new representation of this CachedFeed?\n\n :param feed_obj: A CachedFeed. 
This may be None, which is why\n this is a class method.\n\n :param max_age: Either a number of seconds, or one of the constants\n CACHE_FOREVER or IGNORE_CACHE.\n \"\"\"\n should_refresh = False\n if feed_obj is None:\n # If we didn't find a CachedFeed (maybe because we didn't\n # bother looking), we must always refresh.\n should_refresh = True\n elif max_age == cls.IGNORE_CACHE:\n # If we are ignoring the cache, we must always refresh.\n should_refresh = True\n elif max_age == cls.CACHE_FOREVER:\n # If we found *anything*, and the cache time is CACHE_FOREVER,\n # we will never refresh.\n should_refresh = False\n elif (\n feed_obj.timestamp\n and feed_obj.timestamp + datetime.timedelta(seconds=max_age) <= utc_now()\n ):\n # Here it comes down to a date comparison: how old is the\n # CachedFeed?\n should_refresh = True\n return should_refresh\n\n # This named tuple makes it easy to manage the return value of\n # _prepare_keys.\n CachedFeedKeys = namedtuple(\n \"CachedFeedKeys\",\n [\n \"feed_type\",\n \"library\",\n \"work\",\n \"lane_id\",\n \"unique_key\",\n \"facets_key\",\n \"pagination_key\",\n ],\n )\n\n @classmethod\n def _prepare_keys(cls, _db, worklist, facets, pagination):\n \"\"\"Prepare various unique keys that will go into the database\n and be used to distinguish CachedFeeds from one another.\n\n This is kept in a helper method for ease of testing.\n\n :param worklist: A WorkList.\n :param facets: A Facets object.\n :param pagination: A Pagination object.\n\n :return: A CachedFeedKeys object.\n \"\"\"\n if not worklist:\n raise ValueError(\"Cannot prepare a CachedFeed without a WorkList.\")\n\n feed_type = cls.feed_type(worklist, facets)\n\n # The Library is the one associated with `worklist`.\n library = worklist.get_library(_db)\n\n # A feed may be associated with a specific Work,\n # e.g. 
recommendations for readers of that Work.\n work = getattr(worklist, \"work\", None)\n\n # Either lane_id or unique_key must be set, but not both.\n from ..lane import Lane\n\n if isinstance(worklist, Lane):\n lane_id = worklist.id\n unique_key = None\n else:\n lane_id = None\n unique_key = worklist.unique_key\n\n facets_key = \"\"\n if facets is not None:\n if isinstance(facets.query_string, bytes):\n facets_key = facets.query_string.decode(\"utf-8\")\n else:\n facets_key = facets.query_string\n\n pagination_key = \"\"\n if pagination is not None:\n if isinstance(pagination.query_string, bytes):\n pagination_key = pagination.query_string.decode(\"utf-8\")\n else:\n pagination_key = pagination.query_string\n\n return cls.CachedFeedKeys(\n feed_type=feed_type,\n library=library,\n work=work,\n lane_id=lane_id,\n unique_key=unique_key,\n facets_key=facets_key,\n pagination_key=pagination_key,\n )\n\n def update(self, _db, content):\n self.content = content\n self.timestamp = utc_now()\n flush(_db)\n\n def __repr__(self):\n if self.content:\n length = len(self.content)\n else:\n length = \"No content\"\n return \"<CachedFeed #%s %s %s %s %s %s %s >\" % (\n self.id,\n self.lane_id,\n self.type,\n self.facets,\n self.pagination,\n self.timestamp,\n length,\n )\n\n\nIndex(\n \"ix_cachedfeeds_library_id_lane_id_type_facets_pagination\",\n CachedFeed.library_id,\n CachedFeed.lane_id,\n CachedFeed.type,\n CachedFeed.facets,\n CachedFeed.pagination,\n)\n\n\nclass WillNotGenerateExpensiveFeed(Exception):\n \"\"\"This exception is raised when a feed is not cached, but it's too\n expensive to generate.\n \"\"\"\n\n\nclass CachedMARCFile(Base):\n \"\"\"A record that a MARC file has been created and cached for a particular lane.\"\"\"\n\n __tablename__ = \"cachedmarcfiles\"\n id = Column(Integer, primary_key=True)\n\n # Every MARC file is associated with a library and a lane. If the\n # lane is null, the file is for the top-level WorkList.\n library_id = Column(Integer, ForeignKey(\"libraries.id\"), nullable=False, index=True)\n\n lane_id = Column(Integer, ForeignKey(\"lanes.id\"), nullable=True, index=True)\n\n # The representation for this file stores the URL where it was mirrored.\n representation_id = Column(\n Integer, ForeignKey(\"representations.id\"), nullable=False\n )\n\n start_time = Column(DateTime(timezone=True), nullable=True, index=True)\n end_time = Column(DateTime(timezone=True), nullable=True, index=True)\n", "id": "6443628", "language": "Python", "matching_score": 1.8277546167373657, "max_stars_count": 0, "path": "core/model/cachedfeed.py" }, { "content": "# encoding: utf-8\nimport json\n\nimport pytest\n\nfrom api.authenticator import BasicAuthenticationProvider\nfrom api.circulation import CirculationAPI\nfrom api.circulation_exceptions import *\nfrom api.odilo import (\n MockOdiloAPI,\n OdiloAPI,\n OdiloBibliographicCoverageProvider,\n OdiloCirculationMonitor,\n OdiloRepresentationExtractor,\n)\nfrom core.metadata_layer import TimestampData\nfrom core.model import (\n Classification,\n Contributor,\n DataSource,\n DeliveryMechanism,\n Edition,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n Representation,\n)\nfrom core.testing import DatabaseTest, MockRequestsResponse\nfrom core.util.datetime_helpers import datetime_utc, utc_now\nfrom core.util.http import BadResponseException\n\nfrom . 
import sample_data\n\n\nclass OdiloAPITest(DatabaseTest):\n PIN = \"c4ca4238a0b923820dcc509a6f75849b\"\n RECORD_ID = \"00010982\"\n\n def setup_method(self):\n super(OdiloAPITest, self).setup_method()\n library = self._default_library\n self.patron = self._patron()\n self.patron.authorization_identifier = \"0001000265\"\n self.collection = MockOdiloAPI.mock_collection(self._db)\n self.circulation = CirculationAPI(\n self._db, library, api_map={ExternalIntegration.ODILO: MockOdiloAPI}\n )\n self.api = self.circulation.api_for_collection[self.collection.id]\n\n self.edition, self.licensepool = self._edition(\n data_source_name=DataSource.ODILO,\n identifier_type=Identifier.ODILO_ID,\n collection=self.collection,\n identifier_id=self.RECORD_ID,\n with_license_pool=True,\n )\n\n @classmethod\n def sample_data(cls, filename):\n return sample_data(filename, \"odilo\")\n\n @classmethod\n def sample_json(cls, filename):\n data = cls.sample_data(filename)\n return data, json.loads(data)\n\n def error_message(self, error_code, message=None, token=None):\n \"\"\"Create a JSON document that simulates the message served by\n Odilo given a certain error condition.\n \"\"\"\n message = message or self._str\n token = token or self._str\n data = dict(errorCode=error_code, message=message, token=token)\n return json.dumps(data)\n\n\nclass TestOdiloAPI(OdiloAPITest):\n def test_token_post_success(self):\n self.api.queue_response(200, content=\"some content\")\n response = self.api.token_post(self._url, \"the payload\")\n assert 200 == response.status_code, (\n \"Status code != 200 --> %i\" % response.status_code\n )\n assert self.api.access_token_response.content == response.content\n self.api.log.info(\"Test token post success ok!\")\n\n def test_get_success(self):\n self.api.queue_response(200, content=\"some content\")\n status_code, headers, content = self.api.get(self._url, {})\n assert 200 == status_code\n assert b\"some content\" == content\n self.api.log.info(\"Test get success ok!\")\n\n def test_401_on_get_refreshes_bearer_token(self):\n assert \"bearer token\" == self.api.token\n\n # We try to GET and receive a 401.\n self.api.queue_response(401)\n\n # We refresh the bearer token. 
(This happens in\n # MockOdiloAPI.token_post, so we don't mock the response\n # in the normal way.)\n self.api.access_token_response = self.api.mock_access_token_response(\n \"new bearer token\"\n )\n\n # Then we retry the GET and it succeeds this time.\n self.api.queue_response(200, content=\"at last, the content\")\n status_code, headers, content = self.api.get(self._url, {})\n\n assert 200 == status_code\n assert b\"at last, the content\" == content\n\n # The bearer token has been updated.\n assert \"new bearer token\" == self.api.token\n\n self.api.log.info(\"Test 401 on get refreshes bearer token ok!\")\n\n def test_credential_refresh_success(self):\n \"\"\"Verify the process of refreshing the Odilo bearer token.\"\"\"\n credential = self.api.credential_object(lambda x: x)\n assert \"bearer token\" == credential.credential\n assert self.api.token == credential.credential\n\n self.api.access_token_response = self.api.mock_access_token_response(\n \"new bearer token\"\n )\n self.api.refresh_creds(credential)\n assert \"new bearer token\" == credential.credential\n assert self.api.token == credential.credential\n\n # By default, the access token's 'expiresIn' value is -1,\n # indicating that the token will never expire.\n #\n # To reflect this fact, credential.expires is set to None.\n assert None == credential.expires\n\n # But a token may specify a specific expiration time,\n # which is used to set a future value for credential.expires.\n self.api.access_token_response = self.api.mock_access_token_response(\n \"new bearer token 2\", 1000\n )\n self.api.refresh_creds(credential)\n assert \"new bearer token 2\" == credential.credential\n assert self.api.token == credential.credential\n assert credential.expires > utc_now()\n\n def test_credential_refresh_failure(self):\n \"\"\"Verify that a useful error message results when the Odilo bearer\n token cannot be refreshed, since this is the most likely point\n of failure on a new setup.\n \"\"\"\n self.api.access_token_response = MockRequestsResponse(\n 200, {\"Content-Type\": \"text/html\"}, \"Hi, this is the website, not the API.\"\n )\n credential = self.api.credential_object(lambda x: x)\n with pytest.raises(BadResponseException) as excinfo:\n self.api.refresh_creds(credential)\n assert \"Bad response from \" in str(excinfo.value)\n assert (\n \"may not be the right base URL. 
Response document was: 'Hi, this is the website, not the API.'\"\n in str(excinfo.value)\n )\n\n # Also test a 400 response code.\n self.api.access_token_response = MockRequestsResponse(\n 400,\n {\"Content-Type\": \"application/json\"},\n json.dumps(dict(errors=[dict(description=\"Oops\")])),\n )\n with pytest.raises(BadResponseException) as excinfo:\n self.api.refresh_creds(credential)\n assert \"Bad response from\" in str(excinfo.value)\n assert \"Oops\" in str(excinfo.value)\n\n # If there's a 400 response but no error information,\n # the generic error message is used.\n self.api.access_token_response = MockRequestsResponse(\n 400, {\"Content-Type\": \"application/json\"}, json.dumps(dict())\n )\n with pytest.raises(BadResponseException) as excinfo:\n self.api.refresh_creds(credential)\n assert \"Bad response from\" in str(excinfo.value)\n assert \"may not be the right base URL.\" in str(excinfo.value)\n\n def test_401_after_token_refresh_raises_error(self):\n assert \"bearer token\" == self.api.token\n\n # We try to GET and receive a 401.\n self.api.queue_response(401)\n\n # We refresh the bearer token.\n self.api.access_token_response = self.api.mock_access_token_response(\n \"new bearer token\"\n )\n\n # Then we retry the GET but we get another 401.\n self.api.queue_response(401)\n\n # That raises a BadResponseException\n with pytest.raises(BadResponseException) as excinfo:\n self.api.get(self._url, {})\n assert \"Something's wrong with the Odilo OAuth Bearer Token!\" in str(\n excinfo.value\n )\n\n # The bearer token has been updated.\n assert \"new bearer token\" == self.api.token\n\n def test_external_integration(self):\n assert self.collection.external_integration == self.api.external_integration(\n self._db\n )\n\n def test__run_self_tests(self):\n \"\"\"Verify that OdiloAPI._run_self_tests() calls the right\n methods.\n \"\"\"\n\n class Mock(MockOdiloAPI):\n \"Mock every method used by OdiloAPI._run_self_tests.\"\n\n def __init__(self, _db, collection):\n \"\"\"Stop the default constructor from running.\"\"\"\n self._db = _db\n self.collection_id = collection.id\n\n # First we will call check_creds() to get a fresh credential.\n mock_credential = object()\n\n def check_creds(self, force_refresh=False):\n self.check_creds_called_with = force_refresh\n return self.mock_credential\n\n # Finally, for every library associated with this\n # collection, we'll call get_patron_checkouts() using\n # the credentials of that library's test patron.\n mock_patron_checkouts = object()\n get_patron_checkouts_called_with = []\n\n def get_patron_checkouts(self, patron, pin):\n self.get_patron_checkouts_called_with.append((patron, pin))\n return self.mock_patron_checkouts\n\n # Now let's make sure two Libraries have access to this\n # Collection -- one library with a default patron and one\n # without.\n no_default_patron = self._library(name=\"no patron\")\n self.collection.libraries.append(no_default_patron)\n\n with_default_patron = self._default_library\n integration = self._external_integration(\n \"api.simple_authentication\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[with_default_patron],\n )\n p = BasicAuthenticationProvider\n integration.setting(p.TEST_IDENTIFIER).value = \"username1\"\n integration.setting(p.TEST_PASSWORD).value = \"<PASSWORD>\"\n\n # Now that everything is set up, run the self-test.\n api = Mock(self._db, self.collection)\n results = sorted(api._run_self_tests(self._db), key=lambda x: x.name)\n loans_failure, sitewide, loans_success = results\n\n # Make 
sure all three tests were run and got the expected result.\n #\n\n # We got a sitewide access token.\n assert \"Obtaining a sitewide access token\" == sitewide.name\n assert True == sitewide.success\n assert api.mock_credential == sitewide.result\n assert True == api.check_creds_called_with\n\n # We got the default patron's checkouts for the library that had\n # a default patron configured.\n assert (\n \"Viewing the active loans for the test patron for library %s\"\n % with_default_patron.name\n == loans_success.name\n )\n assert True == loans_success.success\n # get_patron_checkouts was only called once.\n [(patron, pin)] = api.get_patron_checkouts_called_with\n assert \"username1\" == patron.authorization_identifier\n assert \"<PASSWORD>\" == pin\n assert api.mock_patron_checkouts == loans_success.result\n\n # We couldn't get a patron access token for the other library.\n assert (\n \"Acquiring test patron credentials for library %s\" % no_default_patron.name\n == loans_failure.name\n )\n assert False == loans_failure.success\n assert \"Library has no test patron configured.\" == str(loans_failure.exception)\n\n def test_run_self_tests_short_circuit(self):\n \"\"\"If OdiloAPI.check_creds can't get credentials, the rest of\n the self-tests aren't even run.\n\n This probably doesn't matter much, because if check_creds doesn't\n work we won't be able to instantiate the OdiloAPI class.\n \"\"\"\n\n def explode(*args, **kwargs):\n raise Exception(\"Failure!\")\n\n self.api.check_creds = explode\n\n # Only one test will be run.\n [check_creds] = self.api._run_self_tests(self._db)\n assert \"Failure!\" == str(check_creds.exception)\n\n\nclass TestOdiloCirculationAPI(OdiloAPITest):\n #################\n # General tests\n #################\n\n # Test 404 Not Found --> patron not found --> 'patronNotFound'\n def test_01_patron_not_found(self):\n patron_not_found_data, patron_not_found_json = self.sample_json(\n \"error_patron_not_found.json\"\n )\n self.api.queue_response(404, content=patron_not_found_json)\n\n patron = self._patron()\n patron.authorization_identifier = \"no such patron\"\n pytest.raises(\n PatronNotFoundOnRemote,\n self.api.checkout,\n patron,\n self.PIN,\n self.licensepool,\n \"ACSM_EPUB\",\n )\n self.api.log.info(\"Test patron not found ok!\")\n\n # Test 404 Not Found --> record not found --> 'ERROR_DATA_NOT_FOUND'\n def test_02_data_not_found(self):\n data_not_found_data, data_not_found_json = self.sample_json(\n \"error_data_not_found.json\"\n )\n self.api.queue_response(404, content=data_not_found_json)\n\n self.licensepool.identifier.identifier = \"12345678\"\n pytest.raises(\n NotFoundOnRemote,\n self.api.checkout,\n self.patron,\n self.PIN,\n self.licensepool,\n \"ACSM_EPUB\",\n )\n self.api.log.info(\"Test resource not found on remote ok!\")\n\n def test_make_absolute_url(self):\n\n # A relative URL is made absolute using the API's base URL.\n relative = \"/relative-url\"\n absolute = self.api._make_absolute_url(relative)\n assert absolute == self.api.library_api_base_url.decode(\"utf-8\") + relative\n\n # An absolute URL is not modified.\n for protocol in (\"http\", \"https\"):\n already_absolute = \"%s://example.com/\" % protocol\n assert already_absolute == self.api._make_absolute_url(already_absolute)\n\n #################\n # Checkout tests\n #################\n\n # Test 400 Bad Request --> Invalid format for that resource\n def test_11_checkout_fake_format(self):\n self.api.queue_response(400, content=\"\")\n pytest.raises(\n NoAcceptableFormat,\n 
self.api.checkout,\n self.patron,\n self.PIN,\n self.licensepool,\n \"FAKE_FORMAT\",\n )\n self.api.log.info(\"Test invalid format for resource ok!\")\n\n def test_12_checkout_acsm_epub(self):\n checkout_data, checkout_json = self.sample_json(\"checkout_acsm_epub_ok.json\")\n self.api.queue_response(200, content=checkout_json)\n self.perform_and_validate_checkout(\"ACSM_EPUB\")\n\n def test_13_checkout_acsm_pdf(self):\n checkout_data, checkout_json = self.sample_json(\"checkout_acsm_pdf_ok.json\")\n self.api.queue_response(200, content=checkout_json)\n self.perform_and_validate_checkout(\"ACSM_PDF\")\n\n def test_14_checkout_ebook_streaming(self):\n checkout_data, checkout_json = self.sample_json(\n \"checkout_ebook_streaming_ok.json\"\n )\n self.api.queue_response(200, content=checkout_json)\n self.perform_and_validate_checkout(\"EBOOK_STREAMING\")\n\n def test_mechanism_set_on_borrow(self):\n \"\"\"The delivery mechanism for an Odilo title is set on checkout.\"\"\"\n assert OdiloAPI.SET_DELIVERY_MECHANISM_AT == OdiloAPI.BORROW_STEP\n\n def perform_and_validate_checkout(self, internal_format):\n loan_info = self.api.checkout(\n self.patron, self.PIN, self.licensepool, internal_format\n )\n assert loan_info, \"LoanInfo null --> checkout failed!\"\n self.api.log.info(\"Loan ok: %s\" % loan_info.identifier)\n\n #################\n # Fulfill tests\n #################\n\n def test_21_fulfill_acsm_epub(self):\n checkout_data, checkout_json = self.sample_json(\"patron_checkouts.json\")\n self.api.queue_response(200, content=checkout_json)\n\n acsm_data = self.sample_data(\"fulfill_ok_acsm_epub.acsm\")\n self.api.queue_response(200, content=acsm_data)\n\n fulfillment_info = self.fulfill(\"ACSM_EPUB\")\n assert fulfillment_info.content_type[0] == Representation.EPUB_MEDIA_TYPE\n assert fulfillment_info.content_type[1] == DeliveryMechanism.ADOBE_DRM\n\n def test_22_fulfill_acsm_pdf(self):\n checkout_data, checkout_json = self.sample_json(\"patron_checkouts.json\")\n self.api.queue_response(200, content=checkout_json)\n\n acsm_data = self.sample_data(\"fulfill_ok_acsm_pdf.acsm\")\n self.api.queue_response(200, content=acsm_data)\n\n fulfillment_info = self.fulfill(\"ACSM_PDF\")\n assert fulfillment_info.content_type[0] == Representation.PDF_MEDIA_TYPE\n assert fulfillment_info.content_type[1] == DeliveryMechanism.ADOBE_DRM\n\n def test_23_fulfill_ebook_streaming(self):\n checkout_data, checkout_json = self.sample_json(\"patron_checkouts.json\")\n self.api.queue_response(200, content=checkout_json)\n\n self.licensepool.identifier.identifier = \"00011055\"\n fulfillment_info = self.fulfill(\"EBOOK_STREAMING\")\n assert fulfillment_info.content_type[0] == Representation.TEXT_HTML_MEDIA_TYPE\n assert (\n fulfillment_info.content_type[1]\n == DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE\n )\n\n def fulfill(self, internal_format):\n fulfillment_info = self.api.fulfill(\n self.patron, self.PIN, self.licensepool, internal_format\n )\n assert fulfillment_info, \"Cannot Fulfill !!\"\n\n if fulfillment_info.content_link:\n self.api.log.info(\"Fulfill link: %s\" % fulfillment_info.content_link)\n if fulfillment_info.content:\n self.api.log.info(\"Fulfill content: %s\" % fulfillment_info.content)\n\n return fulfillment_info\n\n #################\n # Hold tests\n #################\n\n def test_31_already_on_hold(self):\n already_on_hold_data, already_on_hold_json = self.sample_json(\n \"error_hold_already_in_hold.json\"\n )\n self.api.queue_response(403, content=already_on_hold_json)\n\n pytest.raises(\n 
AlreadyOnHold,\n self.api.place_hold,\n self.patron,\n self.PIN,\n self.licensepool,\n \"<EMAIL>\",\n )\n\n self.api.log.info(\"Test hold already on hold ok!\")\n\n def test_32_place_hold(self):\n hold_ok_data, hold_ok_json = self.sample_json(\"place_hold_ok.json\")\n self.api.queue_response(200, content=hold_ok_json)\n\n hold_info = self.api.place_hold(\n self.patron, self.PIN, self.licensepool, \"<EMAIL>\"\n )\n assert hold_info, \"HoldInfo null --> place hold failed!\"\n self.api.log.info(\"Hold ok: %s\" % hold_info.identifier)\n\n #################\n # Patron Activity tests\n #################\n\n def test_41_patron_activity_invalid_patron(self):\n patron_not_found_data, patron_not_found_json = self.sample_json(\n \"error_patron_not_found.json\"\n )\n self.api.queue_response(404, content=patron_not_found_json)\n\n pytest.raises(\n PatronNotFoundOnRemote, self.api.patron_activity, self.patron, self.PIN\n )\n\n self.api.log.info(\"Test patron activity --> invalid patron ok!\")\n\n def test_42_patron_activity(self):\n patron_checkouts_data, patron_checkouts_json = self.sample_json(\n \"patron_checkouts.json\"\n )\n patron_holds_data, patron_holds_json = self.sample_json(\"patron_holds.json\")\n self.api.queue_response(200, content=patron_checkouts_json)\n self.api.queue_response(200, content=patron_holds_json)\n\n loans_and_holds = self.api.patron_activity(self.patron, self.PIN)\n assert loans_and_holds\n assert 12 == len(loans_and_holds)\n self.api.log.info(\"Test patron activity ok !!\")\n\n #################\n # Checkin tests\n #################\n\n def test_51_checkin_patron_not_found(self):\n patron_not_found_data, patron_not_found_json = self.sample_json(\n \"error_patron_not_found.json\"\n )\n self.api.queue_response(404, content=patron_not_found_json)\n\n pytest.raises(\n PatronNotFoundOnRemote,\n self.api.checkin,\n self.patron,\n self.PIN,\n self.licensepool,\n )\n\n self.api.log.info(\"Test checkin --> invalid patron ok!\")\n\n def test_52_checkin_checkout_not_found(self):\n checkout_not_found_data, checkout_not_found_json = self.sample_json(\n \"error_checkout_not_found.json\"\n )\n self.api.queue_response(404, content=checkout_not_found_json)\n\n pytest.raises(\n NotCheckedOut, self.api.checkin, self.patron, self.PIN, self.licensepool\n )\n\n self.api.log.info(\"Test checkin --> invalid checkout ok!\")\n\n def test_53_checkin(self):\n checkout_data, checkout_json = self.sample_json(\"patron_checkouts.json\")\n self.api.queue_response(200, content=checkout_json)\n\n checkin_data, checkin_json = self.sample_json(\"checkin_ok.json\")\n self.api.queue_response(200, content=checkin_json)\n\n response = self.api.checkin(self.patron, self.PIN, self.licensepool)\n assert response.status_code == 200, (\n \"Response code != 200, cannot perform checkin for record: \"\n + self.licensepool.identifier.identifier\n + \" patron: \"\n + self.patron.authorization_identifier\n )\n\n checkout_returned = response.json()\n\n assert checkout_returned\n assert \"4318\" == checkout_returned[\"id\"]\n self.api.log.info(\"Checkout returned: %s\" % checkout_returned[\"id\"])\n\n #################\n # Patron Activity tests\n #################\n\n def test_61_return_hold_patron_not_found(self):\n patron_not_found_data, patron_not_found_json = self.sample_json(\n \"error_patron_not_found.json\"\n )\n self.api.queue_response(404, content=patron_not_found_json)\n\n pytest.raises(\n PatronNotFoundOnRemote,\n self.api.release_hold,\n self.patron,\n self.PIN,\n self.licensepool,\n )\n\n self.api.log.info(\"Test 
release hold --> invalid patron ok!\")\n\n def test_62_return_hold_not_found(self):\n holds_data, holds_json = self.sample_json(\"patron_holds.json\")\n self.api.queue_response(200, content=holds_json)\n\n checkin_data, checkin_json = self.sample_json(\"error_hold_not_found.json\")\n self.api.queue_response(404, content=checkin_json)\n\n response = self.api.release_hold(self.patron, self.PIN, self.licensepool)\n assert response == True, (\n \"Cannot release hold, response false \"\n + self.licensepool.identifier.identifier\n + \" patron: \"\n + self.patron.authorization_identifier\n )\n\n self.api.log.info(\"Hold returned: %s\" % self.licensepool.identifier.identifier)\n\n def test_63_return_hold(self):\n holds_data, holds_json = self.sample_json(\"patron_holds.json\")\n self.api.queue_response(200, content=holds_json)\n\n release_hold_ok_data, release_hold_ok_json = self.sample_json(\n \"release_hold_ok.json\"\n )\n self.api.queue_response(200, content=release_hold_ok_json)\n\n response = self.api.release_hold(self.patron, self.PIN, self.licensepool)\n assert response == True, (\n \"Cannot release hold, response false \"\n + self.licensepool.identifier.identifier\n + \" patron: \"\n + self.patron.authorization_identifier\n )\n\n self.api.log.info(\"Hold returned: %s\" % self.licensepool.identifier.identifier)\n\n\nclass TestOdiloDiscoveryAPI(OdiloAPITest):\n def test_run(self):\n \"\"\"Verify that running the OdiloCirculationMonitor calls all_ids().\"\"\"\n\n class Mock(OdiloCirculationMonitor):\n def all_ids(self, modification_date=None):\n self.called_with = modification_date\n return 30, 15\n\n # The first time run() is called, all_ids is called with\n # a modification_date of None.\n monitor = Mock(self._db, self.collection, api_class=MockOdiloAPI)\n monitor.run()\n assert None == monitor.called_with\n progress = monitor.timestamp()\n completed = progress.finish\n\n # The return value of all_ids() is used to populate the\n # achievements field.\n assert \"Updated records: 30. New records: 15.\" == progress.achievements\n\n # The second time run() is called, all_ids() is called with a\n # modification date five minutes earlier than the completion\n # of the last run.\n monitor.run()\n expect = completed - monitor.OVERLAP\n assert (expect - monitor.called_with).total_seconds() < 2\n\n def test_all_ids_with_date(self):\n # TODO: This tests that all_ids doesn't crash when you pass in\n # a date. It doesn't test anything about all_ids except the\n # return value.\n monitor = OdiloCirculationMonitor(\n self._db, self.collection, api_class=MockOdiloAPI\n )\n assert monitor, \"Monitor null !!\"\n assert ExternalIntegration.ODILO == monitor.protocol, \"Wat??\"\n\n records_metadata_data, records_metadata_json = self.sample_json(\n \"records_metadata.json\"\n )\n monitor.api.queue_response(200, content=records_metadata_data)\n\n availability_data = self.sample_data(\"record_availability.json\")\n for record in records_metadata_json:\n monitor.api.queue_response(200, content=availability_data)\n\n monitor.api.queue_response(200, content=\"[]\") # No more resources retrieved\n\n timestamp = TimestampData(start=datetime_utc(2017, 9, 1))\n updated, new = monitor.all_ids(None)\n assert 10 == updated\n assert 10 == new\n\n self.api.log.info(\"Odilo circulation monitor with date finished ok!!\")\n\n def test_all_ids_without_date(self):\n # TODO: This tests that all_ids doesn't crash when you pass in\n # an empty date. 
It doesn't test anything about all_ids except the\n # return value.\n\n monitor = OdiloCirculationMonitor(\n self._db, self.collection, api_class=MockOdiloAPI\n )\n assert monitor, \"Monitor null !!\"\n assert ExternalIntegration.ODILO == monitor.protocol, \"Wat??\"\n\n records_metadata_data, records_metadata_json = self.sample_json(\n \"records_metadata.json\"\n )\n monitor.api.queue_response(200, content=records_metadata_data)\n\n availability_data = self.sample_data(\"record_availability.json\")\n for record in records_metadata_json:\n monitor.api.queue_response(200, content=availability_data)\n\n monitor.api.queue_response(200, content=\"[]\") # No more resources retrieved\n\n updated, new = monitor.all_ids(datetime_utc(2017, 9, 1))\n assert 10 == updated\n assert 10 == new\n\n self.api.log.info(\"Odilo circulation monitor without date finished ok!!\")\n\n\nclass TestOdiloBibliographicCoverageProvider(OdiloAPITest):\n def setup_method(self):\n super(TestOdiloBibliographicCoverageProvider, self).setup_method()\n self.provider = OdiloBibliographicCoverageProvider(\n self.collection, api_class=MockOdiloAPI\n )\n self.api = self.provider.api\n\n def test_process_item(self):\n record_metadata, record_metadata_json = self.sample_json(\"odilo_metadata.json\")\n self.api.queue_response(200, content=record_metadata_json)\n availability, availability_json = self.sample_json(\"odilo_availability.json\")\n self.api.queue_response(200, content=availability)\n\n identifier, made_new = self.provider.process_item(\"00010982\")\n\n # Check that the Identifier returned has the right .type and .identifier.\n assert identifier, \"Problem while testing process item !!!\"\n assert identifier.type == Identifier.ODILO_ID\n assert identifier.identifier == \"00010982\"\n\n # Check that metadata and availability information were imported properly\n [pool] = identifier.licensed_through\n assert \"Busy Brownies\" == pool.work.title\n\n assert 2 == pool.licenses_owned\n assert 1 == pool.licenses_available\n assert 2 == pool.patrons_in_hold_queue\n assert 1 == pool.licenses_reserved\n\n names = [x.delivery_mechanism.name for x in pool.delivery_mechanisms]\n assert (\n sorted(\n [\n Representation.EPUB_MEDIA_TYPE\n + \" (\"\n + DeliveryMechanism.ADOBE_DRM\n + \")\",\n Representation.TEXT_HTML_MEDIA_TYPE\n + \" (\"\n + DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE\n + \")\",\n ]\n )\n == sorted(names)\n )\n\n # Check that handle_success was called --> A Work was created and made presentation ready.\n assert True == pool.work.presentation_ready\n\n self.api.log.info(\"Testing process item finished ok !!\")\n\n def test_process_inactive_item(self):\n record_metadata, record_metadata_json = self.sample_json(\n \"odilo_metadata_inactive.json\"\n )\n self.api.queue_response(200, content=record_metadata_json)\n availability, availability_json = self.sample_json(\n \"odilo_availability_inactive.json\"\n )\n self.api.queue_response(200, content=availability)\n\n identifier, made_new = self.provider.process_item(\"00011135\")\n\n # Check that the Identifier returned has the right .type and .identifier.\n assert identifier, \"Problem while testing process inactive item !!!\"\n assert identifier.type == Identifier.ODILO_ID\n assert identifier.identifier == \"00011135\"\n\n [pool] = identifier.licensed_through\n assert (\n \"!Tention A Story of Boy-Life during the Peninsular War\" == pool.work.title\n )\n\n # Check work not available\n assert 0 == pool.licenses_owned\n assert 0 == pool.licenses_available\n\n assert True == 
pool.work.presentation_ready\n\n self.api.log.info(\"Testing process item inactive finished ok !!\")\n\n\nclass TestOdiloRepresentationExtractor(OdiloAPITest):\n def test_book_info_with_metadata(self):\n # Tests that can convert an odilo json block into a Metadata object.\n\n raw, book_json = self.sample_json(\"odilo_metadata.json\")\n raw, availability = self.sample_json(\"odilo_availability.json\")\n metadata, active = OdiloRepresentationExtractor.record_info_to_metadata(\n book_json, availability\n )\n\n assert \"Busy Brownies\" == metadata.title\n assert (\n \" (The Classic Fantasy Literature of Elves for Children)\"\n == metadata.subtitle\n )\n assert \"eng\" == metadata.language\n assert Edition.BOOK_MEDIUM == metadata.medium\n assert (\n \"The Classic Fantasy Literature for Children written in 1896 retold for Elves adventure.\"\n == metadata.series\n )\n assert \"1\" == metadata.series_position\n assert \"ANBOCO\" == metadata.publisher\n assert 2013 == metadata.published.year\n assert 2 == metadata.published.month\n assert 2 == metadata.published.day\n assert 2017 == metadata.data_source_last_updated.year\n assert 3 == metadata.data_source_last_updated.month\n assert 10 == metadata.data_source_last_updated.day\n # Related IDs.\n assert (Identifier.ODILO_ID, \"00010982\") == (\n metadata.primary_identifier.type,\n metadata.primary_identifier.identifier,\n )\n ids = [(x.type, x.identifier) for x in metadata.identifiers]\n assert [\n (Identifier.ISBN, \"9783736418837\"),\n (Identifier.ODILO_ID, \"00010982\"),\n ] == sorted(ids)\n\n subjects = sorted(metadata.subjects, key=lambda x: x.identifier)\n weight = Classification.TRUSTED_DISTRIBUTOR_WEIGHT\n assert [\n (\"Children\", \"tag\", weight),\n (\"Classics\", \"tag\", weight),\n (\"FIC004000\", \"BISAC\", weight),\n (\"Fantasy\", \"tag\", weight),\n (\"K-12\", \"Grade level\", weight),\n (\"LIT009000\", \"BISAC\", weight),\n (\"YAF019020\", \"BISAC\", weight),\n ] == [(x.identifier, x.type, x.weight) for x in subjects]\n\n [author] = metadata.contributors\n assert \"<NAME>.\" == author.sort_name\n assert \"<NAME>\" == author.display_name\n assert [Contributor.AUTHOR_ROLE] == author.roles\n\n # Available formats.\n [acsm_epub, ebook_streaming] = sorted(\n metadata.circulation.formats, key=lambda x: x.content_type\n )\n assert Representation.EPUB_MEDIA_TYPE == acsm_epub.content_type\n assert DeliveryMechanism.ADOBE_DRM == acsm_epub.drm_scheme\n\n assert Representation.TEXT_HTML_MEDIA_TYPE == ebook_streaming.content_type\n assert (\n DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE == ebook_streaming.drm_scheme\n )\n\n # Links to various resources.\n image, thumbnail, description = sorted(metadata.links, key=lambda x: x.rel)\n\n assert Hyperlink.IMAGE == image.rel\n assert (\n \"http://pruebasotk.odilotk.es/public/OdiloPlace_eduDistUS/pg54159.jpg\"\n == image.href\n )\n\n assert Hyperlink.THUMBNAIL_IMAGE == thumbnail.rel\n assert (\n \"http://pruebasotk.odilotk.es/public/OdiloPlace_eduDistUS/pg54159_225x318.jpg\"\n == thumbnail.href\n )\n\n assert Hyperlink.DESCRIPTION == description.rel\n assert description.content.startswith(\n \"All the <b>Brownies</b> had promised to help, and when a Brownie undertakes a thing he works as busily\"\n )\n\n circulation = metadata.circulation\n assert 2 == circulation.licenses_owned\n assert 1 == circulation.licenses_available\n assert 2 == circulation.patrons_in_hold_queue\n assert 1 == circulation.licenses_reserved\n\n self.api.log.info(\"Testing book info with metadata finished ok !!\")\n\n def 
test_book_info_missing_metadata(self):\n # Verify that we properly handle missing metadata from Odilo.\n raw, book_json = self.sample_json(\"odilo_metadata.json\")\n\n # This was seen in real data.\n book_json[\"series\"] = \" \"\n book_json[\"seriesPosition\"] = \" \"\n\n metadata, active = OdiloRepresentationExtractor.record_info_to_metadata(\n book_json, {}\n )\n assert None == metadata.series\n assert None == metadata.series_position\n\n def test_default_language_spanish(self):\n \"\"\"Since Odilo primarily distributes Spanish-language titles, if a\n title comes in with no specified language, we assume it's\n Spanish.\n \"\"\"\n raw, book_json = self.sample_json(\"odilo_metadata.json\")\n raw, availability = self.sample_json(\"odilo_availability.json\")\n del book_json[\"language\"]\n metadata, active = OdiloRepresentationExtractor.record_info_to_metadata(\n book_json, availability\n )\n assert \"spa\" == metadata.language\n", "id": "4245186", "language": "Python", "matching_score": 8.28515911102295, "max_stars_count": 0, "path": "tests/api/test_odilo.py" }, { "content": "# encoding: utf-8\nimport json\nimport random\nfrom datetime import timedelta\n\nimport pytest\n\nfrom api.authenticator import BasicAuthenticationProvider\nfrom api.circulation import CirculationAPI, FulfillmentInfo, HoldInfo, LoanInfo\nfrom api.circulation_exceptions import *\nfrom api.config import Configuration, temp_config\nfrom api.overdrive import (\n MockOverdriveAPI,\n NewTitlesOverdriveCollectionMonitor,\n OverdriveAPI,\n OverdriveCirculationMonitor,\n OverdriveCollectionReaper,\n OverdriveFormatSweep,\n OverdriveManifestFulfillmentInfo,\n RecentOverdriveCollectionMonitor,\n)\nfrom core.metadata_layer import TimestampData\nfrom core.model import (\n CirculationEvent,\n ConfigurationSetting,\n DataSource,\n DeliveryMechanism,\n Edition,\n ExternalIntegration,\n Identifier,\n LicensePool,\n MediaTypes,\n Representation,\n RightsStatus,\n)\nfrom core.testing import DatabaseTest, DummyHTTPClient, MockRequestsResponse\nfrom core.util.datetime_helpers import datetime_utc, utc_now\n\nfrom . 
import sample_data\n\n\nclass OverdriveAPITest(DatabaseTest):\n def setup_method(self):\n super(OverdriveAPITest, self).setup_method()\n library = self._default_library\n self.collection = MockOverdriveAPI.mock_collection(self._db)\n self.circulation = CirculationAPI(\n self._db, library, api_map={ExternalIntegration.OVERDRIVE: MockOverdriveAPI}\n )\n self.api = self.circulation.api_for_collection[self.collection.id]\n\n @classmethod\n def sample_data(self, filename):\n return sample_data(filename, \"overdrive\")\n\n @classmethod\n def sample_json(self, filename):\n data = self.sample_data(filename)\n return data, json.loads(data)\n\n def error_message(self, error_code, message=None, token=None):\n \"\"\"Create a JSON document that simulates the message served by\n Overdrive given a certain error condition.\n \"\"\"\n message = message or self._str\n token = token or self._str\n data = dict(errorCode=error_code, message=message, token=token)\n return json.dumps(data)\n\n\nclass TestOverdriveAPI(OverdriveAPITest):\n def test_external_integration(self):\n assert self.collection.external_integration == self.api.external_integration(\n self._db\n )\n\n def test_lock_in_format(self):\n # Verify which formats do or don't need to be locked in before\n # fulfillment.\n needs_lock_in = self.api.LOCK_IN_FORMATS\n\n # Streaming and manifest-based formats are exempt; all\n # other formats need lock-in.\n exempt = list(self.api.STREAMING_FORMATS) + list(\n self.api.MANIFEST_INTERNAL_FORMATS\n )\n for i in self.api.FORMATS:\n if i not in exempt:\n assert i in needs_lock_in\n for i in exempt:\n assert i not in needs_lock_in\n\n def test__run_self_tests(self):\n # Verify that OverdriveAPI._run_self_tests() calls the right\n # methods.\n\n class Mock(MockOverdriveAPI):\n \"Mock every method used by OverdriveAPI._run_self_tests.\"\n\n # First we will call check_creds() to get a fresh credential.\n mock_credential = object()\n\n def check_creds(self, force_refresh=False):\n self.check_creds_called_with = force_refresh\n return self.mock_credential\n\n # Then we will call get_advantage_accounts().\n mock_advantage_accounts = [object(), object()]\n\n def get_advantage_accounts(self):\n return self.mock_advantage_accounts\n\n # Then we will call get() on the _all_products_link.\n def get(self, url, extra_headers, exception_on_401=False):\n self.get_called_with = (url, extra_headers, exception_on_401)\n return 200, {}, json.dumps(dict(totalItems=2010))\n\n # Finally, for every library associated with this\n # collection, we'll call get_patron_credential() using\n # the credentials of that library's test patron.\n mock_patron_credential = object()\n get_patron_credential_called_with = []\n\n def get_patron_credential(self, patron, pin):\n self.get_patron_credential_called_with.append((patron, pin))\n return self.mock_patron_credential\n\n # Now let's make sure two Libraries have access to this\n # Collection -- one library with a default patron and one\n # without.\n no_default_patron = self._library()\n self.collection.libraries.append(no_default_patron)\n\n with_default_patron = self._default_library\n integration = self._external_integration(\n \"api.simple_authentication\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[with_default_patron],\n )\n p = BasicAuthenticationProvider\n integration.setting(p.TEST_IDENTIFIER).value = \"username1\"\n integration.setting(p.TEST_PASSWORD).value = \"<PASSWORD>\"\n\n # Now that everything is set up, run the self-test.\n api = Mock(self._db, self.collection)\n 
results = sorted(api._run_self_tests(self._db), key=lambda x: x.name)\n [\n no_patron_credential,\n default_patron_credential,\n global_privileges,\n collection_size,\n advantage,\n ] = results\n\n # Verify that each test method was called and returned the\n # expected SelfTestResult object.\n assert (\n \"Checking global Client Authentication privileges\" == global_privileges.name\n )\n assert True == global_privileges.success\n assert api.mock_credential == global_privileges.result\n\n assert \"Looking up Overdrive Advantage accounts\" == advantage.name\n assert True == advantage.success\n assert \"Found 2 Overdrive Advantage account(s).\" == advantage.result\n\n assert \"Counting size of collection\" == collection_size.name\n assert True == collection_size.success\n assert \"2010 item(s) in collection\" == collection_size.result\n url, headers, error_on_401 = api.get_called_with\n assert api._all_products_link == url\n\n assert (\n \"Acquiring test patron credentials for library %s\" % no_default_patron.name\n == no_patron_credential.name\n )\n assert False == no_patron_credential.success\n assert \"Library has no test patron configured.\" == str(\n no_patron_credential.exception\n )\n\n assert (\n \"Checking Patron Authentication privileges, using test patron for library %s\"\n % with_default_patron.name\n == default_patron_credential.name\n )\n assert True == default_patron_credential.success\n assert api.mock_patron_credential == default_patron_credential.result\n\n # Although there are two libraries associated with this\n # collection, get_patron_credential was only called once, because\n # one of the libraries doesn't have a default patron.\n [(patron1, password1)] = api.get_patron_credential_called_with\n assert \"username1\" == patron1.authorization_identifier\n assert \"password1\" == password1\n\n def test_run_self_tests_short_circuit(self):\n \"\"\"If OverdriveAPI.check_creds can't get credentials, the rest of\n the self-tests aren't even run.\n\n This probably doesn't matter much, because if check_creds doesn't\n work we won't be able to instantiate the OverdriveAPI class.\n \"\"\"\n\n def explode(*args, **kwargs):\n raise Exception(\"Failure!\")\n\n self.api.check_creds = explode\n\n # Only one test will be run.\n [check_creds] = self.api._run_self_tests(self._db)\n assert \"Failure!\" == str(check_creds.exception)\n\n def test_default_notification_email_address(self):\n \"\"\"Test the ability of the Overdrive API to detect an email address\n previously given by the patron to Overdrive for the purpose of\n notifications.\n \"\"\"\n ignore, patron_with_email = self.sample_json(\"patron_info.json\")\n self.api.queue_response(200, content=patron_with_email)\n patron = self._patron()\n\n # The site default for notification emails will never be used.\n configuration_setting = ConfigurationSetting.for_library(\n Configuration.DEFAULT_NOTIFICATION_EMAIL_ADDRESS, self._default_library\n )\n configuration_setting.value = \"<EMAIL>\"\n\n # If the patron has used a particular email address to put\n # books on hold, use that email address, not the site default.\n assert \"<EMAIL>\" == self.api.default_notification_email_address(\n patron, \"pin\"\n )\n\n # If the patron's email address according to Overdrive _is_\n # the site default, it is ignored. 
This can only happen if\n # this patron placed a hold using an older version of the\n # circulation manager.\n patron_with_email[\"lastHoldEmail\"] = configuration_setting.value\n self.api.queue_response(200, content=patron_with_email)\n assert None == self.api.default_notification_email_address(patron, \"pin\")\n\n # If the patron has never before put an Overdrive book on\n # hold, their JSON object has no `lastHoldEmail` key. In this\n # case we return None -- again, ignoring the site default.\n patron_with_no_email = dict(patron_with_email)\n del patron_with_no_email[\"lastHoldEmail\"]\n self.api.queue_response(200, content=patron_with_no_email)\n assert None == self.api.default_notification_email_address(patron, \"pin\")\n\n # If there's an error getting the information from Overdrive,\n # we return None.\n self.api.queue_response(404)\n assert None == self.api.default_notification_email_address(patron, \"pin\")\n\n def test_scope_string(self):\n # scope_string() puts the website ID of the Overdrive\n # integration and the ILS name associated with the library\n # into the form expected by Overdrive.\n expect = \"websiteid:%s authorizationname:%s\" % (\n self.api.website_id.decode(\"utf-8\"),\n self.api.ils_name(self._default_library),\n )\n assert expect == self.api.scope_string(self._default_library)\n\n def test_checkout(self):\n # Verify the process of checking out a book.\n patron = object()\n pin = object()\n pool = self._licensepool(edition=None, collection=self.collection)\n identifier = pool.identifier\n\n class Mock(MockOverdriveAPI):\n MOCK_EXPIRATION_DATE = object()\n PROCESS_CHECKOUT_ERROR_RESULT = Exception(\n \"exception in _process_checkout_error\"\n )\n\n def __init__(self, *args, **kwargs):\n super(Mock, self).__init__(*args, **kwargs)\n self.extract_expiration_date_called_with = []\n self._process_checkout_error_called_with = []\n\n def extract_expiration_date(self, loan):\n self.extract_expiration_date_called_with.append(loan)\n return self.MOCK_EXPIRATION_DATE\n\n def _process_checkout_error(self, patron, pin, licensepool, data):\n self._process_checkout_error_called_with.append(\n (patron, pin, licensepool, data)\n )\n result = self.PROCESS_CHECKOUT_ERROR_RESULT\n if isinstance(result, Exception):\n raise result\n return result\n\n # First, test the successful path.\n api = Mock(self._db, self.collection)\n api_response = json.dumps(\"some data\")\n api.queue_response(201, content=api_response)\n loan = api.checkout(patron, pin, pool, \"internal format is ignored\")\n\n # Verify that a good-looking patron request went out.\n endpoint, ignore, kwargs = api.requests.pop()\n assert endpoint.endswith(\"/me/checkouts\")\n assert patron == kwargs.pop(\"_patron\")\n extra_headers = kwargs.pop(\"extra_headers\")\n assert {\"Content-Type\": \"application/json\"} == extra_headers\n data = json.loads(kwargs.pop(\"data\"))\n assert {\n \"fields\": [{\"name\": \"reserveId\", \"value\": pool.identifier.identifier}]\n } == data\n\n # The API response was passed into extract_expiration_date.\n #\n # The most important thing here is not the content of the response but the\n # fact that the response code was not 400.\n assert \"some data\" == api.extract_expiration_date_called_with.pop()\n\n # The return value is a LoanInfo object with all relevant info.\n assert isinstance(loan, LoanInfo)\n assert pool.collection.id == loan.collection_id\n assert pool.data_source.name == loan.data_source_name\n assert identifier.type == loan.identifier_type\n assert identifier.identifier == 
loan.identifier\n assert None == loan.start_date\n assert api.MOCK_EXPIRATION_DATE == loan.end_date\n\n # _process_checkout_error was not called\n assert [] == api._process_checkout_error_called_with\n\n # Now let's test error conditions.\n\n # Most of the time, an error simply results in an exception.\n api.queue_response(400, content=api_response)\n with pytest.raises(Exception) as excinfo:\n api.checkout(patron, pin, pool, \"internal format is ignored\")\n assert \"exception in _process_checkout_error\" in str(excinfo.value)\n assert (\n patron,\n pin,\n pool,\n \"some data\",\n ) == api._process_checkout_error_called_with.pop()\n\n # However, if _process_checkout_error is able to recover from\n # the error and ends up returning something, the return value\n # is propagated from checkout().\n api.PROCESS_CHECKOUT_ERROR_RESULT = \"Actually, I was able to recover\"\n api.queue_response(400, content=api_response)\n assert \"Actually, I was able to recover\" == api.checkout(\n patron, pin, pool, \"internal format is ignored\"\n )\n assert (\n patron,\n pin,\n pool,\n \"some data\",\n ) == api._process_checkout_error_called_with.pop()\n\n def test__process_checkout_error(self):\n # Verify that _process_checkout_error handles common API-side errors,\n # making follow-up API calls if necessary.\n\n class Mock(MockOverdriveAPI):\n MOCK_LOAN = object()\n MOCK_EXPIRATION_DATE = object()\n\n def __init__(self, *args, **kwargs):\n super(Mock, self).__init__(*args, **kwargs)\n self.update_licensepool_called_with = []\n self.get_loan_called_with = []\n self.extract_expiration_date_called_with = []\n\n def update_licensepool(self, identifier):\n self.update_licensepool_called_with.append(identifier)\n\n def get_loan(self, patron, pin, identifier):\n self.get_loan_called_with.append((patron, pin, identifier))\n return self.MOCK_LOAN\n\n def extract_expiration_date(self, loan):\n self.extract_expiration_date_called_with.append(loan)\n return self.MOCK_EXPIRATION_DATE\n\n patron = object()\n pin = object()\n pool = self._licensepool(edition=None, collection=self.collection)\n identifier = pool.identifier\n api = Mock(self._db, self.collection)\n m = api._process_checkout_error\n\n # Most of the error handling is pretty straightforward.\n def with_error_code(code):\n # Simulate the response of the Overdrive API with a given error code.\n error = dict(errorCode=code)\n\n # Handle the error.\n return m(patron, pin, pool, error)\n\n # Errors not specifically known become generic CannotLoan exceptions.\n with pytest.raises(CannotLoan) as excinfo:\n with_error_code(\"WeirdError\")\n assert \"WeirdError\" in str(excinfo.value)\n\n # If the data passed in to _process_checkout_error is not what\n # the real Overdrive API would send, the error is even more\n # generic.\n with pytest.raises(CannotLoan) as excinfo:\n m(patron, pin, pool, \"Not a dict\")\n assert \"Unknown Error\" in str(excinfo.value)\n with pytest.raises(CannotLoan) as excinfo:\n m(patron, pin, pool, dict(errorCodePresent=False))\n assert \"Unknown Error\" in str(excinfo.value)\n\n # Some known errors become specific subclasses of CannotLoan.\n pytest.raises(\n PatronLoanLimitReached, with_error_code, \"PatronHasExceededCheckoutLimit\"\n )\n pytest.raises(\n PatronLoanLimitReached,\n with_error_code,\n \"PatronHasExceededCheckoutLimit_ForCPC\",\n )\n\n # There are two cases where we need to make follow-up API\n # requests as the result of a failure during the loan process.\n\n # First, if the error is \"NoCopiesAvailable\", we know we have\n # 
out-of-date availability information and we need to call\n # update_licensepool before raising NoAvailbleCopies().\n pytest.raises(NoAvailableCopies, with_error_code, \"NoCopiesAvailable\")\n assert identifier.identifier == api.update_licensepool_called_with.pop()\n\n # If the error is \"TitleAlreadyCheckedOut\", then the problem\n # is that the patron tried to take out a new loan instead of\n # fulfilling an existing loan. In this case we don't raise an\n # exception at all; we fulfill the loan and return a LoanInfo\n # object.\n loan = with_error_code(\"TitleAlreadyCheckedOut\")\n\n # get_loan was called with the patron's details.\n assert (patron, pin, identifier.identifier) == api.get_loan_called_with.pop()\n\n # extract_expiration_date was called on the return value of get_loan.\n assert api.MOCK_LOAN == api.extract_expiration_date_called_with.pop()\n\n # And a LoanInfo was created with all relevant information.\n assert isinstance(loan, LoanInfo)\n assert pool.collection.id == loan.collection_id\n assert pool.data_source.name == loan.data_source_name\n assert identifier.type == loan.identifier_type\n assert identifier.identifier == loan.identifier\n assert None == loan.start_date\n assert api.MOCK_EXPIRATION_DATE == loan.end_date\n\n def test_extract_expiration_date(self):\n # Test the code that finds and parses a loan expiration date.\n m = OverdriveAPI.extract_expiration_date\n\n # Success\n assert datetime_utc(2020, 1, 2, 3, 4, 5) == m(\n dict(expires=\"2020-01-02T03:04:05Z\")\n )\n\n # Various failure cases.\n assert None == m(dict(expiresPresent=False))\n assert None == m(dict(expires=\"Wrong date format\"))\n assert None == m(\"Not a dict\")\n assert None == m(None)\n\n def test_place_hold(self):\n # Verify that an appropriate request is made to HOLDS_ENDPOINT\n # to create a hold.\n #\n # The request will include different form fields depending on\n # whether default_notification_email_address returns something.\n class Mock(MockOverdriveAPI):\n def __init__(self, *args, **kwargs):\n super(Mock, self).__init__(*args, **kwargs)\n self.DEFAULT_NOTIFICATION_EMAIL_ADDRESS = None\n\n def default_notification_email_address(self, patron, pin):\n self.default_notification_email_address_called_with = (patron, pin)\n return self.DEFAULT_NOTIFICATION_EMAIL_ADDRESS\n\n def fill_out_form(self, **form_fields):\n # Record the form fields and return some dummy values.\n self.fill_out_form_called_with = form_fields\n return \"headers\", \"filled-out form\"\n\n def patron_request(self, *args, **kwargs):\n # Pretend to make a request to an API endpoint.\n self.patron_request_called_with = (args, kwargs)\n return \"A mock response\"\n\n def process_place_hold_response(self, response, patron, pin, licensepool):\n self.process_place_hold_response_called_with = (\n response,\n patron,\n pin,\n licensepool,\n )\n return \"OK, I processed it.\"\n\n # First, test the case where no notification email address is\n # provided and there is no default.\n patron = object()\n pin = object()\n pool = self._licensepool(edition=None, collection=self.collection)\n api = Mock(self._db, self.collection)\n response = api.place_hold(patron, pin, pool, None)\n\n # Now we can trace the path of the input through the method calls.\n\n # The patron and PIN were passed into\n # default_notification_email_address.\n assert (patron, pin) == api.default_notification_email_address_called_with\n\n # The return value was None, and so 'ignoreHoldEmail' was\n # added to the form to be filled out, rather than\n # 'emailAddress' 
being added.\n fields = api.fill_out_form_called_with\n identifier = str(pool.identifier.identifier)\n assert dict(ignoreHoldEmail=True, reserveId=identifier) == fields\n\n # patron_request was called with the filled-out form and other\n # information necessary to authenticate the request.\n args, kwargs = api.patron_request_called_with\n assert (patron, pin, api.HOLDS_ENDPOINT, \"headers\", \"filled-out form\") == args\n assert {} == kwargs\n\n # Finally, process_place_hold_response was called on\n # the return value of patron_request\n assert (\n \"A mock response\",\n patron,\n pin,\n pool,\n ) == api.process_place_hold_response_called_with\n assert \"OK, I processed it.\" == response\n\n # Now we need to test two more cases.\n #\n # First, the patron has a holds notification address\n # registered with Overdrive.\n email = \"<EMAIL>\"\n api.DEFAULT_NOTIFICATION_EMAIL_ADDRESS = email\n response = api.place_hold(patron, pin, pool, None)\n\n # Same result.\n assert \"OK, I processed it.\" == response\n\n # Different variables were passed in to fill_out_form.\n fields = api.fill_out_form_called_with\n assert dict(emailAddress=email, reserveId=identifier) == fields\n\n # Finally, test that when a specific address is passed in, it\n # takes precedence over the patron's holds notification address.\n\n response = api.place_hold(patron, pin, pool, \"<EMAIL>\")\n assert \"OK, I processed it.\" == response\n fields = api.fill_out_form_called_with\n assert dict(emailAddress=\"<EMAIL>\", reserveId=identifier) == fields\n\n def test_process_place_hold_response(self):\n # Verify that we can handle various error and non-error responses\n # to a HOLDS_ENDPOINT request.\n\n ignore, successful_hold = self.sample_json(\"successful_hold.json\")\n\n class Mock(MockOverdriveAPI):\n def get_hold(self, patron, pin, overdrive_id):\n # Return a sample hold representation rather than\n # making another API request.\n self.get_hold_called_with = (patron, pin, overdrive_id)\n return successful_hold\n\n api = Mock(self._db, self.collection)\n\n def process_error_response(message):\n # Attempt to process a response that resulted in an error.\n if isinstance(message, (bytes, str)):\n data = dict(errorCode=message)\n else:\n data = message\n response = MockRequestsResponse(400, content=data)\n return api.process_place_hold_response(response, None, None, None)\n\n # Some error messages result in specific CirculationExceptions.\n pytest.raises(CannotRenew, process_error_response, \"NotWithinRenewalWindow\")\n pytest.raises(\n PatronHoldLimitReached, process_error_response, \"PatronExceededHoldLimit\"\n )\n\n # An unrecognized error message results in a generic\n # CannotHold.\n pytest.raises(CannotHold, process_error_response, \"SomeOtherError\")\n\n # Same if the error message is missing or the response can't be\n # processed.\n pytest.raises(CannotHold, process_error_response, dict())\n pytest.raises(CannotHold, process_error_response, None)\n\n # Same if the error code isn't in the 4xx or 2xx range\n # (which shouldn't happen in real life).\n response = MockRequestsResponse(999)\n pytest.raises(\n CannotHold, api.process_place_hold_response, response, None, None, None\n )\n\n # At this point patron and book details become important --\n # we're going to return a HoldInfo object and potentially make\n # another API request.\n patron = self._patron()\n pin = object()\n licensepool = self._licensepool(edition=None)\n\n # The remaining tests will end up running the same code on the\n # same data, so they will return the 
same HoldInfo. Define a\n # helper method to make this easier.\n def assert_correct_holdinfo(x):\n assert isinstance(x, HoldInfo)\n assert licensepool.collection == x.collection(self._db)\n assert licensepool.data_source.name == x.data_source_name\n assert identifier.identifier == x.identifier\n assert identifier.type == x.identifier_type\n assert datetime_utc(2015, 3, 26, 11, 30, 29) == x.start_date\n assert None == x.end_date\n assert 1 == x.hold_position\n\n # Test the case where the 'error' is that the book is already\n # on hold.\n already_on_hold = dict(errorCode=\"AlreadyOnWaitList\")\n response = MockRequestsResponse(400, content=already_on_hold)\n result = api.process_place_hold_response(response, patron, pin, licensepool)\n\n # get_hold() was called with the arguments we expect.\n identifier = licensepool.identifier\n assert (patron, pin, identifier.identifier) == api.get_hold_called_with\n\n # The result was converted into a HoldInfo object. The\n # effective result is exactly as if we had successfully put\n # the book on hold.\n assert_correct_holdinfo(result)\n\n # Finally, let's test the case where there was no hold and now\n # there is.\n api.get_hold_called_with = None\n response = MockRequestsResponse(200, content=successful_hold)\n result = api.process_place_hold_response(response, patron, pin, licensepool)\n assert_correct_holdinfo(result)\n\n # Here, get_hold was _not_ called, because the hold didn't\n # already exist.\n assert None == api.get_hold_called_with\n\n def test_checkin(self):\n class Mock(MockOverdriveAPI):\n EARLY_RETURN_SUCCESS = False\n\n def perform_early_return(self, *args):\n self.perform_early_return_call = args\n return self.EARLY_RETURN_SUCCESS\n\n def patron_request(self, *args, **kwargs):\n self.patron_request_call = (args, kwargs)\n\n overdrive = Mock(self._db, self.collection)\n overdrive.perform_early_return_call = None\n\n # In most circumstances we do not bother calling\n # perform_early_return; we just call patron_request.\n pool = self._licensepool(None)\n patron = self._patron()\n pin = object()\n expect_url = overdrive.endpoint(\n overdrive.CHECKOUT_ENDPOINT, overdrive_id=pool.identifier.identifier\n )\n\n def assert_no_early_return():\n \"\"\"Call this to verify that patron_request is\n called within checkin() instead of perform_early_return.\n \"\"\"\n overdrive.checkin(patron, pin, pool)\n\n # perform_early_return was not called.\n assert None == overdrive.perform_early_return_call\n\n # patron_request was called in an attempt to\n # DELETE an active loan.\n args, kwargs = overdrive.patron_request_call\n assert (patron, pin, expect_url) == args\n assert dict(method=\"DELETE\") == kwargs\n overdrive.patron_request_call = None\n\n # If there is no loan, there is no perform_early_return.\n assert_no_early_return()\n\n # Same if the loan is not fulfilled...\n loan, ignore = pool.loan_to(patron)\n assert_no_early_return()\n\n # If the loan is fulfilled but its LicensePoolDeliveryMechanism has\n # no DeliveryMechanism for some reason...\n loan.fulfillment = pool.delivery_mechanisms[0]\n dm = loan.fulfillment.delivery_mechanism\n loan.fulfillment.delivery_mechanism = None\n assert_no_early_return()\n\n # If the loan is fulfilled but the delivery mechanism uses DRM...\n loan.fulfillment.delivery_mechanism = dm\n assert_no_early_return()\n\n # If the loan is fulfilled with a DRM-free delivery mechanism,\n # perform_early_return _is_ called.\n dm.drm_scheme = DeliveryMechanism.NO_DRM\n overdrive.checkin(patron, pin, pool)\n\n assert (patron, 
pin, loan) == overdrive.perform_early_return_call\n\n # But if it fails, patron_request is _also_ called.\n args, kwargs = overdrive.patron_request_call\n assert (patron, pin, expect_url) == args\n assert dict(method=\"DELETE\") == kwargs\n\n # Finally, if the loan is fulfilled with a DRM-free delivery mechanism\n # and perform_early_return succeeds, patron_request_call is not\n # called -- the title was already returned.\n overdrive.patron_request_call = None\n overdrive.EARLY_RETURN_SUCCESS = True\n overdrive.checkin(patron, pin, pool)\n assert (patron, pin, loan) == overdrive.perform_early_return_call\n assert None == overdrive.patron_request_call\n\n def test_perform_early_return(self):\n class Mock(MockOverdriveAPI):\n\n EARLY_RETURN_URL = \"http://early-return/\"\n\n def get_fulfillment_link(self, *args):\n self.get_fulfillment_link_call = args\n return (\"http://fulfillment/\", \"content/type\")\n\n def _extract_early_return_url(self, *args):\n self._extract_early_return_url_call = args\n return self.EARLY_RETURN_URL\n\n overdrive = Mock(self._db, self.collection)\n\n # This patron has a loan.\n pool = self._licensepool(None)\n patron = self._patron()\n pin = object()\n loan, ignore = pool.loan_to(patron)\n\n # The loan has been fulfilled and now the patron wants to\n # do early return.\n loan.fulfillment = pool.delivery_mechanisms[0]\n\n # Our mocked perform_early_return will make two HTTP requests.\n # The first will be to the fulfill link returned by our mock\n # get_fulfillment_link. The response to this request is a\n # redirect that includes an early return link.\n http = DummyHTTPClient()\n http.responses.append(\n MockRequestsResponse(\n 302, dict(location=\"http://fulfill-this-book/?or=return-early\")\n )\n )\n\n # The second HTTP request made will be to the early return\n # link 'extracted' from that link by our mock\n # _extract_early_return_url. 
The response here is a copy of\n # the actual response Overdrive sends in this situation.\n http.responses.append(MockRequestsResponse(200, content=\"Success\"))\n\n # Do the thing.\n success = overdrive.perform_early_return(patron, pin, loan, http.do_get)\n\n # The title was 'returned'.\n assert True == success\n\n # It worked like this:\n #\n # get_fulfillment_link was called with appropriate arguments.\n assert (\n patron,\n pin,\n pool.identifier.identifier,\n \"ebook-epub-adobe\",\n ) == overdrive.get_fulfillment_link_call\n\n # The URL returned by that method was 'requested'.\n assert \"http://fulfillment/\" == http.requests.pop(0)\n\n # The resulting URL was passed into _extract_early_return_url.\n assert (\n \"http://fulfill-this-book/?or=return-early\",\n ) == overdrive._extract_early_return_url_call\n\n # Then the URL returned by _that_ method was 'requested'.\n assert \"http://early-return/\" == http.requests.pop(0)\n\n # If no early return URL can be extracted from the fulfillment URL,\n # perform_early_return has no effect.\n #\n overdrive._extract_early_return_url_call = None\n overdrive.EARLY_RETURN_URL = None\n http.responses.append(\n MockRequestsResponse(302, dict(location=\"http://fulfill-this-book/\"))\n )\n success = overdrive.perform_early_return(patron, pin, loan, http.do_get)\n assert False == success\n\n # extract_early_return_url_call was called, but since it returned\n # None, no second HTTP request was made.\n assert \"http://fulfillment/\" == http.requests.pop(0)\n assert (\n \"http://fulfill-this-book/\",\n ) == overdrive._extract_early_return_url_call\n assert [] == http.requests\n\n # If we can't map the delivery mechanism to one of Overdrive's\n # internal formats, perform_early_return has no effect.\n #\n loan.fulfillment.delivery_mechanism.content_type = \"not-in/overdrive\"\n success = overdrive.perform_early_return(patron, pin, loan, http.do_get)\n assert False == success\n\n # In this case, no HTTP requests were made at all, since we\n # couldn't figure out which arguments to pass into\n # get_fulfillment_link.\n assert [] == http.requests\n\n # If the final attempt to hit the return URL doesn't result\n # in a 200 status code, perform_early_return has no effect.\n http.responses.append(\n MockRequestsResponse(\n 302, dict(location=\"http://fulfill-this-book/?or=return-early\")\n )\n )\n http.responses.append(MockRequestsResponse(401, content=\"Unauthorized!\"))\n success = overdrive.perform_early_return(patron, pin, loan, http.do_get)\n assert False == success\n\n def test_extract_early_return_url(self):\n m = OverdriveAPI._extract_early_return_url\n assert None == m(\"http://no-early-return/\")\n assert None == m(\"\")\n assert None == m(None)\n\n # This is based on a real Overdrive early return URL.\n has_early_return = \"https://openepub-gk.cdn.overdrive.com/OpenEPUBStore1/1577-1/%7B5880F6D0-48AC-44DE-8BF1-FD1CE62E97A8%7DFzr418.epub?e=1518753718&loanExpirationDate=2018-03-01T17%3a12%3a33Z&loanEarlyReturnUrl=https%3a%2f%2fnotifications-ofs.contentreserve.com%2fEarlyReturn%2fnypl%2f037-1374147-00279%2f5480F6E1-48F3-00DE-96C1-FD3CE32D94FD-312%3fh%3dVgvxBQHdQxtsbgb43AH6%252bEmpni9LoffkPczNiUz7%252b10%253d&sourceId=nypl&h=j7nGk7qxE71X2ZcdLw%2bqa04jqEw%3d\"\n assert (\n \"https://notifications-ofs.contentreserve.com/EarlyReturn/nypl/037-1374147-00279/5480F6E1-48F3-00DE-96C1-FD3CE32D94FD-312?h=VgvxBQHdQxtsbgb43AH6%2bEmpni9LoffkPczNiUz7%2b10%3d\"\n == m(has_early_return)\n )\n\n def test_place_hold_raises_exception_if_patron_over_hold_limit(self):\n 
over_hold_limit = self.error_message(\n \"PatronExceededHoldLimit\",\n \"Patron cannot place any more holds, already has maximum holds placed.\",\n )\n\n edition, pool = self._edition(\n identifier_type=Identifier.OVERDRIVE_ID,\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n )\n self.api.queue_response(400, content=over_hold_limit)\n pytest.raises(\n PatronHoldLimitReached,\n self.api.place_hold,\n self._patron(),\n \"pin\",\n pool,\n notification_email_address=\"<EMAIL>\",\n )\n\n def test_place_hold_looks_up_notification_address(self):\n edition, pool = self._edition(\n identifier_type=Identifier.OVERDRIVE_ID,\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n )\n\n # The first request we make will be to get patron info,\n # so that we know that the most recent email address used\n # to put a book on hold is <EMAIL>.\n ignore, patron_with_email = self.sample_json(\"patron_info.json\")\n\n # The second request we make will be to put a book on hold,\n # and when we do so we will ask for the notification to be\n # sent to <EMAIL>.\n ignore, successful_hold = self.sample_json(\"successful_hold.json\")\n\n self.api.queue_response(200, content=patron_with_email)\n self.api.queue_response(200, content=successful_hold)\n with temp_config() as config:\n config[\"default_notification_email_address\"] = \"<EMAIL>\"\n hold = self.api.place_hold(\n self._patron(), \"pin\", pool, notification_email_address=None\n )\n\n # The book was placed on hold.\n assert 1 == hold.hold_position\n assert pool.identifier.identifier == hold.identifier\n\n # And when we placed it on hold, we passed in <EMAIL>\n # as the email address -- not <EMAIL>.\n url, positional_args, kwargs = self.api.requests[-1]\n headers, body = positional_args\n assert '{\"name\": \"emailAddress\", \"value\": \"<EMAIL>\"}' in body\n\n def test_fulfill_returns_fulfillmentinfo_if_returned_by_get_fulfillment_link(self):\n # If get_fulfillment_link returns a FulfillmentInfo, it is returned\n # immediately and the rest of fulfill() does not run.\n\n fulfillment = FulfillmentInfo(self.collection, *[None] * 7)\n\n class MockAPI(OverdriveAPI):\n def get_fulfillment_link(*args, **kwargs):\n return fulfillment\n\n # Since most of the data is not provided, if fulfill() tried\n # to actually run to completion, it would crash.\n edition, pool = self._edition(with_license_pool=True)\n api = MockAPI(self._db, self.collection)\n result = api.fulfill(None, None, pool, None)\n assert fulfillment == result\n\n def test_fulfill_raises_exception_and_updates_formats_for_outdated_format(self):\n edition, pool = self._edition(\n identifier_type=Identifier.OVERDRIVE_ID,\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n )\n\n # This pool has a format that's no longer available from overdrive.\n pool.set_delivery_mechanism(\n Representation.PDF_MEDIA_TYPE,\n DeliveryMechanism.ADOBE_DRM,\n RightsStatus.IN_COPYRIGHT,\n None,\n )\n\n ignore, loan = self.sample_json(\"single_loan.json\")\n\n ignore, lock_in_format_not_available = self.sample_json(\n \"lock_in_format_not_available.json\"\n )\n\n # We will get the loan, try to lock in the format, and fail.\n self.api.queue_response(200, content=loan)\n self.api.queue_response(400, content=lock_in_format_not_available)\n\n # Trying to get a fulfillment link raises an exception.\n pytest.raises(\n FormatNotAvailable,\n self.api.get_fulfillment_link,\n self._patron(),\n \"pin\",\n pool.identifier.identifier,\n \"ebook-epub-adobe\",\n )\n\n # Fulfill will also update the 
formats.\n ignore, bibliographic = self.sample_json(\"bibliographic_information.json\")\n\n # To avoid a mismatch, make it look like the information is\n # for the correct Identifier.\n bibliographic[\"id\"] = pool.identifier.identifier\n\n # If we have the LicensePool available (as opposed to just the\n # identifier), we will get the loan, try to lock in the\n # format, fail, and then update the bibliographic information.\n self.api.queue_response(200, content=loan)\n self.api.queue_response(400, content=lock_in_format_not_available)\n self.api.queue_response(200, content=bibliographic)\n\n pytest.raises(\n FormatNotAvailable,\n self.api.fulfill,\n self._patron(),\n \"pin\",\n pool,\n \"ebook-epub-adobe\",\n )\n\n # The delivery mechanisms have been updated.\n assert 4 == len(pool.delivery_mechanisms)\n assert set(\n [\n MediaTypes.EPUB_MEDIA_TYPE,\n DeliveryMechanism.KINDLE_CONTENT_TYPE,\n DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE,\n MediaTypes.OVERDRIVE_EBOOK_MANIFEST_MEDIA_TYPE,\n ]\n ) == set(\n [lpdm.delivery_mechanism.content_type for lpdm in pool.delivery_mechanisms]\n )\n assert set(\n [\n DeliveryMechanism.ADOBE_DRM,\n DeliveryMechanism.KINDLE_DRM,\n DeliveryMechanism.LIBBY_DRM,\n DeliveryMechanism.STREAMING_DRM,\n ]\n ) == set(\n [lpdm.delivery_mechanism.drm_scheme for lpdm in pool.delivery_mechanisms]\n )\n\n def test_get_fulfillment_link_from_download_link(self):\n patron = self._patron()\n\n ignore, streaming_fulfill_link = self.sample_json(\n \"streaming_fulfill_link_response.json\"\n )\n\n self.api.queue_response(200, content=streaming_fulfill_link)\n\n href, type = self.api.get_fulfillment_link_from_download_link(\n patron, \"1234\", \"http://download-link\", fulfill_url=\"http://fulfill\"\n )\n assert (\n \"https://fulfill.contentreserve.com/PerfectLife9780345530967.epub-sample.overdrive.com?RetailerID=nypl&Expires=1469825647&Token=<PASSWORD>&Signature=asl67/G154KeeUsL1mHPwEbZfgc=\"\n == href\n )\n assert \"text/html\" == type\n\n def test_get_fulfillment_link_returns_fulfillmentinfo_for_manifest_format(self):\n # When the format requested would result in a link to a\n # manifest file, the manifest link is returned as-is (wrapped\n # in an OverdriveFulfillmentInfo) rather than being retrieved\n # and processed.\n\n # To keep things simple, our mock API will always return the same\n # fulfillment link.\n loan_info = {\"isFormatLockedIn\": False}\n\n class MockAPI(MockOverdriveAPI):\n def get_loan(self, patron, pin, overdrive_id):\n self.get_loan_called_with = (patron, pin, overdrive_id)\n return loan_info\n\n def get_download_link(self, loan, format_type, error_url):\n self.get_download_link_called_with = (loan, format_type, error_url)\n return \"http://fulfillment-link/\"\n\n def get_fulfillment_link_from_download_link(self, *args, **kwargs):\n # We want to verify that this method is never called.\n raise Exception(\"explode!\")\n\n api = MockAPI(self._db, self.collection)\n api.queue_response(200, content=json.dumps({\"some\": \"data\"}))\n\n # Randomly choose one of the formats that must be fulfilled as\n # a link to a manifest.\n overdrive_format = random.choice(list(OverdriveAPI.MANIFEST_INTERNAL_FORMATS))\n\n # Get the fulfillment link.\n patron = self._patron()\n fulfillmentinfo = api.get_fulfillment_link(\n patron,\n \"1234\",\n \"http://download-link\",\n overdrive_format,\n )\n assert isinstance(fulfillmentinfo, OverdriveManifestFulfillmentInfo)\n\n # Before looking at the OverdriveManifestFulfillmentInfo,\n # let's see how we got there.\n\n # First, our 
mocked get_loan() was called.\n assert (patron, \"1234\", \"http://download-link\") == api.get_loan_called_with\n\n # It returned a dictionary that contained no information\n # except isFormatLockedIn: false.\n\n # Since the manifest formats do not lock the loan, this\n # skipped most of the code in get_fulfillment_link, and the\n # loan info was passed into our mocked get_download_link.\n\n assert (\n loan_info,\n overdrive_format,\n api.DEFAULT_ERROR_URL,\n ) == api.get_download_link_called_with\n\n # Since the manifest formats cannot be retrieved by the\n # circulation manager, the result of get_download_link was\n # wrapped in an OverdriveManifestFulfillmentInfo and returned.\n # get_fulfillment_link_from_download_link was never called.\n assert \"http://fulfillment-link/\" == fulfillmentinfo.content_link\n assert None == fulfillmentinfo.content_type\n\n def test_update_formats(self):\n # Create a LicensePool with an inaccurate delivery mechanism\n # and the wrong medium.\n edition, pool = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n identifier_type=Identifier.OVERDRIVE_ID,\n with_license_pool=True,\n )\n edition.medium = Edition.PERIODICAL_MEDIUM\n\n # Add the bad delivery mechanism.\n pool.set_delivery_mechanism(\n Representation.PDF_MEDIA_TYPE,\n DeliveryMechanism.ADOBE_DRM,\n RightsStatus.IN_COPYRIGHT,\n None,\n )\n\n # Prepare the bibliographic information.\n ignore, bibliographic = self.sample_json(\"bibliographic_information.json\")\n\n # To avoid a mismatch, make it look like the information is\n # for the new pool's Identifier.\n bibliographic[\"id\"] = pool.identifier.identifier\n\n self.api.queue_response(200, content=bibliographic)\n\n self.api.update_formats(pool)\n\n # The delivery mechanisms have been updated.\n assert 4 == len(pool.delivery_mechanisms)\n assert set(\n [\n MediaTypes.EPUB_MEDIA_TYPE,\n DeliveryMechanism.KINDLE_CONTENT_TYPE,\n DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE,\n MediaTypes.OVERDRIVE_EBOOK_MANIFEST_MEDIA_TYPE,\n ]\n ) == set(\n [lpdm.delivery_mechanism.content_type for lpdm in pool.delivery_mechanisms]\n )\n assert set(\n [\n DeliveryMechanism.ADOBE_DRM,\n DeliveryMechanism.KINDLE_DRM,\n DeliveryMechanism.LIBBY_DRM,\n DeliveryMechanism.STREAMING_DRM,\n ]\n ) == set(\n [lpdm.delivery_mechanism.drm_scheme for lpdm in pool.delivery_mechanisms]\n )\n\n # The Edition's medium has been corrected.\n assert Edition.BOOK_MEDIUM == edition.medium\n\n def test_update_availability(self):\n # Test the Overdrive implementation of the update_availability\n # method defined by the CirculationAPI interface.\n\n # Create a LicensePool that needs updating.\n edition, pool = self._edition(\n identifier_type=Identifier.OVERDRIVE_ID,\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n collection=self.collection,\n )\n\n # We have never checked the circulation information for this\n # LicensePool. 
Put some random junk in the pool to make sure\n # it gets replaced.\n pool.licenses_owned = 10\n pool.licenses_available = 4\n pool.patrons_in_hold_queue = 3\n assert None == pool.last_checked\n\n # Prepare availability information.\n ignore, availability = self.sample_json(\n \"overdrive_availability_information.json\"\n )\n # Since this is the first time we've seen this book,\n # we'll also be updating the bibliographic information.\n ignore, bibliographic = self.sample_json(\"bibliographic_information.json\")\n\n # To avoid a mismatch, make it look like the information is\n # for the new pool's Identifier.\n availability[\"id\"] = pool.identifier.identifier\n bibliographic[\"id\"] = pool.identifier.identifier\n\n self.api.queue_response(200, content=availability)\n self.api.queue_response(200, content=bibliographic)\n\n self.api.update_availability(pool)\n\n # The availability information has been updated, as has the\n # date the availability information was last checked.\n assert 5 == pool.licenses_owned\n assert 1 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n assert pool.last_checked is not None\n\n def test_circulation_lookup(self):\n \"\"\"Test the method that actually looks up Overdrive circulation\n information.\n \"\"\"\n self.api.queue_response(200, content=\"foo\")\n\n # If passed an identifier, we'll use the endpoint() method to\n # construct a v2 availability URL and make a request to\n # it.\n book, (status_code, headers, content) = self.api.circulation_lookup(\n \"an-identifier\"\n )\n assert dict(id=\"an-identifier\") == book\n assert 200 == status_code\n assert b\"foo\" == content\n\n request_url, ignore1, ignore2 = self.api.requests.pop()\n expect_url = self.api.endpoint(\n self.api.AVAILABILITY_ENDPOINT,\n collection_token=self.api.collection_token,\n product_id=\"an-identifier\",\n )\n assert request_url == expect_url\n assert \"/v2/collections\" in request_url\n\n # If passed the result of an API call that includes an\n # availability link, we'll clean up the URL in the link and\n # use it to get our availability data.\n self.api.queue_response(200, content=\"foo\")\n v1 = \"https://qa.api.overdrive.com/v1/collections/abcde/products/12345/availability\"\n v2 = \"https://qa.api.overdrive.com/v2/collections/abcde/products/12345/availability\"\n previous_result = dict(availability_link=v1)\n book, (status_code, headers, content) = self.api.circulation_lookup(\n previous_result\n )\n assert previous_result == book\n assert 200 == status_code\n assert b\"foo\" == content\n request_url, ignore1, ignore2 = self.api.requests.pop()\n\n # The v1 URL was converted to a v2 url.\n assert v2 == request_url\n\n def test_update_licensepool_error(self):\n # Create an identifier.\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n ignore, availability = self.sample_json(\n \"overdrive_availability_information.json\"\n )\n self.api.queue_response(500, content=\"An error occured.\")\n book = dict(id=identifier.identifier, availability_link=self._url)\n pool, was_new, changed = self.api.update_licensepool(book)\n assert None == pool\n\n def test_update_licensepool_not_found(self):\n # If the Overdrive API says a book is not found in the\n # collection, that's treated as useful information, not an error.\n # Create an identifier.\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n ignore, not_found = self.sample_json(\"overdrive_availability_not_found.json\")\n\n # Queue the 'not found' response twice -- once for the 
circulation\n # lookup and once for the metadata lookup.\n self.api.queue_response(404, content=not_found)\n self.api.queue_response(404, content=not_found)\n\n book = dict(id=identifier.identifier, availability_link=self._url)\n pool, was_new, changed = self.api.update_licensepool(book)\n assert 0 == pool.licenses_owned\n assert 0 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n\n def test_update_licensepool_provides_bibliographic_coverage(self):\n # Create an identifier.\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n\n # Prepare bibliographic and availability information\n # for this identifier.\n ignore, availability = self.sample_json(\n \"overdrive_availability_information.json\"\n )\n ignore, bibliographic = self.sample_json(\"bibliographic_information.json\")\n\n # To avoid a mismatch, make it look like the information is\n # for the newly created Identifier.\n availability[\"id\"] = identifier.identifier\n bibliographic[\"id\"] = identifier.identifier\n\n self.api.queue_response(200, content=availability)\n self.api.queue_response(200, content=bibliographic)\n\n # Now we're ready. When we call update_licensepool, the\n # OverdriveAPI will retrieve the availability information,\n # then the bibliographic information. It will then trigger the\n # OverdriveBibliographicCoverageProvider, which will\n # create an Edition and a presentation-ready Work.\n pool, was_new, changed = self.api.update_licensepool(identifier.identifier)\n assert True == was_new\n assert availability[\"copiesOwned\"] == pool.licenses_owned\n\n edition = pool.presentation_edition\n assert \"Ancillary Justice\" == edition.title\n\n assert True == pool.work.presentation_ready\n assert pool.work.cover_thumbnail_url.startswith(\n \"http://images.contentreserve.com/\"\n )\n\n # The book has been run through the bibliographic coverage\n # provider.\n coverage = [\n x\n for x in identifier.coverage_records\n if x.operation is None and x.data_source.name == DataSource.OVERDRIVE\n ]\n assert 1 == len(coverage)\n\n # Call update_licensepool on an identifier that is missing a work and make\n # sure that it provides bibliographic coverage in that case.\n self._db.delete(pool.work)\n self._db.commit()\n pool, is_new = LicensePool.for_foreign_id(\n self._db,\n DataSource.OVERDRIVE,\n Identifier.OVERDRIVE_ID,\n identifier.identifier,\n collection=self.collection,\n )\n assert not pool.work\n self.api.queue_response(200, content=availability)\n self.api.queue_response(200, content=bibliographic)\n pool, was_new, changed = self.api.update_licensepool(identifier.identifier)\n assert False == was_new\n assert True == pool.work.presentation_ready\n\n def test_update_new_licensepool(self):\n data, raw = self.sample_json(\"overdrive_availability_information.json\")\n\n # Create an identifier\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n\n # Make it look like the availability information is for the\n # newly created Identifier.\n raw[\"reserveId\"] = identifier.identifier\n\n pool, was_new = LicensePool.for_foreign_id(\n self._db,\n DataSource.OVERDRIVE,\n identifier.type,\n identifier.identifier,\n collection=self.collection,\n )\n\n pool, was_new, changed = self.api.update_licensepool_with_book_info(\n raw, pool, was_new\n )\n assert True == was_new\n assert True == changed\n\n self._db.commit()\n\n assert raw[\"copiesOwned\"] == pool.licenses_owned\n assert raw[\"copiesAvailable\"] == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 
raw[\"numberOfHolds\"] == pool.patrons_in_hold_queue\n\n def test_update_existing_licensepool(self):\n data, raw = self.sample_json(\"overdrive_availability_information.json\")\n\n # Create a LicensePool.\n wr, pool = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n identifier_type=Identifier.OVERDRIVE_ID,\n with_license_pool=True,\n )\n\n # Make it look like the availability information is for the\n # newly created LicensePool.\n raw[\"id\"] = pool.identifier.identifier\n\n wr.title = \"The real title.\"\n assert 1 == pool.licenses_owned\n assert 1 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n\n p2, was_new, changed = self.api.update_licensepool_with_book_info(\n raw, pool, False\n )\n assert False == was_new\n assert True == changed\n assert p2 == pool\n # The title didn't change to that title given in the availability\n # information, because we already set a title for that work.\n assert \"The real title.\" == wr.title\n assert raw[\"copiesOwned\"] == pool.licenses_owned\n assert raw[\"copiesAvailable\"] == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert raw[\"numberOfHolds\"] == pool.patrons_in_hold_queue\n\n def test_update_new_licensepool_when_same_book_has_pool_in_different_collection(\n self,\n ):\n old_edition, old_pool = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n identifier_type=Identifier.OVERDRIVE_ID,\n with_license_pool=True,\n )\n old_pool.calculate_work()\n collection = self._collection()\n\n data, raw = self.sample_json(\"overdrive_availability_information.json\")\n\n # Make it look like the availability information is for the\n # old pool's Identifier.\n identifier = old_pool.identifier\n raw[\"id\"] = identifier.identifier\n\n new_pool, was_new = LicensePool.for_foreign_id(\n self._db,\n DataSource.OVERDRIVE,\n identifier.type,\n identifier.identifier,\n collection=collection,\n )\n # The new pool doesn't have a presentation edition yet,\n # but it will be updated to share the old pool's edition.\n assert None == new_pool.presentation_edition\n\n new_pool, was_new, changed = self.api.update_licensepool_with_book_info(\n raw, new_pool, was_new\n )\n assert True == was_new\n assert True == changed\n assert old_edition == new_pool.presentation_edition\n assert old_pool.work == new_pool.work\n\n def test_update_licensepool_with_holds(self):\n data, raw = self.sample_json(\"overdrive_availability_information_holds.json\")\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n raw[\"id\"] = identifier.identifier\n\n license_pool, is_new = LicensePool.for_foreign_id(\n self._db,\n DataSource.OVERDRIVE,\n identifier.type,\n identifier.identifier,\n collection=self._default_collection,\n )\n pool, was_new, changed = self.api.update_licensepool_with_book_info(\n raw, license_pool, is_new\n )\n assert 10 == pool.patrons_in_hold_queue\n assert True == changed\n\n def test_refresh_patron_access_token(self):\n \"\"\"Verify that patron information is included in the request\n when refreshing a patron access token.\n \"\"\"\n patron = self._patron()\n patron.authorization_identifier = \"barcode\"\n credential = self._credential(patron=patron)\n\n data, raw = self.sample_json(\"patron_token.json\")\n self.api.queue_response(200, content=raw)\n\n # Try to refresh the patron access token with a PIN, and\n # then without a PIN.\n self.api.refresh_patron_access_token(credential, patron, \"a pin\")\n\n self.api.refresh_patron_access_token(credential, patron, None)\n\n 
# Verify that the requests that were made correspond to what\n # Overdrive is expecting.\n with_pin, without_pin = self.api.access_token_requests\n url, payload, headers, kwargs = with_pin\n assert \"https://oauth-patron.overdrive.com/patrontoken\" == url\n assert \"barcode\" == payload[\"username\"]\n expect_scope = \"websiteid:%s authorizationname:%s\" % (\n self.api.website_id.decode(\"utf-8\"),\n self.api.ils_name(patron.library),\n )\n assert expect_scope == payload[\"scope\"]\n assert \"a pin\" == payload[\"password\"]\n assert not \"password_required\" in payload\n\n url, payload, headers, kwargs = without_pin\n assert \"https://oauth-patron.overdrive.com/patrontoken\" == url\n assert \"barcode\" == payload[\"username\"]\n assert expect_scope == payload[\"scope\"]\n assert \"false\" == payload[\"password_required\"]\n assert \"[ignore]\" == payload[\"password\"]\n\n\nclass TestOverdriveAPICredentials(OverdriveAPITest):\n def test_patron_correct_credentials_for_multiple_overdrive_collections(self):\n # Verify that the correct credential will be used\n # when a library has more than one OverDrive collection.\n\n def _optional_value(self, obj, key):\n return obj.get(key, \"none\")\n\n def _make_token(scope, username, password, grant_type=\"password\"):\n return \"%s|%s|%s|%s\" % (grant_type, scope, username, password)\n\n class MockAPI(MockOverdriveAPI):\n def token_post(self, url, payload, headers={}, **kwargs):\n url = self.endpoint(url)\n self.access_token_requests.append((url, payload, headers, kwargs))\n token = _make_token(\n _optional_value(self, payload, \"scope\"),\n _optional_value(self, payload, \"username\"),\n _optional_value(self, payload, \"password\"),\n grant_type=_optional_value(self, payload, \"grant_type\"),\n )\n response = self.mock_access_token_response(token)\n\n from core.util.http import HTTP\n\n return HTTP._process_response(url, response, **kwargs)\n\n library = self._default_library\n patron = self._patron(library=library)\n patron.authorization_identifier = \"patron_barcode\"\n pin = \"patron_pin\"\n\n # clear out any collections added before we add ours\n library.collections = []\n\n # Distinct credentials for the two OverDrive collections in which our\n # library has membership.\n library_collection_properties = [\n dict(\n library=library,\n name=\"Test OD Collection 1\",\n client_key=\"client_key_1\",\n client_secret=\"client_secret_1\",\n library_id=\"lib_id_1\",\n website_id=\"ws_id_1\",\n ils_name=\"lib1_coll1_ils\",\n ),\n dict(\n library=library,\n name=\"Test OD Collection 2\",\n client_key=\"client_key_2\",\n client_secret=\"client_secret_2\",\n library_id=\"lib_id_2\",\n website_id=\"ws_id_2\",\n ils_name=\"lib1_coll2_ils\",\n ),\n ]\n\n # These are the credentials we'll expect for each of our collections.\n expected_credentials = {\n props[\"name\"]: _make_token(\n \"websiteid:%s authorizationname:%s\"\n % (props[\"website_id\"], props[\"ils_name\"]),\n patron.authorization_identifier,\n pin,\n )\n for props in library_collection_properties\n }\n\n # Add the collections.\n collections = [\n MockAPI.mock_collection(self._db, **props)\n for props in library_collection_properties\n ]\n\n circulation = CirculationAPI(\n self._db, library, api_map={ExternalIntegration.OVERDRIVE: MockAPI}\n )\n od_apis = {\n api.collection.name: api\n for api in list(circulation.api_for_collection.values())\n }\n\n # Ensure that we have the correct number of OverDrive collections.\n assert len(library_collection_properties) == len(od_apis)\n\n # Verify that the 
expected credentials match what we got.\n for name in list(expected_credentials.keys()) + list(\n reversed(list(expected_credentials.keys()))\n ):\n credential = od_apis[name].get_patron_credential(patron, pin)\n assert expected_credentials[name] == credential.credential\n\n\nclass TestExtractData(OverdriveAPITest):\n def test_get_download_link(self):\n data, json = self.sample_json(\"checkout_response_locked_in_format.json\")\n url = MockOverdriveAPI.get_download_link(\n json, \"ebook-epub-adobe\", \"http://foo.com/\"\n )\n assert (\n \"http://patron.api.overdrive.com/v1/patrons/me/checkouts/76C1B7D0-17F4-4C05-8397-C66C17411584/formats/ebook-epub-adobe/downloadlink?errorpageurl=http://foo.com/\"\n == url\n )\n\n pytest.raises(\n NoAcceptableFormat,\n MockOverdriveAPI.get_download_link,\n json,\n \"no-such-format\",\n \"http://foo.com/\",\n )\n\n def test_get_download_link_raises_exception_if_loan_fulfilled_on_incompatible_platform(\n self,\n ):\n data, json = self.sample_json(\"checkout_response_book_fulfilled_on_kindle.json\")\n pytest.raises(\n FulfilledOnIncompatiblePlatform,\n MockOverdriveAPI.get_download_link,\n json,\n \"ebook-epub-adobe\",\n \"http://foo.com/\",\n )\n\n def test_get_download_link_for_manifest_format(self):\n # If you ask for the download link for an 'x-manifest' format,\n # it's treated as a variant of the 'x' format.\n data, json = self.sample_json(\"checkout_response_book_fulfilled_on_kindle.json\")\n\n # This is part of the URL from `json` that we expect\n # get_download_link to use as a base.\n base_url = \"http://patron.api.overdrive.com/v1/patrons/me/checkouts/98EA8135-52C0-4480-9C0E-1D0779670D4A/formats/ebook-overdrive/downloadlink\"\n\n # First, let's ask for the streaming format.\n link = MockOverdriveAPI.get_download_link(\n json, \"ebook-overdrive\", \"http://foo.com/\"\n )\n\n # The base URL is returned, with {errorpageurl} filled in and\n # {odreadauthurl} left for other code to fill in.\n assert (\n base_url + \"?errorpageurl=http://foo.com/&odreadauthurl={odreadauthurl}\"\n == link\n )\n\n # Now let's ask for the manifest format.\n link = MockOverdriveAPI.get_download_link(\n json, \"ebook-overdrive-manifest\", \"http://bar.com/\"\n )\n\n # The {errorpageurl} and {odreadauthurl} parameters\n # have been removed, and contentfile=true has been appended.\n assert base_url + \"?contentfile=true\" == link\n\n def test_extract_download_link(self):\n # Verify that extract_download_link can or cannot find a\n # download link for a given format subdocument.\n\n class Mock(OverdriveAPI):\n called_with = None\n\n @classmethod\n def make_direct_download_link(cls, download_link):\n cls.called_with = download_link\n return \"http://manifest/\"\n\n m = Mock.extract_download_link\n error_url = \"http://error/\"\n\n # Here we don't even know the name of the format.\n empty = dict()\n with pytest.raises(IOError) as excinfo:\n m(empty, error_url)\n assert \"No linkTemplates for format (unknown)\" in str(excinfo.value)\n\n # Here we know the name, but there are no link templates.\n no_templates = dict(formatType=\"someformat\")\n with pytest.raises(IOError) as excinfo:\n m(no_templates, error_url)\n assert \"No linkTemplates for format someformat\" in str(excinfo.value)\n\n # Here there's a link template structure, but no downloadLink\n # inside.\n no_download_link = dict(formatType=\"someformat\", linkTemplates=dict())\n with pytest.raises(IOError) as excinfo:\n m(no_download_link, error_url)\n assert \"No downloadLink for format someformat\" in str(excinfo.value)\n\n 
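        # For reference, a format subdocument that extract_download_link can
        # handle is assumed to nest the link like the `working` dict defined
        # further down in this test:
        #
        #   {"formatType": "someformat",
        #    "linkTemplates": {
        #        "downloadLink": {"href": "http://download/?errorpageurl={errorpageurl}"}
        #    }}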
# Here there's a downloadLink structure, but no href inside.\n href_is_missing = dict(\n formatType=\"someformat\", linkTemplates=dict(downloadLink=dict())\n )\n with pytest.raises(IOError) as excinfo:\n m(href_is_missing, error_url)\n assert \"No downloadLink href for format someformat\" in str(excinfo.value)\n\n # Now we finally get to the cases where there is an actual\n # download link. The behavior is different based on whether\n # or not we want to return a link to the manifest file.\n\n working = dict(\n formatType=\"someformat\",\n linkTemplates=dict(\n downloadLink=dict(href=\"http://download/?errorpageurl={errorpageurl}\")\n ),\n )\n\n # If we don't want a manifest, make_direct_download_link is\n # not called.\n do_not_fetch_manifest = m(working, error_url, fetch_manifest=False)\n assert None == Mock.called_with\n\n # The errorpageurl template is filled in.\n assert \"http://download/?errorpageurl=http://error/\" == do_not_fetch_manifest\n\n # If we do want a manifest, make_direct_download_link is called\n # without errorpageurl being affected.\n do_fetch_manifest = m(working, error_url, fetch_manifest=True)\n assert \"http://download/?errorpageurl={errorpageurl}\" == Mock.called_with\n assert \"http://manifest/\" == do_fetch_manifest\n\n def test_make_direct_download_link(self):\n # Verify that make_direct_download_link handles various more\n # or less weird URLs that the Overdrive might or might not\n # serve.\n base = \"http://overdrive/downloadlink\"\n m = OverdriveAPI.make_direct_download_link\n assert base + \"?contentfile=true\" == m(base)\n assert base + \"?contentfile=true\" == m(base + \"?odreadauthurl={odreadauthurl}\")\n assert base + \"?other=other&contentfile=true\" == m(\n base + \"?odreadauthurl={odreadauthurl}&other=other\"\n )\n\n def test_extract_data_from_checkout_resource(self):\n data, json = self.sample_json(\"checkout_response_locked_in_format.json\")\n expires, url = MockOverdriveAPI.extract_data_from_checkout_response(\n json, \"ebook-epub-adobe\", \"http://foo.com/\"\n )\n assert 2013 == expires.year\n assert 10 == expires.month\n assert 4 == expires.day\n assert (\n \"http://patron.api.overdrive.com/v1/patrons/me/checkouts/76C1B7D0-17F4-4C05-8397-C66C17411584/formats/ebook-epub-adobe/downloadlink?errorpageurl=http://foo.com/\"\n == url\n )\n\n def test_process_checkout_data(self):\n data, json = self.sample_json(\n \"shelf_with_book_already_fulfilled_on_kindle.json\"\n )\n [on_kindle, not_on_kindle] = json[\"checkouts\"]\n\n # The book already fulfilled on Kindle doesn't get turned into\n # LoanInfo at all.\n assert None == MockOverdriveAPI.process_checkout_data(\n on_kindle, self.collection\n )\n\n # The book not yet fulfilled does show up as a LoanInfo.\n loan_info = MockOverdriveAPI.process_checkout_data(\n not_on_kindle, self.collection\n )\n assert \"2fadd2ac-a8ec-4938-a369-4c3260e8922b\" == loan_info.identifier\n\n # Since there are two usable formats (Adobe EPUB and Adobe\n # PDF), the LoanInfo is not locked to any particular format.\n assert None == loan_info.locked_to\n\n # A book that's on loan and locked to a specific format has a\n # DeliveryMechanismInfo associated with that format.\n data, format_locked_in = self.sample_json(\n \"checkout_response_locked_in_format.json\"\n )\n loan_info = MockOverdriveAPI.process_checkout_data(\n format_locked_in, self.collection\n )\n delivery = loan_info.locked_to\n assert Representation.EPUB_MEDIA_TYPE == delivery.content_type\n assert DeliveryMechanism.ADOBE_DRM == delivery.drm_scheme\n\n # This book 
is on loan and the choice between Kindle and Adobe\n # EPUB has not yet been made, but as far as we're concerned,\n # Adobe EPUB is the only *usable* format, so it's effectively\n # locked.\n data, no_format_locked_in = self.sample_json(\n \"checkout_response_no_format_locked_in.json\"\n )\n loan_info = MockOverdriveAPI.process_checkout_data(\n no_format_locked_in, self.collection\n )\n assert loan_info != None\n delivery = loan_info.locked_to\n assert Representation.EPUB_MEDIA_TYPE == delivery.content_type\n assert DeliveryMechanism.ADOBE_DRM == delivery.drm_scheme\n\n # TODO: In the future both of these tests should return a\n # LoanInfo with appropriate FulfillmentInfo. The calling code\n # would then decide whether or not to show the loan.\n\n\nclass TestSyncBookshelf(OverdriveAPITest):\n def test_sync_bookshelf_creates_local_loans(self):\n loans_data, json_loans = self.sample_json(\n \"shelf_with_some_checked_out_books.json\"\n )\n holds_data, json_holds = self.sample_json(\"no_holds.json\")\n\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n\n patron = self._patron()\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n\n # All four loans in the sample data were created.\n assert 4 == len(loans)\n assert loans.sort() == patron.loans.sort()\n\n # We have created previously unknown LicensePools and\n # Identifiers.\n identifiers = [loan.license_pool.identifier.identifier for loan in loans]\n assert (\n sorted(\n [\n \"a5a3d737-34d4-4d69-aad8-eba4e46019a3\",\n \"99409f99-45a5-4238-9e10-98d1435cde04\",\n \"993e4b33-823c-40af-8f61-cac54e1cba5d\",\n \"a2ec6f3a-ebfe-4c95-9638-2cb13be8de5a\",\n ]\n )\n == sorted(identifiers)\n )\n\n # We have recorded a new DeliveryMechanism associated with\n # each loan.\n mechanisms = []\n for loan in loans:\n if loan.fulfillment:\n mechanism = loan.fulfillment.delivery_mechanism\n mechanisms.append((mechanism.content_type, mechanism.drm_scheme))\n assert [\n (Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.NO_DRM),\n (Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),\n (Representation.PDF_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),\n (Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),\n ] == mechanisms\n\n # There are no holds.\n assert [] == holds\n\n # Running the sync again leaves all four loans in place.\n patron.last_loan_activity_sync = None\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n assert 4 == len(loans)\n assert loans.sort() == patron.loans.sort()\n\n def test_sync_bookshelf_removes_loans_not_present_on_remote(self):\n loans_data, json_loans = self.sample_json(\n \"shelf_with_some_checked_out_books.json\"\n )\n holds_data, json_holds = self.sample_json(\"no_holds.json\")\n\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n\n # Create a loan not present in the sample data.\n patron = self._patron()\n overdrive_edition, new = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n collection=self.collection,\n )\n [pool] = overdrive_edition.license_pools\n overdrive_loan, new = pool.loan_to(patron)\n yesterday = utc_now() - timedelta(days=1)\n overdrive_loan.start = yesterday\n\n # Sync with Overdrive, and the loan not present in the sample\n # data is removed.\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n\n assert 
4 == len(loans)\n assert set(loans) == set(patron.loans)\n assert overdrive_loan not in patron.loans\n\n def test_sync_bookshelf_ignores_loans_from_other_sources(self):\n patron = self._patron()\n gutenberg, new = self._edition(\n data_source_name=DataSource.GUTENBERG, with_license_pool=True\n )\n [pool] = gutenberg.license_pools\n gutenberg_loan, new = pool.loan_to(patron)\n loans_data, json_loans = self.sample_json(\n \"shelf_with_some_checked_out_books.json\"\n )\n holds_data, json_holds = self.sample_json(\"no_holds.json\")\n\n # Overdrive doesn't know about the Gutenberg loan, but it was\n # not destroyed, because it came from another source.\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n assert 5 == len(patron.loans)\n assert gutenberg_loan in patron.loans\n\n def test_sync_bookshelf_creates_local_holds(self):\n\n loans_data, json_loans = self.sample_json(\"no_loans.json\")\n holds_data, json_holds = self.sample_json(\"holds.json\")\n\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n patron = self._patron()\n\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n # All four loans in the sample data were created.\n assert 4 == len(holds)\n assert sorted(holds) == sorted(patron.holds)\n\n # Running the sync again leaves all four holds in place.\n patron.last_loan_activity_sync = None\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n assert 4 == len(holds)\n assert sorted(holds) == sorted(patron.holds)\n\n def test_sync_bookshelf_removes_holds_not_present_on_remote(self):\n loans_data, json_loans = self.sample_json(\"no_loans.json\")\n holds_data, json_holds = self.sample_json(\"holds.json\")\n\n patron = self._patron()\n overdrive_edition, new = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n collection=self.collection,\n )\n [pool] = overdrive_edition.license_pools\n overdrive_hold, new = pool.on_hold_to(patron)\n\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n\n # The hold not present in the sample data has been removed\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n assert 4 == len(holds)\n assert holds == patron.holds\n assert overdrive_hold not in patron.loans\n\n def test_sync_bookshelf_ignores_holds_from_other_collections(self):\n loans_data, json_loans = self.sample_json(\"no_loans.json\")\n holds_data, json_holds = self.sample_json(\"holds.json\")\n\n patron = self._patron()\n\n # This patron has an Overdrive book on hold, but it derives\n # from an Overdrive Collection that's not managed by\n # self.circulation.\n overdrive, new = self._edition(\n data_source_name=DataSource.OVERDRIVE,\n with_license_pool=True,\n collection=self._collection(),\n )\n [pool] = overdrive.license_pools\n overdrive_hold, new = pool.on_hold_to(patron)\n\n self.api.queue_response(200, content=loans_data)\n self.api.queue_response(200, content=holds_data)\n\n # self.api doesn't know about the hold, but it was not\n # destroyed, because it came from a different collection.\n loans, holds = self.circulation.sync_bookshelf(patron, \"dummy pin\")\n assert 5 == len(patron.holds)\n assert overdrive_hold in patron.holds\n\n\nclass 
TestOverdriveManifestFulfillmentInfo(OverdriveAPITest):\n def test_as_response(self):\n # An OverdriveManifestFulfillmentInfo just links the client\n # directly to the manifest file, bypassing normal FulfillmentInfo\n # processing.\n info = OverdriveManifestFulfillmentInfo(\n self._default_collection,\n \"http://content-link/\",\n \"abcd-efgh\",\n \"scope string\",\n )\n response = info.as_response\n assert 302 == response.status_code\n assert \"\" == response.get_data(as_text=True)\n headers = response.headers\n assert \"text/plain\" == headers[\"Content-Type\"]\n\n # These are the important headers; the location of the manifest file\n # and the scope necessary to initiate Patron Authentication for\n # it.\n assert \"scope string\" == headers[\"X-Overdrive-Scope\"]\n assert \"http://content-link/\" == headers[\"Location\"]\n\n\nclass TestOverdriveCirculationMonitor(OverdriveAPITest):\n def test_run(self):\n # An end-to-end test verifying that this Monitor manages its\n # state across multiple runs.\n #\n # This tests a lot of code that's technically not in Monitor,\n # but when the Monitor API changes, it may require changes to\n # this particular monitor, and it's good to have a test that\n # will fail if that's true.\n class Mock(OverdriveCirculationMonitor):\n def catch_up_from(self, start, cutoff, progress):\n self.catch_up_from_called_with = (start, cutoff, progress)\n\n monitor = Mock(self._db, self.collection)\n\n monitor.run()\n start, cutoff, progress = monitor.catch_up_from_called_with\n now = utc_now()\n\n # The first time this Monitor is called, its 'start time' is\n # the current time, and we ask for an overlap of one minute.\n # This isn't very effective, but we have to start somewhere.\n #\n # (This isn't how the Overdrive collection is initially\n # populated, BTW -- that's NewTitlesOverdriveCollectionMonitor.)\n self.time_eq(start, now - monitor.OVERLAP)\n self.time_eq(cutoff, now)\n timestamp = monitor.timestamp()\n assert start == timestamp.start\n assert cutoff == timestamp.finish\n\n # The second time the Monitor is called, its 'start time'\n # is one minute before the previous cutoff time.\n monitor.run()\n new_start, new_cutoff, new_progress = monitor.catch_up_from_called_with\n now = utc_now()\n assert new_start == cutoff - monitor.OVERLAP\n self.time_eq(new_cutoff, now)\n\n def test_catch_up_from(self):\n # catch_up_from() asks Overdrive about recent changes by\n # calling recently_changed_ids().\n #\n # It mirrors those changes locally by calling\n # update_licensepool().\n #\n # If this is our first time encountering a book, a\n # DISTRIBUTOR_TITLE_ADD analytics event is sent out.\n #\n # The method stops when should_stop() -- called on every book\n # -- returns True.\n class MockAPI(object):\n def __init__(self, *ignore, **kwignore):\n self.licensepools = []\n self.update_licensepool_calls = []\n\n def update_licensepool(self, book_id):\n pool, is_new, is_changed = self.licensepools.pop(0)\n self.update_licensepool_calls.append((book_id, pool))\n return pool, is_new, is_changed\n\n class MockAnalytics(object):\n def __init__(self, _db):\n self._db = _db\n self.events = []\n\n def collect_event(self, *args):\n self.events.append(args)\n\n class MockMonitor(OverdriveCirculationMonitor):\n\n recently_changed_ids_called_with = None\n should_stop_calls = []\n\n def recently_changed_ids(self, start, cutoff):\n self.recently_changed_ids_called_with = (start, cutoff)\n return [1, 2, None, 3, 4]\n\n def should_stop(self, start, book, is_changed):\n # We're going to stop 
after the third valid book,\n # ensuring that we never ask 'Overdrive' for the\n # fourth book.\n self.should_stop_calls.append((start, book, is_changed))\n if book == 3:\n return True\n return False\n\n monitor = MockMonitor(\n self._db, self.collection, api_class=MockAPI, analytics_class=MockAnalytics\n )\n api = monitor.api\n\n # A MockAnalytics object was created and is ready to receive analytics\n # events.\n assert isinstance(monitor.analytics, MockAnalytics)\n assert self._db == monitor.analytics._db\n\n # The 'Overdrive API' is ready to tell us about four books,\n # but only one of them (the first) represents a change from what\n # we already know.\n lp1 = self._licensepool(None)\n lp1.last_checked = utc_now()\n lp2 = self._licensepool(None)\n lp3 = self._licensepool(None)\n lp4 = object()\n api.licensepools.append((lp1, True, True))\n api.licensepools.append((lp2, False, False))\n api.licensepools.append((lp3, False, True))\n api.licensepools.append(lp4)\n\n progress = TimestampData()\n start = object()\n cutoff = object()\n monitor.catch_up_from(start, cutoff, progress)\n\n # The monitor called recently_changed_ids with the start and\n # cutoff times. It returned five 'books', one of which was None --\n # simulating a lack of data from Overdrive.\n assert (start, cutoff) == monitor.recently_changed_ids_called_with\n\n # The monitor ignored the empty book and called\n # update_licensepool on the first three valid 'books'. The\n # mock API delivered the first three LicensePools from the\n # queue.\n assert [(1, lp1), (2, lp2), (3, lp3)] == api.update_licensepool_calls\n\n # After each book was processed, should_stop was called, using\n # the LicensePool, the start date, plus information about\n # whether the LicensePool was changed (or created) during\n # update_licensepool().\n assert [\n (start, 1, True),\n (start, 2, False),\n (start, 3, True),\n ] == monitor.should_stop_calls\n\n # should_stop returned True on the third call, and at that\n # point we gave up.\n\n # The fourth (bogus) LicensePool is still in api.licensepools,\n # because we never asked for it.\n assert [lp4] == api.licensepools\n\n # A single analytics event was sent out, for the first LicensePool,\n # the one that update_licensepool said was new.\n [[library, licensepool, event, last_checked]] = monitor.analytics.events\n\n # The event commemerates the addition of this LicensePool to the\n # collection.\n assert lp1.collection.libraries == [library]\n assert lp1 == licensepool\n assert CirculationEvent.DISTRIBUTOR_TITLE_ADD == event\n assert lp1.last_checked == last_checked\n\n # The incoming TimestampData object was updated with\n # a summary of what happened.\n #\n # We processed four books: 1, 2, None (which was ignored)\n # and 3.\n assert \"Books processed: 4.\" == progress.achievements\n\n\nclass TestNewTitlesOverdriveCollectionMonitor(OverdriveAPITest):\n def test_recently_changed_ids(self):\n class MockAPI(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def all_ids(self):\n return \"all of the ids\"\n\n monitor = NewTitlesOverdriveCollectionMonitor(\n self._db, self.collection, api_class=MockAPI\n )\n assert \"all of the ids\" == monitor.recently_changed_ids(object(), object())\n\n def test_should_stop(self):\n monitor = NewTitlesOverdriveCollectionMonitor(\n self._db, self.collection, api_class=MockOverdriveAPI\n )\n\n m = monitor.should_stop\n\n # If the monitor has never run before, we need to keep going\n # until we run out of books.\n assert False == m(None, object(), object())\n assert 
False == m(monitor.NEVER, object(), object())\n\n # If information is missing or invalid, we assume that we\n # should keep going.\n start = datetime_utc(2018, 1, 1)\n assert False == m(start, {}, object())\n assert False == m(start, {\"date_added\": None}, object())\n assert False == m(start, {\"date_added\": \"Not a date\"}, object())\n\n # Here, we're actually comparing real dates, using the date\n # format found in the Overdrive API. A date that's after the\n # `start` date means we should keep going backwards. A date before\n # the `start` date means we should stop.\n assert False == m(\n start, {\"date_added\": \"2019-07-12T11:06:38.157+01:00\"}, object()\n )\n assert True == m(\n start, {\"date_added\": \"2017-07-12T11:06:38.157-04:00\"}, object()\n )\n\n\nclass TestNewTitlesOverdriveCollectionMonitor(OverdriveAPITest):\n def test_should_stop(self):\n monitor = RecentOverdriveCollectionMonitor(\n self._db, self.collection, api_class=MockOverdriveAPI\n )\n assert 0 == monitor.consecutive_unchanged_books\n m = monitor.should_stop\n\n # This book hasn't been changed, but we're under the limit, so we should\n # keep going.\n assert False == m(object(), object(), False)\n assert 1 == monitor.consecutive_unchanged_books\n\n assert False == m(object(), object(), False)\n assert 2 == monitor.consecutive_unchanged_books\n\n # This book has changed, so our counter gets reset.\n assert False == m(object(), object(), True)\n assert 0 == monitor.consecutive_unchanged_books\n\n # When we're at the limit, and another book comes along that hasn't\n # been changed, _then_ we decide to stop.\n monitor.consecutive_unchanged_books = (\n monitor.MAXIMUM_CONSECUTIVE_UNCHANGED_BOOKS\n )\n assert True == m(object(), object(), False)\n assert (\n monitor.MAXIMUM_CONSECUTIVE_UNCHANGED_BOOKS + 1\n == monitor.consecutive_unchanged_books\n )\n\n\nclass TestOverdriveFormatSweep(OverdriveAPITest):\n def test_process_item(self):\n # Validate the standard CollectionMonitor interface.\n monitor = OverdriveFormatSweep(\n self._db, self.collection, api_class=MockOverdriveAPI\n )\n monitor.api.queue_collection_token()\n # We're not testing that the work actually gets done (that's\n # tested in test_update_formats), only that the monitor\n # implements the expected process_item API without crashing.\n monitor.api.queue_response(404)\n edition, pool = self._edition(with_license_pool=True)\n monitor.process_item(pool.identifier)\n\n def test_process_item_multiple_licence_pools(self):\n # Make sure that we only call update_formats once when an item\n # is part of multiple licensepools.\n\n class MockApi(MockOverdriveAPI):\n update_format_calls = 0\n\n def update_formats(self, licensepool):\n self.update_format_calls += 1\n\n monitor = OverdriveFormatSweep(self._db, self.collection, api_class=MockApi)\n monitor.api.queue_collection_token()\n monitor.api.queue_response(404)\n\n edition = self._edition()\n collection1 = self._collection(name=\"Collection 1\")\n pool1 = self._licensepool(edition, collection=collection1)\n\n collection2 = self._collection(name=\"Collection 2\")\n pool2 = self._licensepool(edition, collection=collection2)\n\n monitor.process_item(pool1.identifier)\n assert 1 == monitor.api.update_format_calls\n\n\nclass TestReaper(OverdriveAPITest):\n def test_instantiate(self):\n # Validate the standard CollectionMonitor interface.\n monitor = OverdriveCollectionReaper(\n self._db, self.collection, api_class=MockOverdriveAPI\n )\n", "id": "5360851", "language": "Python", "matching_score": 6.139962673187256, 
"max_stars_count": 0, "path": "tests/api/test_overdrive.py" }, { "content": "import datetime\nimport json\nimport os\nimport types\nimport urllib.parse\nimport uuid\nfrom typing import Callable, List, Optional, Tuple, Union\n\nimport dateutil\nimport pytest\nfrom freezegun import freeze_time\nfrom jinja2 import Template\n\nfrom api.circulation_exceptions import *\nfrom api.odl import (\n ODLAPI,\n MockSharedODLAPI,\n ODLAPIConfiguration,\n ODLHoldReaper,\n ODLImporter,\n SharedODLAPI,\n SharedODLImporter,\n)\nfrom core.model import (\n Collection,\n ConfigurationSetting,\n DataSource,\n DeliveryMechanism,\n Edition,\n ExternalIntegration,\n Hold,\n Hyperlink,\n Loan,\n MediaTypes,\n Representation,\n RightsStatus,\n get_one_or_create,\n)\nfrom core.testing import DatabaseTest, MockRequestsResponse\nfrom core.util import datetime_helpers\nfrom core.util.datetime_helpers import datetime_utc, utc_now\nfrom core.util.http import HTTP, BadResponseException, RemoteIntegrationException\nfrom core.util.string_helpers import base64\n\n\nclass LicenseHelper:\n \"\"\"Represents an ODL license.\"\"\"\n\n def __init__(\n self,\n identifier: Optional[str] = None,\n checkouts: Optional[int] = None,\n concurrency: Optional[int] = None,\n expires: Optional[Union[datetime.datetime, str]] = None,\n ) -> None:\n \"\"\"Initialize a new instance of LicenseHelper class.\n\n :param identifier: License's identifier\n :param checkouts: Total number of checkouts before a license expires\n :param concurrency: Number of concurrent checkouts allowed\n :param expires: Date & time when a license expires\n \"\"\"\n self.identifier: str = (\n identifier if identifier else \"urn:uuid:{}\".format(uuid.uuid1())\n )\n self.checkouts: Optional[int] = checkouts\n self.concurrency: Optional[int] = concurrency\n if isinstance(expires, datetime.datetime):\n self.expires = expires.isoformat()\n else:\n self.expires: Optional[str] = expires\n\n\nclass LicenseInfoHelper:\n \"\"\"Represents information about the current state of a license stored in the License Info Document.\"\"\"\n\n def __init__(\n self,\n license: LicenseHelper,\n available: int,\n status: str = \"available\",\n left: Optional[int] = None,\n ) -> None:\n \"\"\"Initialize a new instance of LicenseInfoHelper class.\"\"\"\n self.license: LicenseHelper = license\n self.status: str = status\n self.left: int = left\n self.available: int = available\n\n def __str__(self) -> str:\n \"\"\"Return a JSON representation of a part of the License Info Document.\"\"\"\n output = {\n \"identifier\": self.license.identifier,\n \"status\": self.status,\n \"terms\": {\n \"concurrency\": self.license.concurrency,\n },\n \"checkouts\": {\n \"available\": self.available,\n },\n }\n if self.license.expires is not None:\n output[\"terms\"][\"expires\"] = self.license.expires\n if self.left is not None:\n output[\"checkouts\"][\"left\"] = self.left\n return json.dumps(output)\n\n\nclass BaseODLTest:\n base_path = os.path.split(__file__)[0]\n resource_path = os.path.join(base_path, \"files\", \"odl\")\n\n @classmethod\n def get_data(cls, filename):\n path = os.path.join(cls.resource_path, filename)\n return open(path, \"r\").read()\n\n @pytest.fixture()\n def db(self):\n return self._db\n\n @pytest.fixture()\n def library(self, db):\n return DatabaseTest.make_default_library(db)\n\n @pytest.fixture()\n def integration_protocol(self):\n return ODLAPI.NAME\n\n @pytest.fixture()\n def collection(self, db, library, integration_protocol):\n \"\"\"Create a mock ODL collection to use in 
tests.\"\"\"\n collection, ignore = get_one_or_create(\n db,\n Collection,\n name=\"Test ODL Collection\",\n create_method_kwargs=dict(\n external_account_id=\"http://odl\",\n ),\n )\n integration = collection.create_external_integration(\n protocol=integration_protocol\n )\n integration.username = \"a\"\n integration.password = \"b\"\n integration.url = \"http://metadata\"\n collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, \"Feedbooks\"\n )\n library.collections.append(collection)\n return collection\n\n @pytest.fixture()\n def patron(self):\n return self._patron()\n\n @pytest.fixture()\n def work(self, collection):\n return self._work(with_license_pool=True, collection=collection)\n\n @pytest.fixture()\n def pool(self, license):\n return license.license_pool\n\n @pytest.fixture()\n def license(self, work):\n def setup(self, available, concurrency, left=None, expires=None):\n self.checkouts_available = available\n self.checkouts_left = left\n self.terms_concurrency = concurrency\n self.expires = expires\n self.license_pool.update_availability_from_licenses()\n\n pool = work.license_pools[0]\n l = self._license(\n pool,\n checkout_url=\"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url,hint,hint_url}\",\n checkouts_available=1,\n terms_concurrency=1,\n )\n l.setup = types.MethodType(setup, l)\n pool.update_availability_from_licenses()\n return l\n\n\nclass BaseODLAPITest(BaseODLTest):\n @pytest.fixture()\n def api_class(self, monkeypatch, db):\n def queue_response(self, status_code, headers={}, content=None):\n self.responses.insert(\n 0, MockRequestsResponse(status_code, headers, content)\n )\n\n def _get(self, url, headers=None):\n self.requests.append([url, headers])\n response = self.responses.pop()\n return HTTP._process_response(url, response)\n\n def _url_for(self, *args, **kwargs):\n del kwargs[\"_external\"]\n return \"http://%s?%s\" % (\n \"/\".join(args),\n \"&\".join([\"%s=%s\" % (key, val) for key, val in list(kwargs.items())]),\n )\n\n monkeypatch.setattr(ODLAPI, \"_get\", _get)\n monkeypatch.setattr(ODLAPI, \"_url_for\", _url_for)\n monkeypatch.setattr(ODLAPI, \"queue_response\", queue_response, raising=False)\n return ODLAPI\n\n @pytest.fixture()\n def api(self, db, api_class, collection):\n api = api_class(db, collection)\n api.requests = []\n api.responses = []\n return api\n\n @pytest.fixture()\n def client(self):\n return self._integration_client()\n\n @pytest.fixture()\n def checkin(self, api, patron, pool):\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"links\": [\n {\n \"rel\": \"return\",\n \"href\": \"http://return\",\n }\n ],\n }\n )\n returned_lsd = json.dumps(\n {\n \"status\": \"returned\",\n }\n )\n\n def c(patron=patron, pool=pool):\n api.queue_response(200, content=lsd)\n api.queue_response(200)\n api.queue_response(200, content=returned_lsd)\n api.checkin(patron, \"pin\", pool)\n\n return c\n\n @pytest.fixture()\n def checkout(self, api, patron, pool, db):\n def c(patron=patron, pool=pool, loan_url=None):\n loan_url = loan_url or self._str\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"3017-10-21T11:12:13Z\"},\n \"links\": [\n {\n \"rel\": \"self\",\n \"href\": loan_url,\n }\n ],\n }\n )\n api.queue_response(200, content=lsd)\n loan = api.checkout(patron, \"pin\", pool, Representation.EPUB_MEDIA_TYPE)\n loan_db = (\n db.query(Loan)\n .filter(Loan.license_pool == pool, Loan.patron == patron)\n .one()\n )\n return loan, loan_db\n\n return 
c\n\n\nclass TestODLAPI(DatabaseTest, BaseODLAPITest):\n def test_get_license_status_document_success(self, license, patron, api, library):\n # With a new loan.\n loan, _ = license.loan_to(patron)\n api.queue_response(200, content=json.dumps(dict(status=\"ready\")))\n api.get_license_status_document(loan)\n requested_url = api.requests[0][0]\n\n parsed = urllib.parse.urlparse(requested_url)\n assert \"https\" == parsed.scheme\n assert \"loan.feedbooks.net\" == parsed.netloc\n params = urllib.parse.parse_qs(parsed.query)\n\n assert ODLAPIConfiguration.passphrase_hint.default == params.get(\"hint\")[0]\n assert (\n ODLAPIConfiguration.passphrase_hint_url.default == params.get(\"hint_url\")[0]\n )\n\n assert license.identifier == params.get(\"id\")[0]\n\n # The checkout id and patron id are random UUIDs.\n checkout_id = params.get(\"checkout_id\")[0]\n assert len(checkout_id) > 0\n patron_id = params.get(\"patron_id\")[0]\n assert len(patron_id) > 0\n\n # Loans expire in 21 days by default.\n now = utc_now()\n after_expiration = now + datetime.timedelta(days=23)\n expires = urllib.parse.unquote(params.get(\"expires\")[0])\n\n # The expiration time passed to the server is associated with\n # the UTC time zone.\n assert expires.endswith(\"+00:00\")\n expires = dateutil.parser.parse(expires)\n assert expires.tzinfo == dateutil.tz.tz.tzutc()\n\n # It's a time in the future, but not _too far_ in the future.\n assert expires > now\n assert expires < after_expiration\n\n notification_url = urllib.parse.unquote_plus(params.get(\"notification_url\")[0])\n assert (\n \"http://odl_notify?library_short_name=%s&loan_id=%s\"\n % (library.short_name, loan.id)\n == notification_url\n )\n\n # With an existing loan.\n loan, _ = license.loan_to(patron)\n loan.external_identifier = self._str\n\n api.queue_response(200, content=json.dumps(dict(status=\"active\")))\n api.get_license_status_document(loan)\n requested_url = api.requests[1][0]\n assert loan.external_identifier == requested_url\n\n def test_get_license_status_document_errors(self, license, api, patron):\n loan, _ = license.loan_to(patron)\n\n api.queue_response(200, content=\"not json\")\n pytest.raises(\n BadResponseException,\n api.get_license_status_document,\n loan,\n )\n\n api.queue_response(200, content=json.dumps(dict(status=\"unknown\")))\n pytest.raises(\n BadResponseException,\n api.get_license_status_document,\n loan,\n )\n\n def test_checkin_success(self, license, patron, api, pool, db, checkin):\n # A patron has a copy of this book checked out.\n license.setup(concurrency=7, available=6)\n\n loan, _ = license.loan_to(patron)\n loan.external_identifier = \"http://loan/\" + self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n # The patron returns the book successfully.\n checkin()\n assert 3 == len(api.requests)\n assert \"http://loan\" in api.requests[0][0]\n assert \"http://return\" == api.requests[1][0]\n assert \"http://loan\" in api.requests[2][0]\n\n # The pool's availability has increased, and the local loan has\n # been deleted.\n assert 7 == pool.licenses_available\n assert 0 == db.query(Loan).count()\n\n # The license on the pool has also been updated\n assert 7 == license.checkouts_available\n\n def test_checkin_success_with_holds_queue(\n self, license, patron, checkin, pool, api, db\n ):\n # A patron has the only copy of this book checked out.\n license.setup(concurrency=1, available=0)\n loan, _ = license.loan_to(patron)\n loan.external_identifier = \"http://loan/\" + self._str\n loan.end = utc_now() + 
datetime.timedelta(days=3)\n\n # Another patron has the book on hold.\n patron_with_hold = self._patron()\n pool.patrons_in_hold_queue = 1\n hold, ignore = pool.on_hold_to(\n patron_with_hold, start=utc_now(), end=None, position=1\n )\n\n # The first patron returns the book successfully.\n checkin()\n assert 3 == len(api.requests)\n assert \"http://loan\" in api.requests[0][0]\n assert \"http://return\" == api.requests[1][0]\n assert \"http://loan\" in api.requests[2][0]\n\n # Now the license is reserved for the next patron.\n assert 0 == pool.licenses_available\n assert 1 == pool.licenses_reserved\n assert 1 == pool.patrons_in_hold_queue\n assert 0 == db.query(Loan).count()\n assert 0 == hold.position\n\n def test_checkin_already_fulfilled(self, license, patron, api, pool, db):\n # The loan is already fulfilled.\n license.setup(concurrency=7, available=6)\n loan, _ = license.loan_to(patron)\n loan.external_identifier = self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n lsd = json.dumps(\n {\n \"status\": \"active\",\n }\n )\n\n api.queue_response(200, content=lsd)\n # Checking in the book silently does nothing.\n api.checkin(patron, \"pinn\", pool)\n assert 1 == len(api.requests)\n assert 6 == pool.licenses_available\n assert 1 == db.query(Loan).count()\n\n def test_checkin_not_checked_out(self, api, patron, pool, license):\n # Not checked out locally.\n pytest.raises(\n NotCheckedOut,\n api.checkin,\n patron,\n \"pin\",\n pool,\n )\n\n # Not checked out according to the distributor.\n loan, _ = license.loan_to(patron)\n loan.external_identifier = self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n lsd = json.dumps(\n {\n \"status\": \"revoked\",\n }\n )\n\n api.queue_response(200, content=lsd)\n pytest.raises(\n NotCheckedOut,\n api.checkin,\n patron,\n \"pin\",\n pool,\n )\n\n def test_checkin_cannot_return(self, license, patron, pool, api):\n # Not fulfilled yet, but no return link from the distributor.\n loan, ignore = license.loan_to(patron)\n loan.external_identifier = self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n }\n )\n\n api.queue_response(200, content=lsd)\n # Checking in silently does nothing.\n api.checkin(patron, \"pin\", pool)\n\n # If the return link doesn't change the status, it still\n # silently ignores the problem.\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"links\": [\n {\n \"rel\": \"return\",\n \"href\": \"http://return\",\n }\n ],\n }\n )\n\n api.queue_response(200, content=lsd)\n api.queue_response(200, content=\"Deleted\")\n api.queue_response(200, content=lsd)\n api.checkin(patron, \"pin\", pool)\n\n def test_checkout_success(self, license, checkout, collection, db, pool):\n # This book is available to check out.\n license.setup(concurrency=6, available=6, left=30)\n\n # A patron checks out the book successfully.\n loan_url = self._str\n loan, _ = checkout(loan_url=loan_url)\n\n assert collection == loan.collection(db)\n assert pool.data_source.name == loan.data_source_name\n assert pool.identifier.type == loan.identifier_type\n assert pool.identifier.identifier == loan.identifier\n assert loan.start_date > utc_now() - datetime.timedelta(minutes=1)\n assert loan.start_date < utc_now() + datetime.timedelta(minutes=1)\n assert datetime_utc(3017, 10, 21, 11, 12, 13) == loan.end_date\n assert loan_url == loan.external_identifier\n assert 1 == db.query(Loan).count()\n\n # Now the patron has a loan in the database that matches the LoanInfo\n # returned by the 
API.\n db_loan = db.query(Loan).one()\n assert pool == db_loan.license_pool\n assert license == db_loan.license\n assert loan.start_date == db_loan.start\n assert loan.end_date == db_loan.end\n\n # The pool's availability and the license's remaining checkouts have decreased.\n assert 5 == pool.licenses_available\n assert 29 == license.checkouts_left\n\n def test_checkout_success_with_hold(\n self, license, pool, checkout, patron, collection, db\n ):\n # A patron has this book on hold, and the book just became available to check out.\n pool.on_hold_to(\n patron, start=utc_now() - datetime.timedelta(days=1), position=0\n )\n license.setup(concurrency=1, available=1, left=5)\n\n assert pool.licenses_available == 0\n assert pool.licenses_reserved == 1\n assert pool.patrons_in_hold_queue == 1\n\n # The patron checks out the book.\n loan_url = self._str\n loan, _ = checkout(loan_url=loan_url)\n\n # The patron gets a loan successfully.\n assert collection == loan.collection(db)\n assert pool.data_source.name == loan.data_source_name\n assert pool.identifier.type == loan.identifier_type\n assert pool.identifier.identifier == loan.identifier\n assert loan.start_date > utc_now() - datetime.timedelta(minutes=1)\n assert loan.start_date < utc_now() + datetime.timedelta(minutes=1)\n assert datetime_utc(3017, 10, 21, 11, 12, 13) == loan.end_date\n assert loan_url == loan.external_identifier\n assert 1 == db.query(Loan).count()\n\n db_loan = db.query(Loan).one()\n assert pool == db_loan.license_pool\n assert license == db_loan.license\n assert 4 == license.checkouts_left\n\n # The book is no longer reserved for the patron, and the hold has been deleted.\n assert 0 == pool.licenses_reserved\n assert 0 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n def test_checkout_already_checked_out(self, license, checkout, db):\n license.setup(concurrency=2, available=1)\n\n # Checkout succeeds the first time\n checkout()\n\n # But raises an exception the second time\n pytest.raises(AlreadyCheckedOut, checkout)\n\n assert 1 == db.query(Loan).count()\n\n def test_checkout_expired_hold(self, pool, patron, api, license):\n # The patron was at the beginning of the hold queue, but the hold already expired.\n yesterday = utc_now() - datetime.timedelta(days=1)\n hold, _ = pool.on_hold_to(patron, start=yesterday, end=yesterday, position=0)\n other_hold, _ = pool.on_hold_to(self._patron(), start=utc_now())\n license.setup(concurrency=2, available=1)\n\n pytest.raises(\n NoAvailableCopies,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n def test_checkout_no_available_copies(self, pool, license, api, patron, db):\n # A different patron has the only copy checked out.\n license.setup(concurrency=1, available=0)\n existing_loan, _ = license.loan_to(self._patron())\n\n pytest.raises(\n NoAvailableCopies,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 1 == db.query(Loan).count()\n\n db.delete(existing_loan)\n\n now = utc_now()\n yesterday = now - datetime.timedelta(days=1)\n last_week = now - datetime.timedelta(weeks=1)\n\n # A different patron has the only copy reserved.\n other_patron_hold, _ = pool.on_hold_to(\n self._patron(), position=0, start=last_week\n )\n pool.update_availability_from_licenses()\n\n pytest.raises(\n NoAvailableCopies,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n # The patron has a 
hold, but another patron is ahead in the holds queue.\n hold, _ = pool.on_hold_to(self._patron(), position=1, start=yesterday)\n pool.update_availability_from_licenses()\n\n pytest.raises(\n NoAvailableCopies,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n # The patron has the first hold, but it's expired.\n hold.start = last_week - datetime.timedelta(days=1)\n hold.end = yesterday\n pool.update_availability_from_licenses()\n\n pytest.raises(\n NoAvailableCopies,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n def test_checkout_no_licenses(self, license, api, pool, patron, db):\n license.setup(concurrency=1, available=1, left=0)\n\n pytest.raises(\n NoLicenses,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n def test_checkout_when_all_licenses_expired(self, license, api, patron, pool):\n # license expired by expiration date\n license.setup(\n concurrency=1,\n available=2,\n left=1,\n expires=utc_now() - datetime.timedelta(weeks=1),\n )\n\n pytest.raises(\n NoLicenses,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n # license expired by no remaining checkouts\n license.setup(\n concurrency=1,\n available=2,\n left=0,\n expires=utc_now() + datetime.timedelta(weeks=1),\n )\n\n pytest.raises(\n NoLicenses,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n def test_checkout_cannot_loan(self, api, patron, pool, db):\n lsd = json.dumps(\n {\n \"status\": \"revoked\",\n }\n )\n\n api.queue_response(200, content=lsd)\n pytest.raises(\n CannotLoan,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n # No external identifier.\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"2017-10-21T11:12:13Z\"},\n }\n )\n\n api.queue_response(200, content=lsd)\n pytest.raises(\n CannotLoan,\n api.checkout,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n assert 0 == db.query(Loan).count()\n\n @pytest.mark.parametrize(\n \"delivery_mechanism, correct_type, correct_link, links\",\n [\n (\n DeliveryMechanism.ADOBE_DRM,\n DeliveryMechanism.ADOBE_DRM,\n \"http://acsm\",\n [\n {\n \"rel\": \"license\",\n \"href\": \"http://acsm\",\n \"type\": DeliveryMechanism.ADOBE_DRM,\n }\n ],\n ),\n (\n MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n \"http://manifest\",\n [\n {\n \"rel\": \"manifest\",\n \"href\": \"http://manifest\",\n \"type\": MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n }\n ],\n ),\n (\n DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,\n ODLImporter.FEEDBOOKS_AUDIO,\n \"http://correct\",\n [\n {\n \"rel\": \"license\",\n \"href\": \"http://acsm\",\n \"type\": DeliveryMechanism.ADOBE_DRM,\n },\n {\n \"rel\": \"manifest\",\n \"href\": \"http://correct\",\n \"type\": ODLImporter.FEEDBOOKS_AUDIO,\n },\n ],\n ),\n ],\n )\n def test_fulfill_success(\n self,\n license,\n patron,\n api,\n checkout,\n pool,\n collection,\n db,\n delivery_mechanism,\n correct_type,\n correct_link,\n links,\n ):\n # Fulfill a loan in a way that gives access to a license file.\n license.setup(concurrency=1, available=1)\n checkout()\n\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"2017-10-21T11:12:13Z\"},\n \"links\": links,\n }\n 
)\n\n api.queue_response(200, content=lsd)\n fulfillment = api.fulfill(patron, \"pin\", pool, delivery_mechanism)\n\n assert collection == fulfillment.collection(db)\n assert pool.data_source.name == fulfillment.data_source_name\n assert pool.identifier.type == fulfillment.identifier_type\n assert pool.identifier.identifier == fulfillment.identifier\n assert datetime_utc(2017, 10, 21, 11, 12, 13) == fulfillment.content_expires\n assert correct_link == fulfillment.content_link\n assert correct_type == fulfillment.content_type\n\n def test_fulfill_cannot_fulfill(self, license, checkout, db, api, patron, pool):\n license.setup(concurrency=7, available=7)\n checkout()\n\n assert 1 == db.query(Loan).count()\n assert 6 == pool.licenses_available\n\n lsd = json.dumps(\n {\n \"status\": \"revoked\",\n }\n )\n\n api.queue_response(200, content=lsd)\n pytest.raises(\n CannotFulfill,\n api.fulfill,\n patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n # The pool's availability has been updated and the local\n # loan has been deleted, since we found out the loan is\n # no longer active.\n assert 7 == pool.licenses_available\n assert 0 == db.query(Loan).count()\n\n def test_count_holds_before(self, api, pool, patron):\n now = utc_now()\n yesterday = now - datetime.timedelta(days=1)\n tomorrow = now + datetime.timedelta(days=1)\n last_week = now - datetime.timedelta(weeks=1)\n\n hold, ignore = pool.on_hold_to(patron, start=now)\n\n assert 0 == api._count_holds_before(hold)\n\n # A previous hold.\n pool.on_hold_to(self._patron(), start=yesterday)\n assert 1 == api._count_holds_before(hold)\n\n # Expired holds don't count.\n pool.on_hold_to(self._patron(), start=last_week, end=yesterday, position=0)\n assert 1 == api._count_holds_before(hold)\n\n # Later holds don't count.\n pool.on_hold_to(self._patron(), start=tomorrow)\n assert 1 == api._count_holds_before(hold)\n\n # Holds on another pool don't count.\n other_pool = self._licensepool(None)\n other_pool.on_hold_to(patron, start=yesterday)\n assert 1 == api._count_holds_before(hold)\n\n for i in range(3):\n pool.on_hold_to(self._patron(), start=yesterday, end=tomorrow, position=1)\n assert 4 == api._count_holds_before(hold)\n\n def test_update_hold_end_date(self, pool, api, patron, license, db, collection):\n now = utc_now()\n tomorrow = now + datetime.timedelta(days=1)\n yesterday = now - datetime.timedelta(days=1)\n next_week = now + datetime.timedelta(days=7)\n last_week = now - datetime.timedelta(days=7)\n\n pool.licenses_owned = 1\n pool.licenses_reserved = 1\n\n hold, ignore = pool.on_hold_to(patron, start=now, position=0)\n\n # Set the reservation period and loan period.\n collection.external_integration.set_setting(\n Collection.DEFAULT_RESERVATION_PERIOD_KEY, 3\n )\n collection.external_integration.set_setting(\n Collection.EBOOK_LOAN_DURATION_KEY, 6\n )\n\n # A hold that's already reserved and has an end date doesn't change.\n hold.end = tomorrow\n api._update_hold_end_date(hold)\n assert tomorrow == hold.end\n hold.end = yesterday\n api._update_hold_end_date(hold)\n assert yesterday == hold.end\n\n # Updating a hold that's reserved but doesn't have an end date starts the\n # reservation period.\n hold.end = None\n api._update_hold_end_date(hold)\n assert hold.end < next_week\n assert hold.end > now\n\n # Updating a hold that has an end date but just became reserved starts\n # the reservation period.\n hold.end = yesterday\n hold.position = 1\n api._update_hold_end_date(hold)\n assert hold.end < next_week\n assert hold.end > 
now\n\n # When there's a holds queue, the end date is the maximum time it could take for\n # a license to become available.\n\n # One copy, one loan, hold position 1.\n # The hold will be available as soon as the loan expires.\n pool.licenses_available = 0\n pool.licenses_reserved = 0\n pool.licenses_owned = 1\n loan, ignore = license.loan_to(self._patron(), end=tomorrow)\n api._update_hold_end_date(hold)\n assert tomorrow == hold.end\n\n # One copy, one loan, hold position 2.\n # The hold will be available after the loan expires + 1 cycle.\n first_hold, ignore = pool.on_hold_to(self._patron(), start=last_week)\n api._update_hold_end_date(hold)\n assert tomorrow + datetime.timedelta(days=9) == hold.end\n\n # Two copies, one loan, one reserved hold, hold position 2.\n # The hold will be available after the loan expires.\n pool.licenses_reserved = 1\n pool.licenses_owned = 2\n license.checkouts_available = 2\n api._update_hold_end_date(hold)\n assert tomorrow == hold.end\n\n # Two copies, one loan, one reserved hold, hold position 3.\n # The hold will be available after the reserved hold is checked out\n # at the latest possible time and that loan expires.\n second_hold, ignore = pool.on_hold_to(self._patron(), start=yesterday)\n first_hold.end = next_week\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=6) == hold.end\n\n # One copy, no loans, one reserved hold, hold position 3.\n # The hold will be available after the reserved hold is checked out\n # at the latest possible time and that loan expires + 1 cycle.\n db.delete(loan)\n pool.licenses_owned = 1\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=15) == hold.end\n\n # One copy, no loans, one reserved hold, hold position 2.\n # The hold will be available after the reserved hold is checked out\n # at the latest possible time and that loan expires.\n db.delete(second_hold)\n pool.licenses_owned = 1\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=6) == hold.end\n\n db.delete(first_hold)\n\n # Ten copies, seven loans, three reserved holds, hold position 9.\n # The hold will be available after the sixth loan expires.\n pool.licenses_owned = 10\n for i in range(5):\n pool.loan_to(self._patron(), end=next_week)\n pool.loan_to(self._patron(), end=next_week + datetime.timedelta(days=1))\n pool.loan_to(self._patron(), end=next_week + datetime.timedelta(days=2))\n pool.licenses_reserved = 3\n for i in range(3):\n pool.on_hold_to(\n self._patron(),\n start=last_week + datetime.timedelta(days=i),\n end=next_week + datetime.timedelta(days=i),\n position=0,\n )\n for i in range(5):\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=1) == hold.end\n\n # Ten copies, seven loans, three reserved holds, hold position 12.\n # The hold will be available after the second reserved hold is checked\n # out and that loan expires.\n for i in range(3):\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=7) == hold.end\n\n # Ten copies, seven loans, three reserved holds, hold position 29.\n # The hold will be available after the sixth loan expires + 2 cycles.\n for i in range(17):\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=19) == hold.end\n\n # Ten copies, seven loans, three reserved holds, hold position 32.\n # The hold will be 
available after the second reserved hold is checked\n # out and that loan expires + 2 cycles.\n for i in range(3):\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_end_date(hold)\n assert next_week + datetime.timedelta(days=25) == hold.end\n\n def test_update_hold_position(self, pool, patron, license, api, db):\n now = utc_now()\n yesterday = now - datetime.timedelta(days=1)\n tomorrow = now + datetime.timedelta(days=1)\n\n hold, ignore = pool.on_hold_to(patron, start=now)\n\n pool.licenses_owned = 1\n\n # When there are no other holds and no licenses reserved, hold position is 1.\n loan, _ = license.loan_to(self._patron())\n api._update_hold_position(hold)\n assert 1 == hold.position\n\n # When a license is reserved, position is 0.\n db.delete(loan)\n api._update_hold_position(hold)\n assert 0 == hold.position\n\n # If another hold has the reserved licenses, position is 2.\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_position(hold)\n assert 2 == hold.position\n\n # If another license is reserved, position goes back to 0.\n pool.licenses_owned = 2\n license.checkouts_available = 2\n api._update_hold_position(hold)\n assert 0 == hold.position\n\n # If there's an earlier hold but it expired, it doesn't\n # affect the position.\n pool.on_hold_to(self._patron(), start=yesterday, end=yesterday, position=0)\n api._update_hold_position(hold)\n assert 0 == hold.position\n\n # Hold position is after all earlier non-expired holds...\n for i in range(3):\n pool.on_hold_to(self._patron(), start=yesterday)\n api._update_hold_position(hold)\n assert 5 == hold.position\n\n # and before any later holds.\n for i in range(2):\n pool.on_hold_to(self._patron(), start=tomorrow)\n api._update_hold_position(hold)\n assert 5 == hold.position\n\n def test_update_hold_queue(\n self, license, collection, pool, work, api, db, checkout, checkin, patron\n ):\n licenses = [license]\n\n collection.external_integration.set_setting(\n Collection.DEFAULT_RESERVATION_PERIOD_KEY, 3\n )\n\n # If there's no holds queue when we try to update the queue, it\n # will remove a reserved license and make it available instead.\n pool.licenses_owned = 1\n pool.licenses_available = 0\n pool.licenses_reserved = 1\n pool.patrons_in_hold_queue = 0\n last_update = utc_now() - datetime.timedelta(minutes=5)\n work.last_update_time = last_update\n api.update_licensepool(pool)\n assert 1 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n # The work's last update time is changed so it will be moved up in the crawlable OPDS feed.\n assert work.last_update_time > last_update\n\n # If there are holds, a license will get reserved for the next hold\n # and its end date will be set.\n hold, _ = pool.on_hold_to(patron, start=utc_now(), position=1)\n later_hold, _ = pool.on_hold_to(\n self._patron(), start=utc_now() + datetime.timedelta(days=1), position=2\n )\n api.update_licensepool(pool)\n\n # The pool's licenses were updated.\n assert 0 == pool.licenses_available\n assert 1 == pool.licenses_reserved\n assert 2 == pool.patrons_in_hold_queue\n\n # And the first hold changed.\n assert 0 == hold.position\n assert hold.end - utc_now() - datetime.timedelta(days=3) < datetime.timedelta(\n hours=1\n )\n\n # The later hold is the same.\n assert 2 == later_hold.position\n\n # Now there's a reserved hold. 
If we add another license, it's reserved and,\n # the later hold is also updated.\n l = self._license(pool, terms_concurrency=1, checkouts_available=1)\n licenses.append(l)\n api.update_licensepool(pool)\n\n assert 0 == pool.licenses_available\n assert 2 == pool.licenses_reserved\n assert 2 == pool.patrons_in_hold_queue\n assert 0 == later_hold.position\n assert later_hold.end - utc_now() - datetime.timedelta(\n days=3\n ) < datetime.timedelta(hours=1)\n\n # Now there are no more holds. If we add another license,\n # it ends up being available.\n l = self._license(pool, terms_concurrency=1, checkouts_available=1)\n licenses.append(l)\n api.update_licensepool(pool)\n assert 1 == pool.licenses_available\n assert 2 == pool.licenses_reserved\n assert 2 == pool.patrons_in_hold_queue\n\n # License pool is updated when the holds are removed.\n db.delete(hold)\n db.delete(later_hold)\n api.update_licensepool(pool)\n assert 3 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n\n # We can also make multiple licenses reserved at once.\n loans = []\n holds = []\n for i in range(3):\n p = self._patron()\n loan, _ = checkout(patron=p)\n loans.append((loan, p))\n assert 0 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n\n l = self._license(pool, terms_concurrency=2, checkouts_available=2)\n licenses.append(l)\n for i in range(3):\n hold, ignore = pool.on_hold_to(\n self._patron(),\n start=utc_now() - datetime.timedelta(days=3 - i),\n position=i + 1,\n )\n holds.append(hold)\n\n api.update_licensepool(pool)\n assert 2 == pool.licenses_reserved\n assert 0 == pool.licenses_available\n assert 3 == pool.patrons_in_hold_queue\n assert 0 == holds[0].position\n assert 0 == holds[1].position\n assert 3 == holds[2].position\n assert holds[0].end - utc_now() - datetime.timedelta(\n days=3\n ) < datetime.timedelta(hours=1)\n assert holds[1].end - utc_now() - datetime.timedelta(\n days=3\n ) < datetime.timedelta(hours=1)\n\n # If there are more licenses that change than holds, some of them become available.\n for i in range(2):\n _, p = loans[i]\n checkin(patron=p)\n assert 3 == pool.licenses_reserved\n assert 1 == pool.licenses_available\n assert 3 == pool.patrons_in_hold_queue\n for hold in holds:\n assert 0 == hold.position\n assert hold.end - utc_now() - datetime.timedelta(\n days=3\n ) < datetime.timedelta(hours=1)\n\n def test_place_hold_success(self, pool, api, db, collection, patron, checkout):\n loan, _ = checkout(patron=self._patron())\n\n hold = api.place_hold(\n patron, \"pin\", pool, \"<EMAIL>\"\n )\n\n assert 1 == pool.patrons_in_hold_queue\n assert collection == hold.collection(db)\n assert pool.data_source.name == hold.data_source_name\n assert pool.identifier.type == hold.identifier_type\n assert pool.identifier.identifier == hold.identifier\n assert hold.start_date > utc_now() - datetime.timedelta(minutes=1)\n assert hold.start_date < utc_now() + datetime.timedelta(minutes=1)\n assert loan.end_date == hold.end_date\n assert 1 == hold.hold_position\n assert 1 == db.query(Hold).count()\n\n def test_place_hold_already_on_hold(self, pool, patron, license, api):\n license.setup(concurrency=1, available=0)\n pool.on_hold_to(patron)\n pytest.raises(\n AlreadyOnHold,\n api.place_hold,\n patron,\n \"pin\",\n pool,\n \"<EMAIL>\",\n )\n\n def test_place_hold_currently_available(self, pool, api, patron):\n pytest.raises(\n CurrentlyAvailable,\n api.place_hold,\n patron,\n \"pin\",\n pool,\n 
\"<EMAIL>\",\n )\n\n def test_release_hold_success(self, checkout, pool, patron, api, db, checkin):\n loan_patron = self._patron()\n checkout(patron=loan_patron)\n pool.on_hold_to(patron, position=1)\n\n assert True == api.release_hold(patron, \"pin\", pool)\n assert 0 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n pool.on_hold_to(patron, position=0)\n checkin(patron=loan_patron)\n\n assert True == api.release_hold(patron, \"pin\", pool)\n assert 1 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n pool.on_hold_to(patron, position=0)\n other_hold, ignore = pool.on_hold_to(self._patron(), position=2)\n\n assert True == api.release_hold(patron, \"pin\", pool)\n assert 0 == pool.licenses_available\n assert 1 == pool.licenses_reserved\n assert 1 == pool.patrons_in_hold_queue\n assert 1 == db.query(Hold).count()\n assert 0 == other_hold.position\n\n def test_release_hold_not_on_hold(self, api, patron, pool):\n pytest.raises(\n NotOnHold,\n api.release_hold,\n patron,\n \"pin\",\n pool,\n )\n\n def test_patron_activity_loan(\n self, api, patron, license, db, pool, collection, checkout, checkin\n ):\n # No loans yet.\n assert [] == api.patron_activity(patron, \"pin\")\n\n # One loan.\n _, loan = checkout()\n\n activity = api.patron_activity(patron, \"pin\")\n assert 1 == len(activity)\n assert collection == activity[0].collection(db)\n assert pool.data_source.name == activity[0].data_source_name\n assert pool.identifier.type == activity[0].identifier_type\n assert pool.identifier.identifier == activity[0].identifier\n assert loan.start == activity[0].start_date\n assert loan.end == activity[0].end_date\n assert loan.external_identifier == activity[0].external_identifier\n\n # Two loans.\n pool2 = self._licensepool(None, collection=collection)\n license2 = self._license(pool2, terms_concurrency=1, checkouts_available=1)\n _, loan2 = checkout(pool=pool2)\n\n activity = api.patron_activity(patron, \"pin\")\n assert 2 == len(activity)\n [l1, l2] = sorted(activity, key=lambda x: x.start_date)\n\n assert collection == l1.collection(db)\n assert pool.data_source.name == l1.data_source_name\n assert pool.identifier.type == l1.identifier_type\n assert pool.identifier.identifier == l1.identifier\n assert loan.start == l1.start_date\n assert loan.end == l1.end_date\n assert loan.external_identifier == l1.external_identifier\n\n assert collection == l2.collection(db)\n assert pool2.data_source.name == l2.data_source_name\n assert pool2.identifier.type == l2.identifier_type\n assert pool2.identifier.identifier == l2.identifier\n assert loan2.start == l2.start_date\n assert loan2.end == l2.end_date\n assert loan2.external_identifier == l2.external_identifier\n\n # If a loan is expired already, it's left out.\n loan2.end = utc_now() - datetime.timedelta(days=2)\n activity = api.patron_activity(patron, \"pin\")\n assert 1 == len(activity)\n assert pool.identifier.identifier == activity[0].identifier\n checkin(pool=pool2)\n\n # One hold.\n other_patron = self._patron()\n checkout(patron=other_patron, pool=pool2)\n hold, _ = pool2.on_hold_to(patron)\n hold.start = utc_now() - datetime.timedelta(days=2)\n hold.end = hold.start + datetime.timedelta(days=3)\n hold.position = 3\n activity = api.patron_activity(patron, \"pin\")\n assert 2 == len(activity)\n [h1, l1] = sorted(activity, key=lambda x: x.start_date)\n\n assert 
collection == h1.collection(db)\n assert pool2.data_source.name == h1.data_source_name\n assert pool2.identifier.type == h1.identifier_type\n assert pool2.identifier.identifier == h1.identifier\n assert hold.start == h1.start_date\n assert hold.end == h1.end_date\n # Hold position was updated.\n assert 1 == h1.hold_position\n assert 1 == hold.position\n\n # If the hold is expired, it's deleted right away and the license\n # is made available again.\n checkin(patron=other_patron, pool=pool2)\n hold.end = utc_now() - datetime.timedelta(days=1)\n hold.position = 0\n activity = api.patron_activity(patron, \"pin\")\n assert 1 == len(activity)\n assert 0 == db.query(Hold).count()\n assert 1 == pool2.licenses_available\n assert 0 == pool2.licenses_reserved\n\n def test_update_loan_still_active(self, license, patron, api, pool, db):\n license.setup(concurrency=6, available=6)\n loan, _ = license.loan_to(patron)\n loan.external_identifier = self._str\n status_doc = {\n \"status\": \"active\",\n }\n\n api.update_loan(loan, status_doc)\n # Availability hasn't changed, and the loan still exists.\n assert 6 == pool.licenses_available\n assert 1 == db.query(Loan).count()\n\n def test_update_loan_removes_loan(self, checkout, license, patron, api, pool, db):\n license.setup(concurrency=7, available=7)\n _, loan = checkout()\n\n assert 6 == pool.licenses_available\n assert 1 == db.query(Loan).count()\n\n status_doc = {\n \"status\": \"cancelled\",\n }\n\n api.update_loan(loan, status_doc)\n\n # Availability has increased, and the loan is gone.\n assert 7 == pool.licenses_available\n assert 0 == db.query(Loan).count()\n\n def test_update_loan_removes_loan_with_hold_queue(\n self, checkout, pool, license, api, db\n ):\n _, loan = checkout()\n hold, _ = pool.on_hold_to(self._patron(), position=1)\n pool.update_availability_from_licenses()\n\n assert pool.licenses_owned == 1\n assert pool.licenses_available == 0\n assert pool.licenses_reserved == 0\n assert pool.patrons_in_hold_queue == 1\n\n status_doc = {\n \"status\": \"cancelled\",\n }\n\n api.update_loan(loan, status_doc)\n\n # The license is reserved for the next patron, and the loan is gone.\n assert 0 == pool.licenses_available\n assert 1 == pool.licenses_reserved\n assert 0 == hold.position\n assert 0 == db.query(Loan).count()\n\n def test_checkout_from_external_library(self, pool, license, api, client, db):\n # This book is available to check out.\n pool.licenses_owned = 6\n pool.licenses_available = 6\n license.checkouts_available = 6\n license.checkouts_left = 10\n\n # An integration client checks out the book successfully.\n loan_url = self._str\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"3017-10-21T11:12:13Z\"},\n \"links\": [\n {\n \"rel\": \"self\",\n \"href\": loan_url,\n }\n ],\n }\n )\n\n api.queue_response(200, content=lsd)\n loan = api.checkout_to_external_library(client, pool)\n assert client == loan.integration_client\n assert pool == loan.license_pool\n assert loan.start > utc_now() - datetime.timedelta(minutes=1)\n assert loan.start < utc_now() + datetime.timedelta(minutes=1)\n assert datetime_utc(3017, 10, 21, 11, 12, 13) == loan.end\n assert loan_url == loan.external_identifier\n assert 1 == db.query(Loan).count()\n\n # The pool's availability and the license's remaining checkouts have decreased.\n assert 5 == pool.licenses_available\n assert 9 == license.checkouts_left\n\n # The book can also be placed on hold to an external library,\n # if there are no copies available.\n 
license.setup(concurrency=1, available=0)\n\n hold = api.checkout_to_external_library(client, pool)\n\n assert 1 == pool.patrons_in_hold_queue\n assert client == hold.integration_client\n assert pool == hold.license_pool\n assert hold.start > utc_now() - datetime.timedelta(minutes=1)\n assert hold.start < utc_now() + datetime.timedelta(minutes=1)\n assert hold.end > utc_now() + datetime.timedelta(days=7)\n assert 1 == hold.position\n assert 1 == db.query(Hold).count()\n\n def test_checkout_from_external_library_with_hold(self, pool, client, api, db):\n # An integration client has this book on hold, and the book just became available to check out.\n pool.licenses_owned = 1\n pool.licenses_available = 0\n pool.licenses_reserved = 1\n pool.patrons_in_hold_queue = 1\n hold, ignore = pool.on_hold_to(\n client, start=utc_now() - datetime.timedelta(days=1), position=0\n )\n\n # The patron checks out the book.\n loan_url = self._str\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"3017-10-21T11:12:13Z\"},\n \"links\": [\n {\n \"rel\": \"self\",\n \"href\": loan_url,\n }\n ],\n }\n )\n\n api.queue_response(200, content=lsd)\n\n # The patron gets a loan successfully.\n loan = api.checkout_to_external_library(client, pool, hold)\n assert client == loan.integration_client\n assert pool == loan.license_pool\n assert loan.start > utc_now() - datetime.timedelta(minutes=1)\n assert loan.start < utc_now() + datetime.timedelta(minutes=1)\n assert datetime_utc(3017, 10, 21, 11, 12, 13) == loan.end\n assert loan_url == loan.external_identifier\n assert 1 == db.query(Loan).count()\n\n # The book is no longer reserved for the patron, and the hold has been deleted.\n assert 0 == pool.licenses_reserved\n assert 0 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n def test_checkin_from_external_library(self, pool, license, api, client, db):\n # An integration client has a copy of this book checked out.\n license.setup(concurrency=7, available=6)\n loan, ignore = license.loan_to(client)\n loan.external_identifier = \"http://loan/\" + self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n # The patron returns the book successfully.\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"links\": [\n {\n \"rel\": \"return\",\n \"href\": \"http://return\",\n }\n ],\n }\n )\n returned_lsd = json.dumps(\n {\n \"status\": \"returned\",\n }\n )\n\n api.queue_response(200, content=lsd)\n api.queue_response(200)\n api.queue_response(200, content=returned_lsd)\n api.checkin_from_external_library(client, loan)\n assert 3 == len(api.requests)\n assert \"http://loan\" in api.requests[0][0]\n assert \"http://return\" == api.requests[1][0]\n assert \"http://loan\" in api.requests[2][0]\n\n # The pool's availability has increased, and the local loan has\n # been deleted.\n assert 7 == pool.licenses_available\n assert 0 == db.query(Loan).count()\n\n def test_fulfill_for_external_library(\n self, license, client, api, collection, pool, db\n ):\n loan, ignore = license.loan_to(client)\n loan.external_identifier = self._str\n loan.end = utc_now() + datetime.timedelta(days=3)\n\n lsd = json.dumps(\n {\n \"status\": \"ready\",\n \"potential_rights\": {\"end\": \"2017-10-21T11:12:13Z\"},\n \"links\": [\n {\n \"rel\": \"license\",\n \"href\": \"http://acsm\",\n \"type\": DeliveryMechanism.ADOBE_DRM,\n }\n ],\n }\n )\n\n api.queue_response(200, content=lsd)\n fulfillment = api.fulfill_for_external_library(client, loan, None)\n assert 
collection == fulfillment.collection(db)\n assert pool.data_source.name == fulfillment.data_source_name\n assert pool.identifier.type == fulfillment.identifier_type\n assert pool.identifier.identifier == fulfillment.identifier\n assert datetime_utc(2017, 10, 21, 11, 12, 13) == fulfillment.content_expires\n assert \"http://acsm\" == fulfillment.content_link\n assert DeliveryMechanism.ADOBE_DRM == fulfillment.content_type\n\n def test_release_hold_from_external_library(\n self, pool, license, db, api, client, checkout, checkin\n ):\n license.setup(concurrency=1, available=1)\n other_patron = self._patron()\n checkout(patron=other_patron)\n hold, ignore = pool.on_hold_to(client, position=1)\n\n assert api.release_hold_from_external_library(client, hold) is True\n assert 0 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n checkin(patron=other_patron)\n hold, ignore = pool.on_hold_to(client, position=0)\n\n assert api.release_hold_from_external_library(client, hold) is True\n assert 1 == pool.licenses_available\n assert 0 == pool.licenses_reserved\n assert 0 == pool.patrons_in_hold_queue\n assert 0 == db.query(Hold).count()\n\n hold, ignore = pool.on_hold_to(client, position=0)\n other_hold, ignore = pool.on_hold_to(self._patron(), position=2)\n\n assert api.release_hold_from_external_library(client, hold) is True\n assert 0 == pool.licenses_available\n assert 1 == pool.licenses_reserved\n assert 1 == pool.patrons_in_hold_queue\n assert 1 == db.query(Hold).count()\n assert 0 == other_hold.position\n\n\nclass TestODLImporter(DatabaseTest, BaseODLTest):\n class MockGet:\n def __init__(self):\n self.responses = []\n\n def get(self, *args, **kwargs):\n return 200, {}, str(self.responses.pop(0))\n\n def add(self, item):\n return self.responses.append(item)\n\n class MockMetadataClient(object):\n def canonicalize_author_name(self, identifier, working_display_name):\n return working_display_name\n\n @pytest.fixture()\n def mock_get(self) -> MockGet:\n return self.MockGet()\n\n @pytest.fixture()\n def importer(self, collection, db, mock_get, metadata_client) -> ODLImporter:\n return ODLImporter(\n db,\n collection=collection,\n http_get=mock_get.get,\n metadata_client=metadata_client,\n )\n\n @pytest.fixture()\n def datasource(self, db, collection) -> DataSource:\n data_source = DataSource.lookup(db, \"Feedbooks\", autocreate=True)\n collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, data_source.name\n )\n return data_source\n\n @pytest.fixture()\n def metadata_client(self) -> MockMetadataClient:\n return self.MockMetadataClient()\n\n @pytest.fixture()\n def feed_template(self):\n return \"feed_template.xml.jinja\"\n\n @pytest.fixture()\n def import_templated(self, mock_get, importer, feed_template) -> Callable:\n def i(licenses: List[LicenseInfoHelper]) -> Tuple[List, List, List, List]:\n feed_licenses = [l.license for l in licenses]\n [mock_get.add(l) for l in licenses]\n feed = self.get_templated_feed(feed_template, feed_licenses)\n return importer.import_from_feed(feed)\n\n return i\n\n def get_templated_feed(self, filename: str, licenses: List[LicenseHelper]) -> str:\n \"\"\"Get the test ODL feed with specific licensing information.\n\n :param filename: Name of template to load\n :param licenses: List of ODL licenses\n\n :return: Test ODL feed\n \"\"\"\n template = Template(self.get_data(filename))\n feed = template.render(licenses=licenses)\n return feed\n\n 
@freeze_time(\"2019-01-01T00:00:00+00:00\")\n def test_import(self, importer, mock_get):\n \"\"\"Ensure that ODLImporter correctly processes and imports the ODL feed encoded using OPDS 1.x.\n\n NOTE: `freeze_time` decorator is required to treat the licenses in the ODL feed as non-expired.\n \"\"\"\n feed = self.get_data(\"feedbooks_bibliographic.atom\")\n\n warrior_time_limited = LicenseInfoHelper(\n license=LicenseHelper(\n identifier=\"1\", concurrency=1, expires=\"2019-03-31T03:13:35+02:00\"\n ),\n left=52,\n available=1,\n )\n canadianity_loan_limited = LicenseInfoHelper(\n license=LicenseHelper(identifier=\"2\", concurrency=10), left=40, available=10\n )\n canadianity_perpetual = LicenseInfoHelper(\n license=LicenseHelper(identifier=\"3\", concurrency=1), available=1\n )\n midnight_loan_limited_1 = LicenseInfoHelper(\n license=LicenseHelper(\n identifier=\"4\",\n concurrency=1,\n ),\n left=20,\n available=1,\n )\n midnight_loan_limited_2 = LicenseInfoHelper(\n license=LicenseHelper(identifier=\"5\", concurrency=1), left=52, available=1\n )\n dragons_loan = LicenseInfoHelper(\n license=LicenseHelper(\n identifier=\"urn:uuid:01234567-890a-bcde-f012-3456789abcde\",\n concurrency=5,\n ),\n left=10,\n available=5,\n )\n\n [\n mock_get.add(r)\n for r in [\n warrior_time_limited,\n canadianity_loan_limited,\n canadianity_perpetual,\n midnight_loan_limited_1,\n midnight_loan_limited_2,\n dragons_loan,\n ]\n ]\n\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = importer.import_from_feed(feed)\n\n # This importer works the same as the base OPDSImporter, except that\n # it extracts format information from 'odl:license' tags and creates\n # LicensePoolDeliveryMechanisms.\n\n # The importer created 6 editions, pools, and works.\n assert {} == failures\n assert 6 == len(imported_editions)\n assert 6 == len(imported_pools)\n assert 6 == len(imported_works)\n\n [\n canadianity,\n everglades,\n dragons,\n warrior,\n blazing,\n midnight,\n ] = sorted(imported_editions, key=lambda x: x.title)\n assert \"The Blazing World\" == blazing.title\n assert \"Sun Warrior\" == warrior.title\n assert \"Canadianity\" == canadianity.title\n assert \"The Midnight Dance\" == midnight.title\n assert \"Everglades Wildguide\" == everglades.title\n assert \"Rise of the Dragons, Book 1\" == dragons.title\n\n # This book is open access and has no applicable DRM\n [blazing_pool] = [\n p for p in imported_pools if p.identifier == blazing.primary_identifier\n ]\n assert True == blazing_pool.open_access\n [lpdm] = blazing_pool.delivery_mechanisms\n assert Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n assert DeliveryMechanism.NO_DRM == lpdm.delivery_mechanism.drm_scheme\n\n # # This book has a single 'odl:license' tag.\n [warrior_pool] = [\n p for p in imported_pools if p.identifier == warrior.primary_identifier\n ]\n assert False == warrior_pool.open_access\n [lpdm] = warrior_pool.delivery_mechanisms\n assert Edition.BOOK_MEDIUM == warrior_pool.presentation_edition.medium\n assert Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n assert DeliveryMechanism.ADOBE_DRM == lpdm.delivery_mechanism.drm_scheme\n assert RightsStatus.IN_COPYRIGHT == lpdm.rights_status.uri\n assert (\n 52 == warrior_pool.licenses_owned\n ) # 52 remaining checkouts in the License Info Document\n assert 1 == warrior_pool.licenses_available\n [license] = warrior_pool.licenses\n assert \"1\" == license.identifier\n assert (\n 
\"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url}\"\n == license.checkout_url\n )\n assert (\n \"https://license.feedbooks.net/license/status/?uuid=1\" == license.status_url\n )\n\n # The original value for 'expires' in the ODL is:\n # 2019-03-31T03:13:35+02:00\n #\n # As stored in the database, license.expires may not have the\n # same tzinfo, but it does represent the same point in time.\n assert (\n datetime.datetime(\n 2019, 3, 31, 3, 13, 35, tzinfo=dateutil.tz.tzoffset(\"\", 3600 * 2)\n )\n == license.expires\n )\n assert (\n 52 == license.checkouts_left\n ) # 52 remaining checkouts in the License Info Document\n assert 1 == license.checkouts_available\n\n # This item is an open access audiobook.\n [everglades_pool] = [\n p for p in imported_pools if p.identifier == everglades.primary_identifier\n ]\n assert True == everglades_pool.open_access\n [lpdm] = everglades_pool.delivery_mechanisms\n assert Edition.AUDIO_MEDIUM == everglades_pool.presentation_edition.medium\n\n assert (\n Representation.AUDIOBOOK_MANIFEST_MEDIA_TYPE\n == lpdm.delivery_mechanism.content_type\n )\n assert DeliveryMechanism.NO_DRM == lpdm.delivery_mechanism.drm_scheme\n\n # This is a non-open access audiobook. There is no\n # <odl:protection> tag; the drm_scheme is implied by the value\n # of <dcterms:format>.\n [dragons_pool] = [\n p for p in imported_pools if p.identifier == dragons.primary_identifier\n ]\n assert Edition.AUDIO_MEDIUM == dragons_pool.presentation_edition.medium\n assert False == dragons_pool.open_access\n [lpdm] = dragons_pool.delivery_mechanisms\n\n assert (\n Representation.AUDIOBOOK_MANIFEST_MEDIA_TYPE\n == lpdm.delivery_mechanism.content_type\n )\n assert (\n DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM\n == lpdm.delivery_mechanism.drm_scheme\n )\n\n # This book has two 'odl:license' tags for the same format and drm scheme\n # (this happens if the library purchases two copies).\n [canadianity_pool] = [\n p for p in imported_pools if p.identifier == canadianity.primary_identifier\n ]\n assert False == canadianity_pool.open_access\n [lpdm] = canadianity_pool.delivery_mechanisms\n assert Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n assert DeliveryMechanism.ADOBE_DRM == lpdm.delivery_mechanism.drm_scheme\n assert RightsStatus.IN_COPYRIGHT == lpdm.rights_status.uri\n assert (\n 41 == canadianity_pool.licenses_owned\n ) # 40 remaining checkouts + 1 perpetual license in the License Info Documents\n assert 11 == canadianity_pool.licenses_available\n [license1, license2] = sorted(\n canadianity_pool.licenses, key=lambda x: x.identifier\n )\n assert \"2\" == license1.identifier\n assert (\n \"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url}\"\n == license1.checkout_url\n )\n assert (\n \"https://license.feedbooks.net/license/status/?uuid=2\"\n == license1.status_url\n )\n assert None == license1.expires\n assert 40 == license1.checkouts_left\n assert 10 == license1.checkouts_available\n assert \"3\" == license2.identifier\n assert (\n \"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url}\"\n == license2.checkout_url\n )\n assert (\n \"https://license.feedbooks.net/license/status/?uuid=3\"\n == license2.status_url\n )\n assert None == license2.expires\n assert None == license2.checkouts_left\n assert 1 == license2.checkouts_available\n\n # This book has two 'odl:license' tags, and they have different formats.\n # TODO: the format+license association is not 
handled yet.\n [midnight_pool] = [\n p for p in imported_pools if p.identifier == midnight.primary_identifier\n ]\n assert False == midnight_pool.open_access\n lpdms = midnight_pool.delivery_mechanisms\n assert 2 == len(lpdms)\n assert set(\n [Representation.EPUB_MEDIA_TYPE, Representation.PDF_MEDIA_TYPE]\n ) == set([lpdm.delivery_mechanism.content_type for lpdm in lpdms])\n assert [DeliveryMechanism.ADOBE_DRM, DeliveryMechanism.ADOBE_DRM] == [\n lpdm.delivery_mechanism.drm_scheme for lpdm in lpdms\n ]\n assert [RightsStatus.IN_COPYRIGHT, RightsStatus.IN_COPYRIGHT] == [\n lpdm.rights_status.uri for lpdm in lpdms\n ]\n assert (\n 72 == midnight_pool.licenses_owned\n ) # 20 + 52 remaining checkouts in corresponding License Info Documents\n assert 2 == midnight_pool.licenses_available\n [license1, license2] = sorted(\n midnight_pool.licenses, key=lambda x: x.identifier\n )\n assert \"4\" == license1.identifier\n assert (\n \"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url}\"\n == license1.checkout_url\n )\n assert (\n \"https://license.feedbooks.net/license/status/?uuid=4\"\n == license1.status_url\n )\n assert None == license1.expires\n assert 20 == license1.checkouts_left\n assert 1 == license1.checkouts_available\n assert \"5\" == license2.identifier\n assert (\n \"https://loan.feedbooks.net/loan/get/{?id,checkout_id,expires,patron_id,notification_url}\"\n == license2.checkout_url\n )\n assert (\n \"https://license.feedbooks.net/license/status/?uuid=5\"\n == license2.status_url\n )\n assert None == license2.expires\n assert 52 == license2.checkouts_left\n assert 1 == license2.checkouts_available\n\n @pytest.mark.parametrize(\n \"license\",\n [\n pytest.param(\n LicenseInfoHelper(\n license=LicenseHelper(\n concurrency=1, expires=\"2021-01-01T00:01:00+01:00\"\n ),\n left=52,\n available=1,\n ),\n id=\"expiration_date_in_the_past\",\n ),\n pytest.param(\n LicenseInfoHelper(\n license=LicenseHelper(\n concurrency=1,\n ),\n left=0,\n available=1,\n ),\n id=\"left_is_zero\",\n ),\n pytest.param(\n LicenseInfoHelper(\n license=LicenseHelper(\n concurrency=1,\n ),\n available=1,\n status=\"unavailable\",\n ),\n id=\"status_unavailable\",\n ),\n ],\n )\n @freeze_time(\"2021-01-01T00:00:00+00:00\")\n def test_odl_importer_expired_licenses(self, import_templated, license):\n \"\"\"Ensure ODLImporter imports expired licenses, but does not count them.\"\"\"\n # Import the test feed with an expired ODL license.\n imported_editions, imported_pools, imported_works, failures = import_templated(\n [license]\n )\n\n # The importer created 1 edition and 1 work with no failures.\n assert failures == {}\n assert len(imported_editions) == 1\n assert len(imported_works) == 1\n\n # Ensure that the license pool was successfully created, with no available copies.\n assert len(imported_pools) == 1\n\n [imported_pool] = imported_pools\n assert imported_pool.licenses_owned == 0\n assert imported_pool.licenses_available == 0\n assert len(imported_pool.licenses) == 1\n\n # Ensure the license was imported and is expired.\n [imported_license] = imported_pool.licenses\n assert imported_license.is_inactive is True\n\n def test_odl_importer_reimport_expired_licenses(self, import_templated):\n license_expiry = dateutil.parser.parse(\"2021-01-01T00:01:00+00:00\")\n licenses = [\n LicenseInfoHelper(\n license=LicenseHelper(concurrency=1, expires=license_expiry),\n available=1,\n )\n ]\n\n # First import the license when it is not expired\n with freeze_time(license_expiry - 
datetime.timedelta(days=1)):\n\n # Import the test feed.\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = import_templated(licenses)\n\n # The importer created 1 edition and 1 work with no failures.\n assert failures == {}\n assert len(imported_editions) == 1\n assert len(imported_works) == 1\n assert len(imported_pools) == 1\n\n # Ensure that the license pool was successfully created, with available copies.\n [imported_pool] = imported_pools\n assert imported_pool.licenses_owned == 1\n assert imported_pool.licenses_available == 1\n assert len(imported_pool.licenses) == 1\n\n # Ensure the license was imported and is not expired.\n [imported_license] = imported_pool.licenses\n assert imported_license.is_inactive is False\n\n # Reimport the license when it is expired\n with freeze_time(license_expiry + datetime.timedelta(days=1)):\n\n # Import the test feed.\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = import_templated(licenses)\n\n # The importer created 1 edition and 1 work with no failures.\n assert failures == {}\n assert len(imported_editions) == 1\n assert len(imported_works) == 1\n assert len(imported_pools) == 1\n\n # Ensure that the license pool was successfully created, with no available copies.\n [imported_pool] = imported_pools\n assert imported_pool.licenses_owned == 0\n assert imported_pool.licenses_available == 0\n assert len(imported_pool.licenses) == 1\n\n # Ensure the license was imported and is expired.\n [imported_license] = imported_pool.licenses\n assert imported_license.is_inactive is True\n\n @freeze_time(\"2021-01-01T00:00:00+00:00\")\n def test_odl_importer_multiple_expired_licenses(self, import_templated):\n \"\"\"Ensure ODLImporter imports expired licenses\n and does not count them in the total number of available licenses.\"\"\"\n\n # 1.1. Import the test feed with three inactive ODL licenses and two active licenses.\n inactive = [\n LicenseInfoHelper(\n # Expired\n # (expiry date in the past)\n license=LicenseHelper(\n concurrency=1,\n expires=datetime_helpers.utc_now() - datetime.timedelta(days=1),\n ),\n available=1,\n ),\n LicenseInfoHelper(\n # Expired\n # (left is 0)\n license=LicenseHelper(concurrency=1),\n available=1,\n left=0,\n ),\n LicenseInfoHelper(\n # Expired\n # (status is unavailable)\n license=LicenseHelper(concurrency=1),\n available=1,\n status=\"unavailable\",\n ),\n ]\n active = [\n LicenseInfoHelper(\n # Valid\n license=LicenseHelper(concurrency=1),\n available=1,\n ),\n LicenseInfoHelper(\n # Valid\n license=LicenseHelper(concurrency=5),\n available=5,\n left=40,\n ),\n ]\n imported_editions, imported_pools, imported_works, failures = import_templated(\n active + inactive\n )\n\n assert failures == {}\n\n # License pool was successfully created\n assert len(imported_pools) == 1\n [imported_pool] = imported_pools\n\n # All licenses were imported\n assert len(imported_pool.licenses) == 5\n\n # Make sure that the license statistics are correct and include only active licenses.\n assert imported_pool.licenses_owned == 41\n assert imported_pool.licenses_available == 6\n\n # Correct number of active and inactive licenses\n assert sum([not l.is_inactive for l in imported_pool.licenses]) == len(active)\n assert sum([l.is_inactive for l in imported_pool.licenses]) == len(inactive)\n\n def test_odl_importer_reimport_multiple_licenses(self, import_templated):\n \"\"\"Ensure ODLImporter correctly imports licenses that have already been imported.\"\"\"\n\n # 1.1. 
Import the test feed with ODL licenses that are not expired.\n license_expiry = dateutil.parser.parse(\"2021-01-01T00:01:00+00:00\")\n\n date = LicenseInfoHelper(\n license=LicenseHelper(\n concurrency=1,\n expires=license_expiry,\n ),\n available=1,\n )\n left = LicenseInfoHelper(\n license=LicenseHelper(concurrency=2), available=1, left=5\n )\n perpetual = LicenseInfoHelper(license=LicenseHelper(concurrency=1), available=0)\n licenses = [date, left, perpetual]\n\n # Import with all licenses valid\n with freeze_time(license_expiry - datetime.timedelta(days=1)):\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = import_templated(licenses)\n\n # No failures in the import\n assert failures == {}\n\n assert len(imported_pools) == 1\n\n [imported_pool] = imported_pools\n assert len(imported_pool.licenses) == 3\n assert imported_pool.licenses_available == 2\n assert imported_pool.licenses_owned == 7\n\n # No licenses are expired\n assert sum([not l.is_inactive for l in imported_pool.licenses]) == len(\n licenses\n )\n\n # Expire the first two licenses\n\n # The first one is expired by changing the time\n with freeze_time(license_expiry + datetime.timedelta(days=1)):\n # The second one is expired by setting left to 0\n left.left = 0\n\n # The perpetual license has a copy available\n perpetual.available = 1\n\n # Reimport\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = import_templated(licenses)\n\n # No failures in the import\n assert failures == {}\n\n assert len(imported_pools) == 1\n\n [imported_pool] = imported_pools\n assert len(imported_pool.licenses) == 3\n assert imported_pool.licenses_available == 1\n assert imported_pool.licenses_owned == 1\n\n # One license not expired\n assert sum([not l.is_inactive for l in imported_pool.licenses]) == 1\n\n # Two licenses expired\n assert sum([l.is_inactive for l in imported_pool.licenses]) == 2\n\n\nclass TestODLHoldReaper(DatabaseTest, BaseODLAPITest):\n def test_run_once(self, collection, api, db, pool, license):\n data_source = DataSource.lookup(self._db, \"Feedbooks\", autocreate=True)\n collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, data_source.name\n )\n reaper = ODLHoldReaper(db, collection, api=api)\n\n now = utc_now()\n yesterday = now - datetime.timedelta(days=1)\n\n license.setup(concurrency=3, available=3)\n expired_hold1, ignore = pool.on_hold_to(\n self._patron(), end=yesterday, position=0\n )\n expired_hold2, ignore = pool.on_hold_to(\n self._patron(), end=yesterday, position=0\n )\n expired_hold3, ignore = pool.on_hold_to(\n self._patron(), end=yesterday, position=0\n )\n current_hold, ignore = pool.on_hold_to(self._patron(), position=3)\n # This hold has an end date in the past, but its position is greater than 0\n # so the end date is not reliable.\n bad_end_date, ignore = pool.on_hold_to(\n self._patron(), end=yesterday, position=4\n )\n\n progress = reaper.run_once(reaper.timestamp().to_data())\n\n # The expired holds have been deleted and the other holds have been updated.\n assert 2 == db.query(Hold).count()\n assert [current_hold, bad_end_date] == db.query(Hold).order_by(Hold.start).all()\n assert 0 == current_hold.position\n assert 0 == bad_end_date.position\n assert current_hold.end > now\n assert bad_end_date.end > now\n assert 1 == pool.licenses_available\n assert 2 == pool.licenses_reserved\n\n # The TimestampData returned reflects what work was done.\n assert \"Holds deleted: 3. 
License pools updated: 1\" == progress.achievements\n\n # The TimestampData does not include any timing information --\n # that will be applied by run().\n assert None == progress.start\n assert None == progress.finish\n\n\nclass TestSharedODLAPI(DatabaseTest, BaseODLTest):\n def setup_method(self):\n super(TestSharedODLAPI, self).setup_method()\n self.collection = MockSharedODLAPI.mock_collection(self._db)\n self.collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, \"Feedbooks\"\n )\n self.api = MockSharedODLAPI(self._db, self.collection)\n self.pool = self._licensepool(None, collection=self.collection)\n self.pool.identifier.add_link(\n Hyperlink.BORROW, self._str, self.collection.data_source\n )\n self.patron = self._patron()\n\n def test_get(self):\n # Create a SharedODLAPI to test the _get method. The other tests use a\n # mock API class that overrides _get.\n api = SharedODLAPI(self._db, self.collection)\n\n # The library has not registered with the remote collection yet.\n def do_get(url, headers=None, allowed_response_codes=None):\n raise Exception(\"do_get should not be called\")\n\n pytest.raises(\n LibraryAuthorizationFailedException,\n api._get,\n \"test url\",\n patron=self.patron,\n do_get=do_get,\n )\n\n # Once the library registers, it gets a shared secret that is included\n # in request headers.\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n ExternalIntegration.PASSWORD,\n self.patron.library,\n self.collection.external_integration,\n ).value = \"secret\"\n\n def do_get(url, headers=None, allowed_response_codes=None):\n assert \"test url\" == url\n assert \"test header value\" == headers.get(\"test_key\")\n assert \"Bearer \" + base64.b64encode(\"secret\") == headers.get(\n \"Authorization\"\n )\n assert [\"200\"] == allowed_response_codes\n\n api._get(\n \"test url\",\n headers=dict(test_key=\"test header value\"),\n patron=self.patron,\n allowed_response_codes=[\"200\"],\n do_get=do_get,\n )\n\n def test_checkout_success(self):\n response = self.get_data(\"shared_collection_borrow_success.opds\")\n self.api.queue_response(200, content=response)\n\n loan = self.api.checkout(\n self.patron, \"pin\", self.pool, Representation.EPUB_MEDIA_TYPE\n )\n assert self.collection == loan.collection(self._db)\n assert self.pool.data_source.name == loan.data_source_name\n assert self.pool.identifier.type == loan.identifier_type\n assert self.pool.identifier.identifier == loan.identifier\n assert datetime_utc(2018, 3, 8, 17, 41, 31) == loan.start_date\n assert datetime_utc(2018, 3, 29, 17, 41, 30) == loan.end_date\n assert (\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/31\"\n == loan.external_identifier\n )\n\n assert [self.pool.identifier.links[0].resource.url] == self.api.requests\n\n def test_checkout_from_hold(self):\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n hold_info_response = self.get_data(\"shared_collection_hold_info_ready.opds\")\n self.api.queue_response(200, content=hold_info_response)\n borrow_response = self.get_data(\"shared_collection_borrow_success.opds\")\n self.api.queue_response(200, content=borrow_response)\n\n loan = self.api.checkout(\n self.patron, \"pin\", self.pool, Representation.EPUB_MEDIA_TYPE\n )\n assert self.collection == loan.collection(self._db)\n assert self.pool.data_source.name == loan.data_source_name\n assert self.pool.identifier.type == loan.identifier_type\n assert self.pool.identifier.identifier == loan.identifier\n assert 
datetime_utc(2018, 3, 8, 17, 41, 31) == loan.start_date\n assert datetime_utc(2018, 3, 29, 17, 41, 30) == loan.end_date\n assert (\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/31\"\n == loan.external_identifier\n )\n\n assert [\n hold.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/holds/17/borrow\",\n ] == self.api.requests\n\n def test_checkout_already_checked_out(self):\n loan, ignore = self.pool.loan_to(self.patron)\n pytest.raises(\n AlreadyCheckedOut,\n self.api.checkout,\n self.patron,\n \"pin\",\n self.pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n assert [] == self.api.requests\n\n def test_checkout_no_available_copies(self):\n self.api.queue_response(403)\n pytest.raises(\n NoAvailableCopies,\n self.api.checkout,\n self.patron,\n \"pin\",\n self.pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n assert [self.pool.identifier.links[0].resource.url] == self.api.requests\n\n def test_checkout_no_licenses(self):\n self.api.queue_response(\n NO_LICENSES.response[1],\n headers=NO_LICENSES.response[2],\n content=NO_LICENSES.response[0],\n )\n pytest.raises(\n NoLicenses,\n self.api.checkout,\n self.patron,\n \"pin\",\n self.pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n assert [self.pool.identifier.links[0].resource.url] == self.api.requests\n\n def test_checkout_from_hold_not_available(self):\n hold, ignore = self.pool.on_hold_to(self.patron)\n hold_info_response = self.get_data(\"shared_collection_hold_info_reserved.opds\")\n self.api.queue_response(200, content=hold_info_response)\n pytest.raises(\n NoAvailableCopies,\n self.api.checkout,\n self.patron,\n \"pin\",\n self.pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n assert [hold.external_identifier] == self.api.requests\n\n def test_checkout_cannot_loan(self):\n self.api.queue_response(500)\n pytest.raises(\n CannotLoan,\n self.api.checkout,\n self.patron,\n \"pin\",\n self.pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n assert [self.pool.identifier.links[0].resource.url] == self.api.requests\n\n # This pool has no borrow link.\n pool = self._licensepool(None, collection=self.collection)\n pytest.raises(\n CannotLoan,\n self.api.checkout,\n self.patron,\n \"pin\",\n pool,\n Representation.EPUB_MEDIA_TYPE,\n )\n\n def test_checkin_success(self):\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n loan_info_response = self.get_data(\"shared_collection_loan_info.opds\")\n self.api.queue_response(200, content=loan_info_response)\n self.api.queue_response(200, content=\"Deleted\")\n response = self.api.checkin(self.patron, \"pin\", self.pool)\n assert True == response\n assert [\n loan.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/33/revoke\",\n ] == self.api.requests\n\n def test_checkin_not_checked_out(self):\n pytest.raises(NotCheckedOut, self.api.checkin, self.patron, \"pin\", self.pool)\n assert [] == self.api.requests\n\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n self.api.queue_response(404)\n pytest.raises(NotCheckedOut, self.api.checkin, self.patron, \"pin\", self.pool)\n assert [loan.external_identifier] == self.api.requests\n\n def test_checkin_cannot_return(self):\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n self.api.queue_response(500)\n pytest.raises(CannotReturn, self.api.checkin, self.patron, \"pin\", self.pool)\n assert [loan.external_identifier] == self.api.requests\n\n loan_info_response = self.get_data(\"shared_collection_loan_info.opds\")\n 
self.api.queue_response(200, content=loan_info_response)\n self.api.queue_response(500)\n pytest.raises(CannotReturn, self.api.checkin, self.patron, \"pin\", self.pool)\n assert [\n loan.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/33/revoke\",\n ] == self.api.requests[1:]\n\n def test_fulfill_success(self):\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n loan_info_response = self.get_data(\"shared_collection_loan_info.opds\")\n self.api.queue_response(200, content=loan_info_response)\n self.api.queue_response(200, content=\"An ACSM file\")\n fulfillment = self.api.fulfill(\n self.patron, \"pin\", self.pool, self.pool.delivery_mechanisms[0]\n )\n assert self.collection == fulfillment.collection(self._db)\n assert self.pool.data_source.name == fulfillment.data_source_name\n assert self.pool.identifier.type == fulfillment.identifier_type\n assert self.pool.identifier.identifier == fulfillment.identifier\n assert None == fulfillment.content_link\n assert b\"An ACSM file\" == fulfillment.content\n assert datetime_utc(2018, 3, 29, 17, 44, 11) == fulfillment.content_expires\n\n assert [\n loan.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/33/fulfill/2\",\n ] == self.api.requests\n\n def test_fulfill_not_checked_out(self):\n pytest.raises(\n NotCheckedOut,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [] == self.api.requests\n\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n self.api.queue_response(404)\n pytest.raises(\n NotCheckedOut,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [loan.external_identifier] == self.api.requests\n\n def test_fulfill_cannot_fulfill(self):\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n self.api.queue_response(500)\n pytest.raises(\n CannotFulfill,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [loan.external_identifier] == self.api.requests\n\n self.api.queue_response(200, content=\"not opds\")\n pytest.raises(\n CannotFulfill,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [loan.external_identifier] == self.api.requests[1:]\n\n loan_info_response = self.get_data(\"shared_collection_loan_info.opds\")\n self.api.queue_response(200, content=loan_info_response)\n self.api.queue_response(500)\n pytest.raises(\n CannotFulfill,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [\n loan.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/33/fulfill/2\",\n ] == self.api.requests[2:]\n\n def test_fulfill_format_not_available(self):\n loan, ignore = self.pool.loan_to(self.patron)\n loan_info_response = self.get_data(\"shared_collection_loan_info_no_epub.opds\")\n self.api.queue_response(200, content=loan_info_response)\n pytest.raises(\n FormatNotAvailable,\n self.api.fulfill,\n self.patron,\n \"pin\",\n self.pool,\n self.pool.delivery_mechanisms[0],\n )\n assert [loan.external_identifier] == self.api.requests\n\n def test_place_hold_success(self):\n hold_response = self.get_data(\"shared_collection_hold_info_reserved.opds\")\n self.api.queue_response(200, content=hold_response)\n hold = self.api.place_hold(\n self.patron, \"pin\", self.pool, \"<EMAIL>\"\n )\n assert 
self.collection == hold.collection(self._db)\n assert self.pool.data_source.name == hold.data_source_name\n assert self.pool.identifier.type == hold.identifier_type\n assert self.pool.identifier.identifier == hold.identifier\n assert datetime_utc(2018, 3, 8, 18, 50, 18) == hold.start_date\n assert datetime_utc(2018, 3, 29, 17, 44, 1) == hold.end_date\n assert 1 == hold.hold_position\n assert (\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/holds/18\"\n == hold.external_identifier\n )\n\n assert [self.pool.identifier.links[0].resource.url] == self.api.requests\n\n def test_place_hold_already_checked_out(self):\n loan, ignore = self.pool.loan_to(self.patron)\n pytest.raises(\n AlreadyCheckedOut,\n self.api.place_hold,\n self.patron,\n \"pin\",\n self.pool,\n \"<EMAIL>\",\n )\n assert [] == self.api.requests\n\n def test_release_hold_success(self):\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n hold_response = self.get_data(\"shared_collection_hold_info_reserved.opds\")\n self.api.queue_response(200, content=hold_response)\n self.api.queue_response(200, content=\"Deleted\")\n response = self.api.release_hold(self.patron, \"pin\", self.pool)\n assert True == response\n assert [\n hold.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/holds/18/revoke\",\n ] == self.api.requests\n\n def test_release_hold_not_on_hold(self):\n pytest.raises(NotOnHold, self.api.release_hold, self.patron, \"pin\", self.pool)\n assert [] == self.api.requests\n\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n self.api.queue_response(404)\n pytest.raises(NotOnHold, self.api.release_hold, self.patron, \"pin\", self.pool)\n assert [hold.external_identifier] == self.api.requests\n\n def test_release_hold_cannot_release_hold(self):\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n self.api.queue_response(500)\n pytest.raises(\n CannotReleaseHold, self.api.release_hold, self.patron, \"pin\", self.pool\n )\n assert [hold.external_identifier] == self.api.requests\n\n hold_response = self.get_data(\"shared_collection_hold_info_reserved.opds\")\n self.api.queue_response(200, content=hold_response)\n self.api.queue_response(500)\n pytest.raises(\n CannotReleaseHold, self.api.release_hold, self.patron, \"pin\", self.pool\n )\n assert [\n hold.external_identifier,\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/holds/18/revoke\",\n ] == self.api.requests[1:]\n\n def test_patron_activity_success(self):\n # The patron has one loan, and the remote circ manager returns it.\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n loan_response = self.get_data(\"shared_collection_loan_info.opds\")\n self.api.queue_response(200, content=loan_response)\n activity = self.api.patron_activity(self.patron, \"pin\")\n assert 1 == len(activity)\n [loan_info] = activity\n assert self.collection == loan_info.collection(self._db)\n assert self.pool.data_source.name == loan_info.data_source_name\n assert self.pool.identifier.type == loan_info.identifier_type\n assert self.pool.identifier.identifier == loan_info.identifier\n assert datetime_utc(2018, 3, 8, 17, 44, 12) == loan_info.start_date\n assert datetime_utc(2018, 3, 29, 17, 44, 11) == loan_info.end_date\n assert [loan.external_identifier] == self.api.requests\n\n # The _get method was passed a patron - this is necessary because\n # the patron_activity method may be called from a thread without\n # access to the flask 
request.\n assert self.patron == self.api.request_args[0][0]\n\n # The patron's loan has been deleted on the remote.\n self.api.queue_response(404, content=\"No loan here\")\n activity = self.api.patron_activity(self.patron, \"pin\")\n assert 0 == len(activity)\n assert [loan.external_identifier] == self.api.requests[1:]\n\n # Now the patron has a hold instead.\n self._db.delete(loan)\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n hold_response = self.get_data(\"shared_collection_hold_info_reserved.opds\")\n self.api.queue_response(200, content=hold_response)\n activity = self.api.patron_activity(self.patron, \"pin\")\n assert 1 == len(activity)\n [hold_info] = activity\n assert self.collection == hold_info.collection(self._db)\n assert self.pool.data_source.name == hold_info.data_source_name\n assert self.pool.identifier.type == hold_info.identifier_type\n assert self.pool.identifier.identifier == hold_info.identifier\n assert datetime_utc(2018, 3, 8, 18, 50, 18) == hold_info.start_date\n assert datetime_utc(2018, 3, 29, 17, 44, 1) == hold_info.end_date\n assert [hold.external_identifier] == self.api.requests[2:]\n\n # The patron's hold has been deleted on the remote.\n self.api.queue_response(404, content=\"No hold here\")\n activity = self.api.patron_activity(self.patron, \"pin\")\n assert 0 == len(activity)\n assert [hold.external_identifier] == self.api.requests[3:]\n\n def test_patron_activity_remote_integration_exception(self):\n loan, ignore = self.pool.loan_to(self.patron, external_identifier=self._str)\n self.api.queue_response(500)\n pytest.raises(\n RemoteIntegrationException, self.api.patron_activity, self.patron, \"pin\"\n )\n assert [loan.external_identifier] == self.api.requests\n self._db.delete(loan)\n\n hold, ignore = self.pool.on_hold_to(self.patron, external_identifier=self._str)\n self.api.queue_response(500)\n pytest.raises(\n RemoteIntegrationException, self.api.patron_activity, self.patron, \"pin\"\n )\n assert [hold.external_identifier] == self.api.requests[1:]\n\n\nclass TestSharedODLImporter(DatabaseTest, BaseODLTest):\n def test_get_fulfill_url(self):\n entry = self.get_data(\"shared_collection_loan_info.opds\")\n assert (\n \"http://localhost:6500/AL/collections/DPLA%20Exchange/loans/33/fulfill/2\"\n == SharedODLImporter.get_fulfill_url(\n entry, \"application/epub+zip\", \"application/vnd.adobe.adept+xml\"\n )\n )\n assert None == SharedODLImporter.get_fulfill_url(\n entry, \"application/pdf\", \"application/vnd.adobe.adept+xml\"\n )\n assert None == SharedODLImporter.get_fulfill_url(\n entry, \"application/epub+zip\", None\n )\n\n def test_import(self):\n feed = self.get_data(\"shared_collection_feed.opds\")\n data_source = DataSource.lookup(self._db, \"DPLA Exchange\", autocreate=True)\n collection = MockSharedODLAPI.mock_collection(self._db)\n collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, data_source.name\n )\n\n class MockMetadataClient(object):\n def canonicalize_author_name(self, identifier, working_display_name):\n return working_display_name\n\n metadata_client = MockMetadataClient()\n importer = SharedODLImporter(\n self._db,\n collection=collection,\n metadata_client=metadata_client,\n )\n\n (\n imported_editions,\n imported_pools,\n imported_works,\n failures,\n ) = importer.import_from_feed(feed)\n\n # This importer works the same as the base OPDSImporter, except that\n # it extracts license pool information from acquisition links.\n\n # The importer created 3 editions, 
pools, and works.\n assert 3 == len(imported_editions)\n assert 3 == len(imported_pools)\n assert 3 == len(imported_works)\n\n [six_months, essex, gatsby] = sorted(imported_editions, key=lambda x: x.title)\n assert \"Six Months, Three Days, Five Others\" == six_months.title\n assert \"The Essex Serpent\" == essex.title\n assert \"The Great Gatsby\" == gatsby.title\n\n # This book is open access.\n [gatsby_pool] = [\n p for p in imported_pools if p.identifier == gatsby.primary_identifier\n ]\n assert True == gatsby_pool.open_access\n # This pool has two delivery mechanisms, from a borrow link and an open-access link.\n # Both are DRM-free epubs.\n lpdms = gatsby_pool.delivery_mechanisms\n assert 2 == len(lpdms)\n for lpdm in lpdms:\n assert (\n Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n )\n assert DeliveryMechanism.NO_DRM == lpdm.delivery_mechanism.drm_scheme\n\n # This book is already checked out and has a hold.\n [six_months_pool] = [\n p for p in imported_pools if p.identifier == six_months.primary_identifier\n ]\n assert False == six_months_pool.open_access\n assert 1 == six_months_pool.licenses_owned\n assert 0 == six_months_pool.licenses_available\n assert 1 == six_months_pool.patrons_in_hold_queue\n [lpdm] = six_months_pool.delivery_mechanisms\n assert Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n assert DeliveryMechanism.ADOBE_DRM == lpdm.delivery_mechanism.drm_scheme\n assert RightsStatus.IN_COPYRIGHT == lpdm.rights_status.uri\n [borrow_link] = [\n l for l in six_months_pool.identifier.links if l.rel == Hyperlink.BORROW\n ]\n assert (\n \"http://localhost:6500/AL/works/URI/http://www.feedbooks.com/item/2493650/borrow\"\n == borrow_link.resource.url\n )\n\n # This book is currently available.\n [essex_pool] = [\n p for p in imported_pools if p.identifier == essex.primary_identifier\n ]\n assert False == essex_pool.open_access\n assert 4 == essex_pool.licenses_owned\n assert 4 == essex_pool.licenses_available\n assert 0 == essex_pool.patrons_in_hold_queue\n [lpdm] = essex_pool.delivery_mechanisms\n assert Representation.EPUB_MEDIA_TYPE == lpdm.delivery_mechanism.content_type\n assert DeliveryMechanism.ADOBE_DRM == lpdm.delivery_mechanism.drm_scheme\n assert RightsStatus.IN_COPYRIGHT == lpdm.rights_status.uri\n [borrow_link] = [\n l for l in essex_pool.identifier.links if l.rel == Hyperlink.BORROW\n ]\n assert (\n \"http://localhost:6500/AL/works/URI/http://www.feedbooks.com/item/1946289/borrow\"\n == borrow_link.resource.url\n )\n", "id": "1454481", "language": "Python", "matching_score": 10.0615234375, "max_stars_count": 0, "path": "tests/api/test_odl.py" }, { "content": "import binascii\nimport datetime\nimport json\nimport logging\nimport uuid\nfrom io import StringIO\nfrom typing import Callable, Dict, List, Optional, Tuple, Union\n\nimport dateutil\nimport feedparser\nimport flask\nimport sqlalchemy\nfrom flask import url_for\nfrom flask_babel import lazy_gettext as _\nfrom lxml import etree\nfrom sqlalchemy.sql.expression import or_\nfrom uritemplate import URITemplate\n\nfrom core import util\nfrom core.analytics import Analytics\nfrom core.lcp.credential import LCPCredentialFactory\nfrom core.metadata_layer import FormatData, LicenseData, TimestampData\nfrom core.model import (\n Collection,\n ConfigurationSetting,\n DataSource,\n DeliveryMechanism,\n Edition,\n ExternalIntegration,\n Hold,\n Hyperlink,\n LicensePool,\n LicensePoolDeliveryMechanism,\n Loan,\n MediaTypes,\n Representation,\n RightsStatus,\n Session,\n 
get_one,\n get_one_or_create,\n)\nfrom core.model.configuration import (\n ConfigurationAttributeType,\n ConfigurationFactory,\n ConfigurationGrouping,\n ConfigurationMetadata,\n ConfigurationOption,\n ConfigurationStorage,\n HasExternalIntegration,\n)\nfrom core.model.licensing import LicenseStatus\nfrom core.monitor import CollectionMonitor\nfrom core.opds_import import OPDSImporter, OPDSImportMonitor, OPDSXMLParser\nfrom core.testing import DatabaseTest, MockRequestsResponse\nfrom core.util.datetime_helpers import to_utc, utc_now\nfrom core.util.http import HTTP, BadResponseException, RemoteIntegrationException\nfrom core.util.string_helpers import base64\n\nfrom .circulation import BaseCirculationAPI, FulfillmentInfo, HoldInfo, LoanInfo\nfrom .circulation_exceptions import *\nfrom .lcp.hash import Hasher, HasherFactory, HashingAlgorithm\nfrom .shared_collection import BaseSharedCollectionAPI\n\n\nclass ODLAPIConfiguration(ConfigurationGrouping):\n \"\"\"Contains LCP License Server's settings\"\"\"\n\n DEFAULT_PASSPHRASE_HINT = \"View the help page for more information.\"\n DEFAULT_PASSPHRASE_HINT_URL = \"https://lyrasis.zendesk.com/\"\n DEFAULT_ENCRYPTION_ALGORITHM = HashingAlgorithm.SHA256.value\n\n feed_url = ConfigurationMetadata(\n key=Collection.EXTERNAL_ACCOUNT_ID_KEY,\n label=_(\"ODL feed URL\"),\n description=\"\",\n type=ConfigurationAttributeType.TEXT,\n required=True,\n format=\"url\",\n )\n\n username = ConfigurationMetadata(\n key=ExternalIntegration.USERNAME,\n label=_(\"Library's API username\"),\n description=\"\",\n type=ConfigurationAttributeType.TEXT,\n required=True,\n )\n\n password = ConfigurationMetadata(\n key=ExternalIntegration.PASSWORD,\n label=_(\"Library's API password\"),\n description=\"\",\n type=ConfigurationAttributeType.TEXT,\n required=True,\n )\n\n datasource_name = ConfigurationMetadata(\n key=Collection.DATA_SOURCE_NAME_SETTING,\n label=_(\"Data source name\"),\n description=\"\",\n type=ConfigurationAttributeType.TEXT,\n required=True,\n )\n\n default_reservation_period = ConfigurationMetadata(\n key=Collection.DEFAULT_RESERVATION_PERIOD_KEY,\n label=_(\"Default Reservation Period (in Days)\"),\n description=_(\n \"The number of days a patron has to check out a book after a hold becomes available.\"\n ),\n type=ConfigurationAttributeType.NUMBER,\n required=False,\n default=Collection.STANDARD_DEFAULT_RESERVATION_PERIOD,\n )\n\n passphrase_hint = ConfigurationMetadata(\n key=\"passphrase_hint\",\n label=_(\"Passphrase hint\"),\n description=_(\n \"Hint displayed to the user when opening an LCP protected publication.\"\n ),\n type=ConfigurationAttributeType.TEXT,\n required=True,\n default=DEFAULT_PASSPHRASE_HINT,\n )\n\n passphrase_hint_url = ConfigurationMetadata(\n key=\"passphrase_hint_url\",\n label=_(\"Passphrase hint URL\"),\n description=_(\n \"Hint URL available to the user when opening an LCP protected publication.\"\n ),\n type=ConfigurationAttributeType.TEXT,\n required=True,\n default=DEFAULT_PASSPHRASE_HINT_URL,\n format=\"url\",\n )\n\n encryption_algorithm = ConfigurationMetadata(\n key=\"encryption_algorithm\",\n label=_(\"Passphrase encryption algorithm\"),\n description=_(\"Algorithm used for encrypting the passphrase.\"),\n type=ConfigurationAttributeType.SELECT,\n required=False,\n default=DEFAULT_ENCRYPTION_ALGORITHM,\n options=ConfigurationOption.from_enum(HashingAlgorithm),\n )\n\n\nclass ODLAPI(BaseCirculationAPI, BaseSharedCollectionAPI, HasExternalIntegration):\n \"\"\"ODL (Open Distribution to Libraries) is a 
specification that allows\n libraries to manage their own loans and holds. It offers a deeper level\n of control to the library, but it requires the circulation manager to\n keep track of individual copies rather than just license pools, and\n manage its own holds queues.\n\n In addition to circulating books to patrons of a library on the current circulation\n manager, this API can be used to circulate books to patrons of external libraries.\n Only one circulation manager per ODL collection should use an ODLAPI\n - the others should use a SharedODLAPI and configure it to connect to the main\n circulation manager.\n \"\"\"\n\n NAME = ExternalIntegration.ODL\n DESCRIPTION = _(\n \"Import books from a distributor that uses ODL (Open Distribution to Libraries).\"\n )\n\n SETTINGS = BaseSharedCollectionAPI.SETTINGS + ODLAPIConfiguration.to_settings()\n\n LIBRARY_SETTINGS = BaseCirculationAPI.LIBRARY_SETTINGS + [\n BaseCirculationAPI.EBOOK_LOAN_DURATION_SETTING\n ]\n\n SET_DELIVERY_MECHANISM_AT = BaseCirculationAPI.FULFILL_STEP\n\n # Possible status values in the License Status Document:\n\n # The license is available but the user hasn't fulfilled it yet.\n READY_STATUS = \"ready\"\n\n # The license is available and has been fulfilled on at least one device.\n ACTIVE_STATUS = \"active\"\n\n # The license has been revoked by the distributor.\n REVOKED_STATUS = \"revoked\"\n\n # The license has been returned early by the user.\n RETURNED_STATUS = \"returned\"\n\n # The license was returned early and was never fulfilled.\n CANCELLED_STATUS = \"cancelled\"\n\n # The license has expired.\n EXPIRED_STATUS = \"expired\"\n\n STATUS_VALUES = [\n READY_STATUS,\n ACTIVE_STATUS,\n REVOKED_STATUS,\n RETURNED_STATUS,\n CANCELLED_STATUS,\n EXPIRED_STATUS,\n ]\n\n def __init__(self, _db, collection):\n if collection.protocol != self.NAME:\n raise ValueError(\n \"Collection protocol is %s, but passed into ODLAPI!\"\n % collection.protocol\n )\n self.collection_id = collection.id\n self.data_source_name = collection.external_integration.setting(\n Collection.DATA_SOURCE_NAME_SETTING\n ).value\n # Create the data source if it doesn't exist yet.\n DataSource.lookup(_db, self.data_source_name, autocreate=True)\n\n self.username = collection.external_integration.username\n self.password = collection.external_integration.password\n self.analytics = Analytics(_db)\n\n self._configuration_storage = ConfigurationStorage(self)\n self._configuration_factory = ConfigurationFactory()\n self._hasher_factory = HasherFactory()\n self._credential_factory = LCPCredentialFactory()\n self._hasher_instance: Optional[Hasher] = None\n\n def external_integration(\n self, db: sqlalchemy.orm.session.Session\n ) -> ExternalIntegration:\n \"\"\"Return an external integration associated with this object.\n\n :param db: Database session\n :return: External integration associated with this object\n \"\"\"\n return self.collection(db).external_integration\n\n def internal_format(self, delivery_mechanism):\n \"\"\"Each consolidated copy is only available in one format, so we don't need\n a mapping to internal formats.\n \"\"\"\n return delivery_mechanism\n\n def collection(self, db) -> Collection:\n \"\"\"Return a collection associated with this object.\n\n :param db: Database session\n :return: Collection associated with this object\n \"\"\"\n return get_one(db, Collection, id=self.collection_id)\n\n def _get_hasher(self, configuration):\n \"\"\"Returns a Hasher instance\n\n :param configuration: Configuration object\n :type configuration: 
LCPServerConfiguration\n\n :return: Hasher instance\n :rtype: hash.Hasher\n \"\"\"\n if self._hasher_instance is None:\n self._hasher_instance = self._hasher_factory.create(\n configuration.encryption_algorithm\n if configuration.encryption_algorithm\n else ODLAPIConfiguration.DEFAULT_ENCRYPTION_ALGORITHM\n )\n\n return self._hasher_instance\n\n def _get(self, url, headers=None):\n \"\"\"Make a normal HTTP request, but include an authentication\n header with the credentials for the collection.\n \"\"\"\n\n username = self.username\n password = self.password\n headers = dict(headers or {})\n auth_header = \"Basic %s\" % base64.b64encode(\"%s:%s\" % (username, password))\n headers[\"Authorization\"] = auth_header\n\n return HTTP.get_with_timeout(url, headers=headers)\n\n def _url_for(self, *args, **kwargs):\n \"\"\"Wrapper around flask's url_for to be overridden for tests.\"\"\"\n return url_for(*args, **kwargs)\n\n def get_license_status_document(self, loan):\n \"\"\"Get the License Status Document for a loan.\n\n For a new loan, create a local loan with no external identifier and\n pass it in to this method.\n\n This will create the remote loan if one doesn't exist yet. The loan's\n internal database id will be used to receive notifications from the\n distributor when the loan's status changes.\n \"\"\"\n _db = Session.object_session(loan)\n\n if loan.external_identifier:\n url = loan.external_identifier\n else:\n id = loan.license.identifier\n checkout_id = str(uuid.uuid1())\n if loan.patron:\n default_loan_period = self.collection(_db).default_loan_period(\n loan.patron.library\n )\n else:\n # TODO: should integration clients be able to specify their own loan period?\n default_loan_period = self.collection(_db).default_loan_period(\n loan.integration_client\n )\n expires = utc_now() + datetime.timedelta(days=default_loan_period)\n # The patron UUID is generated randomly on each loan, so the distributor\n # doesn't know when multiple loans come from the same patron.\n patron_id = str(uuid.uuid1())\n\n if loan.patron:\n library_short_name = loan.patron.library.short_name\n else:\n # If this is for an integration client, choose an arbitrary library.\n library_short_name = self.collection(_db).libraries[0].short_name\n\n db = Session.object_session(loan)\n patron = loan.patron\n\n with self._configuration_factory.create(\n self._configuration_storage, db, ODLAPIConfiguration\n ) as configuration:\n hasher = self._get_hasher(configuration)\n hashed_passphrase = hasher.hash(\n self._credential_factory.get_patron_passphrase(db, patron)\n )\n encoded_passphrase = base64.b64encode(\n binascii.unhexlify(hashed_passphrase)\n )\n\n self._credential_factory.set_hashed_passphrase(\n db, patron, hashed_passphrase\n )\n\n notification_url = self._url_for(\n \"odl_notify\",\n library_short_name=library_short_name,\n loan_id=loan.id,\n _external=True,\n )\n\n url_template = URITemplate(loan.license.checkout_url)\n url = url_template.expand(\n id=id,\n checkout_id=checkout_id,\n patron_id=patron_id,\n expires=expires.isoformat(),\n notification_url=notification_url,\n passphrase=encoded_passphrase,\n hint=configuration.passphrase_hint,\n hint_url=configuration.passphrase_hint_url,\n )\n\n response = self._get(url)\n\n try:\n status_doc = json.loads(response.content)\n except ValueError as e:\n raise BadResponseException(\n url, \"License Status Document was not valid JSON.\"\n )\n if status_doc.get(\"status\") not in self.STATUS_VALUES:\n raise BadResponseException(\n url, \"License Status Document had an unknown 
status value.\"\n )\n return status_doc\n\n def checkin(self, patron, pin, licensepool):\n \"\"\"Return a loan early.\"\"\"\n _db = Session.object_session(patron)\n\n loan = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n if loan.count() < 1:\n raise NotCheckedOut()\n loan = loan.one()\n return self._checkin(loan)\n\n def _checkin(self, loan):\n _db = Session.object_session(loan)\n doc = self.get_license_status_document(loan)\n status = doc.get(\"status\")\n if status in [\n self.REVOKED_STATUS,\n self.RETURNED_STATUS,\n self.CANCELLED_STATUS,\n self.EXPIRED_STATUS,\n ]:\n # This loan was already returned early or revoked by the distributor, or it expired.\n self.update_loan(loan, doc)\n raise NotCheckedOut()\n\n return_url = None\n links = doc.get(\"links\", [])\n for link in links:\n if link.get(\"rel\") == \"return\":\n return_url = link.get(\"href\")\n break\n\n if not return_url:\n # The distributor didn't provide a link to return this loan.\n # This may be because the book has already been fulfilled and\n # must be returned through the DRM system. If that's true, the\n # app will already be doing that on its own, so we'll silently\n # do nothing.\n return\n\n # Hit the distributor's return link.\n self._get(return_url)\n # Get the status document again to make sure the return was successful,\n # and if so update the pool availability and delete the local loan.\n self.update_loan(loan)\n\n # At this point, if the loan still exists, something went wrong.\n # However, it might be because the loan has already been fulfilled\n # and must be returned through the DRM system, which the app will\n # do on its own, so we can ignore the problem.\n loan = get_one(_db, Loan, id=loan.id)\n if loan:\n return\n return True\n\n def checkout(self, patron, pin, licensepool, internal_format):\n \"\"\"Create a new loan.\"\"\"\n _db = Session.object_session(patron)\n\n loan = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n if loan.count() > 0:\n raise AlreadyCheckedOut()\n\n hold = get_one(_db, Hold, patron=patron, license_pool_id=licensepool.id)\n loan = self._checkout(patron, licensepool, hold)\n return LoanInfo(\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n licensepool.identifier.identifier,\n loan.start,\n loan.end,\n external_identifier=loan.external_identifier,\n )\n\n def _checkout(self, patron_or_client, licensepool, hold=None):\n _db = Session.object_session(patron_or_client)\n\n if not any((l for l in licensepool.licenses if not l.is_inactive)):\n raise NoLicenses()\n\n # Make sure pool info is updated.\n self.update_licensepool(licensepool)\n\n if hold:\n self._update_hold_end_date(hold)\n\n # If there's a holds queue, the patron or client must have a non-expired hold\n # with position 0 to check out the book.\n if (\n not hold or hold.position > 0 or (hold.end and hold.end < utc_now())\n ) and licensepool.licenses_available < 1:\n raise NoAvailableCopies()\n\n # Create a local loan so its database id can be used to\n # receive notifications from the distributor.\n license = licensepool.best_available_license()\n if not license:\n raise NoAvailableCopies()\n loan, ignore = license.loan_to(patron_or_client)\n\n doc = self.get_license_status_document(loan)\n status = doc.get(\"status\")\n\n if status not in [self.READY_STATUS, self.ACTIVE_STATUS]:\n # Something went wrong with this loan and we don't actually\n # have the book 
checked out. This should never happen.\n # Remove the loan we created.\n _db.delete(loan)\n raise CannotLoan()\n\n links = doc.get(\"links\", [])\n external_identifier = None\n for link in links:\n if link.get(\"rel\") == \"self\":\n external_identifier = link.get(\"href\")\n break\n if not external_identifier:\n _db.delete(loan)\n raise CannotLoan()\n\n start = utc_now()\n expires = doc.get(\"potential_rights\", {}).get(\"end\")\n if expires:\n expires = dateutil.parser.parse(expires)\n\n # We need to set the start and end dates on our local loan since\n # the code that calls this only sets them when a new loan is created.\n loan.start = start\n loan.end = expires\n loan.external_identifier = external_identifier\n\n # We also need to update the remaining checkouts for the license.\n loan.license.checkout()\n\n # We have successfully borrowed this book.\n if hold:\n _db.delete(hold)\n self.update_licensepool(licensepool)\n return loan\n\n def fulfill(self, patron, pin, licensepool, internal_format, **kwargs):\n \"\"\"Get the actual resource file to the patron.\n\n :param kwargs: A container for arguments to fulfill()\n which are not relevant to this vendor.\n\n :return: a FulfillmentInfo object.\n \"\"\"\n _db = Session.object_session(patron)\n\n loan = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n loan = loan.one()\n return self._fulfill(loan, internal_format)\n\n @staticmethod\n def _find_content_link_and_type(\n links: List[Dict],\n drm_scheme: Optional[str],\n ) -> Tuple[Optional[str], Optional[str]]:\n \"\"\"Find a content link with the type information corresponding to the selected delivery mechanism.\n\n :param links: List of dict-like objects containing information about available links in the LCP license file\n :param drm_scheme: Selected delivery mechanism DRM scheme\n\n :return: Two-tuple containing a content link and content type\n \"\"\"\n candidates = []\n for link in links:\n # Depending on the format being served, the crucial information\n # may be in 'manifest' or in 'license'.\n if link.get(\"rel\") not in (\"manifest\", \"license\"):\n continue\n href = link.get(\"href\")\n type = link.get(\"type\")\n candidates.append((href, type))\n\n if len(candidates) == 0:\n # No candidates\n return None, None\n\n if not drm_scheme:\n # If we don't have a requested DRM scheme, so we use the first one.\n # TODO: Can this just be dropped?\n return candidates[0]\n\n # For DeMarque audiobook content, we need to translate the type property\n # to reflect what we have stored in our delivery mechanisms.\n if drm_scheme == DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM:\n drm_scheme = ODLImporter.FEEDBOOKS_AUDIO\n\n return next(filter(lambda x: x[1] == drm_scheme, candidates), (None, None))\n\n def _fulfill(\n self,\n loan: Loan,\n delivery_mechanism: Optional[Union[str, LicensePoolDeliveryMechanism]] = None,\n ) -> FulfillmentInfo:\n licensepool = loan.license_pool\n doc = self.get_license_status_document(loan)\n status = doc.get(\"status\")\n\n if status not in [self.READY_STATUS, self.ACTIVE_STATUS]:\n # This loan isn't available for some reason. 
It's possible\n # the distributor revoked it or the patron already returned it\n # through the DRM system, and we didn't get a notification\n # from the distributor yet.\n self.update_loan(loan, doc)\n raise CannotFulfill()\n\n expires = doc.get(\"potential_rights\", {}).get(\"end\")\n expires = dateutil.parser.parse(expires)\n\n links = doc.get(\"links\", [])\n if isinstance(delivery_mechanism, LicensePoolDeliveryMechanism):\n delivery_mechanism = delivery_mechanism.delivery_mechanism.drm_scheme\n\n content_link, content_type = self._find_content_link_and_type(\n links, delivery_mechanism\n )\n\n return FulfillmentInfo(\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n licensepool.identifier.identifier,\n content_link,\n content_type,\n None,\n expires,\n )\n\n def _count_holds_before(self, hold):\n # Count holds on the license pool that started before this hold and\n # aren't expired.\n _db = Session.object_session(hold)\n return (\n _db.query(Hold)\n .filter(Hold.license_pool_id == hold.license_pool_id)\n .filter(Hold.start < hold.start)\n .filter(\n or_(\n Hold.end == None,\n Hold.end > utc_now(),\n Hold.position > 0,\n )\n )\n .count()\n )\n\n def _update_hold_end_date(self, hold):\n _db = Session.object_session(hold)\n pool = hold.license_pool\n\n # First make sure the hold position is up-to-date, since we'll\n # need it to calculate the end date.\n original_position = hold.position\n self._update_hold_position(hold)\n\n default_loan_period = self.collection(_db).default_loan_period(\n hold.library or hold.integration_client\n )\n default_reservation_period = self.collection(_db).default_reservation_period\n\n # If the hold was already to check out and already has an end date,\n # it doesn't need an update.\n if hold.position == 0 and original_position == 0 and hold.end:\n return\n\n # If the patron is in the queue, we need to estimate when the book\n # will be available for check out. We can do slightly better than the\n # default calculation since we know when all current loans will expire,\n # but we're still calculating the worst case.\n elif hold.position > 0:\n # Find the current loans and reserved holds for the licenses.\n current_loans = (\n _db.query(Loan)\n .filter(Loan.license_pool_id == pool.id)\n .filter(or_(Loan.end == None, Loan.end > utc_now()))\n .order_by(Loan.start)\n .all()\n )\n current_holds = (\n _db.query(Hold)\n .filter(Hold.license_pool_id == pool.id)\n .filter(\n or_(\n Hold.end == None,\n Hold.end > utc_now(),\n Hold.position > 0,\n )\n )\n .order_by(Hold.start)\n .all()\n )\n licenses_reserved = min(\n pool.licenses_owned - len(current_loans), len(current_holds)\n )\n current_reservations = current_holds[:licenses_reserved]\n\n # The licenses will have to go through some number of cycles\n # before one of them gets to this hold. 
This leaves out the first cycle -\n            # it's already started so we'll handle it separately.\n            cycles = (hold.position - licenses_reserved - 1) // pool.licenses_owned\n\n            # Each of the owned licenses is currently either on loan or reserved.\n            # Figure out which license this hold will eventually get if every\n            # patron keeps their loans and holds for the maximum time.\n            copy_index = (hold.position - licenses_reserved - 1) % pool.licenses_owned\n\n            # In the worst case, the first cycle ends when a current loan expires, or\n            # after a current reservation is checked out and then expires.\n            if len(current_loans) > copy_index:\n                next_cycle_start = current_loans[copy_index].end\n            else:\n                reservation = current_reservations[copy_index - len(current_loans)]\n                next_cycle_start = reservation.end + datetime.timedelta(\n                    days=default_loan_period\n                )\n\n            # Assume all cycles after the first cycle take the maximum time.\n            cycle_period = default_loan_period + default_reservation_period\n            hold.end = next_cycle_start + datetime.timedelta(\n                days=(cycle_period * cycles)\n            )\n\n        # If the end date isn't set yet or the position just became 0, the\n        # hold just became available. The patron's reservation period starts now.\n        else:\n            hold.end = utc_now() + datetime.timedelta(days=default_reservation_period)\n\n    def _update_hold_position(self, hold):\n        _db = Session.object_session(hold)\n        pool = hold.license_pool\n        loans_count = (\n            _db.query(Loan)\n            .filter(\n                Loan.license_pool_id == pool.id,\n            )\n            .filter(or_(Loan.end == None, Loan.end > utc_now()))\n            .count()\n        )\n        holds_count = self._count_holds_before(hold)\n\n        remaining_licenses = pool.licenses_owned - loans_count\n\n        if remaining_licenses > holds_count:\n            # The hold is ready to check out.\n            hold.position = 0\n\n        else:\n            # Add 1 since position 0 indicates the hold is ready.\n            hold.position = holds_count + 1\n\n    def update_licensepool(self, licensepool: LicensePool):\n        # Update the pool and the next holds in the queue when a license is reserved.\n        licensepool.update_availability_from_licenses(\n            analytics=self.analytics,\n            as_of=utc_now(),\n        )\n        holds = licensepool.get_active_holds()\n        for hold in holds[: licensepool.licenses_reserved]:\n            if hold.position != 0:\n                # This hold just got a reserved license.\n                self._update_hold_end_date(hold)\n\n    def place_hold(self, patron, pin, licensepool, notification_email_address):\n        \"\"\"Create a new hold.\"\"\"\n        hold = self._place_hold(patron, licensepool)\n        return HoldInfo(\n            licensepool.collection,\n            licensepool.data_source.name,\n            licensepool.identifier.type,\n            licensepool.identifier.identifier,\n            start_date=hold.start,\n            end_date=hold.end,\n            hold_position=hold.position,\n        )\n\n    def _place_hold(self, patron_or_client, licensepool):\n        _db = Session.object_session(patron_or_client)\n\n        # Make sure pool info is updated.\n        self.update_licensepool(licensepool)\n\n        if licensepool.licenses_available > 0:\n            raise CurrentlyAvailable()\n\n        # Create local hold.\n        hold, is_new = licensepool.on_hold_to(patron_or_client)\n\n        if not is_new:\n            raise AlreadyOnHold()\n\n        licensepool.patrons_in_hold_queue += 1\n        self._update_hold_end_date(hold)\n        return hold\n\n    def release_hold(self, patron, pin, licensepool):\n        \"\"\"Cancel a hold.\"\"\"\n        _db = Session.object_session(patron)\n\n        hold = get_one(\n            _db,\n            Hold,\n            license_pool_id=licensepool.id,\n            patron=patron,\n        )\n        if not hold:\n            raise NotOnHold()\n        return self._release_hold(hold)\n\n    def _release_hold(self, hold):\n        # If the book was ready and the patron revoked the hold instead\n        # of 
checking it out, but no one else had the book on hold, the\n        # book is now available for anyone to check out. If someone else\n        # had a hold, the license is now reserved for the next patron, and\n        # we need to update that hold.\n        _db = Session.object_session(hold)\n        licensepool = hold.license_pool\n        _db.delete(hold)\n        self.update_licensepool(licensepool)\n        return True\n\n    def patron_activity(self, patron, pin):\n        \"\"\"Look up non-expired loans for this collection in the database.\"\"\"\n        _db = Session.object_session(patron)\n        loans = (\n            _db.query(Loan)\n            .join(Loan.license_pool)\n            .filter(LicensePool.collection_id == self.collection_id)\n            .filter(Loan.patron == patron)\n            .filter(Loan.end >= utc_now())\n        )\n\n        # Get the patron's holds. If there are any expired holds, delete them.\n        # Update the end date and position for the remaining holds.\n        holds = (\n            _db.query(Hold)\n            .join(Hold.license_pool)\n            .filter(LicensePool.collection_id == self.collection_id)\n            .filter(Hold.patron == patron)\n        )\n        remaining_holds = []\n        for hold in holds:\n            if hold.end and hold.end < utc_now():\n                _db.delete(hold)\n                self.update_licensepool(hold.license_pool)\n            else:\n                self._update_hold_end_date(hold)\n                remaining_holds.append(hold)\n\n        return [\n            LoanInfo(\n                loan.license_pool.collection,\n                loan.license_pool.data_source.name,\n                loan.license_pool.identifier.type,\n                loan.license_pool.identifier.identifier,\n                loan.start,\n                loan.end,\n                external_identifier=loan.external_identifier,\n            )\n            for loan in loans\n        ] + [\n            HoldInfo(\n                hold.license_pool.collection,\n                hold.license_pool.data_source.name,\n                hold.license_pool.identifier.type,\n                hold.license_pool.identifier.identifier,\n                start_date=hold.start,\n                end_date=hold.end,\n                hold_position=hold.position,\n            )\n            for hold in remaining_holds\n        ]\n\n    def update_loan(self, loan, status_doc=None):\n        \"\"\"Check a loan's status, and if it is no longer active, delete the loan\n        and update its pool's availability.\n        \"\"\"\n        _db = Session.object_session(loan)\n\n        if not status_doc:\n            status_doc = self.get_license_status_document(loan)\n\n        status = status_doc.get(\"status\")\n        # We already check that the status is valid in get_license_status_document,\n        # but if the document came from a notification it hasn't been checked yet.\n        if status not in self.STATUS_VALUES:\n            raise BadResponseException(\n                \"The License Status Document had an unknown status value.\"\n            )\n\n        if status in [\n            self.REVOKED_STATUS,\n            self.RETURNED_STATUS,\n            self.CANCELLED_STATUS,\n            self.EXPIRED_STATUS,\n        ]:\n            # This loan is no longer active. 
Update the pool's availability\n # and delete the loan.\n\n # Update the license\n loan.license.checkin()\n\n # If there are holds, the license is reserved for the next patron.\n _db.delete(loan)\n self.update_licensepool(loan.license_pool)\n\n def checkout_to_external_library(self, client, licensepool, hold=None):\n try:\n return self._checkout(client, licensepool, hold)\n except NoAvailableCopies as e:\n return self._place_hold(client, licensepool)\n\n def checkin_from_external_library(self, client, loan):\n self._checkin(loan)\n\n def fulfill_for_external_library(self, client, loan, mechanism):\n return self._fulfill(loan)\n\n def release_hold_from_external_library(self, client, hold):\n return self._release_hold(hold)\n\n\nclass ODLXMLParser(OPDSXMLParser):\n NAMESPACES = dict(OPDSXMLParser.NAMESPACES, odl=\"http://opds-spec.org/odl\")\n\n\nclass ODLImporter(OPDSImporter):\n \"\"\"Import information and formats from an ODL feed.\n\n The only change from OPDSImporter is that this importer extracts\n format information from 'odl:license' tags.\n \"\"\"\n\n NAME = ODLAPI.NAME\n PARSER_CLASS = ODLXMLParser\n\n # The media type for a License Info Document, used to get information\n # about the license.\n LICENSE_INFO_DOCUMENT_MEDIA_TYPE = \"application/vnd.odl.info+json\"\n\n FEEDBOOKS_AUDIO = \"{0}; protection={1}\".format(\n MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,\n )\n\n CONTENT_TYPE = \"content-type\"\n DRM_SCHEME = \"drm-scheme\"\n\n LICENSE_FORMATS = {\n FEEDBOOKS_AUDIO: {\n CONTENT_TYPE: MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n DRM_SCHEME: DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,\n }\n }\n\n @classmethod\n def fetch_license_info(cls, document_link: str, do_get: Callable) -> Optional[dict]:\n status_code, _, response = do_get(document_link, headers={})\n if status_code in (200, 201):\n license_info_document = json.loads(response)\n return license_info_document\n else:\n logging.warning(\n f\"License Info Document is not available. 
\"\n f\"Status link {document_link} failed with {status_code} code.\"\n )\n return None\n\n @classmethod\n def parse_license_info(\n cls,\n license_info_document: dict,\n license_info_link: str,\n checkout_link: str,\n ) -> Optional[LicenseData]:\n \"\"\"Check the license's attributes passed as parameters:\n - if they're correct, turn them into a LicenseData object\n - otherwise, return a None\n\n :param license_info_document: License Info Document\n :param license_info_link: Link to fetch License Info Document\n :param checkout_link: License's checkout link\n\n :return: LicenseData if all the license's attributes are correct, None, otherwise\n \"\"\"\n\n identifier = license_info_document.get(\"identifier\")\n document_status = license_info_document.get(\"status\")\n document_checkouts = license_info_document.get(\"checkouts\", {})\n document_left = document_checkouts.get(\"left\")\n document_available = document_checkouts.get(\"available\")\n document_terms = license_info_document.get(\"terms\", {})\n document_expires = document_terms.get(\"expires\")\n document_concurrency = document_terms.get(\"concurrency\")\n document_format = license_info_document.get(\"format\")\n\n if identifier is None:\n logging.error(\"License info document has no identifier.\")\n return None\n\n expires = None\n if document_expires is not None:\n expires = dateutil.parser.parse(document_expires)\n expires = util.datetime_helpers.to_utc(expires)\n\n if document_status is not None:\n status = LicenseStatus.get(document_status)\n if status.value != document_status:\n logging.warning(\n f\"Identifier # {identifier} unknown status value \"\n f\"{document_status} defaulting to {status.value}.\"\n )\n else:\n status = LicenseStatus.unavailable\n logging.warning(\n f\"Identifier # {identifier} license info document does not have \"\n f\"required key 'status'.\"\n )\n\n if document_available is not None:\n available = int(document_available)\n else:\n available = 0\n logging.warning(\n f\"Identifier # {identifier} license info document does not have \"\n f\"required key 'checkouts.available'.\"\n )\n\n left = None\n if document_left is not None:\n left = int(document_left)\n\n concurrency = None\n if document_concurrency is not None:\n concurrency = int(document_concurrency)\n\n content_types = None\n if document_format is not None:\n if isinstance(document_format, str):\n content_types = [document_format]\n elif isinstance(document_format, list):\n content_types = document_format\n\n return LicenseData(\n identifier=identifier,\n checkout_url=checkout_link,\n status_url=license_info_link,\n expires=expires,\n checkouts_left=left,\n checkouts_available=available,\n status=status,\n terms_concurrency=concurrency,\n content_types=content_types,\n )\n\n @classmethod\n def get_license_data(\n cls,\n license_info_link: str,\n checkout_link: str,\n feed_license_identifier: str,\n feed_license_expires: str,\n feed_concurrency: int,\n do_get: Callable,\n ) -> Optional[LicenseData]:\n license_info_document = cls.fetch_license_info(license_info_link, do_get)\n\n if not license_info_document:\n return None\n\n parsed_license = cls.parse_license_info(\n license_info_document, license_info_link, checkout_link\n )\n\n if not parsed_license:\n return None\n\n if parsed_license.identifier != feed_license_identifier:\n # There is a mismatch between the license info document and\n # the feed we are importing. 
Since we don't know which to believe\n # we log an error and continue.\n logging.error(\n f\"Mismatch between license identifier in the feed ({feed_license_identifier}) \"\n f\"and the identifier in the license info document \"\n f\"({parsed_license.identifier}) ignoring license completely.\"\n )\n return None\n\n if parsed_license.expires != feed_license_expires:\n logging.error(\n f\"License identifier {feed_license_identifier}. Mismatch between license \"\n f\"expiry in the feed ({feed_license_expires}) and the expiry in the license \"\n f\"info document ({parsed_license.expires}) setting license status \"\n f\"to unavailable.\"\n )\n parsed_license.status = LicenseStatus.unavailable\n\n if parsed_license.terms_concurrency != feed_concurrency:\n logging.error(\n f\"License identifier {feed_license_identifier}. Mismatch between license \"\n f\"concurrency in the feed ({feed_concurrency}) and the \"\n f\"concurrency in the license info document (\"\n f\"{parsed_license.terms_concurrency}) setting license status \"\n f\"to unavailable.\"\n )\n parsed_license.status = LicenseStatus.unavailable\n\n return parsed_license\n\n @classmethod\n def _detail_for_elementtree_entry(\n cls, parser, entry_tag, feed_url=None, do_get=None\n ):\n do_get = do_get or Representation.cautious_http_get\n\n # TODO: Review for consistency when updated ODL spec is ready.\n subtag = parser.text_of_optional_subtag\n data = OPDSImporter._detail_for_elementtree_entry(parser, entry_tag, feed_url)\n formats = []\n licenses = []\n\n odl_license_tags = parser._xpath(entry_tag, \"odl:license\") or []\n medium = None\n for odl_license_tag in odl_license_tags:\n identifier = subtag(odl_license_tag, \"dcterms:identifier\")\n full_content_type = subtag(odl_license_tag, \"dcterms:format\")\n\n if not medium:\n medium = Edition.medium_from_media_type(full_content_type)\n\n # By default, dcterms:format includes the media type of a\n # DRM-free resource.\n content_type = full_content_type\n drm_schemes = []\n\n # But it may instead describe an audiobook protected with\n # the Feedbooks access-control scheme.\n if full_content_type == cls.FEEDBOOKS_AUDIO:\n content_type = MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE\n drm_schemes.append(DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM)\n\n # Additional DRM schemes may be described in <odl:protection>\n # tags.\n protection_tags = parser._xpath(odl_license_tag, \"odl:protection\") or []\n for protection_tag in protection_tags:\n drm_scheme = subtag(protection_tag, \"dcterms:format\")\n if drm_scheme:\n drm_schemes.append(drm_scheme)\n\n for drm_scheme in drm_schemes or [None]:\n formats.append(\n FormatData(\n content_type=content_type,\n drm_scheme=drm_scheme,\n rights_uri=RightsStatus.IN_COPYRIGHT,\n )\n )\n\n data[\"medium\"] = medium\n\n checkout_link = None\n for link_tag in parser._xpath(odl_license_tag, \"odl:tlink\") or []:\n rel = link_tag.attrib.get(\"rel\")\n if rel == Hyperlink.BORROW:\n checkout_link = link_tag.attrib.get(\"href\")\n break\n\n # Look for a link to the License Info Document for this license.\n odl_status_link = None\n for link_tag in parser._xpath(odl_license_tag, \"atom:link\") or []:\n attrib = link_tag.attrib\n rel = attrib.get(\"rel\")\n type = attrib.get(\"type\", \"\")\n if rel == \"self\" and type.startswith(\n cls.LICENSE_INFO_DOCUMENT_MEDIA_TYPE\n ):\n odl_status_link = attrib.get(\"href\")\n break\n\n expires = None\n concurrent_checkouts = None\n\n terms = parser._xpath(odl_license_tag, \"odl:terms\")\n if terms:\n concurrent_checkouts = subtag(terms[0], 
\"odl:concurrent_checkouts\")\n expires = subtag(terms[0], \"odl:expires\")\n\n if concurrent_checkouts is not None:\n concurrent_checkouts = int(concurrent_checkouts)\n\n if expires is not None:\n expires = to_utc(dateutil.parser.parse(expires))\n\n if not odl_status_link:\n parsed_license = None\n else:\n parsed_license = cls.get_license_data(\n odl_status_link,\n checkout_link,\n identifier,\n expires,\n concurrent_checkouts,\n do_get,\n )\n\n if parsed_license is not None:\n licenses.append(parsed_license)\n\n if not data.get(\"circulation\"):\n data[\"circulation\"] = dict()\n if not data[\"circulation\"].get(\"formats\"):\n data[\"circulation\"][\"formats\"] = []\n data[\"circulation\"][\"formats\"].extend(formats)\n if not data[\"circulation\"].get(\"licenses\"):\n data[\"circulation\"][\"licenses\"] = []\n data[\"circulation\"][\"licenses\"].extend(licenses)\n data[\"circulation\"][\"licenses_owned\"] = None\n data[\"circulation\"][\"licenses_available\"] = None\n data[\"circulation\"][\"licenses_reserved\"] = None\n data[\"circulation\"][\"patrons_in_hold_queue\"] = None\n return data\n\n\nclass ODLImportMonitor(OPDSImportMonitor):\n \"\"\"Import information from an ODL feed.\"\"\"\n\n PROTOCOL = ODLImporter.NAME\n SERVICE_NAME = \"ODL Import Monitor\"\n\n def __init__(self, _db, collection, import_class, **import_class_kwargs):\n # Always force reimport ODL collections to get up to date license information\n super().__init__(\n _db, collection, import_class, force_reimport=True, **import_class_kwargs\n )\n\n\nclass ODLHoldReaper(CollectionMonitor):\n \"\"\"Check for holds that have expired and delete them, and update\n the holds queues for their pools.\"\"\"\n\n SERVICE_NAME = \"ODL Hold Reaper\"\n PROTOCOL = ODLAPI.NAME\n\n def __init__(self, _db, collection=None, api=None, **kwargs):\n super(ODLHoldReaper, self).__init__(_db, collection, **kwargs)\n self.api = api or ODLAPI(_db, collection)\n\n def run_once(self, progress):\n # Find holds that have expired.\n expired_holds = (\n self._db.query(Hold)\n .join(Hold.license_pool)\n .filter(LicensePool.collection_id == self.api.collection_id)\n .filter(Hold.end < utc_now())\n .filter(Hold.position == 0)\n )\n\n changed_pools = set()\n total_deleted_holds = 0\n for hold in expired_holds:\n changed_pools.add(hold.license_pool)\n self._db.delete(hold)\n total_deleted_holds += 1\n\n for pool in changed_pools:\n self.api.update_licensepool(pool)\n\n message = \"Holds deleted: %d. License pools updated: %d\" % (\n total_deleted_holds,\n len(changed_pools),\n )\n progress = TimestampData(achievements=message)\n return progress\n\n\nclass SharedODLAPI(BaseCirculationAPI):\n \"\"\"An API for circulation managers to use to connect to an ODL collection that's shared\n by another circulation manager.\n \"\"\"\n\n NAME = \"Shared ODL For Consortia\"\n DESCRIPTION = _(\n \"Import books from an ODL collection that's hosted by another circulation manager in the consortium. 
If this circulation manager will be the main host for the collection, select %(odl_name)s instead.\",\n odl_name=ODLAPI.NAME,\n )\n\n SETTINGS = [\n {\n \"key\": Collection.EXTERNAL_ACCOUNT_ID_KEY,\n \"label\": _(\"Base URL\"),\n \"description\": _(\n \"The base URL for the collection on the other circulation manager.\"\n ),\n \"required\": True,\n },\n {\n \"key\": Collection.DATA_SOURCE_NAME_SETTING,\n \"label\": _(\"Data source name\"),\n \"required\": True,\n },\n ]\n\n SUPPORTS_REGISTRATION = True\n SUPPORTS_STAGING = False\n\n def __init__(self, _db, collection):\n if collection.protocol != self.NAME:\n raise ValueError(\n \"Collection protocol is %s, but passed into SharedODLPI!\"\n % collection.protocol\n )\n self.collection_id = collection.id\n self.data_source_name = collection.external_integration.setting(\n Collection.DATA_SOURCE_NAME_SETTING\n ).value\n # Create the data source if it doesn't exist yet.\n DataSource.lookup(_db, self.data_source_name, autocreate=True)\n\n self.base_url = collection.external_account_id\n\n @staticmethod\n def _parse_feed_from_response(response):\n \"\"\"Parse ODL (Atom) feed from the HTTP response.\n\n :param response: HTTP response\n :type response: requests.Response\n\n :return: Parsed ODL (Atom) feed\n :rtype: dict\n \"\"\"\n response_content = response.content\n\n if not isinstance(response_content, (str, bytes)):\n raise ValueError(\"Response content must be a string or byte-encoded value\")\n\n feed = feedparser.parse(response_content)\n\n return feed\n\n def internal_format(self, delivery_mechanism):\n \"\"\"Each consolidated copy is only available in one format, so we don't need\n a mapping to internal formats.\n \"\"\"\n return delivery_mechanism\n\n def collection(self, _db):\n return get_one(_db, Collection, id=self.collection_id)\n\n def _get(\n self,\n url,\n headers=None,\n patron=None,\n allowed_response_codes=None,\n do_get=HTTP.get_with_timeout,\n ):\n \"\"\"Make a normal HTTP request, but include an authentication\n header with the credentials for the collection.\n \"\"\"\n\n allowed_response_codes = allowed_response_codes or [\"2xx\", \"3xx\"]\n patron = patron or flask.request.patron\n _db = Session.object_session(patron)\n collection = self.collection(_db)\n shared_secret = ConfigurationSetting.for_library_and_externalintegration(\n _db,\n ExternalIntegration.PASSWORD,\n patron.library,\n collection.external_integration,\n ).value\n if not shared_secret:\n raise LibraryAuthorizationFailedException(\n _(\n \"Library %(library)s is not registered with the collection.\",\n library=patron.library.name,\n )\n )\n headers = dict(headers or {})\n auth_header = \"Bearer \" + base64.b64encode(shared_secret)\n headers[\"Authorization\"] = auth_header\n\n return do_get(\n url, headers=headers, allowed_response_codes=allowed_response_codes\n )\n\n def checkout(self, patron, pin, licensepool, internal_format):\n _db = Session.object_session(patron)\n\n loans = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n if loans.count() > 0:\n raise AlreadyCheckedOut()\n\n holds = (\n _db.query(Hold)\n .filter(Hold.patron == patron)\n .filter(Hold.license_pool_id == licensepool.id)\n )\n if holds.count() > 0:\n hold = holds.one()\n try:\n hold_info_response = self._get(hold.external_identifier)\n except RemoteIntegrationException as e:\n raise CannotLoan()\n\n feed = self._parse_feed_from_response(hold_info_response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise 
CannotLoan()\n entry = entries[0]\n availability = entry.get(\"opds_availability\", {})\n if availability.get(\"status\") != \"ready\":\n raise NoAvailableCopies()\n checkout_links = [\n link\n for link in entry.get(\"links\")\n if link.get(\"rel\") == Hyperlink.BORROW\n ]\n if len(checkout_links) < 1:\n raise NoAvailableCopies()\n checkout_url = checkout_links[0].get(\"href\")\n else:\n borrow_links = [\n link\n for link in licensepool.identifier.links\n if link.rel == Hyperlink.BORROW\n ]\n if not borrow_links:\n raise CannotLoan()\n checkout_url = borrow_links[0].resource.url\n try:\n response = self._get(\n checkout_url, allowed_response_codes=[\"2xx\", \"3xx\", \"403\", \"404\"]\n )\n except RemoteIntegrationException as e:\n raise CannotLoan()\n if response.status_code == 403:\n raise NoAvailableCopies()\n elif response.status_code == 404:\n if (\n hasattr(response, \"json\")\n and response.json().get(\"type\", \"\") == NO_LICENSES.uri\n ):\n raise NoLicenses()\n\n feed = self._parse_feed_from_response(response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CannotLoan()\n entry = entries[0]\n availability = entry.get(\"opds_availability\", {})\n start = dateutil.parser.parse(availability.get(\"since\"))\n end = dateutil.parser.parse(availability.get(\"until\"))\n # Get the loan base url from a link.\n info_links = [link for link in entry.get(\"links\") if link.get(\"rel\") == \"self\"]\n if len(info_links) < 1:\n raise CannotLoan()\n external_identifier = info_links[0].get(\"href\")\n\n if availability.get(\"status\") == \"available\":\n return LoanInfo(\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n licensepool.identifier.identifier,\n start,\n end,\n external_identifier=external_identifier,\n )\n elif availability.get(\"status\") in [\"ready\", \"reserved\"]:\n # We tried to borrow this book but it wasn't available,\n # so we got a hold.\n position = entry.get(\"opds_holds\", {}).get(\"position\")\n if position:\n position = int(position)\n return HoldInfo(\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n licensepool.identifier.identifier,\n start,\n end,\n hold_position=position,\n external_identifier=external_identifier,\n )\n else:\n # We didn't get an error, but something went wrong and we don't have a\n # loan or hold either.\n raise CannotLoan()\n\n def checkin(self, patron, pin, licensepool):\n _db = Session.object_session(patron)\n\n loan = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n if loan.count() < 1:\n raise NotCheckedOut()\n loan = loan.one()\n\n info_url = loan.external_identifier\n try:\n response = self._get(info_url, allowed_response_codes=[\"2xx\", \"3xx\", \"404\"])\n except RemoteIntegrationException as e:\n raise CannotReturn()\n if response.status_code == 404:\n raise NotCheckedOut()\n\n feed = self._parse_feed_from_response(response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CannotReturn()\n entry = entries[0]\n revoke_links = [\n link\n for link in entry.get(\"links\")\n if link.get(\"rel\") == \"http://librarysimplified.org/terms/rel/revoke\"\n ]\n if len(revoke_links) < 1:\n raise CannotReturn()\n revoke_url = revoke_links[0].get(\"href\")\n try:\n self._get(revoke_url)\n except RemoteIntegrationException as e:\n raise CannotReturn()\n return True\n\n def fulfill(self, patron, pin, licensepool, internal_format, **kwargs):\n \"\"\"Get the actual resource file to the 
patron.\n\n :param kwargs: A container for arguments to fulfill()\n which are not relevant to this vendor.\n\n :return: a FulfillmentInfo object.\n \"\"\"\n _db = Session.object_session(patron)\n\n loan = (\n _db.query(Loan)\n .filter(Loan.patron == patron)\n .filter(Loan.license_pool_id == licensepool.id)\n )\n if loan.count() < 1:\n raise NotCheckedOut()\n loan = loan.one()\n\n info_url = loan.external_identifier\n try:\n response = self._get(info_url, allowed_response_codes=[\"2xx\", \"3xx\", \"404\"])\n except RemoteIntegrationException as e:\n raise CannotFulfill()\n if response.status_code == 404:\n raise NotCheckedOut()\n\n requested_content_type = internal_format.delivery_mechanism.content_type\n requested_drm_scheme = internal_format.delivery_mechanism.drm_scheme\n\n # The response data comes in as a byte string that we must\n # convert into a string.\n response_content = response.content.decode(\"utf-8\")\n feed = feedparser.parse(response_content)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CannotFulfill()\n entry = entries[0]\n availability = entry.get(\"opds_availability\")\n if availability.get(\"status\") != \"available\":\n raise CannotFulfill()\n expires = dateutil.parser.parse(availability.get(\"until\"))\n\n # The entry is parsed with etree to get indirect acquisitions\n parser = SharedODLImporter.PARSER_CLASS()\n root = etree.parse(StringIO(response_content))\n\n fulfill_url = SharedODLImporter.get_fulfill_url(\n response_content, requested_content_type, requested_drm_scheme\n )\n if not fulfill_url:\n raise FormatNotAvailable()\n\n # We need to hit the fulfill link here instead of returning it so we can\n # authenticate the library.\n try:\n response = self._get(fulfill_url)\n except RemoteIntegrationException as e:\n raise CannotFulfill()\n return FulfillmentInfo(\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n licensepool.identifier.identifier,\n None,\n response.headers.get(\"Content-Type\"),\n response.content,\n expires,\n )\n\n def place_hold(self, patron, pin, licensepool, notification_email_address):\n # Just try to check out the book. 
If it's not available, we'll get a hold.\n return self.checkout(patron, pin, licensepool, None)\n\n def release_hold(self, patron, pin, licensepool):\n _db = Session.object_session(patron)\n\n hold = get_one(\n _db,\n Hold,\n license_pool_id=licensepool.id,\n patron=patron,\n )\n if not hold:\n raise NotOnHold()\n\n info_url = hold.external_identifier\n try:\n response = self._get(info_url, allowed_response_codes=[\"2xx\", \"3xx\", \"404\"])\n except RemoteIntegrationException as e:\n raise CannotReleaseHold()\n if response.status_code == 404:\n raise NotOnHold()\n\n feed = self._parse_feed_from_response(response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CannotReleaseHold()\n entry = entries[0]\n availability = entry.get(\"opds_availability\", {})\n if availability.get(\"status\") not in [\"reserved\", \"ready\"]:\n raise CannotReleaseHold()\n revoke_links = [\n link\n for link in entry.get(\"links\")\n if link.get(\"rel\") == \"http://librarysimplified.org/terms/rel/revoke\"\n ]\n if len(revoke_links) < 1:\n raise CannotReleaseHold()\n revoke_url = revoke_links[0].get(\"href\")\n try:\n self._get(revoke_url)\n except RemoteIntegrationException as e:\n raise CannotReleaseHold()\n return True\n\n def patron_activity(self, patron, pin):\n _db = Session.object_session(patron)\n loans = (\n _db.query(Loan)\n .join(Loan.license_pool)\n .filter(LicensePool.collection_id == self.collection_id)\n .filter(Loan.patron == patron)\n )\n\n holds = (\n _db.query(Hold)\n .join(Hold.license_pool)\n .filter(LicensePool.collection_id == self.collection_id)\n .filter(Hold.patron == patron)\n )\n\n activity = []\n for loan in loans:\n info_url = loan.external_identifier\n response = self._get(\n info_url, patron=patron, allowed_response_codes=[\"2xx\", \"3xx\", \"404\"]\n )\n if response.status_code == 404:\n # 404 is returned when the loan has been deleted. Leave this loan out of the result.\n continue\n feed = self._parse_feed_from_response(response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CirculationException()\n entry = entries[0]\n availability = entry.get(\"opds_availability\", {})\n if availability.get(\"status\") != \"available\":\n # This loan might be expired.\n continue\n start = dateutil.parser.parse(availability.get(\"since\"))\n end = dateutil.parser.parse(availability.get(\"until\"))\n\n activity.append(\n LoanInfo(\n loan.license_pool.collection,\n loan.license_pool.data_source.name,\n loan.license_pool.identifier.type,\n loan.license_pool.identifier.identifier,\n start,\n end,\n external_identifier=loan.external_identifier,\n )\n )\n for hold in holds:\n info_url = hold.external_identifier\n response = self._get(\n info_url, patron=patron, allowed_response_codes=[\"2xx\", \"3xx\", \"404\"]\n )\n if response.status_code == 404:\n # 404 is returned when the hold has been deleted. 
Leave this hold out of the result.\n continue\n feed = self._parse_feed_from_response(response)\n entries = feed.get(\"entries\")\n if len(entries) < 1:\n raise CirculationException()\n entry = entries[0]\n availability = entry.get(\"opds_availability\", {})\n if availability.get(\"status\") not in [\"ready\", \"reserved\"]:\n # This hold might be expired.\n continue\n start = dateutil.parser.parse(availability.get(\"since\"))\n end = dateutil.parser.parse(availability.get(\"until\"))\n position = entry.get(\"opds_holds\", {}).get(\"position\")\n\n activity.append(\n HoldInfo(\n hold.license_pool.collection,\n hold.license_pool.data_source.name,\n hold.license_pool.identifier.type,\n hold.license_pool.identifier.identifier,\n start,\n end,\n hold_position=position,\n external_identifier=hold.external_identifier,\n )\n )\n return activity\n\n\nclass SharedODLImporter(OPDSImporter):\n NAME = SharedODLAPI.NAME\n\n @classmethod\n def get_fulfill_url(cls, entry, requested_content_type, requested_drm_scheme):\n parser = cls.PARSER_CLASS()\n # The entry may come from an HTTP response which is a bytestring.\n if isinstance(entry, bytes):\n entry = entry.decode(\"utf-8\")\n root = etree.parse(StringIO(entry))\n\n fulfill_url = None\n for link_tag in parser._xpath(root, \"atom:link\"):\n if link_tag.attrib.get(\"rel\") == Hyperlink.GENERIC_OPDS_ACQUISITION:\n content_type = None\n drm_scheme = link_tag.attrib.get(\"type\")\n\n indirect_acquisition = parser._xpath(\n link_tag, \"opds:indirectAcquisition\"\n )\n if indirect_acquisition:\n content_type = indirect_acquisition[0].get(\"type\")\n else:\n content_type = drm_scheme\n drm_scheme = None\n\n if (\n content_type == requested_content_type\n and drm_scheme == requested_drm_scheme\n ):\n fulfill_url = link_tag.attrib.get(\"href\")\n break\n return fulfill_url\n\n @classmethod\n def _detail_for_elementtree_entry(\n cls, parser, entry_tag, feed_url=None, do_get=None\n ):\n data = OPDSImporter._detail_for_elementtree_entry(parser, entry_tag, feed_url)\n borrow_links = [\n link for link in data.get(\"links\") if link.rel == Hyperlink.BORROW\n ]\n\n licenses_available = 0\n licenses_owned = 0\n patrons_in_hold_queue = 0\n formats = []\n\n for link_tag in parser._xpath(entry_tag, \"atom:link\"):\n if link_tag.attrib.get(\"rel\") == Hyperlink.BORROW:\n content_type = None\n drm_scheme = None\n\n indirect_acquisition = parser._xpath(\n link_tag, \"opds:indirectAcquisition\"\n )\n if indirect_acquisition:\n drm_scheme = indirect_acquisition[0].attrib.get(\"type\")\n\n second_indirect_acquisition = parser._xpath(\n indirect_acquisition[0], \"opds:indirectAcquisition\"\n )\n if second_indirect_acquisition:\n content_type = second_indirect_acquisition[0].attrib.get(\"type\")\n else:\n content_type = drm_scheme\n drm_scheme = None\n\n copies_tags = parser._xpath(link_tag, \"opds:copies\")\n if copies_tags:\n copies_tag = copies_tags[0]\n licenses_available = copies_tag.attrib.get(\"available\")\n if licenses_available != None:\n licenses_available = int(licenses_available)\n licenses_owned = copies_tag.attrib.get(\"total\")\n if licenses_owned != None:\n licenses_owned = int(licenses_owned)\n holds_tags = parser._xpath(link_tag, \"opds:holds\")\n if holds_tags:\n holds_tag = holds_tags[0]\n patrons_in_hold_queue = holds_tag.attrib.get(\"total\")\n if patrons_in_hold_queue != None:\n patrons_in_hold_queue = int(patrons_in_hold_queue)\n\n format = FormatData(\n content_type=content_type,\n drm_scheme=drm_scheme,\n link=borrow_links[0],\n 
rights_uri=RightsStatus.IN_COPYRIGHT,\n )\n formats.append(format)\n circulation = dict(\n licenses_available=licenses_available,\n licenses_owned=licenses_owned,\n patrons_in_hold_queue=patrons_in_hold_queue,\n formats=formats,\n )\n\n data[\"circulation\"] = circulation\n return data\n\n\nclass SharedODLImportMonitor(OPDSImportMonitor):\n PROTOCOL = SharedODLImporter.NAME\n SERVICE_NAME = \"Shared ODL Import Monitor\"\n\n def opds_url(self, collection):\n base_url = collection.external_account_id\n return base_url + \"/crawlable\"\n\n\nclass MockSharedODLAPI(SharedODLAPI):\n \"\"\"Mock API for tests that overrides _get and tracks requests.\"\"\"\n\n @classmethod\n def mock_collection(cls, _db):\n \"\"\"Create a mock ODL collection to use in tests.\"\"\"\n library = DatabaseTest.make_default_library(_db)\n collection, ignore = get_one_or_create(\n _db,\n Collection,\n name=\"Test Shared ODL Collection\",\n create_method_kwargs=dict(\n external_account_id=\"http://shared-odl\",\n ),\n )\n integration = collection.create_external_integration(protocol=SharedODLAPI.NAME)\n library.collections.append(collection)\n return collection\n\n def __init__(self, _db, collection, *args, **kwargs):\n self.responses = []\n self.requests = []\n self.request_args = []\n super(MockSharedODLAPI, self).__init__(_db, collection, *args, **kwargs)\n\n def queue_response(self, status_code, headers={}, content=None):\n self.responses.insert(0, MockRequestsResponse(status_code, headers, content))\n\n def _get(self, url, patron=None, headers=None, allowed_response_codes=None):\n allowed_response_codes = allowed_response_codes or [\"2xx\", \"3xx\"]\n self.requests.append(url)\n self.request_args.append((patron, headers, allowed_response_codes))\n response = self.responses.pop()\n return HTTP._process_response(\n url, response, allowed_response_codes=allowed_response_codes\n )\n", "id": "201616", "language": "Python", "matching_score": 7.4050798416137695, "max_stars_count": 0, "path": "api/odl.py" }, { "content": "import datetime\nimport json\nfrom io import BytesIO\n\nfrom flask import send_file\nfrom sqlalchemy import or_\n\nfrom api.circulation import BaseCirculationAPI, FulfillmentInfo, LoanInfo\nfrom api.lcp.encrypt import LCPEncryptionConfiguration\nfrom api.lcp.hash import HasherFactory\nfrom api.lcp.server import LCPServer, LCPServerConfiguration\nfrom core.lcp.credential import LCPCredentialFactory\nfrom core.model import (\n Collection,\n DeliveryMechanism,\n ExternalIntegration,\n LicensePool,\n Loan,\n get_one,\n)\nfrom core.model.configuration import (\n ConfigurationFactory,\n ConfigurationStorage,\n HasExternalIntegration,\n)\nfrom core.util.datetime_helpers import utc_now\n\n\nclass LCPFulfilmentInfo(FulfillmentInfo):\n \"\"\"Sends LCP licenses as fulfilment info\"\"\"\n\n def __init__(\n self,\n identifier,\n collection,\n data_source_name,\n identifier_type,\n content_link=None,\n content_type=None,\n content=None,\n content_expires=None,\n ):\n \"\"\"Initializes a new instance of LCPFulfilmentInfo class\n\n :param identifier: Identifier\n :type identifier: string\n\n :param collection: Collection\n :type collection: Collection\n\n :param data_source_name: Data source's name\n :type data_source_name: string\n\n :param identifier_type: Identifier's type\n :type identifier_type: string\n\n :param content_link: Content link\n :type content_link: Optional[string]\n\n :param content_link: Identifier's type\n :type content_link: string\n\n :param content: Identifier's type\n :type content: Any\n\n :param 
content_expires: Time when the content expires\n :type content_expires: Optional[datetime.datetime]\n \"\"\"\n super(LCPFulfilmentInfo, self).__init__(\n collection,\n data_source_name,\n identifier_type,\n identifier,\n content_link,\n content_type,\n content,\n content_expires,\n )\n\n @property\n def as_response(self):\n \"\"\"Returns LCP license as a Flask response\n\n :return: LCP license as a Flask response\n :rtype: Response\n \"\"\"\n return send_file(\n BytesIO(json.dumps(self.content)),\n mimetype=DeliveryMechanism.LCP_DRM,\n as_attachment=True,\n attachment_filename=\"{0}.lcpl\".format(self.identifier),\n )\n\n\nclass LCPAPI(BaseCirculationAPI, HasExternalIntegration):\n \"\"\"Implements LCP workflow\"\"\"\n\n NAME = ExternalIntegration.LCP\n SERVICE_NAME = \"LCP\"\n DESCRIPTION = \"Manually imported collection protected using Readium LCP DRM\"\n\n SETTINGS = (\n LCPServerConfiguration.to_settings() + LCPEncryptionConfiguration.to_settings()\n )\n\n def __init__(self, db, collection):\n \"\"\"Initializes a new instance of LCPAPI class\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param collection: Book collection\n :type collection: Collection\n \"\"\"\n if collection.protocol != ExternalIntegration.LCP:\n raise ValueError(\n \"Collection protocol is {0} but must be LCPAPI\".format(\n collection.protocol\n )\n )\n\n self._db = db\n self._collection_id = collection.id\n self._lcp_server_instance = None\n\n def internal_format(self, delivery_mechanism):\n \"\"\"Look up the internal format for this delivery mechanism or\n raise an exception.\n\n :param delivery_mechanism: A LicensePoolDeliveryMechanism\n :type delivery_mechanism: LicensePoolDeliveryMechanism\n \"\"\"\n return delivery_mechanism\n\n @property\n def collection(self):\n \"\"\"Returns an associated Collection object\n\n :return: Associated Collection object\n :rtype: Collection\n \"\"\"\n return Collection.by_id(self._db, id=self._collection_id)\n\n def external_integration(self, db):\n \"\"\"Returns an external integration associated with this object\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :return: External integration associated with this object\n :rtype: core.model.configuration.ExternalIntegration\n \"\"\"\n return self.collection.external_integration\n\n def _create_lcp_server(self):\n \"\"\"Creates a new instance of LCPServer\n\n :return: New instance of LCPServer\n :rtype: LCPServer\n \"\"\"\n configuration_storage = ConfigurationStorage(self)\n configuration_factory = ConfigurationFactory()\n hasher_factory = HasherFactory()\n credential_factory = LCPCredentialFactory()\n lcp_server = LCPServer(\n configuration_storage,\n configuration_factory,\n hasher_factory,\n credential_factory,\n )\n\n return lcp_server\n\n @property\n def _lcp_server(self):\n \"\"\"Returns an instance of LCPServer\n\n :return: Instance of LCPServer\n :rtype: LCPServer\n \"\"\"\n if self._lcp_server_instance is None:\n self._lcp_server_instance = self._create_lcp_server()\n\n return self._lcp_server_instance\n\n def checkout(self, patron, pin, licensepool, internal_format):\n \"\"\"Checks out a book on behalf of a patron\n\n :param patron: A Patron object for the patron who wants to check out the book\n :type patron: Patron\n\n :param pin: The patron's alleged password\n :type pin: string\n\n :param licensepool: Contains lending info as well as link to parent Identifier\n :type licensepool: LicensePool\n\n :param internal_format: Represents the patron's desired book 
format.\n :type internal_format: Any\n\n :return: a LoanInfo object\n :rtype: LoanInfo\n \"\"\"\n days = self.collection.default_loan_period(patron.library)\n today = utc_now()\n expires = today + datetime.timedelta(days=days)\n loan = get_one(\n self._db,\n Loan,\n patron=patron,\n license_pool=licensepool,\n on_multiple=\"interchangeable\",\n )\n\n if loan:\n license = self._lcp_server.get_license(\n self._db, loan.external_identifier, patron\n )\n else:\n license = self._lcp_server.generate_license(\n self._db, licensepool.identifier.identifier, patron, today, expires\n )\n\n loan = LoanInfo(\n licensepool.collection,\n licensepool.data_source.name,\n identifier_type=licensepool.identifier.type,\n identifier=licensepool.identifier.identifier,\n start_date=today,\n end_date=expires,\n fulfillment_info=None,\n external_identifier=license[\"id\"],\n )\n\n return loan\n\n def fulfill(\n self,\n patron,\n pin,\n licensepool,\n internal_format=None,\n part=None,\n fulfill_part_url=None,\n ):\n \"\"\"Get the actual resource file to the patron.\n\n :param patron: A Patron object for the patron who wants to check out the book\n :type patron: Patron\n\n :param pin: The patron's alleged password\n :type pin: string\n\n :param licensepool: Contains lending info as well as link to parent Identifier\n :type licensepool: LicensePool\n\n :param internal_format: A vendor-specific name indicating the format requested by the patron\n :type internal_format:\n\n :param part: A vendor-specific identifier indicating that the\n patron wants to fulfill one specific part of the book\n (e.g. one chapter of an audiobook), not the whole thing\n :type part: Any\n\n :param fulfill_part_url: A function that takes one argument (a\n vendor-specific part identifier) and returns the URL to use\n when fulfilling that part\n :type fulfill_part_url: Any\n\n :return: a FulfillmentInfo object\n :rtype: FulfillmentInfo\n \"\"\"\n loan = get_one(\n self._db,\n Loan,\n patron=patron,\n license_pool=licensepool,\n on_multiple=\"interchangeable\",\n )\n license = self._lcp_server.get_license(\n self._db, loan.external_identifier, patron\n )\n fulfillment_info = LCPFulfilmentInfo(\n licensepool.identifier.identifier,\n licensepool.collection,\n licensepool.data_source.name,\n licensepool.identifier.type,\n content_link=None,\n content_type=DeliveryMechanism.LCP_DRM,\n content=license,\n content_expires=None,\n )\n\n return fulfillment_info\n\n def patron_activity(self, patron, pin):\n \"\"\"Returns patron's loans\n\n :param patron: A Patron object for the patron who wants to check out the book\n :type patron: Patron\n\n :param pin: The patron's alleged password\n :type pin: string\n\n :return: List of patron's loans\n :rtype: List[LoanInfo]\n \"\"\"\n now = utc_now()\n loans = (\n self._db.query(Loan)\n .join(LicensePool)\n .join(Collection)\n .filter(\n Collection.id == self._collection_id,\n Loan.patron == patron,\n or_(Loan.start is None, Loan.start <= now),\n or_(Loan.end is None, Loan.end > now),\n )\n )\n\n loan_info_objects = []\n\n for loan in loans:\n licensepool = get_one(self._db, LicensePool, id=loan.license_pool_id)\n\n loan_info_objects.append(\n LoanInfo(\n collection=self.collection,\n data_source_name=licensepool.data_source.name,\n identifier_type=licensepool.identifier.type,\n identifier=licensepool.identifier.identifier,\n start_date=loan.start,\n end_date=loan.end,\n fulfillment_info=None,\n external_identifier=loan.external_identifier,\n )\n )\n\n return loan_info_objects\n\n # TODO: Implement place_hold and 
release_hold (https://jira.nypl.org/browse/SIMPLY-3013)\n", "id": "1765501", "language": "Python", "matching_score": 3.856673240661621, "max_stars_count": 0, "path": "api/lcp/collection.py" }, { "content": "import tempfile\n\nfrom flask_babel import lazy_gettext as _\nfrom sqlalchemy.orm import Session\n\nfrom api.lcp.encrypt import LCPEncryptor\nfrom api.lcp.hash import HasherFactory\nfrom api.lcp.importer import LCPImporter\nfrom api.lcp.server import LCPServer\nfrom core.lcp.credential import LCPCredentialFactory\nfrom core.mirror import MirrorUploader\nfrom core.model import Collection, ExternalIntegration\nfrom core.model.collection import (\n CollectionConfigurationStorage,\n HasExternalIntegrationPerCollection,\n)\nfrom core.model.configuration import (\n ConfigurationAttributeType,\n ConfigurationFactory,\n ConfigurationMetadata,\n)\nfrom core.s3 import MinIOUploader, MinIOUploaderConfiguration, S3UploaderConfiguration\n\n\nclass LCPMirrorConfiguration(S3UploaderConfiguration):\n endpoint_url = ConfigurationMetadata(\n key=MinIOUploaderConfiguration.endpoint_url.key,\n label=_(\"Endpoint URL\"),\n description=_(\"S3 endpoint URL\"),\n type=ConfigurationAttributeType.TEXT,\n required=False,\n )\n\n\nclass LCPMirror(MinIOUploader, HasExternalIntegrationPerCollection):\n \"\"\"Implements LCP import workflow:\n 1. Encrypts unencrypted books using lcpencrypt\n 2. Sends encrypted books to the LCP License Server\n 3. LCP License Server generates license metadata and uploads encrypted books to the encrypted_repository\n \"\"\"\n\n NAME = ExternalIntegration.LCP\n SETTINGS = [\n S3UploaderConfiguration.access_key.to_settings(),\n S3UploaderConfiguration.secret_key.to_settings(),\n S3UploaderConfiguration.protected_access_content_bucket.to_settings(),\n S3UploaderConfiguration.s3_region.to_settings(),\n S3UploaderConfiguration.s3_addressing_style.to_settings(),\n S3UploaderConfiguration.s3_presigned_url_expiration.to_settings(),\n S3UploaderConfiguration.url_template.to_settings(),\n LCPMirrorConfiguration.endpoint_url.to_settings(),\n ]\n\n def __init__(self, integration):\n \"\"\"Initializes a new instance of LCPMirror class\n\n :param integration: External integration containing mirror's properties\n :type integration: ExternalIntegration\n \"\"\"\n super(LCPMirror, self).__init__(integration)\n\n self._lcp_importer_instance = None\n\n def _create_lcp_importer(self, collection):\n \"\"\"Creates a new instance of LCPImporter\n\n :param collection: Collection object\n :type collection: Collection\n\n :return: New instance of LCPImporter\n :rtype: LCPImporter\n \"\"\"\n configuration_storage = CollectionConfigurationStorage(self, collection)\n configuration_factory = ConfigurationFactory()\n hasher_factory = HasherFactory()\n credential_factory = LCPCredentialFactory()\n lcp_encryptor = LCPEncryptor(configuration_storage, configuration_factory)\n lcp_server = LCPServer(\n configuration_storage,\n configuration_factory,\n hasher_factory,\n credential_factory,\n )\n lcp_importer = LCPImporter(lcp_encryptor, lcp_server)\n\n return lcp_importer\n\n def collection_external_integration(self, collection):\n \"\"\"Returns an external integration associated with the collection\n\n :param collection: Collection\n :type collection: core.model.Collection\n\n :return: External integration associated with the collection\n :rtype: core.model.configuration.ExternalIntegration\n \"\"\"\n db = Session.object_session(collection)\n external_integration = (\n db.query(ExternalIntegration)\n .join(Collection)\n 
.filter(Collection.id == collection.id)\n .one()\n )\n\n return external_integration\n\n def cover_image_root(self, bucket, data_source, scaled_size=None):\n raise NotImplementedError()\n\n def marc_file_root(self, bucket, library):\n raise NotImplementedError()\n\n def book_url(\n self,\n identifier,\n extension=\".epub\",\n open_access=False,\n data_source=None,\n title=None,\n ):\n \"\"\"Returns the path to the hosted EPUB file for the given identifier.\"\"\"\n bucket = self.get_bucket(\n S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY\n if open_access\n else S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY\n )\n root = self.content_root(bucket)\n book_url = root + self.key_join([identifier.identifier])\n\n return book_url\n\n def cover_image_url(self, data_source, identifier, filename, scaled_size=None):\n raise NotImplementedError()\n\n def marc_file_url(self, library, lane, end_time, start_time=None):\n raise NotImplementedError()\n\n def mirror_one(self, representation, mirror_to, collection=None):\n \"\"\"Uploads an encrypted book to the encrypted_repository via LCP License Server\n\n :param representation: Book's representation\n :type representation: Representation\n\n :param mirror_to: Mirror URL\n :type mirror_to: string\n\n :param collection: Collection\n :type collection: Optional[Collection]\n \"\"\"\n db = Session.object_session(representation)\n bucket = self.get_bucket(S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY)\n content_root = self.content_root(bucket)\n identifier = mirror_to.replace(content_root, \"\")\n lcp_importer = self._create_lcp_importer(collection)\n\n # First, we need to copy unencrypted book's content to a temporary file\n with tempfile.NamedTemporaryFile(\n suffix=representation.extension(representation.media_type)\n ) as temporary_file:\n temporary_file.write(representation.content_fh().read())\n temporary_file.flush()\n\n # Secondly, we execute import:\n # 1. Encrypt the temporary file containing the unencrypted book using lcpencrypt\n # 2. Send the encrypted book to the LCP License Server\n # 3. LCP License Server generates license metadata\n # 4. 
LCP License Server uploads the encrypted book to the encrypted_repository (S3 or EFS)\n lcp_importer.import_book(db, temporary_file.name, identifier)\n\n # Thirdly, we remove unencrypted content from the database\n transaction = db.begin_nested()\n representation.content = None\n transaction.commit()\n\n def do_upload(self, representation):\n raise NotImplementedError()\n\n\nMirrorUploader.IMPLEMENTATION_REGISTRY[LCPMirror.NAME] = LCPMirror\n", "id": "10617502", "language": "Python", "matching_score": 4.165935516357422, "max_stars_count": 0, "path": "api/lcp/mirror.py" }, { "content": "import pytest\nfrom parameterized import parameterized\n\nfrom core.config import CannotLoadConfiguration\nfrom core.mirror import MirrorUploader\nfrom core.model import ExternalIntegration\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.s3 import (\n MinIOUploader,\n MinIOUploaderConfiguration,\n S3Uploader,\n S3UploaderConfiguration,\n)\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import utc_now\n\n\nclass DummySuccessUploader(MirrorUploader):\n def __init__(self, integration=None):\n pass\n\n def book_url(\n self,\n identifier,\n extension=\".epub\",\n open_access=True,\n data_source=None,\n title=None,\n ):\n pass\n\n def cover_image_url(self, data_source, identifier, filename=None, scaled_size=None):\n pass\n\n def sign_url(self, url, expiration=None):\n pass\n\n def split_url(self, url, unquote=True):\n pass\n\n def do_upload(self, representation):\n return None\n\n\nclass DummyFailureUploader(MirrorUploader):\n def __init__(self, integration=None):\n pass\n\n def book_url(\n self,\n identifier,\n extension=\".epub\",\n open_access=True,\n data_source=None,\n title=None,\n ):\n pass\n\n def cover_image_url(self, data_source, identifier, filename=None, scaled_size=None):\n pass\n\n def sign_url(self, url, expiration=None):\n pass\n\n def split_url(self, url, unquote=True):\n pass\n\n def do_upload(self, representation):\n return \"I always fail.\"\n\n\nclass TestInitialization(DatabaseTest):\n \"\"\"Test the ability to get a MirrorUploader for various aspects of site\n configuration.\n \"\"\"\n\n @property\n def _integration(self):\n \"\"\"Helper method to make a storage ExternalIntegration.\"\"\"\n storage_name = \"some storage\"\n integration = self._external_integration(\"my protocol\")\n integration.goal = ExternalIntegration.STORAGE_GOAL\n integration.name = storage_name\n return integration\n\n @parameterized.expand(\n [\n (\"s3_uploader\", ExternalIntegration.S3, S3Uploader),\n (\n \"minio_uploader\",\n ExternalIntegration.MINIO,\n MinIOUploader,\n {MinIOUploaderConfiguration.ENDPOINT_URL: \"http://localhost\"},\n ),\n ]\n )\n def test_mirror(self, name, protocol, uploader_class, settings=None):\n storage_name = \"some storage\"\n # If there's no integration with goal=STORAGE or name=storage_name,\n # MirrorUploader.mirror raises an exception.\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n MirrorUploader.mirror(self._db, storage_name)\n assert \"No storage integration with name 'some storage' is configured\" in str(\n excinfo.value\n )\n\n # If there's only one, mirror() uses it to initialize a\n # MirrorUploader.\n integration = self._integration\n integration.protocol = protocol\n\n if settings:\n for key, value in settings.items():\n integration.setting(key).value = value\n\n uploader = MirrorUploader.mirror(self._db, integration=integration)\n\n assert isinstance(uploader, uploader_class)\n\n def 
test_integration_by_name(self):\n integration = self._integration\n\n # No name was passed so nothing is found\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n MirrorUploader.integration_by_name(self._db)\n assert \"No storage integration with name 'None' is configured\" in str(\n excinfo.value\n )\n\n # Correct name was passed\n integration = MirrorUploader.integration_by_name(self._db, integration.name)\n assert isinstance(integration, ExternalIntegration)\n\n def test_for_collection(self):\n # This collection has no mirror_integration, so\n # there is no MirrorUploader for it.\n collection = self._collection()\n assert None == MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS\n )\n\n # This collection has a properly configured mirror_integration,\n # so it can have an MirrorUploader.\n integration = self._external_integration(\n ExternalIntegration.S3,\n ExternalIntegration.STORAGE_GOAL,\n username=\"username\",\n password=\"password\",\n settings={S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: \"some-covers\"},\n )\n integration_link = self._external_integration_link(\n integration=collection._external_integration,\n other_integration=integration,\n purpose=ExternalIntegrationLink.COVERS,\n )\n\n uploader = MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS\n )\n assert isinstance(uploader, MirrorUploader)\n\n @parameterized.expand(\n [\n (\"s3_uploader\", ExternalIntegration.S3, S3Uploader),\n (\n \"minio_uploader\",\n ExternalIntegration.MINIO,\n MinIOUploader,\n {MinIOUploaderConfiguration.ENDPOINT_URL: \"http://localhost\"},\n ),\n ]\n )\n def test_constructor(self, name, protocol, uploader_class, settings=None):\n # You can't create a MirrorUploader with an integration\n # that's not designed for storage.\n integration = self._integration\n integration.goal = ExternalIntegration.LICENSE_GOAL\n integration.protocol = protocol\n\n if settings:\n for key, value in settings.items():\n integration.setting(key).value = value\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n uploader_class(integration)\n assert \"from an integration with goal=licenses\" in str(excinfo.value)\n\n def test_implementation_registry(self):\n # The implementation class used for a given ExternalIntegration\n # is controlled by the integration's protocol and the contents\n # of the MirrorUploader's implementation registry.\n MirrorUploader.IMPLEMENTATION_REGISTRY[\"my protocol\"] = DummyFailureUploader\n\n integration = self._integration\n uploader = MirrorUploader.mirror(self._db, integration=integration)\n assert isinstance(uploader, DummyFailureUploader)\n del MirrorUploader.IMPLEMENTATION_REGISTRY[\"my protocol\"]\n\n\nclass TestMirrorUploader(DatabaseTest):\n \"\"\"Test the basic workflow of MirrorUploader.\"\"\"\n\n def test_mirror_batch(self):\n r1, ignore = self._representation()\n r2, ignore = self._representation()\n uploader = DummySuccessUploader()\n uploader.mirror_batch([r1, r2])\n assert r1.mirrored_at != None\n assert r2.mirrored_at != None\n\n def test_success_and_then_failure(self):\n r, ignore = self._representation()\n now = utc_now()\n DummySuccessUploader().mirror_one(r, \"\")\n assert r.mirrored_at > now\n assert None == r.mirror_exception\n\n # Even if the original upload succeeds, a subsequent upload\n # may fail in a way that leaves the image in an inconsistent\n # state.\n DummyFailureUploader().mirror_one(r, \"\")\n assert None == r.mirrored_at\n assert \"I always fail.\" == r.mirror_exception\n", "id": 
"11522458", "language": "Python", "matching_score": 1.1786843538284302, "max_stars_count": 0, "path": "tests/core/test_mirror_uploader.py" }, { "content": "from api.admin.password_admin_authentication_provider import (\n PasswordAdminAuthenticationProvider,\n)\nfrom api.admin.problem_details import *\nfrom core.model import Admin, create\nfrom core.testing import DatabaseTest\n\n\nclass TestPasswordAdminAuthenticationProvider(DatabaseTest):\n def test_sign_in(self):\n password_auth = PasswordAdminAuthenticationProvider(None)\n\n # There are two admins with passwords.\n admin1, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin1.password = \"<PASSWORD>\"\n admin2, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin2.password = \"<PASSWORD>\"\n\n # This admin doesn't have a password.\n admin3, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n\n # Both admins with passwords can sign in.\n admin_details, redirect = password_auth.sign_in(\n self._db, dict(email=\"<EMAIL>\", password=\"<PASSWORD>\", redirect=\"foo\")\n )\n assert \"<EMAIL>\" == admin_details.get(\"email\")\n assert PasswordAdminAuthenticationProvider.NAME == admin_details.get(\"type\")\n assert \"foo\" == redirect\n\n admin_details, redirect = password_auth.sign_in(\n self._db, dict(email=\"<EMAIL>\", password=\"<PASSWORD>\", redirect=\"foo\")\n )\n assert \"<EMAIL>\" == admin_details.get(\"email\")\n assert PasswordAdminAuthenticationProvider.NAME == admin_details.get(\"type\")\n assert \"foo\" == redirect\n\n # An admin can't sign in with an incorrect password..\n admin_details, redirect = password_auth.sign_in(\n self._db,\n dict(email=\"<EMAIL>\", password=\"<PASSWORD>\", redirect=\"foo\"),\n )\n assert INVALID_ADMIN_CREDENTIALS == admin_details\n assert None == redirect\n\n # An admin can't sign in with a different admin's password.\n admin_details, redirect = password_auth.sign_in(\n self._db, dict(email=\"<EMAIL>\", password=\"<PASSWORD>\", redirect=\"foo\")\n )\n assert INVALID_ADMIN_CREDENTIALS == admin_details\n assert None == redirect\n\n # The admin with no password can't sign in.\n admin_details, redirect = password_auth.sign_in(\n self._db, dict(email=\"<EMAIL>\", redirect=\"foo\")\n )\n assert INVALID_ADMIN_CREDENTIALS == admin_details\n assert None == redirect\n\n # An admin email that's not in the db at all can't sign in.\n admin_details, redirect = password_auth.sign_in(\n self._db, dict(email=\"<EMAIL>\", password=\"<PASSWORD>\", redirect=\"foo\")\n )\n assert INVALID_ADMIN_CREDENTIALS == admin_details\n assert None == redirect\n", "id": "7656669", "language": "Python", "matching_score": 1.4492151737213135, "max_stars_count": 0, "path": "tests/api/admin/test_password_admin_authentication_provider.py" }, { "content": "from .problem_details import *\n\n\nclass AdminNotAuthorized(Exception):\n status_code = 403\n\n def as_problem_detail_document(self, debug=False):\n return ADMIN_NOT_AUTHORIZED\n", "id": "2711473", "language": "Python", "matching_score": 1.4684087038040161, "max_stars_count": 0, "path": "api/admin/exceptions.py" }, { "content": "from flask_babel import lazy_gettext as _\n\nfrom api.circulation_exceptions import *\nfrom api.config import Configuration\nfrom api.problem_details import *\nfrom core.testing import DatabaseTest\nfrom core.util.problem_detail import ProblemDetail\n\n\nclass TestCirculationExceptions(object):\n def test_as_problem_detail_document(self):\n \"\"\"Verify that circulation exceptions can be turned into ProblemDetail\n documents.\n \"\"\"\n\n e = 
RemoteInitiatedServerError(\"message\", \"some service\")\n doc = e.as_problem_detail_document()\n assert \"Integration error communicating with some service\" == doc.detail\n\n e = AuthorizationExpired()\n assert EXPIRED_CREDENTIALS == e.as_problem_detail_document()\n\n e = AuthorizationBlocked()\n assert BLOCKED_CREDENTIALS == e.as_problem_detail_document()\n\n e = PatronHoldLimitReached()\n assert HOLD_LIMIT_REACHED == e.as_problem_detail_document()\n\n e = NoLicenses()\n assert NO_LICENSES == e.as_problem_detail_document()\n\n\nclass TestLimitReached(DatabaseTest):\n \"\"\"Test LimitReached, which may send different messages depending on the value of a\n library ConfigurationSetting.\n \"\"\"\n\n def test_as_problem_detail_document(self):\n generic_message = _(\n \"You exceeded the limit, but I don't know what the limit was.\"\n )\n pd = ProblemDetail(\"http://uri/\", 403, _(\"Limit exceeded.\"), generic_message)\n setting = \"some setting\"\n\n class Mock(LimitReached):\n BASE_DOC = pd\n SETTING_NAME = setting\n MESSAGE_WITH_LIMIT = _(\"The limit was %(limit)d.\")\n\n # No limit -> generic message.\n ex = Mock(library=self._default_library)\n pd = ex.as_problem_detail_document()\n assert None == ex.limit\n assert generic_message == pd.detail\n\n # Limit but no library -> generic message.\n self._default_library.setting(setting).value = 14\n ex = Mock()\n assert None == ex.limit\n pd = ex.as_problem_detail_document()\n assert generic_message == pd.detail\n\n # Limit and library -> specific message.\n ex = Mock(library=self._default_library)\n assert 14 == ex.limit\n pd = ex.as_problem_detail_document()\n assert \"The limit was 14.\" == pd.detail\n\n def test_subclasses(self):\n # Use end-to-end tests to verify that the subclasses of\n # LimitReached define the right constants.\n library = self._default_library\n\n library.setting(Configuration.LOAN_LIMIT).value = 2\n pd = PatronLoanLimitReached(library=library).as_problem_detail_document()\n assert (\n \"You have reached your loan limit of 2. You cannot borrow anything further until you return something.\"\n == pd.detail\n )\n\n library.setting(Configuration.HOLD_LIMIT).value = 3\n pd = PatronHoldLimitReached(library=library).as_problem_detail_document()\n assert (\n \"You have reached your hold limit of 3. 
You cannot place another item on hold until you borrow something or remove a hold.\"\n == pd.detail\n )\n", "id": "5675725", "language": "Python", "matching_score": 2.3987011909484863, "max_stars_count": 0, "path": "tests/api/test_circulation_exceptions.py" }, { "content": "import flask\nfrom flask import Response\nfrom flask_babel import lazy_gettext as _\n\nfrom core.model import Library, Loan, Patron, get_one\nfrom core.util.problem_detail import ProblemDetail\n\nfrom .circulation_exceptions import *\nfrom .problem_details import *\n\n\nclass BaseCirculationManagerController(object):\n \"\"\"Define minimal standards for a circulation manager controller,\n mainly around authentication.\n \"\"\"\n\n def __init__(self, manager):\n \"\"\":param manager: A CirculationManager object.\"\"\"\n self.manager = manager\n self._db = self.manager._db\n self.url_for = self.manager.url_for\n self.cdn_url_for = self.manager.cdn_url_for\n\n def authorization_header(self):\n \"\"\"Get the authentication header.\"\"\"\n\n # This is the basic auth header.\n header = flask.request.authorization\n\n # If we're using a token instead, flask doesn't extract it for us.\n if not header:\n if \"Authorization\" in flask.request.headers:\n header = flask.request.headers[\"Authorization\"]\n\n return header\n\n @property\n def request_patron(self):\n \"\"\"The currently authenticated patron for this request, if any.\n\n Most of the time you can use flask.request.patron, but\n sometimes it's not clear whether\n authenticated_patron_from_request() (which sets\n flask.request.patron) has been called, and\n authenticated_patron_from_request has a complicated return\n value.\n\n :return: A Patron, if one could be authenticated; None otherwise.\n \"\"\"\n if not hasattr(flask.request, \"patron\"):\n # Call authenticated_patron_from_request for its side effect\n # of setting flask.request.patron\n self.authenticated_patron_from_request()\n\n return flask.request.patron\n\n def authenticated_patron_from_request(self):\n \"\"\"Try to authenticate a patron for the incoming request.\n\n When this method returns, flask.request.patron will\n be set, though the value it's set to may be None.\n\n :return: A Patron, if possible. If no authentication was\n provided, a Flask Response. 
If a problem occured during\n authentication, a ProblemDetail.\n \"\"\"\n # Start off by assuming authentication will not work.\n flask.request.patron = None\n\n header = self.authorization_header()\n\n if not header:\n # No credentials were provided.\n return self.authenticate()\n\n try:\n patron = self.authenticated_patron(header)\n except RemoteInitiatedServerError as e:\n return REMOTE_INTEGRATION_FAILED.detailed(\n _(\"Error in authentication service\")\n )\n if patron is None:\n # Credentials were provided but they turned out not\n # to identify anyone in particular.\n return self.authenticate()\n if isinstance(patron, Patron):\n flask.request.patron = patron\n return patron\n\n def authenticated_patron(self, authorization_header):\n\n \"\"\"Look up the patron authenticated by the given authorization header.\n\n The header could contain a barcode and pin or a token for an\n external service.\n\n If there's a problem, return a Problem Detail Document.\n\n If there's no problem, return a Patron object.\n \"\"\"\n patron = self.manager.auth.authenticated_patron(self._db, authorization_header)\n if not patron:\n return INVALID_CREDENTIALS\n\n if isinstance(patron, ProblemDetail):\n return patron\n\n return patron\n\n def authenticate(self):\n \"\"\"Sends a 401 response that demands authentication.\"\"\"\n headers = self.manager.auth.create_authentication_headers()\n data = self.manager.authentication_for_opds_document\n return Response(data, 401, headers)\n\n def library_through_external_loan_identifier(self, loan_external_identifier):\n \"\"\"Look up the library the user is trying to access using a loan's external identifier.\n We assume that the external identifier is globally unique which is true, for example,\n in the case of using Readium LCP.\n\n :param loan_external_identifier: External identifier of the patron's loan\n :type loan_external_identifier: basestring\n\n :return: Library the patron is trying to access\n :rtype: Library\n \"\"\"\n self.manager.reload_settings_if_changed()\n\n loan = get_one(self._db, Loan, external_identifier=loan_external_identifier)\n\n if loan is None:\n return LOAN_NOT_FOUND\n\n library = loan.patron.library\n flask.request.library = library\n\n return library\n\n def library_for_request(self, library_short_name):\n \"\"\"Look up the library the user is trying to access.\n\n Since this is called on pretty much every request, it's also\n an appropriate time to check whether the site configuration\n has been changed and needs to be updated.\n \"\"\"\n self.manager.reload_settings_if_changed()\n\n if library_short_name:\n library = Library.lookup(self._db, short_name=library_short_name)\n else:\n library = Library.default(self._db)\n\n if not library:\n return LIBRARY_NOT_FOUND\n flask.request.library = library\n return library\n", "id": "4741711", "language": "Python", "matching_score": 2.686042547225952, "max_stars_count": 0, "path": "api/base_controller.py" }, { "content": "import logging\n\nfrom flask_babel import lazy_gettext as _\nfrom lxml import etree\n\nfrom core.model import ExternalIntegration\nfrom core.util.http import HTTP\n\nfrom .authenticator import BasicAuthenticationProvider, PatronData\nfrom .config import CannotLoadConfiguration\n\n\nclass KansasAuthenticationAPI(BasicAuthenticationProvider):\n\n NAME = \"Kansas\"\n\n DESCRIPTION = _(\n \"\"\"\n An authentication service for the Kansas State Library.\n \"\"\"\n )\n\n DISPLAY_NAME = NAME\n\n SETTINGS = [\n {\n \"key\": ExternalIntegration.URL,\n \"format\": \"url\",\n \"label\": 
_(\"URL\"),\n \"default\": \"https://ks-kansaslibrary3m.civicplus.com/api/UserDetails\",\n \"required\": True,\n },\n ] + BasicAuthenticationProvider.SETTINGS\n\n log = logging.getLogger(\"Kansas authentication API\")\n\n def __init__(self, library_id, integration, analytics=None, base_url=None):\n super(KansasAuthenticationAPI, self).__init__(\n library_id, integration, analytics\n )\n if base_url is None:\n base_url = integration.url\n if not base_url:\n raise CannotLoadConfiguration(\"Kansas server url not configured.\")\n self.base_url = base_url\n\n # Begin implementation of BasicAuthenticationProvider abstract\n # methods.\n\n def remote_authenticate(self, username, password):\n # Create XML doc for request\n authorization_request = self.create_authorize_request(username, password)\n # Post request to the server\n response = self.post_request(authorization_request)\n # Parse response from server\n authorized, patron_name, library_identifier = self.parse_authorize_response(\n response.content\n )\n if not authorized:\n return False\n # Kansas auth gives very little data about the patron. Only name and a library identifier.\n return PatronData(\n permanent_id=username,\n authorization_identifier=username,\n personal_name=patron_name,\n library_identifier=library_identifier,\n complete=True,\n )\n\n # End implementation of BasicAuthenticationProvider abstract methods.\n\n @staticmethod\n def create_authorize_request(barcode, pin):\n # Create the authentication document\n authorize_request = etree.Element(\"AuthorizeRequest\")\n user_id = etree.Element(\"UserID\")\n user_id.text = barcode\n password = etree.Element(\"Password\")\n password.text = pin\n authorize_request.append(user_id)\n authorize_request.append(password)\n return etree.tostring(authorize_request, encoding=\"utf8\")\n\n def parse_authorize_response(self, response):\n try:\n authorize_response = etree.fromstring(response)\n except etree.XMLSyntaxError:\n self.log.error(\n \"Unable to parse response from API. Deny Access. Response: \\n%s\",\n response,\n )\n return False, None, None\n patron_names = []\n for tag in [\"FirstName\", \"LastName\"]:\n element = authorize_response.find(tag)\n if element is not None and element.text is not None:\n patron_names.append(element.text)\n patron_name = \" \".join(patron_names) if len(patron_names) != 0 else None\n element = authorize_response.find(\"LibraryID\")\n library_identifier = element.text if element is not None else None\n element = authorize_response.find(\"Status\")\n if element is None:\n self.log.info(\n \"Status element not found in response from server. 
Deny Access.\"\n )\n authorized = True if element is not None and element.text == \"1\" else False\n return authorized, patron_name, library_identifier\n\n def post_request(self, data):\n \"\"\"Make an HTTP request.\n\n Defined solely so it can be overridden in the mock.\n \"\"\"\n return HTTP.post_with_timeout(\n self.base_url,\n data,\n headers={\"Content-Type\": \"application/xml\"},\n allowed_response_codes=[\"2xx\"],\n )\n\n\n# Specify which of the classes defined in this module is the\n# authentication provider.\nAuthenticationProvider = KansasAuthenticationAPI\n", "id": "6257747", "language": "Python", "matching_score": 3.7236146926879883, "max_stars_count": 0, "path": "api/kansas_patron.py" }, { "content": "from api.authenticator import BasicAuthenticationProvider\nfrom core.util.http import RemoteIntegrationException\n\n\nclass MockExplodingAuthenticationProvider(BasicAuthenticationProvider):\n def __init__(\n self,\n library,\n integration,\n analytics=None,\n patron=None,\n patrondata=None,\n *args,\n **kwargs\n ):\n raise RemoteIntegrationException(\"Mock\", \"Mock exploded.\")\n\n def authenticate(self, _db, header):\n pass\n\n def remote_authenticate(self, username, password):\n pass\n\n def remote_patron_lookup(self, patrondata):\n pass\n\n\nAuthenticationProvider = MockExplodingAuthenticationProvider\n", "id": "3200235", "language": "Python", "matching_score": 1.782031774520874, "max_stars_count": 0, "path": "tests/api/mock_authentication_provider.py" }, { "content": "import json\nfrom datetime import datetime\n\nimport pytest\n\nfrom api.authenticator import PatronData\nfrom api.sip import SIP2AuthenticationProvider\nfrom api.sip.client import MockSIPClient, MockSIPClientFactory\nfrom core.config import CannotLoadConfiguration\nfrom core.testing import DatabaseTest\nfrom core.util.http import RemoteIntegrationException\n\n\nclass TestSIP2AuthenticationProvider(DatabaseTest):\n\n # We feed sample data into the MockSIPClient, even though it adds\n # an extra step of indirection, because it lets us use as a\n # starting point the actual (albeit redacted) SIP2 messages we\n # receive from servers.\n\n sierra_valid_login_unicode = \"64 000201610210000142637000000000000000000000000AOnypl |AA12345|AE<NAME>|BZ0030|CA0050|CB0050|BLY|CQY|BV0|CC15.00|<EMAIL>|AY1AZD1B7\"\n sierra_valid_login = sierra_valid_login_unicode.encode(\"cp850\")\n sierra_valid_login_utf8 = sierra_valid_login_unicode.encode(\"utf-8\")\n sierra_excessive_fines = b\"64 000201610210000142637000000000000000000000000AOnypl |AA12345|AESHELDON, ALICE|BZ0030|CA0050|CB0050|BLY|CQY|BV20.00|CC15.00|<EMAIL>|AY1AZD1B7\"\n sierra_invalid_login = b\"64Y YYYYYYYYYYY000201610210000142725000000000000000000000000AOnypl |AA12345|AESHELDON, ALICE|BZ0030|CA0050|CB0050|BLY|CQN|BV0|CC15.00|<EMAIL>|AFInvalid PIN entered. Please try again or see a staff member for assistance.|AFThere are unresolved issues with your account. 
Please see a staff member for assistance.|AY1AZ91A8\"\n\n evergreen_active_user = b\"64 Y 00020161021 142851000000000000000000000000AA12345|AEBooth Active Test|BHUSD|BDAdult Circ Desk 1 Newtown, CT USA 06470|AQNEWTWN|BLY|PA20191004|PCAdult|PIAllowed|XI863715|AOBiblioTest|AY2AZ0000\"\n evergreen_expired_card = b\"64YYYY 00020161021 142937000000000000000000000000AA12345|AEBooth Expired Test|BHUSD|BDAdult Circ Desk #2 Newtown, CT USA 06470|AQNEWTWN|BLY|PA20080907|PCAdult|PIAllowed|XI863716|AFblocked|AOBiblioTest|AY2AZ0000\"\n evergreen_excessive_fines = b\"64 Y 00020161021 143002000000000000000100000000AA12345|AEBooth Excessive Fines Test|BHUSD|BV100.00|BDChildrens Circ Desk 1 Newtown, CT USA 06470|AQNEWTWN|BLY|PA20191004|PCAdult|PIAllowed|XI863718|AOBiblioTest|AY2AZ0000\"\n evergreen_hold_privileges_denied = b\"64 Y 00020161021 143002000000000000000100000000AA12345|AEBooth Excessive Fines Test|BHUSD|BV100.00|BDChildrens Circ Desk 1 Newtown, CT USA 06470|AQNEWTWN|BLY|PA20191004|PCAdult|PIAllowed|XI863718|AOBiblioTest|AY2AZ0000\"\n evergreen_card_reported_lost = b\"64 Y 00020161021 143002000000000000000100000000AA12345|AEBooth Excessive Fines Test|BHUSD|BV100.00|BDChildrens Circ Desk 1 Newtown, CT USA 06470|AQNEWTWN|BLY|PA20191004|PCAdult|PIAllowed|XI863718|AOBiblioTest|AY2AZ0000\"\n evergreen_inactive_account = b\"64YYYY 00020161021 143028000000000000000000000000AE|AA12345|BLN|AOBiblioTest|AY2AZ0000\"\n\n polaris_valid_pin = b\"64 00120161121 143327000000000000000000000000AO3|AA25891000331441|AEFalk, Jen|BZ0050|CA0075|CB0075|BLY|CQY|BHUSD|BV9.25|CC9.99|BD123 Charlotte Hall, MD 20622|<EMAIL>|BF501-555-1212|BC19710101 000000|PA1|PEHALL|PSSt. Mary's|U1|U2|U3|U4|U5|PZ20622|PX20180609 235959|PYN|FA0.00|AFPatron status is ok.|AGPatron status is ok.|AY2AZ94F3\"\n\n polaris_wrong_pin = b\"64YYYY 00120161121 143157000000000000000000000000AO3|AA25891000331441|AEFalk, Jen|BZ0050|CA0075|CB0075|BLY|CQN|BHUSD|BV9.25|CC9.99|BD123 Charlotte Hall, MD 20622|<EMAIL>|BF501-555-1212|BC19710101 000000|PA1|PEHALL|PSSt. Mary's|U1|U2|U3|U4|U5|PZ20622|PX20180609 235959|PYN|FA0.00|AFInvalid patron password. Passwords do not match.|AGInvalid patron password.|<PASSWORD>\"\n\n polaris_expired_card = b\"64YYYY 00120161121 143430000000000000000000000000AO3|AA25891000224613|AETester, Tess|BZ0050|CA0075|CB0075|BLY|CQY|BHUSD|BV0.00|CC9.99|BD|BE<EMAIL>|BF|BC19710101 000000|PA1|PELEON|PSSt. 
Mary's|U1|U2|U3|U4|U5|PZ|PX20161025 235959|PYY|FA0.00|AFPatron has blocks.|AGPatron has blocks.|AY2AZA4F8\"\n\n polaris_excess_fines = b\"64YYYY Y 00120161121 144438000000000000000000000000AO3|AA25891000115879|AEFalk, Jen|BZ0050|CA0075|CB0075|BLY|CQY|BHUSD|BV11.50|CC9.99|BD123, Charlotte Hall, MD 20622|BE|BF501-555-1212|BC20140610 000000|PA1|PEHALL|PS|U1No|U2|U3|U4|U5|PZ20622|PX20170424 235959|PYN|FA0.00|AFPatron has blocks.|AGPatron has blocks.|AY2AZA27B\"\n\n polaris_no_such_patron = b\"64YYYY 00120161121 143126000000000000000000000000AO3|AA1112|AE, |BZ0000|CA0000|CB0000|BLN|CQN|BHUSD|BV0.00|CC0.00|BD|BE|BF|BC|PA0|PE|PS|U1|U2|U3|U4|U5|PZ|PX|PYN|FA0.00|AFPatron does not exist.|AGPatron does not exist.|AY2AZBCF2\"\n\n tlc_no_such_patron = b\"64YYYY 00020171031 092000000000000000000000000000AOhq|AA2642|AE|BLN|AF#Unknown borrower barcode - please refer to the circulation desk.|AY1AZD46E\"\n\n end_session_response = b\"36Y201610210000142637AO3|AA25891000331441|AF|AG\"\n\n def test_initialize_from_integration(self):\n p = SIP2AuthenticationProvider\n integration = self._external_integration(self._str)\n integration.url = \"server.com\"\n integration.username = \"user1\"\n integration.password = \"<PASSWORD>\"\n integration.setting(p.FIELD_SEPARATOR).value = \"\\t\"\n integration.setting(p.INSTITUTION_ID).value = \"MAIN\"\n provider = p(self._default_library, integration)\n\n # A SIP2AuthenticationProvider was initialized based on the\n # integration values.\n assert \"user1\" == provider.login_user_id\n assert \"<PASSWORD>\" == provider.login_password\n assert \"\\t\" == provider.field_separator\n assert \"MAIN\" == provider.institution_id\n assert \"server.com\" == provider.server\n\n # Default port is 6001.\n assert None == provider.port\n\n # And it's possible to get a SIP2Client that's configured\n # based on the same values.\n client = provider._client\n assert \"user1\" == client.login_user_id\n assert \"pass1\" == client.login_password\n assert \"\\t\" == client.separator\n assert \"MAIN\" == client.institution_id\n assert \"server.com\" == client.target_server\n assert 6001 == client.target_port\n\n # Try again, specifying a port.\n integration.setting(p.PORT).value = \"1234\"\n provider = p(self._default_library, integration)\n assert 1234 == provider.port\n assert 1234 == provider._client.target_port\n\n def test_remote_authenticate(self):\n integration = self._external_integration(self._str)\n client = MockSIPClient()\n auth = SIP2AuthenticationProvider(\n self._default_library, integration, client=client\n )\n\n # Some examples taken from a Sierra SIP API.\n client.queue_response(self.sierra_valid_login)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert \"12345\" == patrondata.authorization_identifier\n assert \"<EMAIL>\" == patrondata.email_address\n assert \"<NAME>\" == patrondata.personal_name\n assert 0 == patrondata.fines\n assert None == patrondata.authorization_expires\n assert None == patrondata.external_type\n assert PatronData.NO_VALUE == patrondata.block_reason\n\n client.queue_response(self.sierra_invalid_login)\n client.queue_response(self.end_session_response)\n assert None == auth.remote_authenticate(\"user\", \"pass\")\n\n # Since Sierra provides both the patron's fine amount and the\n # maximum allowable amount, we can determine just by looking\n # at the SIP message that this patron is blocked for excessive\n # fines.\n client.queue_response(self.sierra_excessive_fines)\n 
client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert PatronData.EXCESSIVE_FINES == patrondata.block_reason\n\n # A patron with an expired card.\n client.queue_response(self.evergreen_expired_card)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert \"12345\" == patrondata.authorization_identifier\n # SIP extension field XI becomes sipserver_internal_id which\n # becomes PatronData.permanent_id.\n assert \"863716\" == patrondata.permanent_id\n assert \"Booth Expired Test\" == patrondata.personal_name\n assert 0 == patrondata.fines\n assert datetime(2008, 9, 7) == patrondata.authorization_expires\n assert PatronData.NO_BORROWING_PRIVILEGES == patrondata.block_reason\n\n # A patron with excessive fines\n client.queue_response(self.evergreen_excessive_fines)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert \"12345\" == patrondata.authorization_identifier\n assert \"863718\" == patrondata.permanent_id\n assert \"Booth Excessive Fines Test\" == patrondata.personal_name\n assert 100 == patrondata.fines\n assert datetime(2019, 10, 4) == patrondata.authorization_expires\n\n # We happen to know that this patron can't borrow books due to\n # excessive fines, but that information doesn't show up as a\n # block, because Evergreen doesn't also provide the\n # fine limit. This isn't a big deal -- we'll pick it up later\n # when we apply the site policy.\n #\n # This patron also has \"Recall privileges denied\" set, but\n # that's not a reason to block them.\n assert PatronData.NO_VALUE == patrondata.block_reason\n\n # \"Hold privileges denied\" is not a block because you can\n # still borrow books.\n client.queue_response(self.evergreen_hold_privileges_denied)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert PatronData.NO_VALUE == patrondata.block_reason\n\n client.queue_response(self.evergreen_card_reported_lost)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert PatronData.CARD_REPORTED_LOST == patrondata.block_reason\n\n # Some examples taken from a Polaris instance.\n client.queue_response(self.polaris_valid_pin)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert \"25891000331441\" == patrondata.authorization_identifier\n assert \"<EMAIL>\" == patrondata.email_address\n assert 9.25 == patrondata.fines\n assert \"<NAME>\" == patrondata.personal_name\n assert datetime(2018, 6, 9, 23, 59, 59) == patrondata.authorization_expires\n\n client.queue_response(self.polaris_wrong_pin)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert None == patrondata\n\n client.queue_response(self.polaris_expired_card)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert datetime(2016, 10, 25, 23, 59, 59) == patrondata.authorization_expires\n\n client.queue_response(self.polaris_excess_fines)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert 11.50 == patrondata.fines\n\n # Two cases where the patron's authorization identifier was\n # just not recognized. 
One on an ILS that sets\n # valid_patron_password='N' when that happens.\n client.queue_response(self.polaris_no_such_patron)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert None == patrondata\n\n # And once on an ILS that leaves valid_patron_password blank\n # when that happens.\n client.queue_response(self.tlc_no_such_patron)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n assert None == patrondata\n\n def test_remote_authenticate_no_password(self):\n\n integration = self._external_integration(self._str)\n p = SIP2AuthenticationProvider\n integration.setting(p.PASSWORD_KEYBOARD).value = p.NULL_KEYBOARD\n auth = p(self._default_library, integration, client=MockSIPClientFactory())\n client = auth._client\n # This Evergreen instance doesn't use passwords.\n client.queue_response(self.evergreen_active_user)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", None)\n assert \"12345\" == patrondata.authorization_identifier\n assert \"863715\" == patrondata.permanent_id\n assert \"Booth Active Test\" == patrondata.personal_name\n assert 0 == patrondata.fines\n assert datetime(2019, 10, 4) == patrondata.authorization_expires\n assert \"Adult\" == patrondata.external_type\n\n # If a password is specified, it is not sent over the wire.\n client.queue_response(self.evergreen_active_user)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user2\", \"some password\")\n assert \"12345\" == patrondata.authorization_identifier\n request = client.requests[-1]\n assert b\"user2\" in request\n assert b\"some password\" not in request\n\n def test_encoding(self):\n # It's possible to specify an encoding other than CP850\n # for communication with the SIP2 server.\n #\n # Here, we'll try it with UTF-8.\n p = SIP2AuthenticationProvider\n integration = self._external_integration(self._str)\n integration.setting(p.ENCODING).value = \"utf-8\"\n auth = p(self._default_library, integration, client=MockSIPClientFactory())\n\n # Queue the UTF-8 version of the patron information\n # as opposed to the CP850 version.\n client = auth._client\n client.queue_response(self.sierra_valid_login_utf8)\n client.queue_response(self.end_session_response)\n patrondata = auth.remote_authenticate(\"user\", \"pass\")\n\n # We're able to parse the message from the server and parse\n # out patron data, including the É character, with the proper\n # encoding.\n assert \"12345\" == patrondata.authorization_identifier\n assert \"<EMAIL>\" == patrondata.email_address\n assert \"<NAME>\" == patrondata.personal_name\n assert 0 == patrondata.fines\n assert None == patrondata.authorization_expires\n assert None == patrondata.external_type\n assert PatronData.NO_VALUE == patrondata.block_reason\n\n def test_ioerror_during_connect_becomes_remoteintegrationexception(self):\n \"\"\"If the IP of the circulation manager has not been whitelisted,\n we generally can't even connect to the server.\n \"\"\"\n\n class CannotConnect(MockSIPClient):\n def connect(self):\n raise IOError(\"Doom!\")\n\n integration = self._external_integration(self._str)\n provider = SIP2AuthenticationProvider(\n self._default_library, integration, client=CannotConnect\n )\n\n with pytest.raises(RemoteIntegrationException) as excinfo:\n provider.remote_authenticate(\n \"username\",\n \"password\",\n )\n assert \"Error accessing unknown server: Doom!\" 
in str(excinfo.value)\n\n def test_ioerror_during_send_becomes_remoteintegrationexception(self):\n \"\"\"If there's an IOError communicating with the server,\n it becomes a RemoteIntegrationException.\n \"\"\"\n\n class CannotSend(MockSIPClient):\n def do_send(self, data):\n raise IOError(\"Doom!\")\n\n integration = self._external_integration(self._str)\n integration.url = \"server.local\"\n provider = SIP2AuthenticationProvider(\n self._default_library, integration, client=CannotSend\n )\n with pytest.raises(RemoteIntegrationException) as excinfo:\n provider.remote_authenticate(\n \"username\",\n \"password\",\n )\n assert \"Error accessing server.local: Doom!\" in str(excinfo.value)\n\n def test_parse_date(self):\n parse = SIP2AuthenticationProvider.parse_date\n assert datetime(2011, 1, 2) == parse(\"20110102\")\n assert datetime(2011, 1, 2, 10, 20, 30) == parse(\"20110102 102030\")\n assert datetime(2011, 1, 2, 10, 20, 30) == parse(\"20110102UTC102030\")\n\n def test_remote_patron_lookup(self):\n # When the SIP authentication provider needs to look up a patron,\n # it calls patron_information on its SIP client and passes in None\n # for the password.\n patron = self._patron()\n patron.authorization_identifier = \"1234\"\n integration = self._external_integration(self._str)\n\n class Mock(MockSIPClient):\n def patron_information(self, identifier, password):\n self.patron_information = identifier\n self.password = password\n return self.patron_information_parser(\n TestSIP2AuthenticationProvider.polaris_wrong_pin\n )\n\n client = Mock()\n client.queue_response(self.end_session_response)\n auth = SIP2AuthenticationProvider(\n self._default_library, integration, client=client\n )\n patron = auth._remote_patron_lookup(patron)\n assert patron.__class__ == PatronData\n assert \"25891000331441\" == patron.authorization_identifier\n assert \"<EMAIL>\" == patron.email_address\n assert 9.25 == patron.fines\n assert \"<NAME>\" == patron.personal_name\n assert datetime(2018, 6, 9, 23, 59, 59) == patron.authorization_expires\n assert client.patron_information == \"1234\"\n assert client.password == None\n\n def test_info_to_patrondata_validate_password(self):\n integration = self._external_integration(self._str)\n integration.url = \"server.local\"\n provider = SIP2AuthenticationProvider(\n self._default_library, integration, client=MockSIPClientFactory()\n )\n\n client = provider._client\n # Test with valid login, should return PatronData\n info = client.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_valid_login\n )\n patron = provider.info_to_patrondata(info)\n assert patron.__class__ == PatronData\n assert \"12345\" == patron.authorization_identifier\n assert \"<EMAIL>\" == patron.email_address\n assert \"<NAME>\" == patron.personal_name\n assert 0 == patron.fines\n assert None == patron.authorization_expires\n assert None == patron.external_type\n assert PatronData.NO_VALUE == patron.block_reason\n\n # Test with invalid login, should return None\n info = client.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_invalid_login\n )\n patron = provider.info_to_patrondata(info)\n assert None == patron\n\n def test_info_to_patrondata_no_validate_password(self):\n integration = self._external_integration(self._str)\n integration.url = \"server.local\"\n provider = SIP2AuthenticationProvider(\n self._default_library, integration, client=MockSIPClientFactory()\n )\n client = provider._client\n\n # Test with valid login, should return PatronData\n info = 
client.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_valid_login\n )\n patron = provider.info_to_patrondata(info, validate_password=False)\n assert patron.__class__ == PatronData\n assert \"12345\" == patron.authorization_identifier\n assert \"<EMAIL>\" == patron.email_address\n assert \"<NAME>\" == patron.personal_name\n assert 0 == patron.fines\n assert None == patron.authorization_expires\n assert None == patron.external_type\n assert PatronData.NO_VALUE == patron.block_reason\n\n # Test with invalid login, should return PatronData\n info = client.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_invalid_login\n )\n patron = provider.info_to_patrondata(info, validate_password=False)\n assert patron.__class__ == PatronData\n assert \"12345\" == patron.authorization_identifier\n assert \"<EMAIL>\" == patron.email_address\n assert \"<NAME>\" == patron.personal_name\n assert 0 == patron.fines\n assert None == patron.authorization_expires\n assert None == patron.external_type\n assert \"no borrowing privileges\" == patron.block_reason\n\n def test_patron_block_setting(self):\n integration_block = self._external_integration(\n self._str, settings={SIP2AuthenticationProvider.PATRON_STATUS_BLOCK: \"true\"}\n )\n integration_noblock = self._external_integration(\n self._str,\n settings={SIP2AuthenticationProvider.PATRON_STATUS_BLOCK: \"false\"},\n )\n\n # Test with blocked patron, block should be set\n p = SIP2AuthenticationProvider(\n self._default_library, integration_block, client=MockSIPClientFactory()\n )\n client = p._client\n info = client.patron_information_parser(\n TestSIP2AuthenticationProvider.evergreen_expired_card\n )\n patron = p.info_to_patrondata(info)\n assert patron.__class__ == PatronData\n assert \"12345\" == patron.authorization_identifier\n assert \"863716\" == patron.permanent_id\n assert \"Booth Expired Test\" == patron.personal_name\n assert 0 == patron.fines\n assert datetime(2008, 9, 7) == patron.authorization_expires\n assert PatronData.NO_BORROWING_PRIVILEGES == patron.block_reason\n\n # Test with blocked patron, block should not be set\n p = SIP2AuthenticationProvider(\n self._default_library, integration_noblock, client=MockSIPClientFactory()\n )\n client = p._client\n info = client.patron_information_parser(\n TestSIP2AuthenticationProvider.evergreen_expired_card\n )\n patron = p.info_to_patrondata(info)\n assert patron.__class__ == PatronData\n assert \"12345\" == patron.authorization_identifier\n assert \"863716\" == patron.permanent_id\n assert \"Booth Expired Test\" == patron.personal_name\n assert 0 == patron.fines\n assert datetime(2008, 9, 7) == patron.authorization_expires\n assert PatronData.NO_VALUE == patron.block_reason\n\n def test_run_self_tests(self):\n integration = self._external_integration(self._str)\n integration.url = \"server.com\"\n\n class MockBadConnection(MockSIPClient):\n def connect(self):\n # probably a timeout if the server or port values are not valid\n raise IOError(\"Could not connect\")\n\n class MockSIPLogin(MockSIPClient):\n def now(self):\n return datetime(2019, 1, 1).strftime(\"%Y%m%d0000%H%M%S\")\n\n def login(self):\n if not self.login_user_id and not self.login_password:\n raise IOError(\"Error logging in\")\n\n def patron_information(self, username, password):\n return self.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_valid_login\n )\n\n auth = SIP2AuthenticationProvider(\n self._default_library, integration, client=MockBadConnection\n )\n results = [r for r in 
auth._run_self_tests(self._db)]\n\n # If the connection doesn't work then don't bother running the other tests\n assert len(results) == 1\n assert results[0].name == \"Test Connection\"\n assert results[0].success == False\n assert isinstance(results[0].exception, IOError)\n assert results[0].exception.args == (\"Could not connect\",)\n\n auth = SIP2AuthenticationProvider(\n self._default_library, integration, client=MockSIPLogin\n )\n results = [x for x in auth._run_self_tests(self._db)]\n\n assert len(results) == 2\n assert results[0].name == \"Test Connection\"\n assert results[0].success == True\n\n assert results[1].name == \"Test Login with username 'None' and password 'None'\"\n assert results[1].success == False\n assert isinstance(results[1].exception, IOError)\n assert results[1].exception.args == (\"Error logging in\",)\n\n # Set the log in username and password\n integration.username = \"user1\"\n integration.password = \"<PASSWORD>\"\n goodLoginClient = MockSIPLogin(login_user_id=\"user1\", login_password=\"<PASSWORD>\")\n auth = SIP2AuthenticationProvider(\n self._default_library, integration, client=goodLoginClient\n )\n results = [x for x in auth._run_self_tests(self._db)]\n\n assert len(results) == 3\n assert results[0].name == \"Test Connection\"\n assert results[0].success == True\n\n assert (\n results[1].name == \"Test Login with username 'user1' and password '<PASSWORD>'\"\n )\n assert results[1].success == True\n\n assert results[2].name == \"Authenticating test patron\"\n assert results[2].success == False\n assert isinstance(results[2].exception, CannotLoadConfiguration)\n assert results[2].exception.args == (\n \"No test patron identifier is configured.\",\n )\n\n # Now add the test patron credentials into the mocked client and SIP2 authenticator provider\n patronDataClient = MockSIPLogin(login_user_id=\"user1\", login_password=\"<PASSWORD>\")\n valid_login_patron = patronDataClient.patron_information_parser(\n TestSIP2AuthenticationProvider.sierra_valid_login\n )\n\n class MockSIP2PatronInformation(SIP2AuthenticationProvider):\n def patron_information(self, username, password):\n return valid_login_patron\n\n auth = MockSIP2PatronInformation(\n self._default_library, integration, client=patronDataClient\n )\n # The actual test patron credentials\n auth.test_username = \"usertest1\"\n auth.test_password = \"<PASSWORD>\"\n results = [x for x in auth._run_self_tests(self._db)]\n\n assert len(results) == 6\n assert results[0].name == \"Test Connection\"\n assert results[0].success == True\n\n assert (\n results[1].name == \"Test Login with username 'user1' and password '<PASSWORD>'\"\n )\n assert results[1].success == True\n\n assert results[2].name == \"Authenticating test patron\"\n assert results[2].success == True\n\n # Since test patron authentication is true, we can now see self\n # test results for syncing metadata and the raw data from `patron_information`\n assert results[3].name == \"Syncing patron metadata\"\n assert results[3].success == True\n\n assert results[4].name == \"Patron information request\"\n assert results[4].success == True\n assert results[4].result == patronDataClient.patron_information_request(\n \"usertest1\", \"<PASSWORD>\"\n )\n\n assert results[5].name == \"Raw test patron information\"\n assert results[5].success == True\n assert results[5].result == json.dumps(valid_login_patron, indent=1)\n", "id": "1876373", "language": "Python", "matching_score": 6.558243274688721, "max_stars_count": 0, "path": 
"tests/api/sip/test_authentication_provider.py" }, { "content": "import json\nfrom datetime import datetime\n\nfrom flask_babel import lazy_gettext as _\n\nfrom api.authenticator import BasicAuthenticationProvider, PatronData\nfrom api.sip.client import SIPClient\nfrom api.sip.dialect import Dialect as Sip2Dialect\nfrom core.model import ExternalIntegration\nfrom core.util import MoneyUtility\nfrom core.util.http import RemoteIntegrationException\n\n\nclass SIP2AuthenticationProvider(BasicAuthenticationProvider):\n\n NAME = \"SIP2\"\n\n DATE_FORMATS = [\"%Y%m%d\", \"%Y%m%d%Z%H%M%S\", \"%Y%m%d %H%M%S\"]\n\n # Constants for integration configuration settings.\n PORT = \"port\"\n LOCATION_CODE = \"location code\"\n FIELD_SEPARATOR = \"field separator\"\n ENCODING = \"encoding\"\n DEFAULT_ENCODING = SIPClient.DEFAULT_ENCODING\n USE_SSL = \"use_ssl\"\n SSL_CERTIFICATE = \"ssl_certificate\"\n SSL_KEY = \"ssl_key\"\n ILS = \"ils\"\n PATRON_STATUS_BLOCK = \"patron status block\"\n\n SETTINGS = [\n {\"key\": ExternalIntegration.URL, \"label\": _(\"Server\"), \"required\": True},\n {\"key\": PORT, \"label\": _(\"Port\"), \"required\": True, \"type\": \"number\"},\n {\"key\": ExternalIntegration.USERNAME, \"label\": _(\"Login User ID\")},\n {\"key\": ExternalIntegration.PASSWORD, \"label\": _(\"Login Password\")},\n {\"key\": LOCATION_CODE, \"label\": _(\"Location Code\")},\n {\n \"key\": ENCODING,\n \"label\": _(\"Data encoding\"),\n \"default\": DEFAULT_ENCODING,\n \"description\": _(\n \"By default, SIP2 servers encode outgoing data using the Code Page 850 encoding, but some ILSes allow some other encoding to be used, usually UTF-8.\"\n ),\n },\n {\n \"key\": USE_SSL,\n \"label\": _(\"Connect over SSL?\"),\n \"description\": _(\n \"Some SIP2 servers require or allow clients to connect securely over SSL. Other servers don't support SSL, and require clients to use an ordinary socket connection.\"\n ),\n \"type\": \"select\",\n \"options\": [\n {\"key\": \"true\", \"label\": _(\"Connect to the SIP2 server over SSL\")},\n {\n \"key\": \"false\",\n \"label\": _(\n \"Connect to the SIP2 server over an ordinary socket connection\"\n ),\n },\n ],\n \"default\": \"false\",\n \"required\": True,\n },\n {\n \"key\": ILS,\n \"label\": _(\"ILS\"),\n \"description\": _(\n \"Some ILS require specific SIP2 settings. If the ILS you are using is in the list please pick it otherwise select 'Generic ILS'.\"\n ),\n \"type\": \"select\",\n \"options\": [\n {\"key\": Sip2Dialect.GENERIC_ILS, \"label\": _(\"Generic ILS\")},\n {\"key\": Sip2Dialect.AG_VERSO, \"label\": _(\"Auto-Graphics VERSO\")},\n ],\n \"default\": Sip2Dialect.GENERIC_ILS,\n \"required\": True,\n },\n {\n \"key\": SSL_CERTIFICATE,\n \"label\": _(\"SSL Certificate\"),\n \"description\": _(\n \"The SSL certificate used to securely connect to an SSL-enabled SIP2 server. Not all SSL-enabled SIP2 servers require a custom certificate, but some do. This should be a string beginning with <code>-----BEGIN CERTIFICATE-----</code> and ending with <code>-----END CERTIFICATE-----</code>\"\n ),\n \"type\": \"textarea\",\n },\n {\n \"key\": SSL_KEY,\n \"label\": _(\"SSL Key\"),\n \"description\": _(\n \"The private key, if any, used to sign the SSL certificate above. 
If present, this should be a string beginning with <code>-----BEGIN PRIVATE KEY-----</code> and ending with <code>-----END PRIVATE KEY-----</code>\"\n ),\n \"type\": \"textarea\",\n },\n {\n \"key\": FIELD_SEPARATOR,\n \"label\": _(\"Field Separator\"),\n \"default\": \"|\",\n \"required\": True,\n },\n {\n \"key\": PATRON_STATUS_BLOCK,\n \"label\": _(\"SIP2 Patron Status Block\"),\n \"description\": _(\n \"Block patrons from borrowing based on the status of the SIP2 <em>patron status</em> field.\"\n ),\n \"type\": \"select\",\n \"options\": [\n {\"key\": \"true\", \"label\": _(\"Block based on patron status field\")},\n {\"key\": \"false\", \"label\": _(\"No blocks based on patron status field\")},\n ],\n \"default\": \"true\",\n },\n ] + BasicAuthenticationProvider.SETTINGS\n\n # Map the reasons why SIP2 might report a patron is blocked to the\n # protocol-independent block reason used by PatronData.\n SPECIFIC_BLOCK_REASONS = {\n SIPClient.CARD_REPORTED_LOST: PatronData.CARD_REPORTED_LOST,\n SIPClient.EXCESSIVE_FINES: PatronData.EXCESSIVE_FINES,\n SIPClient.EXCESSIVE_FEES: PatronData.EXCESSIVE_FEES,\n SIPClient.TOO_MANY_ITEMS_BILLED: PatronData.TOO_MANY_ITEMS_BILLED,\n SIPClient.CHARGE_PRIVILEGES_DENIED: PatronData.NO_BORROWING_PRIVILEGES,\n SIPClient.TOO_MANY_ITEMS_CHARGED: PatronData.TOO_MANY_LOANS,\n SIPClient.TOO_MANY_ITEMS_OVERDUE: PatronData.TOO_MANY_OVERDUE,\n SIPClient.TOO_MANY_RENEWALS: PatronData.TOO_MANY_RENEWALS,\n SIPClient.TOO_MANY_LOST: PatronData.TOO_MANY_LOST,\n SIPClient.RECALL_OVERDUE: PatronData.RECALL_OVERDUE,\n }\n\n def __init__(\n self, library, integration, analytics=None, client=SIPClient, connect=True\n ):\n \"\"\"An object capable of communicating with a SIP server.\n\n :param server: Hostname of the SIP server.\n :param port: The port number to connect to on the SIP server.\n\n :param login_user_id: SIP field CN; the user ID to use when\n initiating a SIP session, if necessary. This is _not_ a\n patron identifier (SIP field AA); it identifies the SC\n creating the SIP session. SIP2 defines SC as \"...any library\n automation device dealing with patrons or library materials.\"\n\n :param login_password: Sip field CO; the password to use when\n initiating a SIP session, if necessary.\n\n :param location_code: SIP field CP; the location code to use\n when initiating a SIP session. A location code supposedly\n refers to the physical location of a self-checkout machine\n within a library system. Some libraries require a special\n location code to be provided when authenticating patrons;\n others may require the circulation manager to be treated as\n its own special 'location'.\n\n :param field_separator: The field delimiter (see\n \"Variable-length fields\" in the SIP2 spec). If no value is\n specified, the default (the pipe character) will be used.\n\n :param client: A SIPClient, or a class that works like\n SIPClient. If a class, the class will be initialized with the\n appropriate configuration whenever necessary. Only intended to be\n overridden during testing.\n\n :param connect: If this is false, the generated SIPClient will\n not attempt to connect to the server. 
Only intended for use\n during testing.\n\n \"\"\"\n super(SIP2AuthenticationProvider, self).__init__(\n library, integration, analytics\n )\n\n self.server = integration.url\n self.port = integration.setting(self.PORT).int_value\n self.login_user_id = integration.username\n self.login_password = <PASSWORD>\n self.location_code = integration.setting(self.LOCATION_CODE).value\n self.encoding = integration.setting(self.ENCODING).value_or_default(\n self.DEFAULT_ENCODING\n )\n self.field_separator = integration.setting(self.FIELD_SEPARATOR).value or \"|\"\n self.use_ssl = integration.setting(self.USE_SSL).json_value\n self.ssl_cert = integration.setting(self.SSL_CERTIFICATE).value\n self.ssl_key = integration.setting(self.SSL_KEY).value\n self.dialect = Sip2Dialect.load_dialect(integration.setting(self.ILS).value)\n self.client = client\n patron_status_block = integration.setting(self.PATRON_STATUS_BLOCK).json_value\n if patron_status_block is None or patron_status_block:\n self.fields_that_deny_borrowing = (\n SIPClient.PATRON_STATUS_FIELDS_THAT_DENY_BORROWING_PRIVILEGES\n )\n else:\n self.fields_that_deny_borrowing = []\n\n @property\n def _client(self):\n \"\"\"Initialize a SIPClient object using the default settings.\n\n :return: A SIPClient\n \"\"\"\n if isinstance(self.client, SIPClient):\n # A specific SIPClient was provided, hopefully during\n # a test scenario.\n return self.client\n\n return self.client(\n target_server=self.server,\n target_port=self.port,\n login_user_id=self.login_user_id,\n login_password=<PASSWORD>,\n location_code=self.location_code,\n institution_id=self.institution_id,\n separator=self.field_separator,\n use_ssl=self.use_ssl,\n ssl_cert=self.ssl_cert,\n ssl_key=self.ssl_key,\n encoding=self.encoding.lower(),\n dialect=self.dialect,\n )\n\n def patron_information(self, username, password):\n try:\n sip = self._client\n sip.connect()\n sip.login()\n info = sip.patron_information(username, password)\n sip.end_session(username, password)\n sip.disconnect()\n return info\n\n except IOError as e:\n raise RemoteIntegrationException(self.server or \"unknown server\", str(e))\n\n def _remote_patron_lookup(self, patron_or_patrondata):\n info = self.patron_information(\n patron_or_patrondata.authorization_identifier, None\n )\n return self.info_to_patrondata(info, False)\n\n def remote_authenticate(self, username, password):\n \"\"\"Authenticate a patron with the SIP2 server.\n\n :param username: The patron's username/barcode/card\n number/authorization identifier.\n :param password: The patron's password/pin/access code.\n \"\"\"\n if not self.collects_password:\n # Even if we were somehow given a password, we won't be\n # passing it on.\n password = None\n info = self.patron_information(username, password)\n return self.info_to_patrondata(info)\n\n def _run_self_tests(self, _db):\n def makeConnection(sip):\n sip.connect()\n return sip.connection\n\n sip = self._client\n connection = self.run_test((\"Test Connection\"), makeConnection, sip)\n yield connection\n\n if not connection.success:\n return\n\n login = self.run_test(\n (\n \"Test Login with username '%s' and password '%s'\"\n % (self.login_user_id, self.login_password)\n ),\n sip.login,\n )\n yield login\n\n # Log in was successful so test patron's test credentials\n if login.success:\n results = [\n r for r in super(SIP2AuthenticationProvider, self)._run_self_tests(_db)\n ]\n for result in results:\n yield result\n\n if results[0].success:\n\n def raw_patron_information():\n info = sip.patron_information(\n 
self.test_username, self.test_password\n )\n return json.dumps(info, indent=1)\n\n yield self.run_test(\n \"Patron information request\",\n sip.patron_information_request,\n self.test_username,\n patron_password=self.test_password,\n )\n\n yield self.run_test(\n (\"Raw test patron information\"), raw_patron_information\n )\n\n def info_to_patrondata(self, info, validate_password=True):\n\n \"\"\"Convert the SIP-specific dictionary obtained from\n SIPClient.patron_information() to an abstract,\n authenticator-independent PatronData object.\n \"\"\"\n if info.get(\"valid_patron\", \"N\") == \"N\":\n # The patron could not be identified as a patron of this\n # library. Don't return any data.\n return None\n\n if info.get(\"valid_patron_password\") == \"N\" and validate_password:\n # The patron did not authenticate correctly. Don't\n # return any data.\n return None\n\n # TODO: I'm not 100% convinced that a missing CQ field\n # always means \"we don't have passwords so you're\n # authenticated,\" rather than \"you didn't provide a\n # password so we didn't check.\"\n patrondata = PatronData()\n if \"sipserver_internal_id\" in info:\n patrondata.permanent_id = info[\"sipserver_internal_id\"]\n if \"patron_identifier\" in info:\n patrondata.authorization_identifier = info[\"patron_identifier\"]\n if \"email_address\" in info:\n patrondata.email_address = info[\"email_address\"]\n if \"personal_name\" in info:\n patrondata.personal_name = info[\"personal_name\"]\n if \"fee_amount\" in info:\n fines = info[\"fee_amount\"]\n else:\n fines = \"0\"\n patrondata.fines = MoneyUtility.parse(fines)\n if \"sipserver_patron_class\" in info:\n patrondata.external_type = info[\"sipserver_patron_class\"]\n for expire_field in [\n \"sipserver_patron_expiration\",\n \"polaris_patron_expiration\",\n ]:\n if expire_field in info:\n value = info.get(expire_field)\n value = self.parse_date(value)\n if value:\n patrondata.authorization_expires = value\n break\n\n # A True value in most (but not all) subfields of the\n # patron_status field will prohibit the patron from borrowing\n # books.\n status = info[\"patron_status_parsed\"]\n block_reason = PatronData.NO_VALUE\n for field in self.fields_that_deny_borrowing:\n if status.get(field) is True:\n block_reason = self.SPECIFIC_BLOCK_REASONS.get(\n field, PatronData.UNKNOWN_BLOCK\n )\n if block_reason not in (PatronData.NO_VALUE, PatronData.UNKNOWN_BLOCK):\n # Even if there are multiple problems with this\n # patron's account, we can now present a specific\n # error message. 
There's no need to look through\n # more fields.\n break\n patrondata.block_reason = block_reason\n\n # If we can tell by looking at the SIP2 message that the\n # patron has excessive fines, we can use that as the reason\n # they're blocked.\n if \"fee_limit\" in info:\n fee_limit = MoneyUtility.parse(info[\"fee_limit\"]).amount\n if fee_limit and patrondata.fines > fee_limit:\n patrondata.block_reason = PatronData.EXCESSIVE_FINES\n\n return patrondata\n\n @classmethod\n def parse_date(cls, value):\n \"\"\"Try to parse `value` using any of several common date formats.\"\"\"\n date_value = None\n for format in cls.DATE_FORMATS:\n try:\n date_value = datetime.strptime(value, format)\n break\n except ValueError as e:\n continue\n return date_value\n\n # NOTE: It's not necessary to implement remote_patron_lookup\n # because authentication gets patron data as a side effect.\n\n\nAuthenticationProvider = SIP2AuthenticationProvider\n", "id": "323448", "language": "Python", "matching_score": 2.659895181655884, "max_stars_count": 0, "path": "api/sip/__init__.py" }, { "content": "\"\"\"Test circulation-specific extensions to the self-test infrastructure.\"\"\"\nimport datetime\nfrom io import StringIO\nfrom unittest import mock\n\nimport pytest\n\nfrom api.authenticator import BasicAuthenticationProvider\nfrom api.circulation import CirculationAPI\nfrom api.feedbooks import FeedbooksImportMonitor\nfrom api.selftest import (\n HasCollectionSelfTests,\n HasSelfTests,\n RunSelfTestsScript,\n SelfTestResult,\n)\nfrom core.model import ExternalIntegration, Patron\nfrom core.opds_import import OPDSImportMonitor\nfrom core.testing import DatabaseTest\nfrom core.util.problem_detail import ProblemDetail\n\n\nclass TestHasSelfTests(DatabaseTest):\n def test__determine_self_test_patron(self):\n \"\"\"Test per-library default patron lookup for self-tests.\n\n Ensure that the tested method either:\n - returns a 2-tuple of (patron, password) or\n - raises the expected _NoValidLibrarySelfTestPatron exception.\n \"\"\"\n\n test_patron_lookup_method = HasSelfTests._determine_self_test_patron\n test_patron_lookup_exception = HasSelfTests._NoValidLibrarySelfTestPatron\n\n # This library has no patron authentication integration configured.\n library_without_default_patron = self._library()\n with pytest.raises(test_patron_lookup_exception) as excinfo:\n test_patron_lookup_method(library_without_default_patron)\n assert \"Library has no test patron configured.\" == excinfo.value.message\n assert (\n \"You can specify a test patron when you configure the library's patron authentication service.\"\n == excinfo.value.detail\n )\n\n # Add a patron authentication integration, but don't set the patron.\n integration = self._external_integration(\n \"api.simple_authentication\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[self._default_library],\n )\n\n # # No default patron set up in the patron authentication integration.\n with pytest.raises(test_patron_lookup_exception) as excinfo:\n test_patron_lookup_method(library_without_default_patron)\n assert \"Library has no test patron configured.\" == excinfo.value.message\n assert (\n \"You can specify a test patron when you configure the library's patron authentication service.\"\n == excinfo.value.detail\n )\n\n # Set the patron / password on this integration.\n p = BasicAuthenticationProvider\n integration.setting(p.TEST_IDENTIFIER).value = \"username1\"\n integration.setting(p.TEST_PASSWORD).value = \"<PASSWORD>\"\n\n # This library's patron authentication 
integration has a default\n # patron (for this library).\n patron, password = test_patron_lookup_method(self._default_library)\n assert isinstance(patron, Patron)\n assert \"username1\" == patron.authorization_identifier\n assert \"password1\" == password\n\n # Patron authentication integration returns a problem detail.\n expected_message = \"fake-pd-1 detail\"\n expected_detail = \"fake-pd-1 debug message\"\n result_patron = ProblemDetail(\n \"https://example.com/fake-problemdetail-1\",\n title=\"fake-pd-1\",\n detail=expected_message,\n debug_message=expected_detail,\n )\n result_password = None\n with mock.patch.object(\n BasicAuthenticationProvider, \"testing_patron\"\n ) as testing_patron:\n testing_patron.return_value = (result_patron, result_password)\n with pytest.raises(test_patron_lookup_exception) as excinfo:\n test_patron_lookup_method(self._default_library)\n assert expected_message == excinfo.value.message\n assert expected_detail == excinfo.value.detail\n\n # Patron authentication integration returns something that is neither\n # a Patron nor a ProblemDetail.\n result_patron = ()\n result_patron_type = type(result_patron)\n expected_message = f\"Authentication provider returned unexpected type ({result_patron_type}) instead of patron.\"\n with mock.patch.object(\n BasicAuthenticationProvider, \"testing_patron\"\n ) as testing_patron:\n testing_patron.return_value = (result_patron, None)\n with pytest.raises(test_patron_lookup_exception) as excinfo:\n test_patron_lookup_method(self._default_library)\n assert not isinstance(result_patron, (Patron, ProblemDetail))\n assert expected_message == excinfo.value.message\n assert excinfo.value.detail is None\n\n def test_default_patrons(self):\n \"\"\"Some self-tests must run with a patron's credentials. 
The\n default_patrons() method finds the default Patron for every\n Library associated with a given Collection.\n \"\"\"\n h = HasSelfTests()\n\n # This collection is not in any libraries, so there's no way\n # to test it.\n not_in_library = self._collection()\n [result] = h.default_patrons(not_in_library)\n assert \"Acquiring test patron credentials.\" == result.name\n assert False == result.success\n assert \"Collection is not associated with any libraries.\" == str(\n result.exception\n )\n assert (\n \"Add the collection to a library that has a patron authentication service.\"\n == result.exception.debug_message\n )\n\n # This collection is in two libraries.\n collection = self._default_collection\n\n # This library has no default patron set up.\n no_default_patron = self._library()\n collection.libraries.append(no_default_patron)\n\n # This library has a default patron set up.\n integration = self._external_integration(\n \"api.simple_authentication\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[self._default_library],\n )\n p = BasicAuthenticationProvider\n integration.setting(p.TEST_IDENTIFIER).value = \"username1\"\n integration.setting(p.TEST_PASSWORD).value = \"<PASSWORD>\"\n\n # Calling default_patrons on the Collection returns one result for\n # each Library associated with that Collection.\n\n results = list(h.default_patrons(collection))\n assert 2 == len(results)\n [failure] = [x for x in results if isinstance(x, SelfTestResult)]\n [success] = [x for x in results if x != failure]\n\n # A SelfTestResult indicating failure was returned for the library\n # without a test patron, since the test cannot proceed without one.\n assert failure.success is False\n assert (\n \"Acquiring test patron credentials for library %s\" % no_default_patron.name\n == failure.name\n )\n assert \"Library has no test patron configured.\" == str(failure.exception)\n assert (\n \"You can specify a test patron when you configure the library's patron authentication service.\"\n == failure.exception.debug_message\n )\n\n # The test patron for the library that has one was looked up,\n # and the test can proceed using this patron.\n library, patron, password = success\n assert self._default_library == library\n assert \"username1\" == patron.authorization_identifier\n assert \"password1\" == password\n\n\nclass TestRunSelfTestsScript(DatabaseTest):\n def test_do_run(self):\n library1 = self._default_library\n library2 = self._library(name=\"library2\")\n out = StringIO()\n\n class MockParsed(object):\n pass\n\n class MockScript(RunSelfTestsScript):\n tested = []\n\n def parse_command_line(self, *args, **kwargs):\n parsed = MockParsed()\n parsed.libraries = [library1, library2]\n return parsed\n\n def test_collection(self, collection, api_map):\n self.tested.append((collection, api_map))\n\n script = MockScript(self._db, out)\n script.do_run()\n # Both libraries were tested.\n assert out.getvalue() == \"Testing %s\\nTesting %s\\n\" % (\n library1.name,\n library2.name,\n )\n\n # The default library is the only one with a collection;\n # test_collection() was called on that collection.\n [(collection, api_map)] = script.tested\n assert [collection] == library1.collections\n\n # The API lookup map passed into test_collection() is based on\n # CirculationAPI's default API map.\n default_api_map = CirculationAPI(\n self._db, self._default_library\n ).default_api_map\n for k, v in list(default_api_map.items()):\n assert api_map[k] == v\n\n # But a couple things were added to the map that are not in\n # 
CirculationAPI.\n assert api_map[ExternalIntegration.OPDS_IMPORT] == OPDSImportMonitor\n assert api_map[ExternalIntegration.FEEDBOOKS] == FeedbooksImportMonitor\n\n # If test_collection raises an exception, the exception is recorded,\n # and we move on.\n class MockScript2(MockScript):\n def test_collection(self, collection, api_map):\n raise Exception(\"blah\")\n\n out = StringIO()\n script = MockScript2(self._db, out)\n script.do_run()\n assert (\n out.getvalue()\n == \"Testing %s\\n Exception while running self-test: 'blah'\\nTesting %s\\n\"\n % (library1.name, library2.name)\n )\n\n def test_test_collection(self):\n class MockScript(RunSelfTestsScript):\n processed = []\n\n def process_result(self, result):\n self.processed.append(result)\n\n collection = self._default_collection\n\n # If the api_map does not map the collection's protocol to a\n # HasSelfTests class, nothing happens.\n out = StringIO()\n script = MockScript(self._db, out)\n script.test_collection(collection, api_map={})\n assert (\n out.getvalue()\n == \" Cannot find a self-test for %s, ignoring.\\n\" % collection.name\n )\n\n # If the api_map does map the colelction's protocol to a\n # HasSelfTests class, the class's run_self_tests class method\n # is invoked. Any extra arguments found in the extra_args dictionary\n # are passed in to run_self_tests.\n class MockHasSelfTests(object):\n @classmethod\n def run_self_tests(cls, _db, constructor_method, *constructor_args):\n cls.run_self_tests_called_with = (_db, constructor_method)\n cls.run_self_tests_constructor_args = constructor_args\n return {}, [\"result 1\", \"result 2\"]\n\n out = StringIO()\n script = MockScript(self._db, out)\n protocol = self._default_collection.protocol\n script.test_collection(\n collection,\n api_map={protocol: MockHasSelfTests},\n extra_args={MockHasSelfTests: [\"an extra arg\"]},\n )\n\n # run_self_tests() was called with the correct arguments,\n # including the extra one.\n assert (self._db, None) == MockHasSelfTests.run_self_tests_called_with\n assert (\n self._db,\n collection,\n \"an extra arg\",\n ) == MockHasSelfTests.run_self_tests_constructor_args\n\n # Each result was run through process_result().\n assert [\"result 1\", \"result 2\"] == script.processed\n\n def test_process_result(self):\n\n # Test a successful test that returned a result.\n success = SelfTestResult(\"i succeeded\")\n success.success = True\n success.end = success.start + datetime.timedelta(seconds=1.5)\n success.result = \"a result\"\n out = StringIO()\n script = RunSelfTestsScript(self._db, out)\n script.process_result(success)\n assert out.getvalue() == \" SUCCESS i succeeded (1.5sec)\\n Result: a result\\n\"\n\n # Test a failed test that raised an exception.\n failure = SelfTestResult(\"i failed\")\n failure.end = failure.start\n failure.exception = Exception(\"bah\")\n out = StringIO()\n script = RunSelfTestsScript(self._db, out)\n script.process_result(failure)\n assert out.getvalue() == \" FAILURE i failed (0.0sec)\\n Exception: 'bah'\\n\"\n\n\nclass TestHasCollectionSelfTests(DatabaseTest):\n def test__run_self_tests(self):\n # Verify that _run_self_tests calls all the test methods\n # we want it to.\n class Mock(HasCollectionSelfTests):\n # Mock the methods that run the actual tests.\n def _no_delivery_mechanisms_test(self):\n self._no_delivery_mechanisms_called = True\n return \"1\"\n\n mock = Mock()\n results = [x for x in mock._run_self_tests()]\n assert [\"1\"] == [x.result for x in results]\n assert True == mock._no_delivery_mechanisms_called\n\n 
def test__no_delivery_mechanisms_test(self):\n # Verify that _no_delivery_mechanisms_test works whether all\n # titles in the collection have delivery mechanisms or not.\n\n # There's one LicensePool, and it has a delivery mechanism,\n # so a string is returned.\n pool = self._licensepool(None)\n\n class Mock(HasCollectionSelfTests):\n collection = self._default_collection\n\n hastests = Mock()\n result = hastests._no_delivery_mechanisms_test()\n success = \"All titles in this collection have delivery mechanisms.\"\n assert success == result\n\n # Destroy the delivery mechanism.\n [self._db.delete(x) for x in pool.delivery_mechanisms]\n\n # Now a list of strings is returned, one for each problematic\n # book.\n [result] = hastests._no_delivery_mechanisms_test()\n assert \"[title unknown] (ID: %s)\" % pool.identifier.identifier == result\n\n # Change the LicensePool so it has no owned licenses.\n # Now the book is no longer considered problematic,\n # since it's not actually in the collection.\n pool.licenses_owned = 0\n result = hastests._no_delivery_mechanisms_test()\n assert success == result\n", "id": "10681686", "language": "Python", "matching_score": 7.236774921417236, "max_stars_count": 0, "path": "tests/api/test_selftest.py" }, { "content": "import sys\nfrom typing import Iterable, Optional, Tuple, Union\n\nfrom sqlalchemy.orm.session import Session\n\nfrom core.config import IntegrationException\nfrom core.exceptions import BaseError\nfrom core.model import Collection, ExternalIntegration, Library, LicensePool, Patron\nfrom core.opds_import import OPDSImporter, OPDSImportMonitor\nfrom core.scripts import LibraryInputScript\nfrom core.selftest import HasSelfTests as CoreHasSelfTests\nfrom core.selftest import SelfTestResult\nfrom core.util.problem_detail import ProblemDetail\n\nfrom .authenticator import LibraryAuthenticator\nfrom .circulation import CirculationAPI\nfrom .feedbooks import FeedbooksImportMonitor, FeedbooksOPDSImporter\n\n\nclass HasSelfTests(CoreHasSelfTests):\n \"\"\"Circulation-specific enhancements for HasSelfTests.\n\n Circulation self-tests frequently need to test the ability to act\n on behalf of a specific patron.\n \"\"\"\n\n class _NoValidLibrarySelfTestPatron(BaseError):\n \"\"\"Exception raised when no valid self-test patron found for library.\n\n Attributes:\n message -- primary error message.\n detail (optional) -- additional explanation of the error\n \"\"\"\n\n def __init__(self, message: str, *, detail: str = None):\n super().__init__(message=message)\n self.message = message\n self.detail = detail\n\n def default_patrons(\n self, collection: Collection\n ) -> Iterable[Union[Tuple[Library, Patron, Optional[str]], SelfTestResult]]:\n \"\"\"Find a usable default Patron for each of the libraries associated\n with the given Collection.\n\n :yield: If the collection has no associated libraries, yields a single\n failure SelfTestResult. 
Otherwise, for EACH associated library,\n yields either:\n - a (Library, Patron, (optional) password) 3-tuple, when a\n default patron can be determined; or\n - a failure SelfTestResult when it cannot.\n \"\"\"\n _db = Session.object_session(collection)\n if not collection.libraries:\n yield self.test_failure(\n \"Acquiring test patron credentials.\",\n \"Collection is not associated with any libraries.\",\n \"Add the collection to a library that has a patron authentication service.\",\n )\n # Not strictly necessary, but makes it obvious that we won't do anything else.\n return\n\n for library in collection.libraries:\n task = \"Acquiring test patron credentials for library %s\" % library.name\n try:\n patron, password = self._determine_self_test_patron(library, _db=_db)\n yield library, patron, password\n except self._NoValidLibrarySelfTestPatron as e:\n yield self.test_failure(task, e.message, e.detail)\n except IntegrationException as e:\n yield self.test_failure(task, e)\n except Exception as e:\n yield self.test_failure(\n task, \"Exception getting default patron: %r\" % e\n )\n\n @classmethod\n def _determine_self_test_patron(\n cls, library: Library, _db=None\n ) -> Tuple[Patron, Optional[str]]:\n \"\"\"Obtain the test Patron and optional password for a library's self-tests.\n\n :param library: The library being tested.\n :param _db: Database session object.\n :return: A 2-tuple with either (1) a patron and optional password.\n :raise: _NoValidLibrarySelfTestPatron when a valid patron is not found.\n \"\"\"\n _db = _db or Session.object_session(library)\n library_authenticator = LibraryAuthenticator.from_config(_db, library)\n auth = library_authenticator.basic_auth_provider\n patron, password = auth.testing_patron(_db) if auth else (None, None)\n if isinstance(patron, Patron):\n return patron, password\n\n # If we get here, then we have failed to find a valid test patron\n # and will raise an exception.\n if patron is None:\n message = \"Library has no test patron configured.\"\n detail = \"You can specify a test patron when you configure the library's patron authentication service.\"\n elif isinstance(patron, ProblemDetail):\n message = patron.detail\n detail = patron.debug_message\n else:\n message = f\"Authentication provider returned unexpected type ({type(patron)}) instead of patron.\"\n detail = None\n raise cls._NoValidLibrarySelfTestPatron(message, detail=detail)\n\n\nclass RunSelfTestsScript(LibraryInputScript):\n \"\"\"Run the self-tests for every collection in the given library\n where that's possible.\n \"\"\"\n\n def __init__(self, _db=None, output=sys.stdout):\n super(RunSelfTestsScript, self).__init__(_db)\n self.out = output\n\n def do_run(self, *args, **kwargs):\n parsed = self.parse_command_line(self._db, *args, **kwargs)\n for library in parsed.libraries:\n api_map = CirculationAPI(self._db, library).default_api_map\n api_map[ExternalIntegration.OPDS_IMPORT] = OPDSImportMonitor\n api_map[ExternalIntegration.FEEDBOOKS] = FeedbooksImportMonitor\n self.out.write(\"Testing %s\\n\" % library.name)\n for collection in library.collections:\n try:\n self.test_collection(collection, api_map)\n except Exception as e:\n self.out.write(\" Exception while running self-test: '%s'\\n\" % e)\n\n def test_collection(self, collection, api_map, extra_args=None):\n tester = api_map.get(collection.protocol)\n if not tester:\n self.out.write(\n \" Cannot find a self-test for %s, ignoring.\\n\" % collection.name\n )\n return\n\n self.out.write(\" Running self-test for %s.\\n\" % 
collection.name)\n # Some HasSelfTests classes require extra arguments to their\n # constructors.\n extra_args = extra_args or {\n OPDSImportMonitor: [OPDSImporter],\n FeedbooksImportMonitor: [FeedbooksOPDSImporter],\n }\n extra = extra_args.get(tester, [])\n constructor_args = [self._db, collection] + list(extra)\n results_dict, results_list = tester.run_self_tests(\n self._db, None, *constructor_args\n )\n for result in results_list:\n self.process_result(result)\n\n def process_result(self, result):\n \"\"\"Process a single TestResult object.\"\"\"\n if result.success:\n success = \"SUCCESS\"\n else:\n success = \"FAILURE\"\n self.out.write(\" %s %s (%.1fsec)\\n\" % (success, result.name, result.duration))\n if isinstance(result.result, (bytes, str)):\n self.out.write(\" Result: %s\\n\" % result.result)\n if result.exception:\n self.out.write(\" Exception: '%s'\\n\" % result.exception)\n\n\nclass HasCollectionSelfTests(HasSelfTests):\n \"\"\"Extra tests to verify the integrity of imported\n collections of books.\n\n This is a mixin method that requires that `self.collection`\n point to the Collection to be tested.\n \"\"\"\n\n def _no_delivery_mechanisms_test(self):\n # Find works in the tested collection that have no delivery\n # mechanisms.\n titles = []\n\n qu = self.collection.pools_with_no_delivery_mechanisms\n qu = qu.filter(LicensePool.licenses_owned > 0)\n for lp in qu:\n edition = lp.presentation_edition\n if edition:\n title = edition.title\n else:\n title = \"[title unknown]\"\n identifier = lp.identifier.identifier\n titles.append(\"%s (ID: %s)\" % (title, identifier))\n\n if titles:\n return titles\n else:\n return \"All titles in this collection have delivery mechanisms.\"\n\n def _run_self_tests(self):\n yield self.run_test(\n \"Checking for titles that have no delivery mechanisms.\",\n self._no_delivery_mechanisms_test,\n )\n", "id": "4743016", "language": "Python", "matching_score": 2.52569580078125, "max_stars_count": 0, "path": "api/selftest.py" }, { "content": "from api.admin.problem_details import *\nfrom api.simple_authentication import SimpleAuthenticationProvider\nfrom core.model import ExternalIntegration, create\nfrom core.selftest import HasSelfTests\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestPatronAuthSelfTests(SettingsControllerTest):\n def _auth_service(self, libraries=[]):\n auth_service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=SimpleAuthenticationProvider.__module__,\n goal=ExternalIntegration.PATRON_AUTH_GOAL,\n name=\"name\",\n libraries=libraries,\n )\n return auth_service\n\n def test_patron_auth_self_tests_with_no_identifier(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n None\n )\n assert response.title == MISSING_IDENTIFIER.title\n assert response.detail == MISSING_IDENTIFIER.detail\n assert response.status_code == 400\n\n def test_patron_auth_self_tests_with_no_auth_service_found(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n -1\n )\n assert response == MISSING_SERVICE\n assert response.status_code == 404\n\n def test_patron_auth_self_tests_get_with_no_libraries(self):\n auth_service = self._auth_service()\n with self.request_context_with_admin(\"/\"):\n response = 
self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n auth_service.id\n )\n results = response.get(\"self_test_results\").get(\"self_test_results\")\n assert results.get(\"disabled\") == True\n assert (\n results.get(\"exception\")\n == \"You must associate this service with at least one library before you can run self tests for it.\"\n )\n\n def test_patron_auth_self_tests_test_get(self):\n old_prior_test_results = HasSelfTests.prior_test_results\n HasSelfTests.prior_test_results = self.mock_prior_test_results\n auth_service = self._auth_service([self._library()])\n\n # Make sure that HasSelfTest.prior_test_results() was called and that\n # it is in the response's self tests object.\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n auth_service.id\n )\n response_auth_service = response.get(\"self_test_results\")\n\n assert response_auth_service.get(\"name\") == auth_service.name\n assert response_auth_service.get(\"protocol\") == auth_service.protocol\n assert response_auth_service.get(\"id\") == auth_service.id\n assert response_auth_service.get(\"goal\") == auth_service.goal\n assert (\n response_auth_service.get(\"self_test_results\") == self.self_test_results\n )\n\n HasSelfTests.prior_test_results = old_prior_test_results\n\n def test_patron_auth_self_tests_post_with_no_libraries(self):\n auth_service = self._auth_service()\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n response = self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n auth_service.id\n )\n assert response.title == FAILED_TO_RUN_SELF_TESTS.title\n assert (\n response.detail\n == \"Failed to run self tests for this patron authentication service.\"\n )\n assert response.status_code == 400\n\n def test_patron_auth_self_tests_test_post(self):\n old_run_self_tests = HasSelfTests.run_self_tests\n HasSelfTests.run_self_tests = self.mock_run_self_tests\n auth_service = self._auth_service([self._library()])\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n response = self.manager.admin_patron_auth_service_self_tests_controller.process_patron_auth_service_self_tests(\n auth_service.id\n )\n assert response._status == \"200 OK\"\n assert \"Successfully ran new self tests\" == response.get_data(as_text=True)\n\n # run_self_tests was called with the database twice (the\n # second time to be used in the ExternalSearchIntegration\n # constructor). 
There were no keyword arguments.\n assert (\n (self._db, None, auth_service.libraries[0], auth_service),\n {},\n ) == self.run_self_tests_called_with\n\n HasSelfTests.run_self_tests = old_run_self_tests\n", "id": "307457", "language": "Python", "matching_score": 5.369745254516602, "max_stars_count": 0, "path": "tests/api/admin/controller/test_patron_auth_self_tests.py" }, { "content": "from api.admin.problem_details import *\nfrom core.model import ExternalIntegration, create\nfrom core.selftest import HasSelfTests\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestSearchServiceSelfTests(SettingsControllerTest):\n def test_search_service_self_tests_with_no_identifier(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_search_service_self_tests_controller.process_search_service_self_tests(\n None\n )\n assert response.title == MISSING_IDENTIFIER.title\n assert response.detail == MISSING_IDENTIFIER.detail\n assert response.status_code == 400\n\n def test_search_service_self_tests_with_no_search_service_found(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_search_service_self_tests_controller.process_search_service_self_tests(\n -1\n )\n assert response == MISSING_SERVICE\n assert response.status_code == 404\n\n def test_search_service_self_tests_test_get(self):\n old_prior_test_results = HasSelfTests.prior_test_results\n HasSelfTests.prior_test_results = self.mock_prior_test_results\n search_service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.ELASTICSEARCH,\n goal=ExternalIntegration.SEARCH_GOAL,\n )\n # Make sure that HasSelfTest.prior_test_results() was called and that\n # it is in the response's self tests object.\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_search_service_self_tests_controller.process_search_service_self_tests(\n search_service.id\n )\n response_search_service = response.get(\"self_test_results\")\n\n assert response_search_service.get(\"id\") == search_service.id\n assert response_search_service.get(\"name\") == search_service.name\n assert (\n response_search_service.get(\"protocol\").get(\"label\")\n == search_service.protocol\n )\n assert response_search_service.get(\"goal\") == search_service.goal\n assert (\n response_search_service.get(\"self_test_results\")\n == HasSelfTests.prior_test_results()\n )\n\n HasSelfTests.prior_test_results = old_prior_test_results\n\n def test_search_service_self_tests_post(self):\n old_run_self_tests = HasSelfTests.run_self_tests\n HasSelfTests.run_self_tests = self.mock_run_self_tests\n\n search_service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.ELASTICSEARCH,\n goal=ExternalIntegration.SEARCH_GOAL,\n )\n m = (\n self.manager.admin_search_service_self_tests_controller.self_tests_process_post\n )\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n response = m(search_service.id)\n assert response._status == \"200 OK\"\n assert \"Successfully ran new self tests\" == response.get_data(as_text=True)\n\n positional, keyword = self.run_self_tests_called_with\n # run_self_tests was called with positional arguments:\n # * The database connection\n # * The method to call to instantiate a HasSelfTests implementation\n # (None -- this means to use the default ExternalSearchIndex\n # constructor.)\n # * The database connection again (to be passed into\n # the ExternalSearchIndex constructor).\n assert (self._db, None, self._db) 
== positional\n\n # run_self_tests was not called with any keyword arguments.\n assert {} == keyword\n\n # Undo the mock.\n HasSelfTests.run_self_tests = old_run_self_tests\n", "id": "10741509", "language": "Python", "matching_score": 5.682872295379639, "max_stars_count": 0, "path": "tests/api/admin/controller/test_search_service_self_tests.py" }, { "content": "from api.admin.problem_details import *\nfrom api.nyt import NYTBestSellerAPI\nfrom core.model import ExternalIntegration, create\nfrom core.selftest import HasSelfTests\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestMetadataServiceSelfTests(SettingsControllerTest):\n def test_metadata_service_self_tests_with_no_identifier(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_metadata_service_self_tests_controller.process_metadata_service_self_tests(\n None\n )\n assert response.title == MISSING_IDENTIFIER.title\n assert response.detail == MISSING_IDENTIFIER.detail\n assert response.status_code == 400\n\n def test_metadata_service_self_tests_with_no_metadata_service_found(self):\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_metadata_service_self_tests_controller.process_metadata_service_self_tests(\n -1\n )\n assert response == MISSING_SERVICE\n assert response.status_code == 404\n\n def test_metadata_service_self_tests_test_get(self):\n old_prior_test_results = HasSelfTests.prior_test_results\n HasSelfTests.prior_test_results = self.mock_prior_test_results\n metadata_service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.NYT,\n goal=ExternalIntegration.METADATA_GOAL,\n )\n # Make sure that HasSelfTest.prior_test_results() was called and that\n # it is in the response's self tests object.\n with self.request_context_with_admin(\"/\"):\n response = self.manager.admin_metadata_service_self_tests_controller.process_metadata_service_self_tests(\n metadata_service.id\n )\n response_metadata_service = response.get(\"self_test_results\")\n\n assert response_metadata_service.get(\"id\") == metadata_service.id\n assert response_metadata_service.get(\"name\") == metadata_service.name\n assert (\n response_metadata_service.get(\"protocol\").get(\"label\")\n == NYTBestSellerAPI.NAME\n )\n assert response_metadata_service.get(\"goal\") == metadata_service.goal\n assert (\n response_metadata_service.get(\"self_test_results\")\n == HasSelfTests.prior_test_results()\n )\n HasSelfTests.prior_test_results = old_prior_test_results\n\n def test_metadata_service_self_tests_post(self):\n old_run_self_tests = HasSelfTests.run_self_tests\n HasSelfTests.run_self_tests = self.mock_run_self_tests\n\n metadata_service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.NYT,\n goal=ExternalIntegration.METADATA_GOAL,\n )\n m = (\n self.manager.admin_metadata_service_self_tests_controller.self_tests_process_post\n )\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n response = m(metadata_service.id)\n assert response._status == \"200 OK\"\n assert \"Successfully ran new self tests\" == response.get_data(as_text=True)\n\n positional, keyword = self.run_self_tests_called_with\n # run_self_tests was called with positional arguments:\n # * The database connection\n # * The method to call to instantiate a HasSelfTests implementation\n # (NYTBestSellerAPI.from_config)\n # * The database connection again (to be passed into\n # NYTBestSellerAPI.from_config).\n assert (self._db, 
NYTBestSellerAPI.from_config, self._db) == positional\n\n # run_self_tests was not called with any keyword arguments.\n assert {} == keyword\n\n # Undo the mock.\n HasSelfTests.run_self_tests = old_run_self_tests\n", "id": "12037523", "language": "Python", "matching_score": 1.867396354675293, "max_stars_count": 0, "path": "tests/api/admin/controller/test_metadata_service_self_tests.py" }, { "content": "import flask\nfrom flask import Response\n\nfrom api.admin.problem_details import *\nfrom api.google_analytics_provider import GoogleAnalyticsProvider\nfrom core.local_analytics_provider import LocalAnalyticsProvider\nfrom core.model import ExternalIntegration\nfrom core.util.problem_detail import ProblemDetail\n\nfrom . import SettingsController\n\n\nclass AnalyticsServicesController(SettingsController):\n def __init__(self, manager):\n super(AnalyticsServicesController, self).__init__(manager)\n provider_apis = [\n GoogleAnalyticsProvider,\n LocalAnalyticsProvider,\n ]\n self.protocols = self._get_integration_protocols(provider_apis)\n self.goal = ExternalIntegration.ANALYTICS_GOAL\n\n def process_analytics_services(self):\n if flask.request.method == \"GET\":\n return self.process_get()\n else:\n return self.process_post()\n\n def process_get(self):\n if flask.request.method == \"GET\":\n services = self._get_integration_info(self.goal, self.protocols)\n # Librarians should be able to see, but not modify local analytics services.\n # Setting the level to 2 will communicate that to the front end.\n for x in services:\n if x[\"protocol\"] == \"core.local_analytics_provider\":\n x[\"level\"] = 2\n return dict(\n analytics_services=services,\n protocols=self.protocols,\n )\n\n def process_post(self):\n name = flask.request.form.get(\"name\")\n protocol = flask.request.form.get(\"protocol\")\n url = flask.request.form.get(\"url\")\n fields = {\"name\": name, \"protocol\": protocol, \"url\": url}\n\n # Don't let librarians create local analytics services.\n if protocol == \"core.local_analytics_provider\":\n self.require_higher_than_librarian()\n\n form_field_error = self.validate_form_fields(**fields)\n if form_field_error:\n return form_field_error\n\n is_new = False\n id = flask.request.form.get(\"id\")\n\n if id:\n # Find an existing service in order to edit it\n service = self.look_up_service_by_id(id, protocol)\n else:\n service, is_new = self._create_integration(\n self.protocols, protocol, self.goal\n )\n\n if isinstance(service, ProblemDetail):\n self._db.rollback()\n return service\n\n name_error = self.check_name_unique(service, name)\n if name_error:\n self._db.rollback()\n return name_error\n\n protocol_error = self.set_protocols(service, protocol)\n if protocol_error:\n self._db.rollback()\n return protocol_error\n\n service.name = name\n if is_new:\n return Response(str(service.id), 201)\n else:\n return Response(str(service.id), 200)\n\n def validate_form_fields(self, **fields):\n \"\"\"The 'name' and 'URL' fields cannot be blank, the URL must be valid,\n and the protocol must be selected from the list of recognized protocols.\"\"\"\n\n name = fields.get(\"name\")\n protocol = fields.get(\"protocol\")\n url = fields.get(\"url\")\n\n if not name:\n return MISSING_ANALYTICS_NAME\n if protocol:\n error = self.validate_protocol()\n if error:\n return error\n else:\n wrong_format = self.validate_formats()\n if wrong_format:\n return wrong_format\n\n # The URL is only relevant, and required, if the user is creating a Google Analytics\n # integration; the local analytics form doesn't 
have a URL field.\n if \"url\" in list(flask.request.form.keys()) and not url:\n return INCOMPLETE_CONFIGURATION\n\n def process_delete(self, service_id):\n return self._delete_integration(service_id, self.goal)\n", "id": "5024576", "language": "Python", "matching_score": 4.790971279144287, "max_stars_count": 0, "path": "api/admin/controller/analytics_services.py" }, { "content": "import flask\nfrom flask import Response\nfrom flask_babel import lazy_gettext as _\n\nfrom api.admin.problem_details import *\nfrom core.marc import MARCExporter\nfrom core.model import ExternalIntegration, get_one, get_one_or_create\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.s3 import S3UploaderConfiguration\nfrom core.util.problem_detail import ProblemDetail\n\nfrom . import SettingsController\n\n\nclass CatalogServicesController(SettingsController):\n def __init__(self, manager):\n super(CatalogServicesController, self).__init__(manager)\n service_apis = [MARCExporter]\n self.protocols = self._get_integration_protocols(\n service_apis, protocol_name_attr=\"NAME\"\n )\n self.update_protocol_settings()\n\n def update_protocol_settings(self):\n self.protocols[0][\"settings\"] = [MARCExporter.get_storage_settings(self._db)]\n\n def process_catalog_services(self):\n self.require_system_admin()\n\n if flask.request.method == \"GET\":\n return self.process_get()\n else:\n return self.process_post()\n\n def process_get(self):\n services = self._get_integration_info(\n ExternalIntegration.CATALOG_GOAL, self.protocols\n )\n self.update_protocol_settings()\n return dict(\n catalog_services=services,\n protocols=self.protocols,\n )\n\n def process_post(self):\n protocol = flask.request.form.get(\"protocol\")\n is_new = False\n error = self.validate_form_fields(protocol)\n if error:\n return error\n\n id = flask.request.form.get(\"id\")\n if id:\n # Find an existing service to edit\n service = get_one(\n self._db,\n ExternalIntegration,\n id=id,\n goal=ExternalIntegration.CATALOG_GOAL,\n )\n if not service:\n return MISSING_SERVICE\n if protocol != service.protocol:\n return CANNOT_CHANGE_PROTOCOL\n else:\n # Create a new service\n service, is_new = self._create_integration(\n self.protocols,\n protocol,\n ExternalIntegration.CATALOG_GOAL,\n )\n if isinstance(service, ProblemDetail):\n return service\n\n name = self.get_name(service)\n if isinstance(name, ProblemDetail):\n self._db.rollback()\n return name\n elif name:\n service.name = name\n\n [protocol] = [p for p in self.protocols if p.get(\"name\") == protocol]\n\n result = self._set_integration_settings_and_libraries(service, protocol)\n if isinstance(result, ProblemDetail):\n return result\n\n external_integration_link = self._set_external_integration_link(service)\n if isinstance(external_integration_link, ProblemDetail):\n return external_integration_link\n\n library_error = self.check_libraries(service)\n if library_error:\n self._db.rollback()\n return library_error\n\n if is_new:\n return Response(str(service.id), 201)\n else:\n return Response(str(service.id), 200)\n\n def _set_external_integration_link(self, service):\n \"\"\"Either set or delete the external integration link between the\n service and the storage integration.\n \"\"\"\n mirror_integration_id = flask.request.form.get(\"mirror_integration_id\")\n\n # If no storage integration was selected, then delete the existing\n # external integration link.\n current_integration_link, ignore = get_one_or_create(\n self._db,\n ExternalIntegrationLink,\n library_id=None,\n 
external_integration_id=service.id,\n purpose=ExternalIntegrationLink.MARC,\n )\n\n if mirror_integration_id == self.NO_MIRROR_INTEGRATION:\n if current_integration_link:\n self._db.delete(current_integration_link)\n else:\n storage_integration = get_one(\n self._db, ExternalIntegration, id=mirror_integration_id\n )\n # Only get storage integrations that have a MARC file option set\n if (\n not storage_integration\n or not storage_integration.setting(\n S3UploaderConfiguration.MARC_BUCKET_KEY\n ).value\n ):\n return MISSING_INTEGRATION\n current_integration_link.other_integration_id = storage_integration.id\n\n def validate_form_fields(self, protocol):\n \"\"\"Verify that the protocol which the user has selected is in the list\n of recognized protocol options.\"\"\"\n\n if protocol and protocol not in [p.get(\"name\") for p in self.protocols]:\n return UNKNOWN_PROTOCOL\n\n def get_name(self, service):\n \"\"\"Check that there isn't already a service with this name\"\"\"\n\n name = flask.request.form.get(\"name\")\n if name:\n if service.name != name:\n service_with_name = get_one(self._db, ExternalIntegration, name=name)\n if service_with_name:\n return INTEGRATION_NAME_ALREADY_IN_USE\n return name\n\n def check_libraries(self, service):\n \"\"\"Check that no library ended up with multiple MARC export integrations.\"\"\"\n\n for library in service.libraries:\n marc_export_count = 0\n for integration in library.integrations:\n if (\n integration.goal == ExternalIntegration.CATALOG_GOAL\n and integration.protocol == ExternalIntegration.MARC_EXPORT\n ):\n marc_export_count += 1\n if marc_export_count > 1:\n return MULTIPLE_SERVICES_FOR_LIBRARY.detailed(\n _(\n \"You tried to add a MARC export service to %(library)s, but it already has one.\",\n library=library.short_name,\n )\n )\n\n def process_delete(self, service_id):\n return self._delete_integration(service_id, ExternalIntegration.CATALOG_GOAL)\n", "id": "7276881", "language": "Python", "matching_score": 5.257748603820801, "max_stars_count": 0, "path": "api/admin/controller/catalog_services.py" }, { "content": "import json\n\nimport flask\nimport pytest\nfrom werkzeug.datastructures import MultiDict\n\nfrom api.admin.exceptions import *\nfrom core.marc import MARCExporter\nfrom core.model import (\n AdminRole,\n ConfigurationSetting,\n ExternalIntegration,\n create,\n get_one,\n)\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.s3 import S3UploaderConfiguration\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestCatalogServicesController(SettingsControllerTest):\n def test_catalog_services_get_with_no_services(self):\n with self.request_context_with_admin(\"/\"):\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.get(\"catalog_services\") == []\n protocols = response.get(\"protocols\")\n assert 1 == len(protocols)\n assert MARCExporter.NAME == protocols[0].get(\"name\")\n assert \"settings\" in protocols[0]\n assert \"library_settings\" in protocols[0]\n\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n self._db.flush()\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_catalog_services_controller.process_catalog_services,\n )\n\n def test_catalog_services_get_with_marc_exporter(self):\n integration, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.MARC_EXPORT,\n goal=ExternalIntegration.CATALOG_GOAL,\n name=\"name\",\n )\n integration.libraries += [self._default_library]\n 
ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n MARCExporter.MARC_ORGANIZATION_CODE,\n self._default_library,\n integration,\n ).value = \"US-MaBoDPL\"\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n MARCExporter.INCLUDE_SUMMARY,\n self._default_library,\n integration,\n ).value = \"false\"\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n MARCExporter.INCLUDE_SIMPLIFIED_GENRES,\n self._default_library,\n integration,\n ).value = \"true\"\n\n with self.request_context_with_admin(\"/\"):\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n [service] = response.get(\"catalog_services\")\n assert integration.id == service.get(\"id\")\n assert integration.name == service.get(\"name\")\n assert integration.protocol == service.get(\"protocol\")\n [library] = service.get(\"libraries\")\n assert self._default_library.short_name == library.get(\"short_name\")\n assert \"US-MaBoDPL\" == library.get(MARCExporter.MARC_ORGANIZATION_CODE)\n assert \"false\" == library.get(MARCExporter.INCLUDE_SUMMARY)\n assert \"true\" == library.get(MARCExporter.INCLUDE_SIMPLIFIED_GENRES)\n\n def test_catalog_services_post_errors(self):\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"protocol\", \"Unknown\"),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response == UNKNOWN_PROTOCOL\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"id\", \"123\"),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response == MISSING_SERVICE\n\n service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=\"fake protocol\",\n goal=ExternalIntegration.CATALOG_GOAL,\n name=\"name\",\n )\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"id\", service.id),\n (\"protocol\", ExternalIntegration.MARC_EXPORT),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response == CANNOT_CHANGE_PROTOCOL\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"name\", service.name),\n (\"protocol\", ExternalIntegration.MARC_EXPORT),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response == INTEGRATION_NAME_ALREADY_IN_USE\n\n service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.MARC_EXPORT,\n goal=ExternalIntegration.CATALOG_GOAL,\n )\n\n # Attempt to set an S3 mirror external integration but it does not exist!\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n ME = MARCExporter\n flask.request.form = MultiDict(\n [\n (\"name\", \"exporter name\"),\n (\"id\", service.id),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", \"1234\"),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.uri == MISSING_INTEGRATION.uri\n\n s3, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.S3,\n goal=ExternalIntegration.STORAGE_GOAL,\n )\n\n # Now an S3 integration exists, but it has no MARC bucket configured.\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n ME = MARCExporter\n flask.request.form 
= MultiDict(\n [\n (\"name\", \"exporter name\"),\n (\"id\", service.id),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", s3.id),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.uri == MISSING_INTEGRATION.uri\n\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n self._db.flush()\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"name\", \"new name\"),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", s3.id),\n ]\n )\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_catalog_services_controller.process_catalog_services,\n )\n\n # This should be the last test to check since rolling back database\n # changes in the test can cause it to crash.\n s3.setting(S3UploaderConfiguration.MARC_BUCKET_KEY).value = \"marc-files\"\n service.libraries += [self._default_library]\n self.admin.add_role(AdminRole.SYSTEM_ADMIN)\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n ME = MARCExporter\n flask.request.form = MultiDict(\n [\n (\"name\", \"new name\"),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", s3.id),\n (\n \"libraries\",\n json.dumps(\n [\n {\n \"short_name\": self._default_library.short_name,\n ME.INCLUDE_SUMMARY: \"false\",\n ME.INCLUDE_SIMPLIFIED_GENRES: \"true\",\n }\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.uri == MULTIPLE_SERVICES_FOR_LIBRARY.uri\n\n def test_catalog_services_post_create(self):\n ME = MARCExporter\n\n s3, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.S3,\n goal=ExternalIntegration.STORAGE_GOAL,\n )\n s3.setting(S3UploaderConfiguration.MARC_BUCKET_KEY).value = \"marc-files\"\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"name\", \"exporter name\"),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", s3.id),\n (\n \"libraries\",\n json.dumps(\n [\n {\n \"short_name\": self._default_library.short_name,\n ME.INCLUDE_SUMMARY: \"false\",\n ME.INCLUDE_SIMPLIFIED_GENRES: \"true\",\n }\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.status_code == 201\n\n service = get_one(\n self._db, ExternalIntegration, goal=ExternalIntegration.CATALOG_GOAL\n )\n # There was one S3 integration and it was selected. 
The service has an\n # External Integration Link to the storage integration that is created\n # in a POST with purpose of ExternalIntegrationLink.MARC.\n integration_link = get_one(\n self._db,\n ExternalIntegrationLink,\n external_integration_id=service.id,\n purpose=ExternalIntegrationLink.MARC,\n )\n\n assert service.id == int(response.get_data())\n assert ME.NAME == service.protocol\n assert \"exporter name\" == service.name\n assert [self._default_library] == service.libraries\n # We expect the Catalog external integration to have a link to the\n # S3 storage external integration\n assert s3.id == integration_link.other_integration_id\n assert (\n \"false\"\n == ConfigurationSetting.for_library_and_externalintegration(\n self._db, ME.INCLUDE_SUMMARY, self._default_library, service\n ).value\n )\n assert (\n \"true\"\n == ConfigurationSetting.for_library_and_externalintegration(\n self._db, ME.INCLUDE_SIMPLIFIED_GENRES, self._default_library, service\n ).value\n )\n\n def test_catalog_services_post_edit(self):\n ME = MARCExporter\n\n s3, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.S3,\n goal=ExternalIntegration.STORAGE_GOAL,\n )\n s3.setting(S3UploaderConfiguration.MARC_BUCKET_KEY).value = \"marc-files\"\n\n service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ME.NAME,\n goal=ExternalIntegration.CATALOG_GOAL,\n name=\"name\",\n )\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"name\", \"exporter name\"),\n (\"id\", service.id),\n (\"protocol\", ME.NAME),\n (\"mirror_integration_id\", s3.id),\n (\n \"libraries\",\n json.dumps(\n [\n {\n \"short_name\": self._default_library.short_name,\n ME.INCLUDE_SUMMARY: \"false\",\n ME.INCLUDE_SIMPLIFIED_GENRES: \"true\",\n }\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_catalog_services_controller.process_catalog_services()\n )\n assert response.status_code == 200\n\n integration_link = get_one(\n self._db,\n ExternalIntegrationLink,\n external_integration_id=service.id,\n purpose=ExternalIntegrationLink.MARC,\n )\n assert service.id == int(response.get_data())\n assert ME.NAME == service.protocol\n assert \"exporter name\" == service.name\n assert s3.id == integration_link.other_integration_id\n assert [self._default_library] == service.libraries\n assert (\n \"false\"\n == ConfigurationSetting.for_library_and_externalintegration(\n self._db, ME.INCLUDE_SUMMARY, self._default_library, service\n ).value\n )\n assert (\n \"true\"\n == ConfigurationSetting.for_library_and_externalintegration(\n self._db, ME.INCLUDE_SIMPLIFIED_GENRES, self._default_library, service\n ).value\n )\n\n def test_catalog_services_delete(self):\n ME = MARCExporter\n service, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ME.NAME,\n goal=ExternalIntegration.CATALOG_GOAL,\n name=\"name\",\n )\n\n with self.request_context_with_admin(\"/\", method=\"DELETE\"):\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_catalog_services_controller.process_delete,\n service.id,\n )\n\n self.admin.add_role(AdminRole.SYSTEM_ADMIN)\n response = self.manager.admin_catalog_services_controller.process_delete(\n service.id\n )\n assert response.status_code == 200\n\n service = get_one(self._db, ExternalIntegration, id=service.id)\n assert None == service\n", "id": "11329646", "language": "Python", "matching_score": 3.7742888927459717, "max_stars_count": 0, "path": 
"tests/api/admin/controller/test_catalog_services.py" }, { "content": "from api.admin.controller.storage_services import StorageServicesController\nfrom core.model import ExternalIntegration\nfrom core.s3 import S3Uploader\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestStorageServices(SettingsControllerTest):\n def test_storage_service_management(self):\n class MockStorage(StorageServicesController):\n def _get_integration_protocols(self, apis, protocol_name_attr):\n self.manage_called_with = (apis, protocol_name_attr)\n\n def _delete_integration(self, *args):\n self.delete_called_with = args\n\n controller = MockStorage(self.manager)\n EI = ExternalIntegration\n with self.request_context_with_admin(\"/\"):\n controller.process_services()\n (apis, procotol_name) = controller.manage_called_with\n\n assert S3Uploader in apis\n assert procotol_name == \"NAME\"\n\n with self.request_context_with_admin(\"/\"):\n id = object()\n controller.process_delete(id)\n assert (id, EI.STORAGE_GOAL) == controller.delete_called_with\n", "id": "7279636", "language": "Python", "matching_score": 1.8478899002075195, "max_stars_count": 0, "path": "tests/api/admin/controller/test_storage_services.py" }, { "content": "from api.saml.provider import SAMLWebSSOAuthenticationProvider\nfrom core.model import ExternalIntegration\nfrom tests.api.test_controller import ControllerTest as BaseControllerTest\n\n\nclass ControllerTest(BaseControllerTest):\n def setup_method(self):\n self._integration = None\n super(ControllerTest, self).setup_method()\n\n self._integration = self._external_integration(\n protocol=SAMLWebSSOAuthenticationProvider.NAME,\n goal=ExternalIntegration.PATRON_AUTH_GOAL,\n )\n", "id": "5211845", "language": "Python", "matching_score": 2.5222363471984863, "max_stars_count": 0, "path": "tests/api/saml/controller_test.py" }, { "content": "from api.lcp.collection import LCPAPI\nfrom core.model import ExternalIntegration\nfrom core.testing import DatabaseTest as BaseDatabaseTest\n\n\nclass DatabaseTest(BaseDatabaseTest):\n def setup_method(self):\n self._integration = None\n self._authentication_provider = None\n\n super(DatabaseTest, self).setup_method()\n\n self._integration = self._external_integration(\n protocol=LCPAPI.NAME, goal=ExternalIntegration.LICENSE_GOAL\n )\n", "id": "6065779", "language": "Python", "matching_score": 0.1925155073404312, "max_stars_count": 0, "path": "tests/api/lcp/database_test.py" }, { "content": "import datetime\nimport json\nimport os\n\nimport pytest\nfrom freezegun import freeze_time\nfrom webpub_manifest_parser.core.ast import PresentationMetadata\nfrom webpub_manifest_parser.odl.ast import ODLPublication\nfrom webpub_manifest_parser.odl.semantic import (\n ODL_PUBLICATION_MUST_CONTAIN_EITHER_LICENSES_OR_OA_ACQUISITION_LINK_ERROR,\n)\n\nfrom api.odl2 import ODL2API, ODL2APIConfiguration, ODL2Importer\nfrom core.coverage import CoverageFailure\nfrom core.model import (\n Contribution,\n Contributor,\n DeliveryMechanism,\n Edition,\n EditionConstants,\n LicensePool,\n MediaTypes,\n Work,\n)\nfrom core.model.configuration import ConfigurationFactory, ConfigurationStorage\nfrom tests.api.test_odl import LicenseHelper, LicenseInfoHelper, TestODLImporter\n\n\nclass TestODL2Importer(TestODLImporter):\n base_path = os.path.split(__file__)[0]\n resource_path = os.path.join(base_path, \"files\", \"odl2\")\n\n @staticmethod\n def _get_delivery_mechanism_by_drm_scheme_and_content_type(\n delivery_mechanisms, content_type, drm_scheme\n ):\n \"\"\"Find a 
license pool in the list by its identifier.\n\n :param delivery_mechanisms: List of delivery mechanisms\n :type delivery_mechanisms: List[DeliveryMechanism]\n\n :param content_type: Content type\n :type content_type: str\n\n :param drm_scheme: DRM scheme\n :type drm_scheme: str\n\n :return: Delivery mechanism with the the specified DRM scheme and content type (if any)\n :rtype: Optional[DeliveryMechanism]\n \"\"\"\n for delivery_mechanism in delivery_mechanisms:\n delivery_mechanism = delivery_mechanism.delivery_mechanism\n\n if (\n delivery_mechanism.drm_scheme == drm_scheme\n and delivery_mechanism.content_type == content_type\n ):\n return delivery_mechanism\n\n return None\n\n @pytest.fixture\n def integration_protocol(self):\n return ODL2API.NAME\n\n @pytest.fixture()\n def importer(self, collection, db, mock_get, metadata_client) -> ODL2Importer:\n return ODL2Importer(\n db,\n collection=collection,\n http_get=mock_get.get,\n metadata_client=metadata_client,\n )\n\n @pytest.fixture()\n def feed_template(self):\n return \"feed_template.json.jinja\"\n\n @freeze_time(\"2016-01-01T00:00:00+00:00\")\n def test_import(self, importer, mock_get, datasource, db):\n \"\"\"Ensure that ODL2Importer2 correctly processes and imports the ODL feed encoded using OPDS 2.x.\n\n NOTE: `freeze_time` decorator is required to treat the licenses in the ODL feed as non-expired.\n \"\"\"\n # Arrange\n moby_dick_license = LicenseInfoHelper(\n license=LicenseHelper(\n identifier=\"urn:uuid:f7847120-fc6f-11e3-8158-56847afe9799\",\n concurrency=10,\n checkouts=30,\n expires=\"2016-04-25T12:25:21+02:00\",\n ),\n left=30,\n available=10,\n )\n\n mock_get.add(moby_dick_license)\n feed = self.get_data(\"feed.json\")\n\n configuration_storage = ConfigurationStorage(importer)\n configuration_factory = ConfigurationFactory()\n\n with configuration_factory.create(\n configuration_storage, db, ODL2APIConfiguration\n ) as configuration:\n configuration.skipped_license_formats = json.dumps([\"text/html\"])\n\n # Act\n imported_editions, pools, works, failures = importer.import_from_feed(feed)\n\n # Assert\n\n # 1. 
Make sure that there is a single edition only\n assert isinstance(imported_editions, list)\n assert 1 == len(imported_editions)\n\n [moby_dick_edition] = imported_editions\n assert isinstance(moby_dick_edition, Edition)\n assert moby_dick_edition.primary_identifier.identifier == \"978-3-16-148410-0\"\n assert moby_dick_edition.primary_identifier.type == \"ISBN\"\n\n assert u\"Moby-Dick\" == moby_dick_edition.title\n assert u\"eng\" == moby_dick_edition.language\n assert u\"eng\" == moby_dick_edition.language\n assert EditionConstants.BOOK_MEDIUM == moby_dick_edition.medium\n assert u\"<NAME>\" == moby_dick_edition.author\n\n assert 1 == len(moby_dick_edition.author_contributors)\n [moby_dick_author] = moby_dick_edition.author_contributors\n assert isinstance(moby_dick_author, Contributor)\n assert u\"<NAME>\" == moby_dick_author.display_name\n assert u\"<NAME>\" == moby_dick_author.sort_name\n\n assert 1 == len(moby_dick_author.contributions)\n [moby_dick_author_author_contribution] = moby_dick_author.contributions\n assert isinstance(moby_dick_author_author_contribution, Contribution)\n assert moby_dick_author == moby_dick_author_author_contribution.contributor\n assert moby_dick_edition == moby_dick_author_author_contribution.edition\n assert Contributor.AUTHOR_ROLE == moby_dick_author_author_contribution.role\n\n assert datasource == moby_dick_edition.data_source\n\n assert u\"Test Publisher\" == moby_dick_edition.publisher\n assert datetime.date(2015, 9, 29) == moby_dick_edition.published\n\n assert u\"http://example.org/cover.jpg\" == moby_dick_edition.cover_full_url\n assert (\n u\"http://example.org/cover-small.jpg\"\n == moby_dick_edition.cover_thumbnail_url\n )\n\n # 2. Make sure that license pools have correct configuration\n assert isinstance(pools, list)\n assert 1 == len(pools)\n\n [moby_dick_license_pool] = pools\n assert isinstance(moby_dick_license_pool, LicensePool)\n assert moby_dick_license_pool.identifier.identifier == \"978-3-16-148410-0\"\n assert moby_dick_license_pool.identifier.type == \"ISBN\"\n assert not moby_dick_license_pool.open_access\n assert 30 == moby_dick_license_pool.licenses_owned\n assert 10 == moby_dick_license_pool.licenses_available\n\n assert 2 == len(moby_dick_license_pool.delivery_mechanisms)\n\n moby_dick_epub_adobe_drm_delivery_mechanism = (\n self._get_delivery_mechanism_by_drm_scheme_and_content_type(\n moby_dick_license_pool.delivery_mechanisms,\n MediaTypes.EPUB_MEDIA_TYPE,\n DeliveryMechanism.ADOBE_DRM,\n )\n )\n assert moby_dick_epub_adobe_drm_delivery_mechanism is not None\n\n moby_dick_epub_lcp_drm_delivery_mechanism = (\n self._get_delivery_mechanism_by_drm_scheme_and_content_type(\n moby_dick_license_pool.delivery_mechanisms,\n MediaTypes.EPUB_MEDIA_TYPE,\n DeliveryMechanism.LCP_DRM,\n )\n )\n assert moby_dick_epub_lcp_drm_delivery_mechanism is not None\n\n assert 1 == len(moby_dick_license_pool.licenses)\n [moby_dick_license] = moby_dick_license_pool.licenses\n assert (\n \"urn:uuid:f7847120-fc6f-11e3-8158-56847afe9799\"\n == moby_dick_license.identifier\n )\n assert (\n \"http://www.example.com/get{?id,checkout_id,expires,patron_id,passphrase,hint,hint_url,notification_url}\"\n == moby_dick_license.checkout_url\n )\n assert \"http://www.example.com/status/294024\" == moby_dick_license.status_url\n assert (\n datetime.datetime(2016, 4, 25, 10, 25, 21, tzinfo=datetime.timezone.utc)\n == moby_dick_license.expires\n )\n assert 30 == moby_dick_license.checkouts_left\n assert 10 == moby_dick_license.checkouts_available\n\n # 3. 
Make sure that work objects contain all the required metadata\n assert isinstance(works, list)\n assert 1 == len(works)\n\n [moby_dick_work] = works\n assert isinstance(moby_dick_work, Work)\n assert moby_dick_edition == moby_dick_work.presentation_edition\n assert 1 == len(moby_dick_work.license_pools)\n assert moby_dick_license_pool == moby_dick_work.license_pools[0]\n\n # 4. Make sure that the failure is covered\n assert 1 == len(failures)\n huck_finn_failures = failures[\"9781234567897\"]\n\n assert 1 == len(huck_finn_failures)\n [huck_finn_failure] = huck_finn_failures\n assert isinstance(huck_finn_failure, CoverageFailure)\n assert \"9781234567897\" == huck_finn_failure.obj.identifier\n\n huck_finn_semantic_error = (\n ODL_PUBLICATION_MUST_CONTAIN_EITHER_LICENSES_OR_OA_ACQUISITION_LINK_ERROR(\n node=ODLPublication(\n metadata=PresentationMetadata(identifier=\"urn:isbn:9781234567897\")\n ),\n node_property=None,\n )\n )\n assert str(huck_finn_semantic_error) == huck_finn_failure.exception\n\n @freeze_time(\"2016-01-01T00:00:00+00:00\")\n def test_import_audiobook(self, importer, mock_get, datasource, db):\n \"\"\"Ensure that ODL2Importer2 correctly processes and imports a feed with an audiobook.\"\"\"\n license = self.get_data(\"license-audiobook.json\")\n feed = self.get_data(\"feed-audiobook.json\")\n mock_get.add(license)\n\n configuration_storage = ConfigurationStorage(importer)\n configuration_factory = ConfigurationFactory()\n\n with configuration_factory.create(\n configuration_storage, db, ODL2APIConfiguration\n ) as configuration:\n configuration.skipped_license_formats = json.dumps([\"text/html\"])\n\n imported_editions, pools, works, failures = importer.import_from_feed(feed)\n\n # Make sure we imported one edition and it is an audiobook\n assert isinstance(imported_editions, list)\n assert 1 == len(imported_editions)\n\n [edition] = imported_editions\n assert isinstance(edition, Edition)\n assert edition.primary_identifier.identifier == \"9780792766919\"\n assert edition.primary_identifier.type == \"ISBN\"\n assert EditionConstants.AUDIO_MEDIUM == edition.medium\n\n # Make sure that license pools have correct configuration\n assert isinstance(pools, list)\n assert 1 == len(pools)\n\n [license_pool] = pools\n assert not license_pool.open_access\n assert 1 == license_pool.licenses_owned\n assert 1 == license_pool.licenses_available\n\n assert 2 == len(license_pool.delivery_mechanisms)\n\n lcp_delivery_mechanism = (\n self._get_delivery_mechanism_by_drm_scheme_and_content_type(\n license_pool.delivery_mechanisms,\n MediaTypes.AUDIOBOOK_PACKAGE_LCP_MEDIA_TYPE,\n DeliveryMechanism.LCP_DRM,\n )\n )\n assert lcp_delivery_mechanism is not None\n\n feedbooks_delivery_mechanism = (\n self._get_delivery_mechanism_by_drm_scheme_and_content_type(\n license_pool.delivery_mechanisms,\n MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,\n DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,\n )\n )\n assert feedbooks_delivery_mechanism is not None\n", "id": "9471423", "language": "Python", "matching_score": 4.971029281616211, "max_stars_count": 0, "path": "tests/api/test_odl2.py" }, { "content": "import datetime\nimport os\n\nfrom parameterized import parameterized\nfrom webpub_manifest_parser.opds2 import OPDS2FeedParserFactory\n\nfrom core.model import (\n Contribution,\n Contributor,\n DataSource,\n DeliveryMechanism,\n Edition,\n EditionConstants,\n LicensePool,\n MediaTypes,\n Work,\n)\nfrom core.opds2_import import OPDS2Importer, RWPMManifestParser\n\nfrom .test_opds_import import OPDSTest\n\n\nclass 
OPDS2Test(OPDSTest):\n @staticmethod\n def _get_edition_by_identifier(editions, identifier):\n \"\"\"Find an edition in the list by its identifier.\n\n :param editions: List of editions\n :type editions: List[Edition]\n\n :return: Edition with the specified id (if any)\n :rtype: Optional[Edition]\n \"\"\"\n for edition in editions:\n if edition.primary_identifier.urn == identifier:\n return edition\n\n return None\n\n @staticmethod\n def _get_license_pool_by_identifier(pools, identifier):\n \"\"\"Find a license pool in the list by its identifier.\n\n :param pools: List of license pools\n :type pools: List[LicensePool]\n\n :return: License pool with the specified id (if any)\n :rtype: Optional[LicensePool]\n \"\"\"\n for pool in pools:\n if pool.identifier.urn == identifier:\n return pool\n\n return None\n\n @staticmethod\n def _get_work_by_identifier(works, identifier):\n \"\"\"Find a work in the list by its identifier.\n\n :param works: List of works\n :type works: List[Work]\n\n :return: Work with the specified id (if any)\n :rtype: Optional[Work]\n \"\"\"\n for work in works:\n if work.presentation_edition.primary_identifier.urn == identifier:\n return work\n\n return None\n\n\nclass TestOPDS2Importer(OPDS2Test):\n def sample_opds(self, filename, file_type=\"r\"):\n base_path = os.path.split(__file__)[0]\n resource_path = os.path.join(base_path, \"files\", \"opds2\")\n # Read the fixture with a context manager so the file handle is closed promptly.\n with open(os.path.join(resource_path, filename)) as opds_file:\n return opds_file.read()\n\n @parameterized.expand(\n [\n (\"manifest encoded as a string\", \"string\"),\n (\"manifest encoded as a byte-string\", \"bytes\"),\n ]\n )\n def test(self, _, manifest_type):\n # Arrange\n collection = self._default_collection\n data_source = DataSource.lookup(\n self._db, \"OPDS 2.0 Data Source\", autocreate=True\n )\n\n collection.data_source = data_source\n\n importer = OPDS2Importer(\n self._db, collection, RWPMManifestParser(OPDS2FeedParserFactory())\n )\n content_server_feed = self.sample_opds(\"feed.json\")\n\n if manifest_type == \"bytes\":\n content_server_feed = content_server_feed.encode()\n\n # Act\n imported_editions, pools, works, failures = importer.import_from_feed(\n content_server_feed\n )\n\n # Assert\n\n # 1. Make sure that editions contain all required metadata\n assert isinstance(imported_editions, list)\n assert 2 == len(imported_editions)\n\n # 1.1. 
Edition with open-access links (Moby-Dick)\n moby_dick_edition = self._get_edition_by_identifier(\n imported_editions, \"urn:isbn:978-3-16-148410-0\"\n )\n assert isinstance(moby_dick_edition, Edition)\n\n assert \"Moby-Dick\" == moby_dick_edition.title\n assert \"eng\" == moby_dick_edition.language\n assert \"eng\" == moby_dick_edition.language\n assert EditionConstants.BOOK_MEDIUM == moby_dick_edition.medium\n assert \"<NAME>\" == moby_dick_edition.author\n\n assert 1 == len(moby_dick_edition.author_contributors)\n [moby_dick_author] = moby_dick_edition.author_contributors\n assert isinstance(moby_dick_author, Contributor)\n assert \"<NAME>\" == moby_dick_author.display_name\n assert \"<NAME>\" == moby_dick_author.sort_name\n\n assert 1 == len(moby_dick_author.contributions)\n [moby_dick_author_contribution] = moby_dick_author.contributions\n assert isinstance(moby_dick_author_contribution, Contribution)\n assert moby_dick_author == moby_dick_author_contribution.contributor\n assert moby_dick_edition == moby_dick_author_contribution.edition\n assert Contributor.AUTHOR_ROLE == moby_dick_author_contribution.role\n\n assert data_source == moby_dick_edition.data_source\n\n assert \"Test Publisher\" == moby_dick_edition.publisher\n assert datetime.date(2015, 9, 29) == moby_dick_edition.published\n\n assert \"http://example.org/cover.jpg\" == moby_dick_edition.cover_full_url\n assert (\n \"http://example.org/cover-small.jpg\"\n == moby_dick_edition.cover_thumbnail_url\n )\n\n # 1.2. Edition with non open-access acquisition links (Adventures of Huckleberry Finn)\n huckleberry_finn_edition = self._get_edition_by_identifier(\n imported_editions, \"urn:isbn:9781234567897\"\n )\n assert isinstance(huckleberry_finn_edition, Edition)\n\n assert \"Adventures of Huckleberry Finn\" == huckleberry_finn_edition.title\n assert \"eng\" == huckleberry_finn_edition.language\n assert EditionConstants.BOOK_MEDIUM == huckleberry_finn_edition.medium\n assert \"<NAME>, <NAME>\" == huckleberry_finn_edition.author\n\n assert 2 == len(huckleberry_finn_edition.author_contributors)\n huckleberry_finn_authors = huckleberry_finn_edition.author_contributors\n\n assert isinstance(huckleberry_finn_authors[0], Contributor)\n assert \"<NAME>\" == huckleberry_finn_authors[0].display_name\n assert \"<NAME>\" == huckleberry_finn_authors[0].sort_name\n\n assert 1 == len(huckleberry_finn_authors[0].contributions)\n [huckleberry_finn_author_contribution] = huckleberry_finn_authors[\n 0\n ].contributions\n assert isinstance(huckleberry_finn_author_contribution, Contribution)\n assert (\n huckleberry_finn_authors[0]\n == huckleberry_finn_author_contribution.contributor\n )\n assert huckleberry_finn_edition == huckleberry_finn_author_contribution.edition\n assert Contributor.AUTHOR_ROLE == huckleberry_finn_author_contribution.role\n\n assert isinstance(huckleberry_finn_authors[1], Contributor)\n assert \"<NAME>\" == huckleberry_finn_authors[1].display_name\n assert \"Clemens, <NAME>\" == huckleberry_finn_authors[1].sort_name\n\n assert 1 == len(huckleberry_finn_authors[1].contributions)\n [huckleberry_finn_author_contribution] = huckleberry_finn_authors[\n 1\n ].contributions\n assert isinstance(huckleberry_finn_author_contribution, Contribution)\n assert (\n huckleberry_finn_authors[1]\n == huckleberry_finn_author_contribution.contributor\n )\n assert huckleberry_finn_edition == huckleberry_finn_author_contribution.edition\n assert Contributor.AUTHOR_ROLE == huckleberry_finn_author_contribution.role\n\n assert data_source == 
huckleberry_finn_edition.data_source\n\n assert \"Test Publisher\" == huckleberry_finn_edition.publisher\n assert datetime.date(2014, 9, 28) == huckleberry_finn_edition.published\n\n assert \"http://example.org/cover.jpg\" == moby_dick_edition.cover_full_url\n\n # 2. Make sure that license pools have correct configuration\n assert isinstance(pools, list)\n assert 2 == len(pools)\n\n # 2.1. Edition with open-access links (Moby-Dick)\n moby_dick_license_pool = self._get_license_pool_by_identifier(\n pools, \"urn:isbn:978-3-16-148410-0\"\n )\n assert isinstance(moby_dick_license_pool, LicensePool)\n assert moby_dick_license_pool.open_access\n assert LicensePool.UNLIMITED_ACCESS == moby_dick_license_pool.licenses_owned\n assert LicensePool.UNLIMITED_ACCESS == moby_dick_license_pool.licenses_available\n\n assert 1 == len(moby_dick_license_pool.delivery_mechanisms)\n [moby_dick_delivery_mechanism] = moby_dick_license_pool.delivery_mechanisms\n assert (\n DeliveryMechanism.NO_DRM\n == moby_dick_delivery_mechanism.delivery_mechanism.drm_scheme\n )\n assert (\n MediaTypes.EPUB_MEDIA_TYPE\n == moby_dick_delivery_mechanism.delivery_mechanism.content_type\n )\n\n # 2.2. Edition with non open-access acquisition links (Adventures of Huckleberry Finn)\n huckleberry_finn_license_pool = self._get_license_pool_by_identifier(\n pools, \"urn:isbn:9781234567897\"\n )\n assert True == isinstance(huckleberry_finn_license_pool, LicensePool)\n assert False == huckleberry_finn_license_pool.open_access\n assert (\n LicensePool.UNLIMITED_ACCESS == huckleberry_finn_license_pool.licenses_owned\n )\n assert (\n LicensePool.UNLIMITED_ACCESS\n == huckleberry_finn_license_pool.licenses_available\n )\n\n assert 2 == len(huckleberry_finn_license_pool.delivery_mechanisms)\n huckleberry_finn_delivery_mechanisms = (\n huckleberry_finn_license_pool.delivery_mechanisms\n )\n\n assert (\n DeliveryMechanism.ADOBE_DRM\n == huckleberry_finn_delivery_mechanisms[0].delivery_mechanism.drm_scheme\n )\n assert (\n MediaTypes.EPUB_MEDIA_TYPE\n == huckleberry_finn_delivery_mechanisms[0].delivery_mechanism.content_type\n )\n\n assert (\n DeliveryMechanism.LCP_DRM\n == huckleberry_finn_delivery_mechanisms[1].delivery_mechanism.drm_scheme\n )\n assert (\n MediaTypes.EPUB_MEDIA_TYPE\n == huckleberry_finn_delivery_mechanisms[1].delivery_mechanism.content_type\n )\n\n # 3. Make sure that work objects contain all the required metadata\n assert isinstance(works, list)\n assert 2 == len(works)\n\n # 3.1. Edition with open-access links (Moby-Dick)\n moby_dick_work = self._get_work_by_identifier(\n works, \"urn:isbn:978-3-16-148410-0\"\n )\n assert isinstance(moby_dick_work, Work)\n assert moby_dick_edition == moby_dick_work.presentation_edition\n assert 1 == len(moby_dick_work.license_pools)\n assert moby_dick_license_pool == moby_dick_work.license_pools[0]\n\n # 3.2. 
Edition with open-access links (Moby-Dick)\n huckleberry_finn_work = self._get_work_by_identifier(\n works, \"urn:isbn:9781234567897\"\n )\n assert isinstance(huckleberry_finn_work, Work)\n assert huckleberry_finn_edition == huckleberry_finn_work.presentation_edition\n assert 1 == len(huckleberry_finn_work.license_pools)\n assert huckleberry_finn_license_pool == huckleberry_finn_work.license_pools[0]\n assert (\n \"Adventures of Huckleberry Finn is a novel by <NAME>, first published in the United Kingdom in \"\n \"December 1884 and in the United States in February 1885.\"\n == huckleberry_finn_work.summary_text\n )\n", "id": "7669923", "language": "Python", "matching_score": 1.3585785627365112, "max_stars_count": 0, "path": "tests/core/test_opds2_import.py" }, { "content": "import datetime\nimport random\n\nfrom api.monitor import (\n HoldReaper,\n IdlingAnnotationReaper,\n LoanlikeReaperMonitor,\n LoanReaper,\n)\nfrom api.odl import ODLAPI, SharedODLAPI\nfrom core.model import Annotation, DataSource, ExternalIntegration\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import utc_now\n\n\nclass TestLoanlikeReaperMonitor(DatabaseTest):\n \"\"\"Tests the loan and hold reapers.\"\"\"\n\n def test_source_of_truth_protocols(self):\n \"\"\"Verify that well-known source of truth protocols\n will be exempt from the reaper.\n \"\"\"\n for i in (\n ODLAPI.NAME,\n SharedODLAPI.NAME,\n ExternalIntegration.OPDS_FOR_DISTRIBUTORS,\n ):\n assert i in LoanlikeReaperMonitor.SOURCE_OF_TRUTH_PROTOCOLS\n\n def test_reaping(self):\n # This patron stopped using the circulation manager a long time\n # ago.\n inactive_patron = self._patron()\n\n # This patron is still using the circulation manager.\n current_patron = self._patron()\n\n # We're going to give these patrons some loans and holds.\n edition, open_access = self._edition(\n with_license_pool=True, with_open_access_download=True\n )\n\n not_open_access_1 = self._licensepool(\n edition, open_access=False, data_source_name=DataSource.OVERDRIVE\n )\n not_open_access_2 = self._licensepool(\n edition, open_access=False, data_source_name=DataSource.BIBLIOTHECA\n )\n not_open_access_3 = self._licensepool(\n edition, open_access=False, data_source_name=DataSource.AXIS_360\n )\n not_open_access_4 = self._licensepool(\n edition, open_access=False, data_source_name=DataSource.ODILO\n )\n\n # Here's a collection that is the source of truth for its\n # loans and holds, rather than mirroring loan and hold information\n # from some remote source.\n sot_collection = self._collection(\n \"Source of Truth\",\n protocol=random.choice(LoanReaper.SOURCE_OF_TRUTH_PROTOCOLS),\n )\n\n edition2 = self._edition(with_license_pool=False)\n\n sot_lp1 = self._licensepool(\n edition2,\n open_access=False,\n data_source_name=DataSource.OVERDRIVE,\n collection=sot_collection,\n )\n\n sot_lp2 = self._licensepool(\n edition2,\n open_access=False,\n data_source_name=DataSource.BIBLIOTHECA,\n collection=sot_collection,\n )\n\n now = utc_now()\n a_long_time_ago = now - datetime.timedelta(days=1000)\n not_very_long_ago = now - datetime.timedelta(days=60)\n even_longer = now - datetime.timedelta(days=2000)\n the_future = now + datetime.timedelta(days=1)\n\n # This loan has expired.\n not_open_access_1.loan_to(\n inactive_patron, start=even_longer, end=a_long_time_ago\n )\n\n # This hold expired without ever becoming a loan (that we saw).\n not_open_access_2.on_hold_to(\n inactive_patron, start=even_longer, end=a_long_time_ago\n )\n\n # This hold has no end date and is 
older than a year.\n not_open_access_3.on_hold_to(\n inactive_patron,\n start=a_long_time_ago,\n end=None,\n )\n\n # This loan has no end date and is older than 90 days.\n not_open_access_4.loan_to(\n inactive_patron,\n start=a_long_time_ago,\n end=None,\n )\n\n # This loan has no end date, but it's for an open-access work.\n open_access_loan, ignore = open_access.loan_to(\n inactive_patron,\n start=a_long_time_ago,\n end=None,\n )\n\n # This loan has not expired yet.\n not_open_access_1.loan_to(current_patron, start=now, end=the_future)\n\n # This hold has not expired yet.\n not_open_access_2.on_hold_to(current_patron, start=now, end=the_future)\n\n # This loan has no end date but is pretty recent.\n not_open_access_3.loan_to(current_patron, start=not_very_long_ago, end=None)\n\n # This hold has no end date but is pretty recent.\n not_open_access_4.on_hold_to(current_patron, start=not_very_long_ago, end=None)\n\n # Reapers will not touch loans or holds from the\n # source-of-truth collection, even ones that have 'obviously'\n # expired.\n sot_loan, ignore = sot_lp1.loan_to(\n inactive_patron, start=a_long_time_ago, end=a_long_time_ago\n )\n\n sot_hold, ignore = sot_lp2.on_hold_to(\n inactive_patron, start=a_long_time_ago, end=a_long_time_ago\n )\n\n assert 4 == len(inactive_patron.loans)\n assert 3 == len(inactive_patron.holds)\n\n assert 2 == len(current_patron.loans)\n assert 2 == len(current_patron.holds)\n\n # Now we fire up the loan reaper.\n monitor = LoanReaper(self._db)\n monitor.run()\n\n # All of the inactive patron's loans have been reaped,\n # except for the loans for which the circulation manager is the\n # source of truth (the SOT loan and the open-access loan),\n # which will never be reaped.\n #\n # Holds are unaffected.\n assert set([open_access_loan, sot_loan]) == set(inactive_patron.loans)\n assert 3 == len(inactive_patron.holds)\n\n # The active patron's loans and holds are unaffected, either\n # because they have not expired or because they have no known\n # expiration date and were created relatively recently.\n assert 2 == len(current_patron.loans)\n assert 2 == len(current_patron.holds)\n\n # Now fire up the hold reaper.\n monitor = HoldReaper(self._db)\n monitor.run()\n\n # All of the inactive patron's holds have been reaped,\n # except for the one from the source-of-truth collection.\n # The active patron is unaffected.\n assert [sot_hold] == inactive_patron.holds\n assert 2 == len(current_patron.holds)\n\n\nclass TestIdlingAnnotationReaper(DatabaseTest):\n def test_where_clause(self):\n\n # Two books.\n ignore, lp1 = self._edition(with_license_pool=True)\n ignore, lp2 = self._edition(with_license_pool=True)\n\n # Two patrons who sync their annotations.\n p1 = self._patron()\n p2 = self._patron()\n for p in [p1, p2]:\n p.synchronize_annotations = True\n now = utc_now()\n not_that_old = now - datetime.timedelta(days=59)\n very_old = now - datetime.timedelta(days=61)\n\n def _annotation(\n patron, pool, content, motivation=Annotation.IDLING, timestamp=very_old\n ):\n annotation, ignore = Annotation.get_one_or_create(\n self._db,\n patron=patron,\n identifier=pool.identifier,\n motivation=motivation,\n )\n annotation.timestamp = timestamp\n annotation.content = content\n return annotation\n\n # The first patron will not be affected by the\n # reaper. 
Although their annotations are very old, they have\n # an active loan for one book and a hold on the other.\n loan = lp1.loan_to(p1)\n old_loan = _annotation(p1, lp1, \"old loan\")\n\n hold = lp2.on_hold_to(p1)\n old_hold = _annotation(p1, lp2, \"old hold\")\n\n # The second patron has a very old annotation for the first\n # book. This is the only annotation that will be affected by\n # the reaper.\n reapable = _annotation(p2, lp1, \"abandoned\")\n\n # The second patron also has a very old non-idling annotation\n # for the first book, which will not be reaped because only\n # idling annotations are reaped.\n not_idling = _annotation(\n p2, lp1, \"not idling\", motivation=\"some other motivation\"\n )\n\n # The second patron has a non-old idling annotation for the\n # second book, which will not be reaped (even though there is\n # no active loan or hold) because it's not old enough.\n new_idling = _annotation(p2, lp2, \"recent\", timestamp=not_that_old)\n reaper = IdlingAnnotationReaper(self._db)\n qu = self._db.query(Annotation).filter(reaper.where_clause)\n assert [reapable] == qu.all()\n", "id": "201154", "language": "Python", "matching_score": 4.662344932556152, "max_stars_count": 0, "path": "tests/api/test_monitor.py" }, { "content": "from sqlalchemy import and_, or_\n\nfrom core.model import (\n Annotation,\n Collection,\n ExternalIntegration,\n Hold,\n LicensePool,\n Loan,\n)\nfrom core.monitor import ReaperMonitor\nfrom core.util.datetime_helpers import utc_now\n\nfrom .odl import ODLAPI, SharedODLAPI\n\n\nclass LoanlikeReaperMonitor(ReaperMonitor):\n\n SOURCE_OF_TRUTH_PROTOCOLS = [\n ODLAPI.NAME,\n SharedODLAPI.NAME,\n ExternalIntegration.OPDS_FOR_DISTRIBUTORS,\n ]\n\n @property\n def where_clause(self):\n \"\"\"We never want to automatically reap loans or holds for situations\n where the circulation manager is the source of truth. 
If we\n delete something we shouldn't have, we won't be able to get\n the 'real' information back.\n\n This means loans of open-access content and loans from\n collections based on a protocol found in\n SOURCE_OF_TRUTH_PROTOCOLS.\n\n Subclasses will append extra clauses to this filter.\n \"\"\"\n source_of_truth = or_(\n LicensePool.open_access == True,\n ExternalIntegration.protocol.in_(self.SOURCE_OF_TRUTH_PROTOCOLS),\n )\n\n source_of_truth_subquery = (\n self._db.query(self.MODEL_CLASS.id)\n .join(self.MODEL_CLASS.license_pool)\n .join(LicensePool.collection)\n .join(\n ExternalIntegration,\n Collection.external_integration_id == ExternalIntegration.id,\n )\n .filter(source_of_truth)\n )\n return ~self.MODEL_CLASS.id.in_(source_of_truth_subquery)\n\n\nclass LoanReaper(LoanlikeReaperMonitor):\n \"\"\"Remove expired and abandoned loans from the database.\"\"\"\n\n MODEL_CLASS = Loan\n MAX_AGE = 90\n\n @property\n def where_clause(self):\n \"\"\"Find loans that have either expired, or that were created a long\n time ago and have no definite end date.\n \"\"\"\n start_field = self.MODEL_CLASS.start\n end_field = self.MODEL_CLASS.end\n superclause = super(LoanReaper, self).where_clause\n now = utc_now()\n expired = end_field < now\n very_old_with_no_clear_end_date = and_(\n start_field < self.cutoff, end_field == None\n )\n return and_(superclause, or_(expired, very_old_with_no_clear_end_date))\n\n\nReaperMonitor.REGISTRY.append(LoanReaper)\n\n\nclass HoldReaper(LoanlikeReaperMonitor):\n \"\"\"Remove seemingly abandoned holds from the database.\"\"\"\n\n MODEL_CLASS = Hold\n MAX_AGE = 365\n\n @property\n def where_clause(self):\n \"\"\"Find holds that were created a long time ago and either have\n no end date or have an end date in the past.\n\n The 'end date' for a hold is just an estimate, but if the estimate\n is in the future it's better to keep the hold around.\n \"\"\"\n start_field = self.MODEL_CLASS.start\n end_field = self.MODEL_CLASS.end\n superclause = super(HoldReaper, self).where_clause\n end_date_in_past = end_field < utc_now()\n probably_abandoned = and_(\n start_field < self.cutoff, or_(end_field == None, end_date_in_past)\n )\n return and_(superclause, probably_abandoned)\n\n\nReaperMonitor.REGISTRY.append(HoldReaper)\n\n\nclass IdlingAnnotationReaper(ReaperMonitor):\n \"\"\"Remove idling annotations for inactive loans.\"\"\"\n\n MODEL_CLASS = Annotation\n TIMESTAMP_FIELD = \"timestamp\"\n MAX_AGE = 60\n\n @property\n def where_clause(self):\n \"\"\"The annotation must have motivation=IDLING, must be at least 60\n days old (meaning there has been no attempt to read the book\n for 60 days), and must not be associated with one of the\n patron's active loans or holds.\n \"\"\"\n superclause = super(IdlingAnnotationReaper, self).where_clause\n\n restrictions = []\n for t in Loan, Hold:\n active_subquery = (\n self._db.query(Annotation.id)\n .join(t, t.patron_id == Annotation.patron_id)\n .join(\n LicensePool,\n and_(\n LicensePool.id == t.license_pool_id,\n LicensePool.identifier_id == Annotation.identifier_id,\n ),\n )\n )\n restrictions.append(~Annotation.id.in_(active_subquery))\n return and_(\n superclause, Annotation.motivation == Annotation.IDLING, *restrictions\n )\n\n\nReaperMonitor.REGISTRY.append(IdlingAnnotationReaper)\n", "id": "1224072", "language": "Python", "matching_score": 0.4984707236289978, "max_stars_count": 0, "path": "api/monitor.py" }, { "content": "from api.admin.controller.patron_auth_services import PatronAuthServicesController\nfrom 
api.admin.controller.self_tests import SelfTestsController\nfrom api.admin.problem_details import *\nfrom core.model import ExternalIntegration, get_one\n\n\nclass PatronAuthServiceSelfTestsController(\n SelfTestsController, PatronAuthServicesController\n):\n def process_patron_auth_service_self_tests(self, identifier):\n return self._manage_self_tests(identifier)\n\n def look_up_by_id(self, identifier):\n service = get_one(\n self._db,\n ExternalIntegration,\n id=identifier,\n goal=ExternalIntegration.PATRON_AUTH_GOAL,\n )\n if not service:\n return MISSING_SERVICE\n return service\n\n def get_info(self, patron_auth_service):\n [protocol] = [\n p\n for p in self._get_integration_protocols(self.provider_apis)\n if p.get(\"name\") == patron_auth_service.protocol\n ]\n info = dict(\n id=patron_auth_service.id,\n name=patron_auth_service.name,\n protocol=patron_auth_service.protocol,\n goal=patron_auth_service.goal,\n settings=protocol.get(\"settings\"),\n )\n return info\n\n def run_tests(self, patron_auth_service):\n # If the auth service doesn't have at least one library associated with it,\n # then admins will not be able to access the button to run self tests for it, so\n # this code will never be reached; hence, no need to check here that :library exists.\n value = None\n if len(patron_auth_service.libraries):\n library = patron_auth_service.libraries[0]\n value = self._find_protocol_class(patron_auth_service).run_self_tests(\n self._db, None, library, patron_auth_service\n )\n return value\n", "id": "350977", "language": "Python", "matching_score": 3.2773027420043945, "max_stars_count": 0, "path": "api/admin/controller/patron_auth_service_self_tests.py" }, { "content": "from flask_babel import lazy_gettext as _\n\nfrom api.admin.controller.self_tests import SelfTestsController\nfrom core.external_search import ExternalSearchIndex\nfrom core.model import ExternalIntegration\nfrom core.testing import ExternalSearchTest\n\n\nclass SearchServiceSelfTestsController(SelfTestsController, ExternalSearchTest):\n def __init__(self, manager):\n super(SearchServiceSelfTestsController, self).__init__(manager)\n self.type = _(\"search service\")\n\n def process_search_service_self_tests(self, identifier):\n return self._manage_self_tests(identifier)\n\n def _find_protocol_class(self, integration):\n # There's only one possibility for search integrations.\n return ExternalSearchIndex, (\n None,\n self._db,\n )\n\n def look_up_by_id(self, identifier):\n return self.look_up_service_by_id(\n identifier,\n ExternalIntegration.ELASTICSEARCH,\n ExternalIntegration.SEARCH_GOAL,\n )\n", "id": "4326740", "language": "Python", "matching_score": 0.07505661994218826, "max_stars_count": 0, "path": "api/admin/controller/search_service_self_tests.py" }, { "content": "from core.model import DataSource, Measurement, get_one_or_create\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import datetime_utc\n\n\nclass TestMeasurement(DatabaseTest):\n def setup_method(self):\n super(TestMeasurement, self).setup_method()\n self.SOURCE_NAME = \"Test Data Source\"\n\n # Create a test DataSource\n obj, new = get_one_or_create(\n self._db,\n DataSource,\n name=self.SOURCE_NAME,\n )\n self.source = obj\n\n Measurement.PERCENTILE_SCALES[Measurement.POPULARITY][self.SOURCE_NAME] = [\n 1,\n 1,\n 1,\n 2,\n 2,\n 2,\n 3,\n 3,\n 4,\n 4,\n 5,\n 5,\n 6,\n 6,\n 7,\n 7,\n 8,\n 9,\n 9,\n 10,\n 10,\n 11,\n 12,\n 13,\n 14,\n 15,\n 15,\n 16,\n 18,\n 19,\n 20,\n 21,\n 22,\n 24,\n 25,\n 26,\n 28,\n 30,\n 31,\n 33,\n 35,\n 
37,\n 39,\n 41,\n 43,\n 46,\n 48,\n 51,\n 53,\n 56,\n 59,\n 63,\n 66,\n 70,\n 74,\n 78,\n 82,\n 87,\n 92,\n 97,\n 102,\n 108,\n 115,\n 121,\n 128,\n 135,\n 142,\n 150,\n 159,\n 168,\n 179,\n 190,\n 202,\n 216,\n 230,\n 245,\n 260,\n 277,\n 297,\n 319,\n 346,\n 372,\n 402,\n 436,\n 478,\n 521,\n 575,\n 632,\n 702,\n 777,\n 861,\n 965,\n 1100,\n 1248,\n 1428,\n 1665,\n 2020,\n 2560,\n 3535,\n 5805,\n ]\n Measurement.RATING_SCALES[self.SOURCE_NAME] = [1, 10]\n\n def _measurement(self, quantity, value, source, weight):\n source = source or self.source\n return Measurement(\n data_source=source, quantity_measured=quantity, value=value, weight=weight\n )\n\n def _popularity(self, value, source=None, weight=1):\n return self._measurement(Measurement.POPULARITY, value, source, weight)\n\n def _rating(self, value, source=None, weight=1):\n return self._measurement(Measurement.RATING, value, source, weight)\n\n def _quality(self, value, weight=1):\n # The only source we recognize for quality scores is the metadata\n # wrangler.\n source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)\n return self._measurement(Measurement.QUALITY, value, source, weight)\n\n def test_newer_measurement_displaces_earlier_measurement(self):\n wi = self._identifier()\n m1 = wi.add_measurement(self.source, Measurement.DOWNLOADS, 10)\n assert True == m1.is_most_recent\n\n m2 = wi.add_measurement(self.source, Measurement.DOWNLOADS, 11)\n assert False == m1.is_most_recent\n assert True == m2.is_most_recent\n\n m3 = wi.add_measurement(self.source, Measurement.POPULARITY, 11)\n assert True == m2.is_most_recent\n assert True == m3.is_most_recent\n\n def test_can_insert_measurement_after_the_fact(self):\n\n old = datetime_utc(2011, 1, 1)\n new = datetime_utc(2012, 1, 1)\n\n wi = self._identifier()\n m1 = wi.add_measurement(self.source, Measurement.DOWNLOADS, 10, taken_at=new)\n assert True == m1.is_most_recent\n\n m2 = wi.add_measurement(self.source, Measurement.DOWNLOADS, 5, taken_at=old)\n assert True == m1.is_most_recent\n\n def test_normalized_popularity(self):\n # Here's a very popular book on the scale defined in\n # PERCENTILE_SCALES[POPULARITY].\n p = self._popularity(6000)\n assert 1.0 == p.normalized_value\n\n # Here's a slightly less popular book.\n p = self._popularity(5804)\n assert 0.99 == p.normalized_value\n\n # Here's a very unpopular book\n p = self._popularity(1)\n assert 0 == p.normalized_value\n\n # Here's a book in the middle.\n p = self._popularity(59)\n assert 0.5 == p.normalized_value\n\n # So long as the data source and the quantity measured can be\n # found in PERCENTILE_SCALES, the data can be normalized.\n\n # This book is extremely unpopular.\n overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)\n m = self._measurement(Measurement.POPULARITY, 0, overdrive, 10)\n assert 0 == m.normalized_value\n\n # For some other data source, we don't know whether popularity=0\n # means 'very popular' or 'very unpopular'.\n gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)\n m = self._measurement(Measurement.POPULARITY, 0, gutenberg, 10)\n assert None == m.normalized_value\n\n # We also don't know what it means if Overdrive were to say\n # that a book got 200 downloads. Is that a lot? Compared to\n # what? In what time period? 
We would have to measure it to\n # find out -- at that point we would put the percentile list\n # in PERCENTILE_SCALES and this would start working.\n m = self._measurement(Measurement.DOWNLOADS, 0, overdrive, 10)\n assert None == m.normalized_value\n\n def test_normalized_rating(self):\n # Here's a very good book on the scale defined in\n # RATING_SCALES.\n p = self._rating(10)\n assert 1.0 == p.normalized_value\n\n # Here's a slightly less good book.\n p = self._rating(9)\n assert 8.0 / 9 == p.normalized_value\n\n # Here's a very bad book\n p = self._rating(1)\n assert 0 == p.normalized_value\n\n def test_neglected_source_cannot_be_normalized(self):\n obj, new = get_one_or_create(self._db, DataSource, name=\"Neglected source\")\n neglected_source = obj\n p = self._popularity(100, neglected_source)\n assert None == p.normalized_value\n\n r = self._rating(100, neglected_source)\n assert None == r.normalized_value\n\n def test_overall_quality(self):\n popularity = self._popularity(59)\n rating = self._rating(4)\n irrelevant = self._measurement(\"Some other quantity\", 42, self.source, 1)\n pop = popularity.normalized_value\n rat = rating.normalized_value\n assert 0.5 == pop\n assert 1.0 / 3 == rat\n l = [popularity, rating, irrelevant]\n quality = Measurement.overall_quality(l)\n assert (0.7 * rat) + (0.3 * pop) == quality\n\n # Mess with the weights.\n assert (0.5 * rat) + (0.5 * pop) == Measurement.overall_quality(l, 0.5, 0.5)\n\n # Adding a non-popularity measurement that is _equated_ to\n # popularity via a percentile scale modifies the\n # normalized value -- we don't care exactly how, only that\n # it's taken into account.\n oclc = DataSource.lookup(self._db, DataSource.OCLC)\n popularityish = self._measurement(Measurement.HOLDINGS, 400, oclc, 10)\n new_quality = Measurement.overall_quality(l + [popularityish])\n assert quality != new_quality\n\n def test_overall_quality_based_solely_on_popularity_if_no_rating(self):\n pop = self._popularity(59)\n assert 0.5 == Measurement.overall_quality([pop])\n\n def test_overall_quality_with_rating_and_quality_but_not_popularity(self):\n rat = self._rating(4)\n qual = self._quality(0.5)\n\n # We would expect the final quality score to be 1/2 of the quality\n # score we got from the metadata wrangler, and 1/2 of the normalized\n # value of the 4-star rating.\n expect = (rat.normalized_value / 2) + 0.25\n assert expect == Measurement.overall_quality([rat, qual], 0.5, 0.5)\n\n def test_overall_quality_with_popularity_and_quality_but_not_rating(self):\n pop = self._popularity(4)\n qual = self._quality(0.5)\n\n # We would expect the final quality score to be 1/2 of the quality\n # score we got from the metadata wrangler, and 1/2 of the normalized\n # value of the 4-star rating.\n expect = (pop.normalized_value / 2) + (0.5 / 2)\n assert expect == Measurement.overall_quality([pop, qual], 0.5, 0.5)\n\n def test_overall_quality_with_popularity_quality_and_rating(self):\n pop = self._popularity(4)\n rat = self._rating(4)\n quality_score = 0.66\n qual = self._quality(quality_score)\n\n # The popularity and rating are scaled appropriately and\n # added together.\n expect_1 = (pop.normalized_value * 0.75) + (rat.normalized_value * 0.25)\n\n # Then the whole thing is divided in half and added to half of the\n # quality score\n expect_total = expect_1 / 2 + (quality_score / 2)\n assert expect_total == Measurement.overall_quality([pop, rat, qual], 0.75, 0.25)\n\n def test_overall_quality_takes_weights_into_account(self):\n rating1 = self._rating(10, weight=10)\n 
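# Why 0.91 (worked example, derived from the RATING_SCALES entry above): on the\n # [1, 10] scale a rating of 10 normalizes to 1.0 and a rating of 1 to 0.0, so the\n # weighted mean is (10 * 1.0 + 1 * 0.0) / (10 + 1) ≈ 0.909, which rounds to 0.91.\n 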
rating2 = self._rating(1, weight=1)\n assert 0.91 == round(Measurement.overall_quality([rating1, rating2]), 2)\n\n def test_overall_quality_is_zero_if_no_relevant_measurements(self):\n irrelevant = self._measurement(\"Some other quantity\", 42, self.source, 1)\n assert 0 == Measurement.overall_quality([irrelevant])\n\n def test_calculate_quality(self):\n w = self._work(with_open_access_download=True)\n\n # This book used to be incredibly popular.\n identifier = w.presentation_edition.primary_identifier\n old_popularity = identifier.add_measurement(\n self.source, Measurement.POPULARITY, 6000\n )\n\n # Now it's just so-so.\n popularity = identifier.add_measurement(self.source, Measurement.POPULARITY, 59)\n\n # This measurement is irrelevant because \"Test Data Source\"\n # doesn't have a mapping from number of editions to a\n # percentile range.\n irrelevant = identifier.add_measurement(\n self.source, Measurement.PUBLISHED_EDITIONS, 42\n )\n\n # If we calculate the quality based solely on the primary\n # identifier, only the most recent popularity is considered,\n # and the book ends up in the middle of the road in terms of\n # quality.\n w.calculate_quality([identifier.id])\n assert 0.5 == w.quality\n\n old_quality = w.quality\n\n # But let's say there's another identifier that's equivalent,\n # and it has a number of editions that was obtained from\n # OCLC Classify, which _does_ have a mapping from number\n # of editions to a percentile range.\n wi = self._identifier()\n oclc = DataSource.lookup(self._db, DataSource.OCLC)\n wi.add_measurement(oclc, Measurement.PUBLISHED_EDITIONS, 800)\n\n # Now the quality is higher--the large OCLC PUBLISHED_EDITIONS\n # measurement bumped it up.\n w.calculate_quality([identifier.id, wi.id])\n assert w.quality > old_quality\n\n def test_calculate_quality_default_quality(self):\n\n # Here's a work with no measurements whatsoever.\n w = self._work()\n\n # Its quality is dependent entirely on the default value we\n # pass into calculate_quality\n w.calculate_quality([])\n assert 0 == w.quality\n w.calculate_quality([], 0.4)\n assert 0.4 == w.quality\n", "id": "3087642", "language": "Python", "matching_score": 2.3071775436401367, "max_stars_count": 0, "path": "tests/core/models/test_measurement.py" }, { "content": "# encoding: utf-8\n# CustomList, CustomListEntry\n\nimport logging\nfrom functools import total_ordering\n\nfrom sqlalchemy import (\n Boolean,\n Column,\n DateTime,\n ForeignKey,\n Index,\n Integer,\n Unicode,\n UniqueConstraint,\n)\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.expression import or_\n\nfrom ..util.datetime_helpers import utc_now\nfrom . 
import Base, get_one_or_create\nfrom .datasource import DataSource\nfrom .identifier import Identifier\nfrom .licensing import LicensePool\nfrom .work import Work\n\n\n@total_ordering\nclass CustomList(Base):\n \"\"\"A custom grouping of Editions.\"\"\"\n\n STAFF_PICKS_NAME = \"<NAME>\"\n\n __tablename__ = \"customlists\"\n id = Column(Integer, primary_key=True)\n primary_language = Column(Unicode, index=True)\n data_source_id = Column(Integer, ForeignKey(\"datasources.id\"), index=True)\n foreign_identifier = Column(Unicode, index=True)\n name = Column(Unicode, index=True)\n description = Column(Unicode)\n created = Column(DateTime(timezone=True), index=True)\n updated = Column(DateTime(timezone=True), index=True)\n responsible_party = Column(Unicode)\n library_id = Column(Integer, ForeignKey(\"libraries.id\"), index=True, nullable=True)\n\n # How many titles are in this list? This is calculated and\n # cached when the list contents change.\n size = Column(Integer, nullable=False, default=0)\n\n entries = relationship(\"CustomListEntry\", backref=\"customlist\")\n\n __table_args__ = (\n UniqueConstraint(\"data_source_id\", \"foreign_identifier\"),\n UniqueConstraint(\"name\", \"library_id\"),\n )\n\n # TODO: It should be possible to associate a CustomList with an\n # audience, fiction status, and subject, but there is no planned\n # interface for managing this.\n\n def __repr__(self):\n return '<Custom List name=\"%s\" foreign_identifier=\"%s\" [%d entries]>' % (\n self.name,\n self.foreign_identifier,\n len(self.entries),\n )\n\n def __eq__(self, other):\n \"\"\"Equality implementation for total_ordering.\"\"\"\n if other is None or not isinstance(other, CustomList):\n return False\n return (self.foreign_identifier, self.name) == (\n other.foreign_identifier,\n other.name,\n )\n\n def __lt__(self, other):\n \"\"\"Comparison implementation for total_ordering.\"\"\"\n if other is None or not isinstance(other, CustomList):\n return False\n return (self.foreign_identifier, self.name) < (\n other.foreign_identifier,\n other.name,\n )\n\n @classmethod\n def all_from_data_sources(cls, _db, data_sources):\n \"\"\"All custom lists from the given data sources.\"\"\"\n if not isinstance(data_sources, list):\n data_sources = [data_sources]\n ids = []\n for ds in data_sources:\n if isinstance(ds, (bytes, str)):\n ds = DataSource.lookup(_db, ds)\n ids.append(ds.id)\n return _db.query(CustomList).filter(CustomList.data_source_id.in_(ids))\n\n @classmethod\n def find(cls, _db, foreign_identifier_or_name, data_source=None, library=None):\n \"\"\"Finds a foreign list in the database by its foreign_identifier\n or its name.\n \"\"\"\n source_name = data_source\n if isinstance(data_source, DataSource):\n source_name = data_source.name\n foreign_identifier = str(foreign_identifier_or_name)\n\n qu = _db.query(cls)\n if source_name:\n qu = qu.join(CustomList.data_source).filter(\n DataSource.name == str(source_name)\n )\n\n qu = qu.filter(\n or_(\n CustomList.foreign_identifier == foreign_identifier,\n CustomList.name == foreign_identifier,\n )\n )\n if library:\n qu = qu.filter(CustomList.library_id == library.id)\n else:\n qu = qu.filter(CustomList.library_id == None)\n\n custom_lists = qu.all()\n\n if not custom_lists:\n return None\n return custom_lists[0]\n\n @property\n def featured_works(self):\n _db = Session.object_session(self)\n editions = [e.edition for e in self.entries if e.featured]\n if not editions:\n return None\n\n identifiers = [ed.primary_identifier for ed in editions]\n return 
Work.from_identifiers(_db, identifiers)\n\n def add_entry(\n self,\n work_or_edition,\n annotation=None,\n first_appearance=None,\n featured=None,\n update_external_index=True,\n ):\n \"\"\"Add a Work or Edition to a CustomList.\n\n :param work_or_edition: A Work or an Edition. If this is a\n Work, that specific Work will be added to the CustomList. If\n this is an Edition, that Edition will be added to the\n CustomList, assuming there's no equivalent Edition already\n in the list.\n\n :param update_external_index: When a Work is added to a list,\n its external index needs to be updated. The only reason not to\n do this is when the current database session already contains\n a new WorkCoverageRecord for this purpose (e.g. because the\n Work was just created) and creating another one would violate\n the workcoveragerecords table's unique constraint. TODO: This\n is probably no longer be necessary since we no longer update the\n external index in real time.\n \"\"\"\n first_appearance = first_appearance or utc_now()\n _db = Session.object_session(self)\n\n if isinstance(work_or_edition, Work):\n work = work_or_edition\n edition = work.presentation_edition\n\n # Don't look for duplicate entries. get_one_or_create will\n # find an existing entry for this Work, and any other Work\n # -- even for the same title -- is not considered a\n # 'duplicate'.\n existing_entries = []\n else:\n edition = work_or_edition\n work = edition.work\n\n # Look for other entries in this CustomList for this Edition,\n # or an equivalent Edition. This can avoid situations where\n # the same book shows up on a CustomList multiple times.\n existing_entries = list(self.entries_for_work(work_or_edition))\n\n # There's no guarantee this Edition _has_ a work, so don't\n # filter by Work when looking for a duplicate.\n kwargs = dict()\n\n if existing_entries:\n # There is a book equivalent to this one on the list.\n # Update one of the equivalent CustomListEntries,\n # potentially giving it a new .edition and .work\n was_new = False\n entry = existing_entries[0]\n if len(existing_entries) > 1:\n entry.update(_db, equivalent_entries=existing_entries[1:])\n entry.edition = edition\n entry.work = work\n else:\n # There is no equivalent book on the CustomList, but the\n # exact same book may already be on the list. 
Either find\n # an exact duplicate, or create a new entry.\n entry, was_new = get_one_or_create(\n _db,\n CustomListEntry,\n customlist=self,\n edition=edition,\n work=work,\n create_method_kwargs=dict(first_appearance=first_appearance),\n )\n\n if (\n not entry.most_recent_appearance\n or entry.most_recent_appearance < first_appearance\n ):\n entry.most_recent_appearance = first_appearance\n if annotation:\n entry.annotation = str(annotation)\n if work and not entry.work:\n entry.work = edition.work\n if featured is not None:\n entry.featured = featured\n\n if was_new:\n self.updated = utc_now()\n self.size += 1\n # Make sure the Work's search document is updated to reflect its new\n # list membership.\n if work and update_external_index:\n work.external_index_needs_updating()\n\n return entry, was_new\n\n def remove_entry(self, work_or_edition):\n \"\"\"Remove the entry for a particular Work or Edition and/or any of its\n equivalent Editions.\n \"\"\"\n _db = Session.object_session(self)\n\n existing_entries = list(self.entries_for_work(work_or_edition))\n for entry in existing_entries:\n if entry.work:\n # Make sure the Work's search document is updated to\n # reflect its new list membership.\n entry.work.external_index_needs_updating()\n\n _db.delete(entry)\n\n if existing_entries:\n self.updated = utc_now()\n self.size -= len(existing_entries)\n _db.commit()\n\n def entries_for_work(self, work_or_edition):\n \"\"\"Find all of the entries in the list representing a particular\n Edition or Work.\n \"\"\"\n if isinstance(work_or_edition, Work):\n work = work_or_edition\n edition = work_or_edition.presentation_edition\n else:\n edition = work_or_edition\n work = edition.work\n\n equivalent_ids = [x.id for x in edition.equivalent_editions()]\n\n _db = Session.object_session(work_or_edition)\n clauses = []\n if equivalent_ids:\n clauses.append(CustomListEntry.edition_id.in_(equivalent_ids))\n if work:\n clauses.append(CustomListEntry.work == work)\n if len(clauses) == 0:\n # This shouldn't happen, but if it does, there can be\n # no matching results.\n return _db.query(CustomListEntry).filter(False)\n elif len(clauses) == 1:\n clause = clauses[0]\n else:\n clause = or_(*clauses)\n\n qu = (\n _db.query(CustomListEntry)\n .filter(CustomListEntry.customlist == self)\n .filter(clause)\n )\n return qu\n\n def update_size(self):\n self.size = len(self.entries)\n\n\nclass CustomListEntry(Base):\n\n __tablename__ = \"customlistentries\"\n id = Column(Integer, primary_key=True)\n list_id = Column(Integer, ForeignKey(\"customlists.id\"), index=True)\n edition_id = Column(Integer, ForeignKey(\"editions.id\"), index=True)\n work_id = Column(Integer, ForeignKey(\"works.id\"), index=True)\n featured = Column(Boolean, nullable=False, default=False)\n annotation = Column(Unicode)\n\n # These two fields are for best-seller lists. 
Even after a book\n # drops off the list, the fact that it once was on the list is\n # still relevant.\n first_appearance = Column(DateTime(timezone=True), index=True)\n most_recent_appearance = Column(DateTime(timezone=True), index=True)\n\n def set_work(self, metadata=None, metadata_client=None, policy=None):\n \"\"\"If possible, identify a locally known Work that is the same\n title as the title identified by this CustomListEntry.\n\n :param policy: A PresentationCalculationPolicy, used to\n determine how far to go when looking for equivalent\n Identifiers.\n \"\"\"\n _db = Session.object_session(self)\n edition = self.edition\n if not self.edition:\n # This shouldn't happen, but no edition means no work\n self.work = None\n return self.work\n\n new_work = None\n if not metadata:\n from ..metadata_layer import Metadata\n\n metadata = Metadata.from_edition(edition)\n\n # Try to guess based on metadata, if we can get a high-quality\n # guess.\n potential_license_pools = metadata.guess_license_pools(_db, metadata_client)\n for lp, quality in sorted(\n list(potential_license_pools.items()), key=lambda x: -x[1]\n ):\n if lp.deliverable and lp.work and quality >= 0.8:\n # This work has at least one deliverable LicensePool\n # associated with it, so it's likely to be real\n # data and not leftover junk.\n new_work = lp.work\n break\n\n if not new_work:\n # Try using the less reliable, more expensive method of\n # matching based on equivalent identifiers.\n equivalent_identifier_id_subquery = (\n Identifier.recursively_equivalent_identifier_ids_query(\n self.edition.primary_identifier.id, policy=policy\n )\n )\n pool_q = (\n _db.query(LicensePool)\n .filter(\n LicensePool.identifier_id.in_(equivalent_identifier_id_subquery)\n )\n .order_by(\n LicensePool.licenses_available.desc(),\n LicensePool.patrons_in_hold_queue.asc(),\n )\n )\n pools = [x for x in pool_q if x.deliverable]\n for pool in pools:\n if pool.deliverable and pool.work:\n new_work = pool.work\n break\n\n old_work = self.work\n if old_work != new_work:\n if old_work:\n logging.info(\n \"Changing work for list entry %r to %r (was %r)\",\n self.edition,\n new_work,\n old_work,\n )\n else:\n logging.info(\n \"Setting work for list entry %r to %r\", self.edition, new_work\n )\n self.work = new_work\n return self.work\n\n def update(self, _db, equivalent_entries=None):\n \"\"\"Combines any number of equivalent entries into a single entry\n and updates the edition being used to represent the Work.\n \"\"\"\n work = None\n if not equivalent_entries:\n # There are no entries to compare against. 
Leave it be.\n return\n equivalent_entries += [self]\n equivalent_entries = list(set(equivalent_entries))\n\n # Confirm that all the entries are from the same CustomList.\n list_ids = set([e.list_id for e in equivalent_entries])\n if not len(list_ids) == 1:\n raise ValueError(\"Cannot combine entries on different CustomLists.\")\n\n # Confirm that all the entries are equivalent.\n error = \"Cannot combine entries that represent different Works.\"\n equivalents = self.edition.equivalent_editions()\n for equivalent_entry in equivalent_entries:\n if equivalent_entry.edition not in equivalents:\n raise ValueError(error)\n\n # And get a Work if one exists.\n works = set([])\n for e in equivalent_entries:\n work = e.edition.work\n if work:\n works.add(work)\n works = [w for w in works if w]\n\n if works:\n if not len(works) == 1:\n # This shouldn't happen, given all the Editions are equivalent.\n raise ValueError(error)\n [work] = works\n\n self.first_appearance = min([e.first_appearance for e in equivalent_entries])\n self.most_recent_appearance = max(\n [e.most_recent_appearance for e in equivalent_entries]\n )\n\n annotations = [str(e.annotation) for e in equivalent_entries if e.annotation]\n if annotations:\n if len(annotations) > 1:\n # Just pick the longest one?\n self.annotation = max(annotations, key=lambda a: len(a))\n else:\n self.annotation = annotations[0]\n\n # Reset the entry's edition to be the Work's presentation edition.\n if work:\n best_edition = work.presentation_edition\n else:\n best_edition = None\n if work and not best_edition:\n work.calculate_presentation()\n best_edition = work.presentation_edition\n if best_edition and not best_edition == self.edition:\n logging.info(\n \"Changing edition for list entry %r to %r from %r\",\n self,\n best_edition,\n self.edition,\n )\n self.edition = best_edition\n\n self.set_work()\n\n for entry in equivalent_entries:\n if entry != self:\n _db.delete(entry)\n _db.commit()\n\n\n# TODO: This was originally designed to speed up queries against the\n# materialized view that use custom list membership as a way to cut\n# down on the result set. Now that we've removed the materialized\n# view, is this still necessary? 
It might still be necessary for\n# similar queries against Work.\nIndex(\n \"ix_customlistentries_work_id_list_id\",\n CustomListEntry.work_id,\n CustomListEntry.list_id,\n)\n", "id": "1110531", "language": "Python", "matching_score": 3.1154232025146484, "max_stars_count": 0, "path": "core/model/customlist.py" }, { "content": "import datetime\n\nimport pytest\n\nfrom core.coverage import (\n BaseCoverageProvider,\n CatalogCoverageProvider,\n CoverageFailure,\n CoverageProviderProgress,\n IdentifierCoverageProvider,\n MARCRecordWorkCoverageProvider,\n OPDSEntryWorkCoverageProvider,\n PresentationReadyWorkCoverageProvider,\n WorkClassificationCoverageProvider,\n WorkPresentationEditionCoverageProvider,\n)\nfrom core.metadata_layer import (\n CirculationData,\n ContributorData,\n FormatData,\n IdentifierData,\n LinkData,\n Metadata,\n ReplacementPolicy,\n SubjectData,\n)\nfrom core.model import (\n CollectionMissing,\n Contributor,\n CoverageRecord,\n DataSource,\n DeliveryMechanism,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n PresentationCalculationPolicy,\n Representation,\n RightsStatus,\n Subject,\n Timestamp,\n Work,\n WorkCoverageRecord,\n)\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.s3 import MockS3Uploader\nfrom core.testing import (\n AlwaysSuccessfulBibliographicCoverageProvider,\n AlwaysSuccessfulCollectionCoverageProvider,\n AlwaysSuccessfulCoverageProvider,\n AlwaysSuccessfulWorkCoverageProvider,\n DatabaseTest,\n DummyHTTPClient,\n NeverSuccessfulBibliographicCoverageProvider,\n NeverSuccessfulCoverageProvider,\n NeverSuccessfulWorkCoverageProvider,\n TaskIgnoringCoverageProvider,\n TransientFailureCoverageProvider,\n TransientFailureWorkCoverageProvider,\n)\nfrom core.util.datetime_helpers import datetime_utc, utc_now\n\n\nclass TestCoverageFailure(DatabaseTest):\n \"\"\"Test the CoverageFailure class.\"\"\"\n\n def test_to_coverage_record(self):\n source = DataSource.lookup(self._db, DataSource.GUTENBERG)\n identifier = self._identifier()\n\n transient_failure = CoverageFailure(\n identifier, \"Bah!\", data_source=source, transient=True\n )\n rec = transient_failure.to_coverage_record(operation=\"the_operation\")\n assert isinstance(rec, CoverageRecord)\n assert identifier == rec.identifier\n assert source == rec.data_source\n assert \"the_operation\" == rec.operation\n assert CoverageRecord.TRANSIENT_FAILURE == rec.status\n assert \"Bah!\" == rec.exception\n\n persistent_failure = CoverageFailure(\n identifier, \"Bah forever!\", data_source=source, transient=False\n )\n rec = persistent_failure.to_coverage_record(operation=\"the_operation\")\n assert CoverageRecord.PERSISTENT_FAILURE == rec.status\n assert \"Bah forever!\" == rec.exception\n\n def test_to_work_coverage_record(self):\n work = self._work()\n\n transient_failure = CoverageFailure(work, \"Bah!\", transient=True)\n rec = transient_failure.to_work_coverage_record(\"the_operation\")\n assert isinstance(rec, WorkCoverageRecord)\n assert work == rec.work\n assert \"the_operation\" == rec.operation\n assert CoverageRecord.TRANSIENT_FAILURE == rec.status\n assert \"Bah!\" == rec.exception\n\n persistent_failure = CoverageFailure(work, \"Bah forever!\", transient=False)\n rec = persistent_failure.to_work_coverage_record(operation=\"the_operation\")\n assert CoverageRecord.PERSISTENT_FAILURE == rec.status\n assert \"Bah forever!\" == rec.exception\n\n\nclass TestCoverageProviderProgress(object):\n def test_achievements(self):\n progress = CoverageProviderProgress()\n progress.successes 
= 1\n progress.transient_failures = 2\n progress.persistent_failures = 0\n\n expect = \"Items processed: 3. Successes: 1, transient failures: 2, persistent failures: 0\"\n assert expect == progress.achievements\n\n # You can't set .achievements directly -- it's a calculated value.\n progress.achievements = \"new value\"\n assert expect == progress.achievements\n\n\nclass CoverageProviderTest(DatabaseTest):\n @pytest.fixture\n def bibliographic_data(self):\n return Metadata(\n DataSource.OVERDRIVE,\n publisher=\"Perfection Learning\",\n language=\"eng\",\n title=\"A Girl Named Disaster\",\n published=datetime_utc(1998, 3, 1, 0, 0),\n primary_identifier=IdentifierData(\n type=Identifier.OVERDRIVE_ID,\n identifier=\"ba9b3419-b0bd-4ca7-a24f-26c4246b6b44\",\n ),\n identifiers=[\n IdentifierData(\n type=Identifier.OVERDRIVE_ID,\n identifier=\"ba9b3419-b0bd-4ca7-a24f-26c4246b6b44\",\n ),\n IdentifierData(type=Identifier.ISBN, identifier=\"9781402550805\"),\n ],\n contributors=[\n ContributorData(\n sort_name=\"<NAME>\", roles=[Contributor.PRIMARY_AUTHOR_ROLE]\n )\n ],\n subjects=[\n SubjectData(type=Subject.TOPIC, identifier=\"Action & Adventure\"),\n SubjectData(type=Subject.FREEFORM_AUDIENCE, identifier=\"Young Adult\"),\n SubjectData(type=Subject.PLACE, identifier=\"Africa\"),\n ],\n )\n\n\nclass TestBaseCoverageProvider(CoverageProviderTest):\n def test_instantiation(self):\n \"\"\"Verify variable initialization.\"\"\"\n\n class ValidMock(BaseCoverageProvider):\n SERVICE_NAME = \"A Service\"\n OPERATION = \"An Operation\"\n DEFAULT_BATCH_SIZE = 50\n\n now = cutoff_time = utc_now()\n provider = ValidMock(self._db, cutoff_time=now)\n\n # Class variables defined in subclasses become appropriate\n # instance variables.\n assert \"A Service (An Operation)\" == provider.service_name\n assert \"An Operation\" == provider.operation\n assert 50 == provider.batch_size\n assert now == provider.cutoff_time\n\n # If you pass in an invalid value for batch_size, you get the default.\n provider = ValidMock(self._db, batch_size=-10)\n assert 50 == provider.batch_size\n\n def test_subclass_must_define_service_name(self):\n class NoServiceName(BaseCoverageProvider):\n pass\n\n with pytest.raises(ValueError) as excinfo:\n NoServiceName(self._db)\n assert \"NoServiceName must define SERVICE_NAME\" in str(excinfo.value)\n\n def test_run(self):\n \"\"\"Verify that run() calls run_once_and_update_timestamp().\"\"\"\n\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5\n\n def test_run_with_custom_result(self):\n\n start = datetime_utc(2011, 1, 1)\n finish = datetime_utc(2012, 1, 1)\n counter = -100\n\n class MockProvider(BaseCoverageProvider):\n \"\"\"A BaseCoverageProvider that returns a strange\n CoverageProviderProgress representing the work it did.\n \"\"\"\n\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n custom_timestamp_data = CoverageProviderProgress(\n 
start=start, finish=finish, counter=counter\n )\n\n def run_once_and_update_timestamp(self):\n return self.custom_timestamp_data\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # The TimestampData returned by run_once_and_update_timestamp\n # is the return value of run().\n assert result == provider.custom_timestamp_data\n\n # The TimestampData data was written to the database, even\n # though some of it doesn't make apparent sense.\n assert start == provider.timestamp.start\n assert finish == provider.timestamp.finish\n assert counter == provider.timestamp.counter\n\n def test_run_once_and_update_timestamp(self):\n \"\"\"Test that run_once_and_update_timestamp calls run_once until all\n the work is done, and then updates a Timestamp.\n \"\"\"\n\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n run_once_calls = []\n expect_offset = 0\n\n def run_once(self, progress, count_as_covered=None):\n now = utc_now()\n\n # We never see progress.finish set to a non-None\n # value. When _we_ set it to a non-None value, it means\n # the work is done. If we get called again, it'll be\n # with different `count_as_covered` settings, and\n # .finish will have been reset to None.\n assert None == progress.finish\n\n # Verify that progress.offset is cleared when we\n # expect, and left alone when we expect. This lets\n assert self.expect_offset == progress.offset\n\n self.run_once_calls.append((count_as_covered, now))\n progress.offset = len(self.run_once_calls)\n\n if len(self.run_once_calls) == 1:\n # This is the first call. We will not be setting\n # .finish, so the offset will not be reset on the\n # next call. This simulates what happens when a\n # given `count_as_covered` setting can't be\n # handled in one batch.\n self.expect_offset = progress.offset\n else:\n # This is the second or third call. Set .finish to\n # indicate we're done with this `count_as_covered`\n # setting.\n progress.finish = now\n\n # If there is another call, progress.offset will be\n # reset to zero. 
(So will .finish.)\n self.expect_offset = 0\n return progress\n\n # We start with no Timestamp.\n service_name = \"I do nothing\"\n service_type = Timestamp.COVERAGE_PROVIDER_TYPE\n timestamp = Timestamp.value(\n self._db, service_name, service_type, collection=None\n )\n assert None == timestamp\n\n # Instantiate the Provider, and call\n # run_once_and_update_timestamp.\n provider = MockProvider(self._db)\n final_progress = provider.run_once_and_update_timestamp()\n\n # The Timestamp's .start and .finish are now set to recent\n # values -- the start and end points of run_once().\n timestamp = provider.timestamp\n now = utc_now()\n assert (now - timestamp.start).total_seconds() < 1\n assert (now - timestamp.finish).total_seconds() < 1\n assert timestamp.start < timestamp.finish\n\n # run_once was called three times: twice to exclude items that\n # have any coverage record whatsoever (PREVIOUSLY_ATTEMPTED),\n # and a third time to exclude only items that have coverage\n # records that indicate success or persistent failure\n # (DEFAULT_COUNT_AS_COVERED).\n first_call, second_call, third_call = provider.run_once_calls\n assert CoverageRecord.PREVIOUSLY_ATTEMPTED == first_call[0]\n assert CoverageRecord.PREVIOUSLY_ATTEMPTED == second_call[0]\n assert CoverageRecord.DEFAULT_COUNT_AS_COVERED == third_call[0]\n\n # On the second and third calls, final_progress.finish was set\n # to the current time, and .offset was set to the number of\n # calls so far.\n #\n # These values are cleared out before each run_once() call\n # -- we tested that above -- so the surviving values are the\n # ones associated with the third call.\n assert third_call[1] == final_progress.finish\n assert 3 == final_progress.offset\n\n def test_run_once_and_update_timestamp_catches_exception(self):\n # Test that run_once_and_update_timestamp catches an exception\n # and stores a stack trace in the CoverageProvider's Timestamp.\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I fail\"\n\n def run_once(self, progress, count_as_covered=None):\n raise Exception(\"Unhandled exception\")\n\n provider = MockProvider(self._db)\n provider.run_once_and_update_timestamp()\n\n timestamp = provider.timestamp\n now = utc_now()\n assert (now - timestamp.start).total_seconds() < 1\n assert (now - timestamp.finish).total_seconds() < 1\n assert timestamp.start < timestamp.finish\n\n assert \"Exception: Unhandled exception\" in timestamp.exception\n\n def test_run_once_and_update_timestamp_handled_exception(self):\n # Test that run_once_and_update_timestamp handles the\n # case where the run_once() implementation sets TimestampData.exception\n # rather than raising an exception.\n #\n # This also tests the case where run_once() modifies the\n # TimestampData in place rather than returning a new one.\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I fail\"\n\n def run_once(self, progress, count_as_covered=None):\n progress.exception = \"oops\"\n\n provider = MockProvider(self._db)\n provider.run_once_and_update_timestamp()\n\n timestamp = provider.timestamp\n now = utc_now()\n assert (now - timestamp.start).total_seconds() < 1\n assert (now - timestamp.finish).total_seconds() < 1\n assert timestamp.start < timestamp.finish\n\n assert \"oops\" == timestamp.exception\n\n def test_run_once(self):\n # Test run_once, showing how it covers items with different types of\n # CoverageRecord.\n\n # We start with no CoverageRecords.\n assert [] == self._db.query(CoverageRecord).all()\n\n # Four identifiers.\n transient = 
self._identifier()\n persistent = self._identifier()\n uncovered = self._identifier()\n covered = self._identifier()\n\n # This provider will try to cover them.\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n data_source = provider.data_source\n\n # We previously tried to cover one of them, but got a\n # transient failure.\n self._coverage_record(\n transient, data_source, status=CoverageRecord.TRANSIENT_FAILURE\n )\n\n # Another of the four has a persistent failure.\n self._coverage_record(\n persistent, data_source, status=CoverageRecord.PERSISTENT_FAILURE\n )\n\n # The third one has no coverage record at all.\n\n # And the fourth one has been successfully covered.\n self._coverage_record(covered, data_source, status=CoverageRecord.SUCCESS)\n\n # Now let's run the coverage provider. Every Identifier\n # that's covered will succeed, so the question is which ones\n # get covered.\n progress = CoverageProviderProgress()\n assert 0 == progress.offset\n result = provider.run_once(progress)\n\n # The TimestampData we passed in was given back to us.\n assert progress == result\n\n # The offset (an extension specific to\n # CoverageProviderProgress, not stored in the database)\n # has not changed -- if we were to call run_once again we\n # would not need to skip any records.\n assert 0 == progress.offset\n\n # Various internal totals were updated and a value for .achievements\n # can be generated from those totals.\n assert 2 == progress.successes\n\n # By default, run_once() finds Identifiers that have no coverage\n # or which have transient failures.\n [transient_failure_has_gone] = transient.coverage_records\n assert CoverageRecord.SUCCESS == transient_failure_has_gone.status\n\n [now_has_coverage] = uncovered.coverage_records\n assert CoverageRecord.SUCCESS == now_has_coverage.status\n\n assert transient in provider.attempts\n assert uncovered in provider.attempts\n\n # Nothing happened to the identifier that had a persistent\n # failure or the identifier that was successfully covered.\n assert [CoverageRecord.PERSISTENT_FAILURE] == [\n x.status for x in persistent.coverage_records\n ]\n assert [CoverageRecord.SUCCESS] == [x.status for x in covered.coverage_records]\n\n assert persistent not in provider.attempts\n assert covered not in provider.attempts\n\n # We can change which identifiers get processed by changing\n # what counts as 'coverage'.\n result = provider.run_once(progress, count_as_covered=[CoverageRecord.SUCCESS])\n assert progress == result\n assert 0 == progress.offset\n\n # That processed the persistent failure, but not the success.\n assert persistent in provider.attempts\n assert covered not in provider.attempts\n\n # Let's call it again and say that we are covering everything\n # _except_ persistent failures.\n result = provider.run_once(\n progress, count_as_covered=[CoverageRecord.PERSISTENT_FAILURE]\n )\n assert progress == result\n\n # That got us to cover the identifier that had already been\n # successfully covered.\n assert covered in provider.attempts\n\n # *Now* the offset has changed, so that the first four results\n # -- which we've decided to skip -- won't be considered again\n # this run.\n assert 4 == progress.offset\n\n def test_run_once_records_successes_and_failures(self):\n class Mock(AlwaysSuccessfulCoverageProvider):\n def process_batch_and_handle_results(self, batch):\n # Simulate 1 success, 2 transient failures,\n # and 3 persistent failures.\n return (1, 2, 3), []\n\n # process_batch_and_handle_results won't even be called if the\n # batch 
is empty.\n provider = Mock(self._db)\n progress = CoverageProviderProgress()\n progress2 = provider.run_once(progress)\n assert progress2 == progress\n assert 0 == progress.successes\n\n # Let's register an identifier so that the method we're testing\n # will be called.\n needs_coverage = self._identifier()\n progress = provider.run_once(progress)\n\n # The numbers returned from process_batch_and_handle_results\n # were added to the CoverageProviderProgress object.\n assert 1 == progress.successes\n assert 2 == progress.transient_failures\n assert 3 == progress.persistent_failures\n\n assert (\n \"Items processed: 6. Successes: 1, transient failures: 2, persistent failures: 3\"\n == progress.achievements\n )\n\n def test_process_batch_and_handle_results(self):\n \"\"\"Test that process_batch_and_handle_results passes the identifiers\n it's given into the appropriate BaseCoverageProvider, and deals\n correctly with the successes and failures it might return.\n \"\"\"\n e1, p1 = self._edition(with_license_pool=True)\n i1 = e1.primary_identifier\n\n e2, p2 = self._edition(with_license_pool=True)\n i2 = e2.primary_identifier\n\n class MockProvider(AlwaysSuccessfulCoverageProvider):\n OPERATION = \"i succeed\"\n\n def finalize_batch(self):\n self.finalized = True\n\n success_provider = MockProvider(self._db)\n\n batch = [i1, i2]\n counts, successes = success_provider.process_batch_and_handle_results(batch)\n\n # Two successes.\n assert (2, 0, 0) == counts\n\n # finalize_batch() was called.\n assert True == success_provider.finalized\n\n # Each represented with a CoverageRecord with status='success'\n assert all(isinstance(x, CoverageRecord) for x in successes)\n assert [CoverageRecord.SUCCESS] * 2 == [x.status for x in successes]\n\n # Each associated with one of the identifiers...\n assert set([i1, i2]) == set([x.identifier for x in successes])\n\n # ...and with the coverage provider's operation.\n assert [\"i succeed\"] * 2 == [x.operation for x in successes]\n\n # Now try a different CoverageProvider which creates transient\n # failures.\n class MockProvider(TransientFailureCoverageProvider):\n OPERATION = \"i fail transiently\"\n\n transient_failure_provider = MockProvider(self._db)\n counts, failures = transient_failure_provider.process_batch_and_handle_results(\n batch\n )\n # Two transient failures.\n assert (0, 2, 0) == counts\n\n # New coverage records were added to track the transient\n # failures.\n assert [CoverageRecord.TRANSIENT_FAILURE] * 2 == [x.status for x in failures]\n assert [\"i fail transiently\"] * 2 == [x.operation for x in failures]\n\n # Another way of getting transient failures is to just ignore every\n # item you're told to process.\n class MockProvider(TaskIgnoringCoverageProvider):\n OPERATION = \"i ignore\"\n\n task_ignoring_provider = MockProvider(self._db)\n counts, records = task_ignoring_provider.process_batch_and_handle_results(batch)\n\n assert (0, 2, 0) == counts\n assert [CoverageRecord.TRANSIENT_FAILURE] * 2 == [x.status for x in records]\n assert [\"i ignore\"] * 2 == [x.operation for x in records]\n\n # If a transient failure becomes a success, it won't have\n # an exception anymore.\n assert [\"Was ignored by CoverageProvider.\"] * 2 == [\n x.exception for x in records\n ]\n records = success_provider.process_batch_and_handle_results(batch)[1]\n assert [None, None] == [x.exception for x in records]\n\n # Or you can go really bad and have persistent failures.\n class MockProvider(NeverSuccessfulCoverageProvider):\n OPERATION = \"i will always 
fail\"\n\n persistent_failure_provider = MockProvider(self._db)\n counts, results = persistent_failure_provider.process_batch_and_handle_results(\n batch\n )\n\n # Two persistent failures.\n assert (0, 0, 2) == counts\n assert all([isinstance(x, CoverageRecord) for x in results])\n assert [\"What did you expect?\", \"What did you expect?\"] == [\n x.exception for x in results\n ]\n assert [CoverageRecord.PERSISTENT_FAILURE] * 2 == [x.status for x in results]\n assert [\"i will always fail\"] * 2 == [x.operation for x in results]\n\n def test_process_batch(self):\n class Mock(BaseCoverageProvider):\n SERVICE_NAME = \"Some succeed, some fail.\"\n\n def __init__(self, *args, **kwargs):\n super(Mock, self).__init__(*args, **kwargs)\n self.processed = []\n self.successes = []\n\n def process_item(self, item):\n self.processed.append(item)\n if item.identifier == \"fail\":\n return CoverageFailure(item, \"oops\")\n return item\n\n def handle_success(self, item):\n self.successes.append(item)\n\n # Two Identifiers. One will succeed, one will fail.\n succeed = self._identifier(foreign_id=\"succeed\")\n fail = self._identifier(foreign_id=\"fail\")\n provider = Mock(self._db)\n\n r1, r2 = provider.process_batch([succeed, fail])\n\n # Here's the success.\n assert r1 == succeed\n\n # Here's the failure.\n assert isinstance(r2, CoverageFailure)\n assert \"oops\" == r2.exception\n\n # Both identifiers were added to .processed, indicating that\n # process_item was called twice, but only the success was\n # added to .successes, indicating that handle_success was only\n # called once.\n assert [succeed, fail] == provider.processed\n assert [succeed] == provider.successes\n\n def test_should_update(self):\n \"\"\"Verify that should_update gives the correct answer when we\n ask if a CoverageRecord needs to be updated.\n \"\"\"\n cutoff = datetime_utc(2016, 1, 1)\n provider = AlwaysSuccessfulCoverageProvider(self._db, cutoff_time=cutoff)\n identifier = self._identifier()\n\n # If coverage is missing, we should update.\n assert True == provider.should_update(None)\n\n # If coverage is outdated, we should update.\n record, ignore = CoverageRecord.add_for(identifier, provider.data_source)\n record.timestamp = datetime_utc(2015, 1, 1)\n assert True == provider.should_update(record)\n\n # If coverage is up-to-date, we should not update.\n record.timestamp = cutoff\n assert False == provider.should_update(record)\n\n # If coverage is only 'registered', we should update.\n record.status = CoverageRecord.REGISTERED\n assert True == provider.should_update(record)\n\n\nclass TestIdentifierCoverageProvider(CoverageProviderTest):\n def setup_method(self):\n super(TestIdentifierCoverageProvider, self).setup_method()\n self.identifier = self._identifier()\n\n def test_input_identifier_types(self):\n \"\"\"Test various acceptable and unacceptable values for the class\n variable INPUT_IDENTIFIER_TYPES.\n \"\"\"\n # It's okay to set INPUT_IDENTIFIER_TYPES to None -- it means you\n # will cover any and all identifier types.\n class Base(IdentifierCoverageProvider):\n SERVICE_NAME = \"Test provider\"\n DATA_SOURCE_NAME = DataSource.GUTENBERG\n\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = None\n\n provider = MockProvider(self._db)\n assert None == provider.input_identifier_types\n\n # It's okay to set a single value.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = Identifier.ISBN\n\n provider = MockProvider(self._db)\n assert [Identifier.ISBN] == provider.input_identifier_types\n\n # It's okay to set a list of 
values.\n class MockProvider(Base):\n INPUT_IDENTIFIER_TYPES = [Identifier.ISBN, Identifier.OVERDRIVE_ID]\n\n provider = MockProvider(self._db)\n assert [\n Identifier.ISBN,\n Identifier.OVERDRIVE_ID,\n ] == provider.input_identifier_types\n\n # It's not okay to do nothing.\n class MockProvider(Base):\n pass\n\n with pytest.raises(ValueError) as excinfo:\n MockProvider(self._db)\n assert (\n \"MockProvider must define INPUT_IDENTIFIER_TYPES, even if the value is None.\"\n in str(excinfo.value)\n )\n\n def test_can_cover(self):\n \"\"\"Verify that can_cover gives the correct answer when\n asked if an IdentifierCoverageProvider can handle a given Identifier.\n \"\"\"\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n identifier = self._identifier(identifier_type=Identifier.ISBN)\n m = provider.can_cover\n\n # This provider handles all identifier types.\n provider.input_identifier_types = None\n assert True == m(identifier)\n\n # This provider handles ISBNs.\n provider.input_identifier_types = [Identifier.OVERDRIVE_ID, Identifier.ISBN]\n assert True == m(identifier)\n\n # This provider doesn't.\n provider.input_identifier_types = [Identifier.OVERDRIVE_ID]\n assert False == m(identifier)\n\n def test_replacement_policy(self):\n \"\"\"Unless a different replacement policy is passed in, the\n default is ReplacementPolicy.from_metadata_source().\n \"\"\"\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n assert True == provider.replacement_policy.identifiers\n assert False == provider.replacement_policy.formats\n\n policy = ReplacementPolicy.from_license_source(self._db)\n provider = AlwaysSuccessfulCoverageProvider(self._db, replacement_policy=policy)\n assert policy == provider.replacement_policy\n\n def test_register(self):\n # The identifier has no coverage.\n assert 0 == len(self.identifier.coverage_records)\n\n provider = AlwaysSuccessfulCoverageProvider\n\n # If a CoverageRecord doesn't exist for the provider,\n # a 'registered' record is created.\n new_record, was_registered = provider.register(self.identifier)\n\n assert self.identifier.coverage_records == [new_record]\n assert provider.DATA_SOURCE_NAME == new_record.data_source.name\n assert CoverageRecord.REGISTERED == new_record.status\n assert None == new_record.exception\n\n # If a CoverageRecord exists already, it's returned.\n existing = new_record\n existing.status = CoverageRecord.SUCCESS\n\n new_record, was_registered = provider.register(self.identifier)\n assert existing == new_record\n assert False == was_registered\n # Its details haven't been changed in any way.\n assert CoverageRecord.SUCCESS == new_record.status\n assert None == new_record.exception\n\n def test_bulk_register(self):\n provider = AlwaysSuccessfulCoverageProvider\n source = DataSource.lookup(self._db, provider.DATA_SOURCE_NAME)\n\n i1 = self._identifier()\n covered = self._identifier()\n existing = self._coverage_record(covered, source, operation=provider.OPERATION)\n\n new_records, ignored_identifiers = provider.bulk_register([i1, covered])\n\n assert i1.coverage_records == new_records\n [new_record] = new_records\n assert provider.DATA_SOURCE_NAME == new_record.data_source.name\n assert provider.OPERATION == new_record.operation\n assert CoverageRecord.REGISTERED == new_record.status\n\n assert [covered] == ignored_identifiers\n # The existing CoverageRecord hasn't been changed.\n assert CoverageRecord.SUCCESS == existing.status\n\n def test_bulk_register_can_overwrite_existing_record_status(self):\n provider = 
AlwaysSuccessfulCoverageProvider\n\n # Create an existing record, and give it a SUCCESS status.\n provider.bulk_register([self.identifier])\n [existing] = self.identifier.coverage_records\n existing.status = CoverageRecord.SUCCESS\n self._db.commit()\n\n # If registration is forced, an existing record is updated.\n records, ignored = provider.bulk_register([self.identifier], force=True)\n assert [existing] == records\n assert CoverageRecord.REGISTERED == existing.status\n\n def test_bulk_register_with_collection(self):\n provider = AlwaysSuccessfulCoverageProvider\n collection = self._collection(data_source_name=DataSource.AXIS_360)\n\n try:\n # If a DataSource or data source name is provided and\n # autocreate is set True, the record is created with that source.\n provider.bulk_register(\n [self.identifier],\n data_source=collection.name,\n collection=collection,\n autocreate=True,\n )\n [record] = self.identifier.coverage_records\n\n # A DataSource with the given name has been created.\n collection_source = DataSource.lookup(self._db, collection.name)\n assert collection_source\n assert provider.DATA_SOURCE_NAME != record.data_source.name\n assert collection_source == record.data_source\n\n # Even though a collection was given, the record's collection isn't\n # set.\n assert None == record.collection\n\n # However, when coverage is collection-specific the\n # CoverageRecord is related to the given collection.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n\n provider.bulk_register(\n [self.identifier], collection_source, collection=collection\n )\n records = self.identifier.coverage_records\n assert 2 == len(records)\n assert [r for r in records if r.collection == collection]\n finally:\n # Return the mock class to its original state for other tests.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = True\n\n def test_ensure_coverage(self):\n \"\"\"Verify that ensure_coverage creates a CoverageRecord for an\n Identifier, assuming that the CoverageProvider succeeds.\n \"\"\"\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n provider.OPERATION = self._str\n record = provider.ensure_coverage(self.identifier)\n assert isinstance(record, CoverageRecord)\n assert self.identifier == record.identifier\n assert provider.data_source == record.data_source\n assert provider.OPERATION == record.operation\n assert None == record.exception\n\n # There is now one CoverageRecord -- the one returned by\n # ensure_coverage().\n [record2] = self._db.query(CoverageRecord).all()\n assert record2 == record\n\n # Because this provider counts coverage in one Collection as\n # coverage for all Collections, the coverage record was not\n # associated with any particular collection.\n assert True == provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION\n assert None == record2.collection\n\n # The coverage provider's timestamp was not updated, because\n # we're using ensure_coverage on a single record.\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n # Now let's try a CollectionCoverageProvider that needs to\n # grant coverage separately for every collection.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n record3 = provider.ensure_coverage(self.identifier)\n\n # This creates a new CoverageRecord associated with the\n # provider's collection.\n assert record3 != record2\n assert provider.collection == record3.collection\n\n def test_ensure_coverage_works_on_edition(self):\n \"\"\"Verify 
that ensure_coverage() works on an Edition by covering\n its primary identifier.\n \"\"\"\n edition = self._edition()\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n record = provider.ensure_coverage(edition)\n assert isinstance(record, CoverageRecord)\n assert edition.primary_identifier == record.identifier\n\n def test_ensure_coverage_respects_operation(self):\n # Two providers with the same output source but different operations.\n class Mock1(AlwaysSuccessfulCoverageProvider):\n OPERATION = \"foo\"\n\n provider1 = Mock1(self._db)\n\n class Mock2(NeverSuccessfulCoverageProvider):\n OPERATION = \"bar\"\n\n provider2 = Mock2(self._db)\n\n # Ensure coverage from both providers.\n coverage1 = provider1.ensure_coverage(self.identifier)\n assert \"foo\" == coverage1.operation\n old_timestamp = coverage1.timestamp\n\n coverage2 = provider2.ensure_coverage(self.identifier)\n assert \"bar\" == coverage2.operation\n\n # There are now two CoverageRecords, one for each operation.\n assert set([coverage1, coverage2]) == set(self._db.query(CoverageRecord))\n\n # If we try to ensure coverage again, no work is done and we\n # get the old coverage record back.\n new_coverage = provider1.ensure_coverage(self.identifier)\n assert new_coverage == coverage1\n assert new_coverage.timestamp == old_timestamp\n\n def test_ensure_coverage_persistent_coverage_failure(self):\n\n provider = NeverSuccessfulCoverageProvider(self._db)\n failure = provider.ensure_coverage(self.identifier)\n\n # A CoverageRecord has been created to memorialize the\n # persistent failure.\n assert isinstance(failure, CoverageRecord)\n assert \"What did you expect?\" == failure.exception\n\n # Here it is in the database.\n [record] = self._db.query(CoverageRecord).all()\n assert record == failure\n\n # The coverage provider's timestamp was not updated, because\n # we're using ensure_coverage on a single record.\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n def test_ensure_coverage_transient_coverage_failure(self):\n\n provider = TransientFailureCoverageProvider(self._db)\n failure = provider.ensure_coverage(self.identifier)\n assert [failure] == self.identifier.coverage_records\n assert CoverageRecord.TRANSIENT_FAILURE == failure.status\n assert \"Oops!\" == failure.exception\n\n # Timestamp was not updated.\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n def test_ensure_coverage_changes_status(self):\n \"\"\"Verify that processing an item that has a preexisting\n CoverageRecord can change the status of that CoverageRecord.\n \"\"\"\n always = AlwaysSuccessfulCoverageProvider(self._db)\n persistent = NeverSuccessfulCoverageProvider(self._db)\n transient = TransientFailureCoverageProvider(self._db)\n\n # Cover the same identifier multiple times, simulating all\n # possible states of a CoverageRecord. 
The same CoverageRecord\n # is used every time and the status is changed appropriately\n # after every run.\n c1 = persistent.ensure_coverage(self.identifier, force=True)\n assert CoverageRecord.PERSISTENT_FAILURE == c1.status\n\n c2 = transient.ensure_coverage(self.identifier, force=True)\n assert c2 == c1\n assert CoverageRecord.TRANSIENT_FAILURE == c1.status\n\n c3 = always.ensure_coverage(self.identifier, force=True)\n assert c3 == c1\n assert CoverageRecord.SUCCESS == c1.status\n\n c4 = persistent.ensure_coverage(self.identifier, force=True)\n assert c4 == c1\n assert CoverageRecord.PERSISTENT_FAILURE == c1.status\n\n def test_edition(self):\n \"\"\"Verify that CoverageProvider.edition() returns an appropriate\n Edition, even when there is no associated Collection.\n \"\"\"\n # This CoverageProvider fetches bibliographic information\n # from Overdrive. It is not capable of creating LicensePools\n # because it has no Collection.\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n assert None == provider.collection\n\n # Here's an Identifier, with no Editions.\n identifier = self._identifier(identifier_type=Identifier.OVERDRIVE_ID)\n assert [] == identifier.primarily_identifies\n\n # Calling CoverageProvider.edition() on the Identifier gives\n # us a container for the provider's bibliographic information,\n # as given to us by the provider's data source.\n #\n # It doesn't matter that there's no Collection, because the\n # book's bibliographic information is the same across\n # Collections.\n edition = provider.edition(identifier)\n assert provider.data_source == edition.data_source\n assert [edition] == identifier.primarily_identifies\n\n # Calling edition() again gives us the same Edition as before.\n edition2 = provider.edition(identifier)\n assert edition == edition2\n\n def test_set_metadata(self, bibliographic_data):\n \"\"\"Test that set_metadata can create and populate an\n appropriate Edition.\n\n set_metadata is tested in more detail in\n TestCollectionCoverageProvider.\n \"\"\"\n # Here's a provider that is not associated with any particular\n # Collection.\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n assert None == provider.collection\n\n # It can't set circulation data, because it's not a\n # CollectionCoverageProvider.\n assert not hasattr(provider, \"set_metadata_and_circulationdata\")\n\n # But it can set metadata.\n identifier = self._identifier(\n identifier_type=Identifier.OVERDRIVE_ID,\n foreign_id=bibliographic_data.primary_identifier.identifier,\n )\n assert [] == identifier.primarily_identifies\n result = provider.set_metadata(identifier, bibliographic_data)\n\n # Here's the proof.\n edition = provider.edition(identifier)\n assert \"A Girl Named Disaster\" == edition.title\n\n # If no metadata is passed in, a CoverageFailure results.\n result = provider.set_metadata(identifier, None)\n assert isinstance(result, CoverageFailure)\n assert \"Did not receive metadata from input source\" == result.exception\n\n # If there's an exception setting the metadata, a\n # CoverageFailure results. 
This call raises a ValueError\n # because the primary identifier & the edition's primary\n # identifier don't match.\n bibliographic_data.primary_identifier = IdentifierData(\n type=Identifier.OVERDRIVE_ID, identifier=\"abcd\"\n )\n result = provider.set_metadata(identifier, bibliographic_data)\n assert isinstance(result, CoverageFailure)\n assert \"ValueError\" in result.exception\n\n def test_items_that_need_coverage_respects_registration_reqs(self):\n provider = AlwaysSuccessfulCoverageProvider(self._db, registered_only=True)\n\n items = provider.items_that_need_coverage()\n assert self.identifier not in items\n\n # Once the identifier is registered, it shows up.\n provider.register(self.identifier)\n assert self.identifier in items\n\n # With a failing CoverageRecord, the item shows up.\n [record] = self.identifier.coverage_records\n record.status = CoverageRecord.TRANSIENT_FAILURE\n record.exception = \"Oh no!\"\n assert self.identifier in items\n\n def test_items_that_need_coverage_respects_operation(self):\n\n # Here's a provider that carries out the 'foo' operation.\n class Mock1(AlwaysSuccessfulCoverageProvider):\n OPERATION = \"foo\"\n\n provider = Mock1(self._db)\n\n # Here's a generic CoverageRecord for an identifier.\n record1 = CoverageRecord.add_for(self.identifier, provider.data_source)\n\n # That record doesn't count for purposes of\n # items_that_need_coverage, because the CoverageRecord doesn't\n # have an operation, and the CoverageProvider does.\n assert [self.identifier] == provider.items_that_need_coverage().all()\n\n # Here's a provider that has no operation set.\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n assert None == provider.OPERATION\n\n # For purposes of items_that_need_coverage, the identifier is\n # considered covered, because the operations match.\n assert [] == provider.items_that_need_coverage().all()\n\n def test_run_on_specific_identifiers(self):\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n provider.workset_size = 3\n to_be_tested = [self._identifier() for i in range(6)]\n not_to_be_tested = [self._identifier() for i in range(6)]\n counts, records = provider.run_on_specific_identifiers(to_be_tested)\n\n # Six identifiers were covered in two batches.\n assert (6, 0, 0) == counts\n assert 6 == len(records)\n\n # Only the identifiers in to_be_tested were covered.\n assert all(isinstance(x, CoverageRecord) for x in records)\n assert set(to_be_tested) == set([x.identifier for x in records])\n for i in to_be_tested:\n assert i in provider.attempts\n for i in not_to_be_tested:\n assert i not in provider.attempts\n\n def test_run_on_specific_identifiers_respects_cutoff_time(self):\n\n last_run = datetime_utc(2016, 1, 1)\n\n # Once upon a time we successfully added coverage for\n # self.identifier. But now something has gone wrong, and if we\n # ever run the coverage provider again we will get a\n # persistent failure.\n provider = NeverSuccessfulCoverageProvider(self._db)\n record, ignore = CoverageRecord.add_for(self.identifier, provider.data_source)\n record.timestamp = last_run\n\n # You might think this would result in a persistent failure...\n (\n success,\n transient_failure,\n persistent_failure,\n ), records = provider.run_on_specific_identifiers([self.identifier])\n\n # ...but we get an automatic success. 
We didn't even try to\n # run the coverage provider on self.identifier because the\n # coverage record was up-to-date.\n assert 1 == success\n assert 0 == persistent_failure\n assert [] == records\n\n # But if we move the cutoff time forward, the provider will run\n # on self.identifier and fail.\n provider.cutoff_time = datetime_utc(2016, 2, 1)\n (\n success,\n transient_failure,\n persistent_failure,\n ), records = provider.run_on_specific_identifiers([self.identifier])\n assert 0 == success\n assert 1 == persistent_failure\n\n # The formerly successful CoverageRecord will be updated to\n # reflect the failure.\n assert records[0] == record\n assert \"What did you expect?\" == record.exception\n\n def test_run_never_successful(self):\n \"\"\"Verify that NeverSuccessfulCoverageProvider works the\n way we'd expect.\n \"\"\"\n\n provider = NeverSuccessfulCoverageProvider(self._db)\n\n # We start with no CoverageRecords and no Timestamp.\n assert [] == self._db.query(CoverageRecord).all()\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n provider.run()\n\n # We have a CoverageRecord that signifies failure.\n [record] = self._db.query(CoverageRecord).all()\n assert self.identifier == record.identifier\n assert record.data_source == provider.data_source\n assert \"What did you expect?\" == record.exception\n\n # But the coverage provider did run, and the timestamp is now set to\n # a recent value.\n value = Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n assert (utc_now() - value).total_seconds() < 1\n\n def test_run_transient_failure(self):\n \"\"\"Verify that TransientFailureCoverageProvider works the\n way we'd expect.\n \"\"\"\n\n provider = TransientFailureCoverageProvider(self._db)\n\n # We start with no CoverageRecords and no Timestamp.\n assert [] == self._db.query(CoverageRecord).all()\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n now = utc_now()\n provider.run()\n\n # We have a CoverageRecord representing the transient failure.\n [failure] = self.identifier.coverage_records\n assert CoverageRecord.TRANSIENT_FAILURE == failure.status\n\n # The timestamp was set.\n timestamp = Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n assert (timestamp - now).total_seconds() < 1\n\n def test_add_coverage_record_for(self):\n \"\"\"Calling CollectionCoverageProvider.add_coverage_record is the same\n as calling CoverageRecord.add_for with the relevant\n information.\n \"\"\"\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n identifier = self._identifier()\n record = provider.add_coverage_record_for(identifier)\n\n # This is the same as calling CoverageRecord.add_for with\n # appropriate arguments.\n record2, is_new = CoverageRecord.add_for(\n identifier,\n data_source=provider.data_source,\n operation=provider.operation,\n collection=provider.collection_or_not,\n )\n assert False == is_new\n assert record == record2\n\n # By default, the CoverageRecord is not associated with any\n # particular collection.\n assert None == record.collection\n\n # Setting COVERAGE_COUNTS_FOR_EVERY_COLLECTION to False will\n # change that -- a CoverageRecord will only count for the\n # collection associated with the 
CoverageProvider.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n record = provider.add_coverage_record_for(identifier)\n assert self._default_collection == record.collection\n\n record2, is_new = CoverageRecord.add_for(\n identifier,\n data_source=provider.data_source,\n operation=provider.operation,\n collection=provider.collection_or_not,\n )\n assert False == is_new\n assert record == record2\n\n def test_record_failure_as_coverage_record(self):\n \"\"\"TODO: We need test coverage here.\"\"\"\n\n def test_failure(self):\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n identifier = self._identifier()\n failure = provider.failure(identifier, error=\"an error\", transient=False)\n assert provider.data_source == failure.data_source\n assert \"an error\" == failure.exception\n assert False == failure.transient\n\n # By default, the failure is not associated with any\n # particular collection.\n assert None == failure.collection\n\n # Setting COVERAGE_COUNTS_FOR_EVERY_COLLECTION to False\n # will change that -- a failure will only count for the\n # collection associated with the CoverageProvider.\n provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n failure = provider.failure(identifier, error=\"an error\", transient=False)\n assert self._default_collection == failure.collection\n\n def test_failure_for_ignored_item(self):\n \"\"\"Test that failure_for_ignored_item creates an appropriate\n CoverageFailure.\n \"\"\"\n provider = NeverSuccessfulCoverageProvider(self._db)\n result = provider.failure_for_ignored_item(self.identifier)\n assert isinstance(result, CoverageFailure)\n assert True == result.transient\n assert \"Was ignored by CoverageProvider.\" == result.exception\n assert self.identifier == result.obj\n assert provider.data_source == result.data_source\n\n\nclass TestCollectionCoverageProvider(CoverageProviderTest):\n @pytest.fixture\n def circulation_data(self, bibliographic_data):\n # This data is used to test the insertion of circulation data\n # into a Collection.\n return CirculationData(\n DataSource.OVERDRIVE,\n primary_identifier=bibliographic_data.primary_identifier,\n formats=[\n FormatData(\n content_type=Representation.EPUB_MEDIA_TYPE,\n drm_scheme=DeliveryMechanism.NO_DRM,\n rights_uri=RightsStatus.IN_COPYRIGHT,\n )\n ],\n )\n\n def test_class_variables(self):\n \"\"\"Verify that class variables become appropriate instance\n variables.\n \"\"\"\n collection = self._collection(protocol=ExternalIntegration.OPDS_IMPORT)\n provider = AlwaysSuccessfulCollectionCoverageProvider(collection)\n assert provider.DATA_SOURCE_NAME == provider.data_source.name\n\n def test_must_have_collection(self):\n with pytest.raises(CollectionMissing) as excinfo:\n AlwaysSuccessfulCollectionCoverageProvider(None)\n assert (\n \"AlwaysSuccessfulCollectionCoverageProvider must be instantiated with a Collection.\"\n in str(excinfo.value)\n )\n\n def test_collection_protocol_must_match_class_protocol(self):\n collection = self._collection(protocol=ExternalIntegration.OVERDRIVE)\n with pytest.raises(ValueError) as excinfo:\n AlwaysSuccessfulCollectionCoverageProvider(collection)\n assert (\n \"Collection protocol (Overdrive) does not match CoverageProvider protocol (OPDS Import)\"\n in str(excinfo.value)\n )\n\n def test_items_that_need_coverage_ignores_collection_when_collection_is_irrelevant(\n self,\n ):\n\n # Two providers that do the same work, but one is associated\n # with a collection and the other is not.\n collection_provider = 
AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n no_collection_provider = AlwaysSuccessfulCoverageProvider(self._db)\n\n # This distinction is irrelevant because they both consider an\n # Identifier covered when it has a CoverageRecord not\n # associated with any particular collection.\n assert True == collection_provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION\n assert True == no_collection_provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION\n\n assert collection_provider.data_source == no_collection_provider.data_source\n data_source = collection_provider.data_source\n\n # Create a license pool belonging to the default collection.\n pool = self._licensepool(None, collection=self._default_collection)\n identifier = pool.identifier\n\n def needs():\n \"\"\"Returns all items that need coverage from both test\n CoverageProviders.\n \"\"\"\n return tuple(\n p.items_that_need_coverage().all()\n for p in (collection_provider, no_collection_provider)\n )\n\n # We start out in the state where the identifier appears to need\n # coverage from both CoverageProviders.\n assert ([identifier], [identifier]) == needs()\n\n # Add coverage for the default collection, and both\n # CoverageProviders still consider the identifier\n # uncovered. (This shouldn't happen, but if it does, we don't\n # count it.)\n self._coverage_record(\n identifier, data_source, collection=self._default_collection\n )\n assert ([identifier], [identifier]) == needs()\n\n # Add coverage not associated with any collection, and both\n # CoverageProviders consider it covered.\n self._coverage_record(identifier, data_source, collection=None)\n assert ([], []) == needs()\n\n def test_items_that_need_coverage_respects_collection_when_collection_is_relevant(\n self,\n ):\n\n # Two providers that do the same work, but are associated\n # with different collections.\n collection_1_provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n collection_2 = self._collection()\n collection_2_provider = AlwaysSuccessfulCollectionCoverageProvider(collection_2)\n\n # And one that does the same work but is not associated with\n # any collection.\n no_collection_provider = AlwaysSuccessfulCoverageProvider(self._db)\n\n # The 'collection' distinction is relevant, because these\n # CoverageProviders consider an identifier covered only when\n # it has a CoverageRecord for _their_ collection.\n collection_1_provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n collection_2_provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n no_collection_provider.COVERAGE_COUNTS_FOR_EVERY_COLLECTION = False\n\n assert collection_1_provider.data_source == collection_2_provider.data_source\n data_source = collection_1_provider.data_source\n\n # Create a license pool belonging to the default collection so\n # that its Identifier will show up as needing coverage by the\n # CoverageProvider that manages that collection.\n pool = self._licensepool(None, collection=self._default_collection)\n identifier = pool.identifier\n\n def needs():\n \"\"\"Returns all items that need coverage from both test\n CoverageProviders.\n \"\"\"\n return tuple(\n p.items_that_need_coverage().all()\n for p in (collection_1_provider, no_collection_provider)\n )\n\n # We start out in the state where the identifier needs\n # coverage from the CoverageProvider not associated with\n # any Collection, and the CoverageProvider associated with\n # the Collection where the LicensePool lives.\n #\n assert ([identifier], [identifier]) == needs()\n\n # The 
CoverageProvider associated with a different Collection\n # doesn't care about this Identifier, because its Collection\n # doesn't include that Identifier.\n assert [] == collection_2_provider.items_that_need_coverage().all()\n\n # Add coverage for an irrelevant collection, and nothing happens.\n self._coverage_record(identifier, data_source, collection=self._collection())\n assert ([identifier], [identifier]) == needs()\n\n # Add coverage for a relevant collection, and it's treated as\n # covered by the provider that uses that collection.\n self._coverage_record(\n identifier, data_source, collection=self._default_collection\n )\n assert ([], [identifier]) == needs()\n\n # Add coverage not associated with a collection, and it's\n # treated as covered by the provider not associated with\n # any collection.\n self._coverage_record(identifier, data_source, collection=None)\n assert ([], []) == needs()\n\n def test_replacement_policy(self):\n \"\"\"Unless a different replacement policy is passed in, the\n replacement policy is ReplacementPolicy.from_license_source().\n \"\"\"\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n assert True == provider.replacement_policy.identifiers\n assert True == provider.replacement_policy.formats\n\n policy = ReplacementPolicy.from_metadata_source()\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection, replacement_policy=policy\n )\n assert policy == provider.replacement_policy\n\n def test_all(self):\n \"\"\"Verify that all() gives a sequence of CollectionCoverageProvider\n objects, one for each Collection that implements the\n appropriate protocol.\n \"\"\"\n opds1 = self._collection(protocol=ExternalIntegration.OPDS_IMPORT)\n opds2 = self._collection(protocol=ExternalIntegration.OPDS_IMPORT)\n overdrive = self._collection(protocol=ExternalIntegration.OVERDRIVE)\n providers = list(\n AlwaysSuccessfulCollectionCoverageProvider.all(self._db, batch_size=34)\n )\n\n # The providers were returned in a random order, but there's one\n # for each collection that supports the 'OPDS Import' protocol.\n assert 2 == len(providers)\n collections = set([x.collection for x in providers])\n assert set([opds1, opds2]) == collections\n\n # The providers are of the appropriate type and the keyword arguments\n # passed into all() were propagated to the constructor.\n for provider in providers:\n assert isinstance(provider, AlwaysSuccessfulCollectionCoverageProvider)\n assert 34 == provider.batch_size\n\n def test_set_circulationdata_errors(self):\n \"\"\"Verify that errors when setting circulation data\n are turned into CoverageFailure objects.\n \"\"\"\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n identifier = self._identifier()\n\n # No data.\n failure = provider._set_circulationdata(identifier, None)\n assert \"Did not receive circulationdata from input source\" == failure.exception\n\n # No identifier in CirculationData.\n empty = CirculationData(provider.data_source, primary_identifier=None)\n failure = provider._set_circulationdata(identifier, empty)\n assert (\n \"Identifier did not match CirculationData's primary identifier.\"\n == failure.exception\n )\n\n # Mismatched identifier in CirculationData.\n wrong = CirculationData(\n provider.data_source, primary_identifier=self._identifier()\n )\n failure = provider._set_circulationdata(identifier, wrong)\n assert (\n \"Identifier did not match CirculationData's primary identifier.\"\n == failure.exception\n )\n\n # 
Here, the data is okay, but the ReplacementPolicy is\n # going to cause an error the first time we try to use it.\n correct = CirculationData(provider.data_source, identifier)\n provider.replacement_policy = object()\n failure = provider._set_circulationdata(identifier, correct)\n assert isinstance(failure, CoverageFailure)\n\n # Verify that the general error handling works whether or not\n # the provider is associated with a Collection.\n provider.collection_id = None\n failure = provider._set_circulationdata(identifier, correct)\n assert isinstance(failure, CoverageFailure)\n\n def test_set_metadata_incorporates_replacement_policy(self):\n # Make sure that if a ReplacementPolicy is passed in to\n # set_metadata(), the policy's settings (and those of its\n # .presentation_calculation_policy) are respected.\n #\n # This is tested in this class rather than in\n # TestIdentifierCoverageProvider because with a collection in\n # place we can test a lot more aspects of the ReplacementPolicy.\n\n edition, pool = self._edition(with_license_pool=True)\n identifier = edition.primary_identifier\n\n # All images and open-access content will be fetched through this\n # 'HTTP client'...\n http = DummyHTTPClient()\n http.queue_response(\n 200,\n content=\"I am an epub.\",\n media_type=Representation.EPUB_MEDIA_TYPE,\n )\n\n # ...and will then be uploaded to this 'mirror'.\n mirrors = dict(books_mirror=MockS3Uploader())\n mirror_type = ExternalIntegrationLink.OPEN_ACCESS_BOOKS\n\n class Tripwire(PresentationCalculationPolicy):\n # This class sets a variable if one of its properties is\n # accessed.\n def __init__(self, *args, **kwargs):\n self.tripped = False\n\n def __getattr__(self, name):\n self.tripped = True\n if name.startswith(\"equivalent_identifier_\"):\n # These need to be numbers rather than booleans,\n # but the exact number doesn't matter.\n return 100\n return True\n\n presentation_calculation_policy = Tripwire()\n replacement_policy = ReplacementPolicy(\n mirrors=mirrors,\n http_get=http.do_get,\n presentation_calculation_policy=presentation_calculation_policy,\n )\n\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection, replacement_policy=replacement_policy\n )\n\n metadata = Metadata(provider.data_source, primary_identifier=identifier)\n # We've got a CirculationData object that includes an open-access download.\n link = LinkData(rel=Hyperlink.OPEN_ACCESS_DOWNLOAD, href=\"http://foo.com/\")\n\n # We get an error if the CirculationData's identifier\n # doesn't match what we pass in.\n circulationdata = CirculationData(\n provider.data_source, primary_identifier=self._identifier(), links=[link]\n )\n failure = provider.set_metadata_and_circulation_data(\n identifier, metadata, circulationdata\n )\n assert (\n \"Identifier did not match CirculationData's primary identifier.\"\n == failure.exception\n )\n\n # Otherwise, the data is applied.\n circulationdata = CirculationData(\n provider.data_source,\n primary_identifier=metadata.primary_identifier,\n links=[link],\n )\n\n provider.set_metadata_and_circulation_data(\n identifier, metadata, circulationdata\n )\n\n # The open-access download was 'downloaded' and 'mirrored'.\n [mirrored] = mirrors[mirror_type].uploaded\n assert \"http://foo.com/\" == mirrored.url\n assert mirrored.mirror_url.endswith(\n \"/%s/%s.epub\" % (identifier.identifier, edition.title)\n )\n\n # The book content was removed from the db after it was\n # mirrored successfully.\n assert None == mirrored.content\n\n # Our custom 
PresentationCalculationPolicy was used when\n # determining whether to recalculate the work's\n # presentation. We know this because the tripwire was\n # triggered.\n assert True == presentation_calculation_policy.tripped\n\n def test_items_that_need_coverage(self):\n # Here's an Identifier that was covered on 01/01/2016.\n identifier = self._identifier()\n cutoff_time = datetime_utc(2016, 1, 1)\n provider = AlwaysSuccessfulCoverageProvider(self._db)\n record, is_new = CoverageRecord.add_for(\n identifier, provider.data_source, timestamp=cutoff_time\n )\n\n # Since the Identifier was covered, it doesn't show up in\n # items_that_need_coverage.\n assert [] == provider.items_that_need_coverage().all()\n\n # If we set the CoverageProvider's cutoff_time to the time of\n # coverage, the Identifier is still treated as covered.\n provider = AlwaysSuccessfulCoverageProvider(self._db, cutoff_time=cutoff_time)\n assert [] == provider.items_that_need_coverage().all()\n\n # But if we set the cutoff time to immediately after the time\n # the Identifier was covered...\n one_second_after = cutoff_time + datetime.timedelta(seconds=1)\n provider = AlwaysSuccessfulCoverageProvider(\n self._db, cutoff_time=one_second_after\n )\n\n # The identifier is treated as lacking coverage.\n assert [identifier] == provider.items_that_need_coverage().all()\n\n def test_work(self):\n \"\"\"Verify that a CollectionCoverageProvider can create a Work.\"\"\"\n # Here's a Gutenberg ID.\n identifier = self._identifier(identifier_type=Identifier.GUTENBERG_ID)\n\n # Here's a CollectionCoverageProvider that is associated\n # with an OPDS import-style Collection.\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n\n # This CoverageProvider cannot create a Work for the given\n # Identifier, because that would require creating a\n # LicensePool, and work() won't create a LicensePool if one\n # doesn't already exist.\n result = provider.work(identifier)\n assert isinstance(result, CoverageFailure)\n assert \"Cannot locate LicensePool\" == result.exception\n\n # The CoverageProvider _can_ automatically create a\n # LicensePool, but since there is no Edition associated with\n # the Identifier, a Work still can't be created.\n pool = provider.license_pool(identifier)\n result = provider.work(identifier)\n assert isinstance(result, CoverageFailure)\n assert \"Work could not be calculated\" == result.exception\n\n # So let's use the CoverageProvider to create an Edition\n # with minimal bibliographic information.\n edition = provider.edition(identifier)\n edition.title = \"A title\"\n\n # Now we can create a Work.\n work = provider.work(identifier)\n assert isinstance(work, Work)\n assert \"A title\" == work.title\n\n # If necessary, we can tell work() to use a specific\n # LicensePool when calculating the Work. 
This is an extreme\n # example in which the LicensePool to use has a different\n # Identifier (identifier2) than the Identifier we're\n # processing (identifier).\n #\n # In a real case, this would be used by a CoverageProvider\n # that just had to create a LicensePool using an\n # INTERNAL_PROCESSING DataSource rather than the DataSource\n # associated with the CoverageProvider.\n identifier2 = self._identifier()\n identifier.licensed_through = []\n collection2 = self._collection()\n edition2 = self._edition(\n identifier_type=identifier2.type, identifier_id=identifier2.identifier\n )\n pool2 = self._licensepool(edition=edition2, collection=collection2)\n work2 = provider.work(identifier, pool2)\n assert work2 != work\n assert [pool2] == work2.license_pools\n\n # Once an identifier has a work associated with it,\n # that's always the one that's used, and the value of license_pool\n # is ignored.\n work3 = provider.work(identifier2, object())\n assert work2 == work3\n\n # Any keyword arguments passed into work() are propagated to\n # calculate_work(). This lets us (e.g.) create a Work even\n # when there is no title.\n edition, pool = self._edition(with_license_pool=True)\n edition.title = None\n work = provider.work(pool.identifier, pool, even_if_no_title=True)\n assert isinstance(work, Work)\n assert None == work.title\n\n # If a work exists but is not presentation-ready,\n # CollectionCoverageProvider.work() will call calculate_work()\n # in an attempt to fix it.\n edition.title = \"Finally a title\"\n work2 = provider.work(pool.identifier, pool)\n assert work2 == work\n assert \"Finally a title\" == work.title\n assert True == work.presentation_ready\n\n # Once the work is presentation_ready, calling\n # CollectionCoverageProvider.work() will no longer call\n # calculate_work() -- it will just return the work.\n def explode():\n raise Exception(\"don't call me!\")\n\n pool.calculate_work = explode\n work2 = provider.work(pool.identifier, pool)\n assert work2 == work\n\n def test_set_metadata_and_circulationdata(\n self, bibliographic_data, circulation_data\n ):\n \"\"\"Verify that a CollectionCoverageProvider can set both\n metadata (on an Edition) and circulation data (on a LicensePool).\n \"\"\"\n # Here's an Overdrive Identifier to work with.\n identifier = self._identifier(\n identifier_type=Identifier.OVERDRIVE_ID,\n foreign_id=bibliographic_data.primary_identifier.identifier,\n )\n\n # Here's a CollectionCoverageProvider that is associated with\n # an Overdrive-type Collection. (We have to subclass and talk\n # about Overdrive because bibliographic_data and\n # circulation_data describe an Overdrive book.)\n class OverdriveProvider(AlwaysSuccessfulCollectionCoverageProvider):\n DATA_SOURCE_NAME = DataSource.OVERDRIVE\n PROTOCOL = ExternalIntegration.OVERDRIVE\n IDENTIFIER_TYPES = Identifier.OVERDRIVE_ID\n\n collection = self._collection(protocol=ExternalIntegration.OVERDRIVE)\n provider = OverdriveProvider(collection)\n\n # We get a CoverageFailure if we don't pass in any data at all.\n result = provider.set_metadata_and_circulation_data(identifier, None, None)\n assert isinstance(result, CoverageFailure)\n assert (\n \"Received neither metadata nor circulation data from input source\"\n == result.exception\n )\n\n # We get a CoverageFailure if no work can be created. 
In this\n # case, that happens because the metadata doesn't provide a\n # title.\n old_title = bibliographic_data.title\n bibliographic_data.title = None\n result = provider.set_metadata_and_circulation_data(\n identifier, bibliographic_data, circulation_data\n )\n assert isinstance(result, CoverageFailure)\n assert \"Work could not be calculated\" == result.exception\n\n # Restore the title and try again. This time it will work.\n bibliographic_data.title = old_title\n result = provider.set_metadata_and_circulation_data(\n identifier, bibliographic_data, circulation_data\n )\n assert result == identifier\n\n # An Edition was created to hold the metadata, a LicensePool\n # was created to hold the circulation data, and a Work\n # was created to bind everything together.\n [edition] = identifier.primarily_identifies\n assert \"A Girl Named Disaster\" == edition.title\n [pool] = identifier.licensed_through\n work = identifier.work\n assert work == pool.work\n\n # CoverageProviders that offer bibliographic information\n # typically don't have circulation information in the sense of\n # 'how many copies are in this Collection?', but sometimes\n # they do have circulation information in the sense of 'what\n # formats are available?'\n [lpdm] = pool.delivery_mechanisms\n mechanism = lpdm.delivery_mechanism\n assert \"application/epub+zip (DRM-free)\" == mechanism.name\n\n # If there's an exception setting the metadata, a\n # CoverageFailure results. This call raises a ValueError\n # because the identifier we're trying to cover doesn't match\n # the identifier found in the Metadata object.\n old_identifier = bibliographic_data.primary_identifier\n bibliographic_data.primary_identifier = IdentifierData(\n type=Identifier.OVERDRIVE_ID, identifier=\"abcd\"\n )\n result = provider.set_metadata_and_circulation_data(\n identifier, bibliographic_data, circulation_data\n )\n assert isinstance(result, CoverageFailure)\n assert \"ValueError\" in result.exception\n bibliographic_data.primary_identifier = old_identifier\n\n def test_autocreate_licensepool(self):\n \"\"\"A CollectionCoverageProvider can locate (or, if necessary, create)\n a LicensePool for an identifier.\n \"\"\"\n identifier = self._identifier()\n assert [] == identifier.licensed_through\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n pool = provider.license_pool(identifier)\n assert [pool] == identifier.licensed_through\n assert pool.data_source == provider.data_source\n assert pool.identifier == identifier\n assert pool.collection == provider.collection\n\n # Calling license_pool again finds the same LicensePool\n # as before.\n pool2 = provider.license_pool(identifier)\n assert pool == pool2\n\n # It's possible for a CollectionCoverageProvider to create a\n # LicensePool for a different DataSource than the one\n # associated with the Collection. 
Only the metadata wrangler\n # needs to do this -- it's so a CoverageProvider for a\n # third-party DataSource can create an 'Internal Processing'\n # LicensePool when some other part of the metadata wrangler\n # failed to do this earlier.\n\n # If a working pool already exists, it's returned and no new\n # pool is created.\n same_pool = provider.license_pool(identifier, DataSource.INTERNAL_PROCESSING)\n assert same_pool == pool2\n assert provider.data_source == same_pool.data_source\n\n # A new pool is only created if no working pool can be found.\n identifier2 = self._identifier()\n new_pool = provider.license_pool(identifier2, DataSource.INTERNAL_PROCESSING)\n assert new_pool.data_source.name == DataSource.INTERNAL_PROCESSING\n assert new_pool.identifier == identifier2\n assert new_pool.collection == provider.collection\n\n def test_set_presentation_ready(self):\n \"\"\"Test that a CollectionCoverageProvider can set a Work\n as presentation-ready.\n \"\"\"\n identifier = self._identifier()\n provider = AlwaysSuccessfulCollectionCoverageProvider(self._default_collection)\n\n # If there is no LicensePool for the Identifier,\n # set_presentation_ready will not try to create one,\n # and so no Work will be created.\n result = provider.set_presentation_ready(identifier)\n assert isinstance(result, CoverageFailure)\n assert \"Cannot locate LicensePool\" == result.exception\n\n # Once a LicensePool and a suitable Edition exist,\n # set_presentation_ready will create a Work for the item and\n # mark it presentation ready.\n pool = provider.license_pool(identifier)\n edition = provider.edition(identifier)\n edition.title = \"A title\"\n result = provider.set_presentation_ready(identifier)\n assert result == identifier\n assert True == pool.work.presentation_ready\n\n\nclass TestCatalogCoverageProvider(CoverageProviderTest):\n def test_items_that_need_coverage(self):\n\n c1 = self._collection()\n c2 = self._collection()\n\n i1 = self._identifier()\n c1.catalog_identifier(i1)\n\n i2 = self._identifier()\n c2.catalog_identifier(i2)\n\n i3 = self._identifier()\n\n # This Identifier is licensed through the Collection c1, but\n # it's not in the catalog--catalogs are used for different\n # things.\n edition, lp = self._edition(with_license_pool=True, collection=c1)\n\n # We have four identifiers, but only i1 shows up, because\n # it's the only one in c1's catalog.\n class Provider(CatalogCoverageProvider):\n SERVICE_NAME = \"test\"\n DATA_SOURCE_NAME = DataSource.OVERDRIVE\n\n provider = Provider(c1)\n assert [i1] == provider.items_that_need_coverage().all()\n\n\nclass TestBibliographicCoverageProvider(CoverageProviderTest):\n \"\"\"Test the features specific to BibliographicCoverageProvider.\"\"\"\n\n def setup_method(self):\n super(TestBibliographicCoverageProvider, self).setup_method()\n self.work = self._work(with_license_pool=True, with_open_access_download=True)\n self.work.presentation_ready = False\n [self.pool] = self.work.license_pools\n self.identifier = self.pool.identifier\n\n def test_work_set_presentation_ready_on_success(self):\n # When a Work is successfully run through a\n # BibliographicCoverageProvider, it's set as presentation-ready.\n provider = AlwaysSuccessfulBibliographicCoverageProvider(self.pool.collection)\n [result] = provider.process_batch([self.identifier])\n assert result == self.identifier\n assert True == self.work.presentation_ready\n\n # ensure_coverage does the same thing.\n self.work.presentation_ready = False\n result = provider.ensure_coverage(self.identifier)\n 
assert isinstance(result, CoverageRecord)\n assert result.identifier == self.identifier\n assert True == self.work.presentation_ready\n\n def test_failure_does_not_set_work_presentation_ready(self):\n \"\"\"A Work is not set as presentation-ready except on success.\"\"\"\n\n provider = NeverSuccessfulBibliographicCoverageProvider(self.pool.collection)\n result = provider.ensure_coverage(self.identifier)\n assert CoverageRecord.TRANSIENT_FAILURE == result.status\n assert False == self.work.presentation_ready\n\n\nclass TestWorkCoverageProvider(DatabaseTest):\n def setup_method(self):\n super(TestWorkCoverageProvider, self).setup_method()\n self.work = self._work()\n\n def test_success(self):\n class MockProvider(AlwaysSuccessfulWorkCoverageProvider):\n OPERATION = \"the_operation\"\n\n qu = self._db.query(WorkCoverageRecord).filter(\n WorkCoverageRecord.operation == MockProvider.OPERATION\n )\n provider = MockProvider(self._db)\n\n # We start with no relevant WorkCoverageRecord and no Timestamp.\n assert [] == qu.all()\n assert None == Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n\n now = utc_now()\n provider.run()\n\n # There is now one relevant WorkCoverageRecord, for our single work.\n [record] = qu.all()\n assert self.work == record.work\n assert provider.operation == record.operation\n\n # The timestamp is now set.\n timestamp = Timestamp.value(\n self._db,\n provider.service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n assert (timestamp - now).total_seconds() < 1\n\n def test_transient_failure(self):\n class MockProvider(TransientFailureWorkCoverageProvider):\n OPERATION = \"the_operation\"\n\n provider = MockProvider(self._db)\n\n # We start with no relevant WorkCoverageRecords.\n qu = self._db.query(WorkCoverageRecord).filter(\n WorkCoverageRecord.operation == provider.operation\n )\n assert [] == qu.all()\n\n provider.run()\n\n # We now have a CoverageRecord for the transient failure.\n [failure] = [\n x for x in self.work.coverage_records if x.operation == provider.operation\n ]\n assert CoverageRecord.TRANSIENT_FAILURE == failure.status\n\n # The timestamp is now set to a recent value.\n service_name = \"Never successful (transient, works) (the_operation)\"\n value = Timestamp.value(\n self._db,\n service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n assert (utc_now() - value).total_seconds() < 2\n\n def test_persistent_failure(self):\n class MockProvider(NeverSuccessfulWorkCoverageProvider):\n OPERATION = \"the_operation\"\n\n provider = MockProvider(self._db)\n\n # We start with no relevant WorkCoverageRecords.\n qu = self._db.query(WorkCoverageRecord).filter(\n WorkCoverageRecord.operation == provider.operation\n )\n assert [] == qu.all()\n\n provider.run()\n\n # We have a WorkCoverageRecord, since the error was persistent.\n [record] = qu.all()\n assert self.work == record.work\n assert \"What did you expect?\" == record.exception\n\n # The timestamp is now set to a recent value.\n service_name = \"Never successful (works) (the_operation)\"\n value = Timestamp.value(\n self._db,\n service_name,\n service_type=Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=None,\n )\n assert (utc_now() - value).total_seconds() < 2\n\n def test_items_that_need_coverage(self):\n # Here's a WorkCoverageProvider.\n provider = AlwaysSuccessfulWorkCoverageProvider(self._db)\n\n # Here are three works,\n w1 = self.work\n w2 = 
self._work(with_license_pool=True)\n w3 = self._work(with_license_pool=True)\n\n # w2 has coverage, the other two do not.\n record = self._work_coverage_record(w2, provider.operation)\n\n # By default, items_that_need_coverage returns the two\n # works that don't have coverage.\n assert set([w1, w3]) == set(provider.items_that_need_coverage().all())\n\n # If we pass in a list of Identifiers we further restrict\n # items_that_need_coverage to Works whose LicensePools have an\n # Identifier in that list.\n i2 = w2.license_pools[0].identifier\n i3 = w3.license_pools[0].identifier\n assert [w3] == provider.items_that_need_coverage([i2, i3]).all()\n\n # If we set a cutoff_time which is after the time the\n # WorkCoverageRecord was created, then that work starts\n # showing up again as needing coverage.\n provider.cutoff_time = record.timestamp + datetime.timedelta(seconds=1)\n assert set([w2, w3]) == set(provider.items_that_need_coverage([i2, i3]).all())\n\n def test_failure_for_ignored_item(self):\n class MockProvider(NeverSuccessfulWorkCoverageProvider):\n OPERATION = \"the_operation\"\n\n provider = NeverSuccessfulWorkCoverageProvider(self._db)\n result = provider.failure_for_ignored_item(self.work)\n assert isinstance(result, CoverageFailure)\n assert True == result.transient\n assert \"Was ignored by WorkCoverageProvider.\" == result.exception\n assert self.work == result.obj\n\n def test_add_coverage_record_for(self):\n \"\"\"TODO: We have coverage of code that calls this method,\n but not the method itself.\n \"\"\"\n\n def test_record_failure_as_coverage_record(self):\n \"\"\"TODO: We have coverage of code that calls this method,\n but not the method itself.\n \"\"\"\n\n\nclass TestPresentationReadyWorkCoverageProvider(DatabaseTest):\n def test_items_that_need_coverage(self):\n class Mock(PresentationReadyWorkCoverageProvider):\n SERVICE_NAME = \"mock\"\n\n provider = Mock(self._db)\n work = self._work()\n\n # The work is not presentation ready and so is not ready for\n # coverage.\n assert False == work.presentation_ready\n assert [] == provider.items_that_need_coverage().all()\n\n # Make it presentation ready, and it needs coverage.\n work.presentation_ready = True\n assert [work] == provider.items_that_need_coverage().all()\n\n\nclass MockWork(object):\n \"\"\"A Work-like object that keeps track of the policy that was used\n to recalculate its presentation.\n \"\"\"\n\n def calculate_presentation(self, policy):\n self.calculate_presentation_called_with = policy\n\n\nclass TestWorkPresentationEditionCoverageProvider(DatabaseTest):\n def test_process_item(self):\n work = MockWork()\n provider = WorkPresentationEditionCoverageProvider(self._db)\n provider.process_item(work)\n\n policy = work.calculate_presentation_called_with\n\n # Verify that the policy is configured correctly. 
It does\n # all the work that's not expensive.\n assert all(\n [\n policy.choose_edition,\n policy.set_edition_metadata,\n policy.choose_cover,\n policy.regenerate_opds_entries,\n policy.update_search_index,\n ]\n )\n assert not any(\n [policy.classify, policy.choose_summary, policy.calculate_quality]\n )\n\n\nclass TestWorkClassificationCoverageProvider(DatabaseTest):\n def test_process_item(self):\n work = MockWork()\n provider = WorkClassificationCoverageProvider(self._db)\n provider.process_item(work)\n\n # This coverage provider does all the work, even the expensive\n # work.\n policy = work.calculate_presentation_called_with\n assert all(\n [\n policy.choose_edition,\n policy.set_edition_metadata,\n policy.choose_cover,\n policy.regenerate_opds_entries,\n policy.update_search_index,\n policy.classify,\n policy.choose_summary,\n policy.calculate_quality,\n ]\n )\n\n\nclass TestOPDSEntryWorkCoverageProvider(DatabaseTest):\n def test_run(self):\n\n provider = OPDSEntryWorkCoverageProvider(self._db)\n work = self._work()\n work.simple_opds_entry = \"old junk\"\n work.verbose_opds_entry = \"old long junk\"\n\n # The work is not presentation-ready, so nothing happens.\n provider.run()\n assert \"old junk\" == work.simple_opds_entry\n assert \"old long junk\" == work.verbose_opds_entry\n\n # The work is presentation-ready, so its OPDS entries are\n # regenerated.\n work.presentation_ready = True\n provider.run()\n assert work.simple_opds_entry.startswith(\"<entry\")\n assert work.verbose_opds_entry.startswith(\"<entry\")\n\n\nclass TestMARCRecordWorkCoverageProvider(DatabaseTest):\n def test_run(self):\n\n provider = MARCRecordWorkCoverageProvider(self._db)\n work = self._work(with_license_pool=True)\n work.marc_record = \"old junk\"\n work.presentation_ready = False\n\n # The work is not presentation-ready, so nothing happens.\n provider.run()\n assert \"old junk\" == work.marc_record\n\n # The work is presentation-ready, so its MARC record is\n # regenerated.\n work.presentation_ready = True\n provider.run()\n assert work.title in work.marc_record\n assert \"online resource\" in work.marc_record\n", "id": "7217767", "language": "Python", "matching_score": 8.817548751831055, "max_stars_count": 0, "path": "tests/core/test_coverage.py" }, { "content": "import logging\nimport traceback\n\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.functions import func\n\nfrom . 
import log # This sets the appropriate log format.\nfrom .metadata_layer import ReplacementPolicy, TimestampData\nfrom .model import (\n BaseCoverageRecord,\n Collection,\n CollectionMissing,\n CoverageRecord,\n DataSource,\n Edition,\n Identifier,\n LicensePool,\n PresentationCalculationPolicy,\n Timestamp,\n Work,\n WorkCoverageRecord,\n get_one,\n)\nfrom .util.datetime_helpers import utc_now\nfrom .util.worker_pools import DatabaseJob\n\n\nclass CoverageFailure(object):\n \"\"\"Object representing the failure to provide coverage.\"\"\"\n\n def __init__(\n self, obj, exception, data_source=None, transient=True, collection=None\n ):\n self.obj = obj\n self.data_source = data_source\n self.exception = exception\n self.transient = transient\n self.collection = collection\n\n def __repr__(self):\n if self.data_source:\n data_source = self.data_source.name\n else:\n data_source = None\n return \"<CoverageFailure: obj=%r data_source=%r transient=%r exception=%r>\" % (\n self.obj,\n data_source,\n self.transient,\n self.exception,\n )\n\n def to_coverage_record(self, operation=None):\n \"\"\"Convert this failure into a CoverageRecord.\"\"\"\n if not self.data_source:\n raise Exception(\n \"Cannot convert coverage failure to CoverageRecord because it has no output source.\"\n )\n\n record, ignore = CoverageRecord.add_for(\n self.obj, self.data_source, operation=operation, collection=self.collection\n )\n record.exception = self.exception\n if self.transient:\n record.status = CoverageRecord.TRANSIENT_FAILURE\n else:\n record.status = CoverageRecord.PERSISTENT_FAILURE\n return record\n\n def to_work_coverage_record(self, operation):\n \"\"\"Convert this failure into a WorkCoverageRecord.\"\"\"\n record, ignore = WorkCoverageRecord.add_for(self.obj, operation=operation)\n record.exception = self.exception\n if self.transient:\n record.status = CoverageRecord.TRANSIENT_FAILURE\n else:\n record.status = CoverageRecord.PERSISTENT_FAILURE\n return record\n\n\nclass CoverageProviderProgress(TimestampData):\n\n \"\"\"A TimestampData optimized for the special needs of\n CoverageProviders.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(CoverageProviderProgress, self).__init__(*args, **kwargs)\n\n # The offset is distinct from the counter, in that it's not written\n # to the database -- it's used to track state that's necessary within\n # a single run of the CoverageProvider.\n self.offset = 0\n\n self.successes = 0\n self.transient_failures = 0\n self.persistent_failures = 0\n\n @property\n def achievements(self):\n \"\"\"Represent the achievements of a CoverageProvider as a\n human-readable string.\n \"\"\"\n template = \"Items processed: %d. Successes: %d, transient failures: %d, persistent failures: %d\"\n total = self.successes + self.transient_failures + self.persistent_failures\n return template % (\n total,\n self.successes,\n self.transient_failures,\n self.persistent_failures,\n )\n\n @achievements.setter\n def achievements(self, value):\n # It's not possible to set .achievements directly. Do nothing.\n pass\n\n\nclass BaseCoverageProvider(object):\n\n \"\"\"Run certain objects through an algorithm. If the algorithm returns\n success, add a coverage record for that object, so the object\n doesn't need to be processed again. 
If the algorithm returns a\n CoverageFailure, that failure may itself be memorialized as a\n coverage record.\n\n Instead of instantiating this class directly, subclass one of its\n subclasses: either IdentifierCoverageProvider or\n WorkCoverageProvider.\n\n In IdentifierCoverageProvider the 'objects' are Identifier objects\n and the coverage records are CoverageRecord objects. In\n WorkCoverageProvider the 'objects' are Work objects and the\n coverage records are WorkCoverageRecord objects.\n \"\"\"\n\n # In your subclass, set this to the name of the service,\n # e.g. \"Overdrive Bibliographic Coverage Provider\".\n SERVICE_NAME = None\n\n # In your subclass, you _may_ set this to a string that distinguishes\n # two different CoverageProviders from the same data source.\n # (You may also override the operation method, if you need\n # database access to determine which operation to use.)\n OPERATION = None\n\n # The database session will be committed each time the\n # BaseCoverageProvider has (attempted to) provide coverage to this\n # number of Identifiers. You may change this in your subclass.\n # It's also possible to change it by passing in a value for\n # `batch_size` in the constructor, but generally nobody bothers\n # doing this.\n DEFAULT_BATCH_SIZE = 100\n\n def __init__(\n self,\n _db,\n batch_size=None,\n cutoff_time=None,\n registered_only=False,\n ):\n \"\"\"Constructor.\n\n :param batch_size: The maximum number of objects that will be processed\n at once.\n\n :param cutoff_time: Coverage records created before this time\n will be treated as though they did not exist.\n\n :param registered_only: Optional. Determines whether this\n CoverageProvider will only cover items that already have been\n \"preregistered\" with a CoverageRecord with a registered or failing\n status. 
This option is only used on the Metadata Wrangler.\n \"\"\"\n self._db = _db\n if not self.__class__.SERVICE_NAME:\n raise ValueError(\"%s must define SERVICE_NAME.\" % self.__class__.__name__)\n service_name = self.__class__.SERVICE_NAME\n operation = self.operation\n if operation:\n service_name += \" (%s)\" % operation\n self.service_name = service_name\n if not batch_size or batch_size < 0:\n batch_size = self.DEFAULT_BATCH_SIZE\n self.batch_size = batch_size\n self.cutoff_time = cutoff_time\n self.registered_only = registered_only\n self.collection_id = None\n\n @property\n def log(self):\n if not hasattr(self, \"_log\"):\n self._log = logging.getLogger(self.service_name)\n return self._log\n\n @property\n def collection(self):\n \"\"\"Retrieve the Collection object associated with this\n CoverageProvider.\n \"\"\"\n if not self.collection_id:\n return None\n return get_one(self._db, Collection, id=self.collection_id)\n\n @property\n def operation(self):\n \"\"\"Which operation should this CoverageProvider use to\n distinguish between multiple CoverageRecords from the same data\n source?\n \"\"\"\n return self.OPERATION\n\n def run(self):\n start = utc_now()\n result = self.run_once_and_update_timestamp()\n\n result = result or CoverageProviderProgress()\n self.finalize_timestampdata(result, start=start)\n return result\n\n def run_once_and_update_timestamp(self):\n # First prioritize items that have never had a coverage attempt before.\n # Then cover items that failed with a transient failure on a\n # previous attempt.\n covered_status_lists = [\n BaseCoverageRecord.PREVIOUSLY_ATTEMPTED,\n BaseCoverageRecord.DEFAULT_COUNT_AS_COVERED,\n ]\n start_time = utc_now()\n timestamp = self.timestamp\n\n # We'll use this TimestampData object to track our progress\n # as we grant coverage to items.\n progress = CoverageProviderProgress(start=start_time)\n\n for covered_statuses in covered_status_lists:\n # We may have completed our work for the previous value of\n # covered_statuses, but there's more work to do. Unset the\n # 'finish' date to guarantee that progress.is_complete\n # starts out False.\n #\n # Also set the offset to zero to ensure that we always start\n # at the start of the database table.\n original_finish = progress.finish = None\n progress.offset = 0\n\n # Call run_once() until we get an exception or\n # progress.finish is set.\n while not progress.is_complete:\n try:\n new_progress = self.run_once(\n progress, count_as_covered=covered_statuses\n )\n # run_once can either return a new\n # CoverageProviderProgress object, or modify\n # in-place the one it was passed.\n if new_progress is not None:\n progress = new_progress\n except Exception as e:\n logging.error(\n \"CoverageProvider %s raised uncaught exception.\",\n self.service_name,\n exc_info=e,\n )\n progress.exception = traceback.format_exc()\n progress.finish = utc_now()\n\n # The next run_once() call might raise an exception,\n # so let's write the work to the database as it's\n # done.\n original_finish = progress.finish\n self.finalize_timestampdata(progress)\n\n # That wrote a value for progress.finish to the\n # database, which is fine, but we don't necessarily\n # want that value for progress.finish to stand. 
It\n # might incorrectly make progress.is_complete appear\n # to be True, making us exit the loop before we mean\n # to.\n if not progress.exception:\n progress.finish = original_finish\n\n # TODO: We should be able to return a list of progress objects,\n # not just one.\n return progress\n\n @property\n def timestamp(self):\n \"\"\"Look up the Timestamp object for this CoverageProvider.\"\"\"\n return Timestamp.lookup(\n self._db,\n self.service_name,\n Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=self.collection,\n )\n\n def finalize_timestampdata(self, timestamp, **kwargs):\n \"\"\"Finalize the given TimestampData and write it to the\n database.\n \"\"\"\n timestamp.finalize(\n self.service_name,\n Timestamp.COVERAGE_PROVIDER_TYPE,\n collection=self.collection,\n **kwargs\n )\n timestamp.apply(self._db)\n self._db.commit()\n\n def run_once(self, progress, count_as_covered=None):\n \"\"\"Try to grant coverage to a number of uncovered items.\n\n NOTE: If you override this method, it's very important that\n your implementation eventually do one of the following:\n * Set progress.finish\n * Set progress.exception\n * Raise an exception\n\n If you don't do any of these things, run() will assume you still\n have work to do, and will keep calling run_once() forever.\n\n :param progress: A CoverageProviderProgress representing the\n progress made so far, and the number of records that\n need to be ignored for the rest of the run.\n\n :param count_as_covered: Which values for CoverageRecord.status\n should count as meaning 'already covered'.\n\n :return: A CoverageProviderProgress representing whatever\n additional progress has been made.\n \"\"\"\n count_as_covered = (\n count_as_covered or BaseCoverageRecord.DEFAULT_COUNT_AS_COVERED\n )\n # Make it clear which class of items we're covering on this\n # run.\n count_as_covered_message = \" (counting %s as covered)\" % (\n \", \".join(count_as_covered)\n )\n\n qu = self.items_that_need_coverage(count_as_covered=count_as_covered)\n self.log.info(\"%d items need coverage%s\", qu.count(), count_as_covered_message)\n batch = qu.limit(self.batch_size).offset(progress.offset)\n\n if not batch.count():\n # The batch is empty. 
We're done.\n progress.finish = utc_now()\n return progress\n\n (\n successes,\n transient_failures,\n persistent_failures,\n ), results = self.process_batch_and_handle_results(batch)\n\n # Update the running totals so that the service's eventual timestamp\n # will have a useful .achievements.\n progress.successes += successes\n progress.transient_failures += transient_failures\n progress.persistent_failures += persistent_failures\n\n if BaseCoverageRecord.SUCCESS not in count_as_covered:\n # If any successes happened in this batch, increase the\n # offset to ignore them, or they will just show up again\n # the next time we run this batch.\n progress.offset += successes\n\n if BaseCoverageRecord.TRANSIENT_FAILURE not in count_as_covered:\n # If any transient failures happened in this batch,\n # increase the offset to ignore them, or they will\n # just show up again the next time we run this batch.\n progress.offset += transient_failures\n\n if BaseCoverageRecord.PERSISTENT_FAILURE not in count_as_covered:\n # If any persistent failures happened in this batch,\n # increase the offset to ignore them, or they will\n # just show up again the next time we run this batch.\n progress.offset += persistent_failures\n\n return progress\n\n def process_batch_and_handle_results(self, batch):\n \"\"\":return: A 2-tuple (counts, records).\n\n `counts` is a 3-tuple (successes, transient failures,\n persistent_failures).\n\n `records` is a mixed list of coverage record objects (for\n successes and persistent failures) and CoverageFailure objects\n (for transient failures).\n \"\"\"\n\n # Batch is a query that may not be ordered, so it may return\n # different results when executed multiple times. Converting to\n # a list ensures that all subsequent code will run on the same items.\n batch = list(batch)\n\n offset_increment = 0\n results = self.process_batch(batch)\n successes = 0\n transient_failures = 0\n persistent_failures = 0\n num_ignored = 0\n records = []\n\n unhandled_items = set(batch)\n success_items = []\n for item in results:\n if isinstance(item, CoverageFailure):\n if item.obj in unhandled_items:\n unhandled_items.remove(item.obj)\n record = self.record_failure_as_coverage_record(item)\n if item.transient:\n self.log.warning(\n \"Transient failure covering %r: %s\", item.obj, item.exception\n )\n record.status = BaseCoverageRecord.TRANSIENT_FAILURE\n transient_failures += 1\n else:\n self.log.error(\n \"Persistent failure covering %r: %s\", item.obj, item.exception\n )\n record.status = BaseCoverageRecord.PERSISTENT_FAILURE\n persistent_failures += 1\n records.append(record)\n else:\n # Count this as a success and prepare to add a\n # coverage record for it. It won't show up anymore, on\n # this run or subsequent runs.\n if item in unhandled_items:\n unhandled_items.remove(item)\n successes += 1\n success_items.append(item)\n\n records.extend(self.add_coverage_records_for(success_items))\n\n # Perhaps some records were ignored--they neither succeeded nor\n # failed. 
Treat them as transient failures.\n for item in unhandled_items:\n self.log.warning(\n \"%r was ignored by a coverage provider that was supposed to cover it.\",\n item,\n )\n failure = self.failure_for_ignored_item(item)\n record = self.record_failure_as_coverage_record(failure)\n record.status = BaseCoverageRecord.TRANSIENT_FAILURE\n records.append(record)\n num_ignored += 1\n\n self.log.info(\n \"Batch processed with %d successes, %d transient failures, %d persistent failures, %d ignored.\",\n successes,\n transient_failures,\n persistent_failures,\n num_ignored,\n )\n\n # Finalize this batch before moving on to the next one.\n self.finalize_batch()\n\n # For all purposes outside this method, treat an ignored identifier\n # as a transient failure.\n transient_failures += num_ignored\n\n return (successes, transient_failures, persistent_failures), records\n\n def process_batch(self, batch):\n \"\"\"Do what it takes to give coverage records to a batch of\n items.\n\n :return: A mixed list of coverage records and CoverageFailures.\n \"\"\"\n results = []\n for item in batch:\n result = self.process_item(item)\n if not isinstance(result, CoverageFailure):\n self.handle_success(item)\n results.append(result)\n return results\n\n def add_coverage_records_for(self, items):\n \"\"\"Add CoverageRecords for a group of items from a batch,\n each of which was successful.\n \"\"\"\n return [self.add_coverage_record_for(item) for item in items]\n\n def handle_success(self, item):\n \"\"\"Do something special to mark the successful coverage of the\n given item.\n \"\"\"\n\n def should_update(self, coverage_record):\n \"\"\"Should we do the work to update the given coverage record?\"\"\"\n if coverage_record is None:\n # An easy decision -- there is no existing coverage record,\n # so we need to do the work.\n return True\n\n if coverage_record.status == BaseCoverageRecord.REGISTERED:\n # There's a CoverageRecord, but coverage hasn't actually\n # been attempted. Try to get covered.\n return True\n\n if self.cutoff_time is None:\n # An easy decision -- without a cutoff_time, once we\n # create a coverage record we never update it.\n return False\n\n # We update a coverage record if it was last updated before\n # cutoff_time.\n return coverage_record.timestamp < self.cutoff_time\n\n def finalize_batch(self):\n \"\"\"Do whatever is necessary to complete this batch before moving on to\n the next one.\n\n e.g. committing the database session or uploading a bunch of\n assets to S3.\n \"\"\"\n self._db.commit()\n\n #\n # Subclasses must implement these virtual methods.\n #\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n \"\"\"Create a database query returning only those items that\n need coverage.\n\n :param subset: A list of Identifier objects. 
If present, return\n only items that need coverage *and* are associated with one\n of these identifiers.\n\n Implemented in CoverageProvider and WorkCoverageProvider.\n \"\"\"\n raise NotImplementedError()\n\n def add_coverage_record_for(self, item):\n \"\"\"Add a coverage record for the given item.\n\n Implemented in IdentifierCoverageProvider and WorkCoverageProvider.\n \"\"\"\n raise NotImplementedError()\n\n def record_failure_as_coverage_record(self, failure):\n \"\"\"Convert the given CoverageFailure to a coverage record.\n\n Implemented in IdentifierCoverageProvider and WorkCoverageProvider.\n \"\"\"\n raise NotImplementedError()\n\n def failure_for_ignored_item(self, work):\n \"\"\"Create a CoverageFailure recording the coverage provider's\n failure to even try to process an item.\n\n Implemented in IdentifierCoverageProvider and\n WorkCoverageProvider.\n \"\"\"\n raise NotImplementedError()\n\n def process_item(self, item):\n \"\"\"Do the work necessary to give coverage to one specific item.\n\n Since this is where the actual work happens, this is not\n implemented in IdentifierCoverageProvider or\n WorkCoverageProvider, and must be handled in a subclass.\n \"\"\"\n raise NotImplementedError()\n\n\nclass IdentifierCoverageProvider(BaseCoverageProvider):\n\n \"\"\"Run Identifiers of certain types (ISBN, Overdrive, OCLC Number,\n etc.) through an algorithm associated with a certain DataSource.\n\n This class is designed to be subclassed rather than instantiated\n directly. Subclasses should define SERVICE_NAME, OPERATION\n (optional), DATA_SOURCE_NAME, and\n INPUT_IDENTIFIER_TYPES. SERVICE_NAME and OPERATION are described\n in BaseCoverageProvider; the rest are described in appropriate\n comments in this class.\n \"\"\"\n\n # In your subclass, set this to the name of the data source you\n # consult when providing coverage, e.g. DataSource.OVERDRIVE.\n DATA_SOURCE_NAME = None\n\n # In your subclass, set this to a single identifier type, or a list\n # of identifier types. The CoverageProvider will attempt to give\n # coverage to every Identifier of this type.\n #\n # Setting this to None will attempt to give coverage to every single\n # Identifier in the system, which is probably not what you want.\n NO_SPECIFIED_TYPES = object()\n INPUT_IDENTIFIER_TYPES = NO_SPECIFIED_TYPES\n\n # Set this to False if a given Identifier needs to be run through\n # this CoverageProvider once for every Collection that has this\n # Identifier in its catalog. If this is set to True, a given\n # Identifier will be considered completely covered the first time\n # it's run through this CoverageProvider, no matter how many\n # Collections the Identifier belongs to.\n COVERAGE_COUNTS_FOR_EVERY_COLLECTION = True\n\n def __init__(\n self,\n _db,\n collection=None,\n input_identifiers=None,\n replacement_policy=None,\n **kwargs\n ):\n \"\"\"Constructor.\n\n :param collection: Optional. If information comes in from a\n third party about a license pool associated with an\n Identifier, the LicensePool that belongs to this Collection\n will be used to contain that data. You may pass in None for\n this value, but that means that no circulation information\n (such as the formats in which a book is available) will be\n stored as a result of running this CoverageProvider. Only\n bibliographic information will be stored.\n :param input_identifiers: Optional. This CoverageProvider is\n requested to provide coverage for these specific\n Identifiers.\n :param replacement_policy: Optional. 
A ReplacementPolicy to use\n when updating local data with data from the third party.\n \"\"\"\n super(IdentifierCoverageProvider, self).__init__(_db, **kwargs)\n\n # We store the collection ID rather than the Collection to\n # avoid breakage if an app server with a scoped session ever\n # uses a IdentifierCoverageProvider.\n self.collection_id = None\n if collection:\n self.collection_id = collection.id\n self.input_identifiers = input_identifiers\n self.replacement_policy = (\n replacement_policy or self._default_replacement_policy(_db)\n )\n\n if not self.DATA_SOURCE_NAME:\n raise ValueError(\n \"%s must define DATA_SOURCE_NAME\" % self.__class__.__name__\n )\n\n # Get this information immediately so that an error happens immediately\n # if INPUT_IDENTIFIER_TYPES is not set properly.\n self.input_identifier_types = self._input_identifier_types()\n\n def _default_replacement_policy(self, _db):\n \"\"\"Unless told otherwise, assume that we are getting\n this data from a reliable metadata source.\n \"\"\"\n return ReplacementPolicy.from_metadata_source()\n\n @property\n def collection_or_not(self):\n \"\"\"If this CoverageProvider needs to be run multiple times on\n the same identifier in different collections, this\n returns the collection. Otherwise, this returns None.\n \"\"\"\n if self.COVERAGE_COUNTS_FOR_EVERY_COLLECTION:\n return None\n return self.collection\n\n @classmethod\n def _input_identifier_types(cls):\n \"\"\"Create a normalized value for `input_identifier_types`\n based on the INPUT_IDENTIFIER_TYPES class variable.\n \"\"\"\n value = cls.INPUT_IDENTIFIER_TYPES\n\n # Nip in the bud a situation where someone subclassed this\n # class without thinking about a value for\n # INPUT_IDENTIFIER_TYPES.\n if value is cls.NO_SPECIFIED_TYPES:\n raise ValueError(\n \"%s must define INPUT_IDENTIFIER_TYPES, even if the value is None.\"\n % (cls.__name__)\n )\n\n if not value:\n # We will be processing every single type of identifier in\n # the system. 
This (hopefully) means that the identifiers\n # are restricted in some other way, such as being licensed\n # to a specific Collection.\n return None\n elif not isinstance(value, list):\n # We will be processing every identifier of a given type.\n return [value]\n else:\n # We will be processing every identifier whose type belongs to\n # a list of types.\n return value\n\n @classmethod\n def register(\n cls,\n identifier,\n data_source=None,\n collection=None,\n force=False,\n autocreate=False,\n ):\n \"\"\"Registers an identifier for future coverage.\n\n See `CoverageProvider.bulk_register` for more information about using\n this method.\n \"\"\"\n name = cls.SERVICE_NAME or cls.__name__\n log = logging.getLogger(name)\n\n new_records, ignored_identifiers = cls.bulk_register(\n [identifier],\n data_source=data_source,\n collection=collection,\n force=force,\n autocreate=autocreate,\n )\n was_registered = identifier not in ignored_identifiers\n\n new_record = None\n if new_records:\n [new_record] = new_records\n\n if was_registered and new_record:\n log.info(\"CREATED %r\" % new_record)\n return new_record, was_registered\n\n _db = Session.object_session(identifier)\n data_source = cls._data_source_for_registration(\n _db, data_source, autocreate=autocreate\n )\n\n if collection and cls.COVERAGE_COUNTS_FOR_EVERY_COLLECTION:\n # There's no need for a collection when registering this\n # Identifier, even if it provided the DataSource.\n collection = None\n\n existing_record = CoverageRecord.lookup(\n identifier, data_source, cls.OPERATION, collection=collection\n )\n log.info(\"FOUND %r\" % existing_record)\n return existing_record, was_registered\n\n @classmethod\n def bulk_register(\n cls,\n identifiers,\n data_source=None,\n collection=None,\n force=False,\n autocreate=False,\n ):\n \"\"\"Registers identifiers for future coverage.\n\n This method is primarily for use with CoverageProviders that use the\n `registered_only` flag to process items. 
It's currently only in use\n on the Metadata Wrangler.\n\n :param data_source: DataSource object or basestring representing a\n DataSource name.\n :param collection: Collection object to be associated with the\n CoverageRecords.\n :param force: When True, even existing CoverageRecords will have\n their status reset to CoverageRecord.REGISTERED.\n :param autocreate: When True, a basestring provided by data_source will\n be autocreated in the database if it didn't previously exist.\n\n :return: A tuple of two lists: the first has fresh new REGISTERED\n CoverageRecords and the second list already has Identifiers that\n were ignored because they already had coverage.\n\n TODO: Take identifier eligibility into account when registering.\n \"\"\"\n if not identifiers:\n return list(), list()\n\n _db = Session.object_session(identifiers[0])\n data_source = cls._data_source_for_registration(\n _db, data_source, autocreate=autocreate\n )\n\n if collection and cls.COVERAGE_COUNTS_FOR_EVERY_COLLECTION:\n # There's no need for a collection on this CoverageRecord.\n collection = None\n\n new_records, ignored_identifiers = CoverageRecord.bulk_add(\n identifiers,\n data_source,\n operation=cls.OPERATION,\n status=CoverageRecord.REGISTERED,\n collection=collection,\n force=force,\n )\n\n return new_records, ignored_identifiers\n\n @classmethod\n def _data_source_for_registration(cls, _db, data_source, autocreate=False):\n \"\"\"Finds or creates a DataSource for the registration methods\n `cls.register` and `cls.bulk_register`.\n \"\"\"\n if not data_source:\n return DataSource.lookup(_db, cls.DATA_SOURCE_NAME)\n if isinstance(data_source, DataSource):\n return data_source\n if isinstance(data_source, (bytes, str)):\n return DataSource.lookup(_db, data_source, autocreate=autocreate)\n\n @property\n def data_source(self):\n \"\"\"Look up the DataSource object corresponding to the\n service we're running this data through.\n\n Out of an excess of caution, we look up the DataSource every\n time, rather than storing it, in case a CoverageProvider is\n ever used in an environment where the database session is\n scoped (e.g. 
the circulation manager).\n \"\"\"\n return DataSource.lookup(self._db, self.DATA_SOURCE_NAME)\n\n def failure(self, identifier, error, transient=True):\n \"\"\"Create a CoverageFailure object to memorialize an error.\"\"\"\n return CoverageFailure(\n identifier,\n error,\n data_source=self.data_source,\n transient=transient,\n collection=self.collection_or_not,\n )\n\n def can_cover(self, identifier):\n \"\"\"Can this IdentifierCoverageProvider do anything with the given\n Identifier?\n\n This is not needed in the normal course of events, but a\n caller may need to decide whether to pass an Identifier\n into ensure_coverage() or register().\n \"\"\"\n return (\n not self.input_identifier_types\n or identifier.type in self.input_identifier_types\n )\n\n def run_on_specific_identifiers(self, identifiers):\n \"\"\"Split a specific set of Identifiers into batches and process one\n batch at a time.\n\n This is for use by IdentifierInputScript.\n\n :return: The same (counts, records) 2-tuple as\n process_batch_and_handle_results.\n \"\"\"\n index = 0\n successes = 0\n transient_failures = 0\n persistent_failures = 0\n records = []\n\n # Of all the items that need coverage, find the intersection\n # with the given list of items.\n need_coverage = self.items_that_need_coverage(identifiers).all()\n\n # Treat any items with up-to-date coverage records as\n # automatic successes.\n #\n # NOTE: We won't actually be returning those coverage records\n # in `records`, since items_that_need_coverage() filters them\n # out, but nobody who calls this method really needs those\n # records.\n automatic_successes = len(identifiers) - len(need_coverage)\n successes += automatic_successes\n self.log.info(\"%d automatic successes.\", successes)\n\n # Iterate over any items that were not automatic\n # successes.\n while index < len(need_coverage):\n batch = need_coverage[index : index + self.batch_size]\n (s, t, p), r = self.process_batch_and_handle_results(batch)\n successes += s\n transient_failures += t\n persistent_failures += p\n records += r\n index += self.batch_size\n return (successes, transient_failures, persistent_failures), records\n\n def ensure_coverage(self, item, force=False):\n \"\"\"Ensure coverage for one specific item.\n\n :param item: This should always be an Identifier, but this\n code will also work if it's an Edition. 
(The Edition's\n .primary_identifier will be covered.)\n :param force: Run the coverage code even if an existing\n coverage record for this item was created after `self.cutoff_time`.\n :return: Either a coverage record or a CoverageFailure.\n\n TODO: This could be abstracted and moved to BaseCoverageProvider.\n\n \"\"\"\n if isinstance(item, Identifier):\n identifier = item\n else:\n identifier = item.primary_identifier\n\n if self.COVERAGE_COUNTS_FOR_EVERY_COLLECTION:\n # We need to cover this Identifier once, and then we're\n # done, for all collections.\n collection = None\n else:\n # We need separate coverage for the specific Collection\n # associated with this CoverageProvider.\n collection = self.collection\n\n coverage_record = get_one(\n self._db,\n CoverageRecord,\n identifier=identifier,\n collection=collection,\n data_source=self.data_source,\n operation=self.operation,\n on_multiple=\"interchangeable\",\n )\n if not force and not self.should_update(coverage_record):\n return coverage_record\n\n counts, records = self.process_batch_and_handle_results([identifier])\n if records:\n coverage_record = records[0]\n else:\n coverage_record = None\n return coverage_record\n\n def edition(self, identifier):\n \"\"\"Finds or creates an Edition representing this coverage provider's\n view of a given Identifier.\n \"\"\"\n edition, ignore = Edition.for_foreign_id(\n self._db, self.data_source, identifier.type, identifier.identifier\n )\n return edition\n\n def set_metadata(self, identifier, metadata):\n \"\"\"Finds or creates the Edition for an Identifier, updates it\n with the given metadata.\n\n :return: The Identifier (if successful) or an appropriate\n CoverageFailure (if not).\n \"\"\"\n edition = self.edition(identifier)\n if isinstance(edition, CoverageFailure):\n return edition\n\n if not metadata:\n e = \"Did not receive metadata from input source\"\n return self.failure(identifier, e, transient=True)\n\n try:\n # We're passing in the Collection even if this\n # CoverageProvider has\n # COVERAGE_COUNTS_FOR_EVERY_COLLECTION set to False. If\n # we did happen to get some circulation information while\n # we were at it, we might as well store it properly.\n # The metadata layer will not use the collection when creating\n # CoverageRecords for the metadata actions.\n metadata.apply(\n edition,\n collection=self.collection,\n replace=self.replacement_policy,\n )\n except Exception as e:\n self.log.warning(\n \"Error applying metadata to edition %d: %s\", edition.id, e, exc_info=e\n )\n return self.failure(identifier, repr(e), transient=True)\n\n return identifier\n\n #\n # Implementation of BaseCoverageProvider virtual methods.\n #\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n \"\"\"Find all items lacking coverage from this CoverageProvider.\n\n Items should be Identifiers, though Editions should also work.\n\n By default, all identifiers of the `INPUT_IDENTIFIER_TYPES` which\n don't already have coverage are chosen.\n\n :param identifiers: The batch of identifier objects to test\n for coverage. 
identifiers and self.input_identifiers can\n intersect -- if this provider was created for the purpose\n of running specific Identifiers, and within those\n Identifiers you want to batch, you can use both\n parameters.\n \"\"\"\n qu = Identifier.missing_coverage_from(\n self._db,\n self.input_identifier_types,\n self.data_source,\n count_as_missing_before=self.cutoff_time,\n operation=self.operation,\n identifiers=self.input_identifiers,\n collection=self.collection_or_not,\n **kwargs\n )\n\n if identifiers:\n qu = qu.filter(Identifier.id.in_([x.id for x in identifiers]))\n if not identifiers and identifiers != None:\n # An empty list was provided. The returned query should be empty.\n qu = qu.filter(Identifier.id == None)\n\n if self.registered_only:\n # Return Identifiers that have been \"registered\" for coverage\n # or already have a failure from previous coverage attempts.\n qu = qu.filter(CoverageRecord.id != None)\n\n return qu\n\n def add_coverage_record_for(self, item):\n \"\"\"Record this CoverageProvider's coverage for the given\n Edition/Identifier, as a CoverageRecord.\n \"\"\"\n record, is_new = CoverageRecord.add_for(\n item,\n data_source=self.data_source,\n operation=self.operation,\n collection=self.collection_or_not,\n )\n record.status = CoverageRecord.SUCCESS\n record.exception = None\n return record\n\n def record_failure_as_coverage_record(self, failure):\n \"\"\"Turn a CoverageFailure into a CoverageRecord object.\"\"\"\n return failure.to_coverage_record(operation=self.operation)\n\n def failure_for_ignored_item(self, item):\n \"\"\"Create a CoverageFailure recording the CoverageProvider's\n failure to even try to process an item.\n \"\"\"\n return self.failure(item, \"Was ignored by CoverageProvider.\", transient=True)\n\n\nclass CollectionCoverageProvider(IdentifierCoverageProvider):\n \"\"\"A CoverageProvider that covers all the Identifiers currently\n licensed to a given Collection.\n\n You should subclass this CoverageProvider if you want to create\n Works (as opposed to operating on existing Works) or update the\n circulation information for LicensePools. You can't use it to\n create new LicensePools, since it only operates on Identifiers\n that already have a LicensePool in the given Collection.\n\n If a book shows up in multiple Collections, the first Collection\n to process it takes care of it for the others. Any books that were\n processed through their membership in another Collection will be\n left alone.\n\n For this reason it's important that subclasses of this\n CoverageProvider only deal with bibliographic information and\n format availability information (such as links to open-access\n downloads). You'll have problems if you try to use\n CollectionCoverageProvider to keep track of information like the\n number of licenses available for a book.\n\n In addition to defining the class variables defined by\n CoverageProvider, you must define the class variable PROTOCOL when\n subclassing this class. This is the entity that provides the\n licenses for this Collection. It should be one of the\n collection-type provider constants defined in the\n `ExternalIntegration` class, such as\n ExternalIntegration.OPDS_IMPORT or ExternalIntegration.OVERDRIVE.\n \"\"\"\n\n # By default, this type of CoverageProvider will provide coverage to\n # all Identifiers in the given Collection, regardless of their type.\n INPUT_IDENTIFIER_TYPES = None\n\n DEFAULT_BATCH_SIZE = 10\n\n # Set this to the name of the protocol managed by this type of\n # CoverageProvider. 
If this CoverageProvider can manage collections\n # for any protocol, leave this as None.\n PROTOCOL = None\n\n # By default, Works calculated by a CollectionCoverageProvider update\n # the ExternalSearchIndex. Set this value to True for applications that\n # don't use external search, such as the Metadata Wrangler.\n EXCLUDE_SEARCH_INDEX = False\n\n def __init__(self, collection, **kwargs):\n \"\"\"Constructor.\n\n :param collection: Will provide coverage to all Identifiers with\n a LicensePool licensed to the given Collection.\n \"\"\"\n if not isinstance(collection, Collection):\n raise CollectionMissing(\n \"%s must be instantiated with a Collection.\" % (self.__class__.__name__)\n )\n\n if self.PROTOCOL and collection.protocol != self.PROTOCOL:\n raise ValueError(\n \"Collection protocol (%s) does not match CoverageProvider protocol (%s)\"\n % (collection.protocol, self.PROTOCOL)\n )\n _db = Session.object_session(collection)\n super(CollectionCoverageProvider, self).__init__(_db, collection, **kwargs)\n\n def _default_replacement_policy(self, _db):\n \"\"\"Unless told otherwise, assume that we are getting\n this data from a reliable source of both metadata and circulation\n information.\n \"\"\"\n return ReplacementPolicy.from_license_source(_db)\n\n @classmethod\n def collections(cls, _db):\n \"\"\"Returns a list of randomly sorted list of collections covered by the\n provider.\n \"\"\"\n if cls.PROTOCOL:\n collections = Collection.by_protocol(_db, cls.PROTOCOL)\n else:\n collections = _db.query(Collection)\n return collections.order_by(func.random()).all()\n\n @classmethod\n def all(cls, _db, **kwargs):\n \"\"\"Yield a sequence of CollectionCoverageProvider instances, one for\n every Collection that gets its licenses from cls.PROTOCOL.\n\n CollectionCoverageProviders will be yielded in a random order.\n\n :param kwargs: Keyword arguments passed into the constructor for\n CollectionCoverageProvider (or, more likely, one of its subclasses).\n\n \"\"\"\n for collection in cls.collections(_db):\n yield cls(collection, **kwargs)\n\n def run_once(self, *args, **kwargs):\n self.log.info(\"Considering collection %s\", self.collection.name)\n return super(CollectionCoverageProvider, self).run_once(*args, **kwargs)\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n \"\"\"Find all Identifiers associated with this Collection but lacking\n coverage through this CoverageProvider.\n \"\"\"\n qu = super(CollectionCoverageProvider, self).items_that_need_coverage(\n identifiers, **kwargs\n )\n qu = qu.join(Identifier.licensed_through).filter(\n LicensePool.collection_id == self.collection_id\n )\n return qu\n\n def license_pool(self, identifier, data_source=None):\n \"\"\"Finds this Collection's LicensePool for the given Identifier,\n creating one if necessary.\n\n :param data_source: If it's necessary to create a LicensePool,\n the new LicensePool will have this DataSource. The default is to\n use the DataSource associated with the CoverageProvider. 
This\n should only be needed by the metadata wrangler.\n \"\"\"\n license_pools = [\n p for p in identifier.licensed_through if self.collection == p.collection\n ]\n\n if license_pools:\n # A given Collection may have at most one LicensePool for\n # a given identifier.\n return license_pools[0]\n\n data_source = data_source or self.data_source\n if isinstance(data_source, (bytes, str)):\n data_source = DataSource.lookup(self._db, data_source)\n\n # This Collection has no LicensePool for the given Identifier.\n # Create one.\n #\n # Under normal circumstances, this will never happen, because\n # CollectionCoverageProvider only operates on Identifiers that\n # already have a LicensePool in this Collection.\n #\n # However, this does happen in the metadata wrangler,\n # which typically has to manage information about books it has no\n # rights to.\n license_pool, ignore = LicensePool.for_foreign_id(\n self._db,\n data_source,\n identifier.type,\n identifier.identifier,\n collection=self.collection,\n )\n return license_pool\n\n def work(self, identifier, license_pool=None, **calculate_work_kwargs):\n \"\"\"Finds or creates a Work for this Identifier as licensed through\n this Collection.\n\n If the given Identifier already has a Work associated with it,\n that Work will always be used, since an Identifier can only have one\n Work associated with it.\n\n However, if there is no current Work, a Work will only be\n created if the given Identifier already has a LicensePool in\n the Collection associated with this CoverageProvider (or if a\n LicensePool to use is provided.) This method will not create\n new LicensePools.\n\n If the work is newly created or an existing work is not\n presentation-ready, a new Work will be created by calling\n LicensePool.calculate_work(). If there is an existing\n presentation-ready work, calculate_work() will not be called;\n instead, the work will be slated for recalculation when its\n metadata changes through Metadata.apply().\n\n :param calculate_work_kwargs: Keyword arguments to pass into\n calculate_work() if and when it is called.\n\n :return: A Work, if possible. Otherwise, a CoverageFailure explaining\n why no Work could be created.\n\n \"\"\"\n work = identifier.work\n if work and work.presentation_ready:\n # There is already a presentation-ready Work associated\n # with this Identifier. Return it.\n return work\n\n # There is no presentation-ready Work associated with this\n # Identifier. 
We need to create one, if possible.\n error = None\n if not license_pool:\n license_pool, ignore = LicensePool.for_foreign_id(\n self._db,\n self.data_source,\n identifier.type,\n identifier.identifier,\n collection=self.collection,\n autocreate=False,\n )\n\n if license_pool:\n if not license_pool.work or not license_pool.work.presentation_ready:\n for (v, default) in ((\"exclude_search\", self.EXCLUDE_SEARCH_INDEX),):\n if not v in calculate_work_kwargs:\n calculate_work_kwargs[v] = default\n\n # Calling calculate_work will calculate the work's\n # presentation and make it presentation-ready if\n # possible.\n work, created = license_pool.calculate_work(**calculate_work_kwargs)\n if not work:\n error = \"Work could not be calculated\"\n else:\n error = \"Cannot locate LicensePool\"\n\n if error:\n return self.failure(identifier, error, transient=True)\n return work\n\n def set_metadata_and_circulation_data(\n self,\n identifier,\n metadata,\n circulationdata,\n ):\n \"\"\"Makes sure that the given Identifier has a Work, Edition (in the\n context of this Collection), and LicensePool (ditto), and that\n all the information is up to date.\n\n :return: The Identifier (if successful) or an appropriate\n CoverageFailure (if not).\n \"\"\"\n\n if not metadata and not circulationdata:\n e = \"Received neither metadata nor circulation data from input source\"\n return self.failure(identifier, e, transient=True)\n\n if metadata:\n result = self.set_metadata(identifier, metadata)\n if isinstance(result, CoverageFailure):\n return result\n\n if circulationdata:\n result = self._set_circulationdata(identifier, circulationdata)\n if isinstance(result, CoverageFailure):\n return result\n\n # By this point the Identifier should have an appropriate\n # Edition and LicensePool. 
We should now be able to make a\n # Work.\n work = self.work(identifier)\n if isinstance(work, CoverageFailure):\n return work\n\n return identifier\n\n def _set_circulationdata(self, identifier, circulationdata):\n \"\"\"Finds or creates a LicensePool for an Identifier, updates it\n with the given circulationdata, then creates a Work for the book.\n\n :return: The Identifier (if successful) or an appropriate\n CoverageFailure (if not).\n \"\"\"\n error = None\n if circulationdata:\n primary_identifier = circulationdata.primary_identifier(self._db)\n if identifier != primary_identifier:\n error = \"Identifier did not match CirculationData's primary identifier.\"\n else:\n error = \"Did not receive circulationdata from input source\"\n\n if error:\n return self.failure(identifier, error, transient=True)\n\n try:\n circulationdata.apply(\n self._db, self.collection, replace=self.replacement_policy\n )\n except Exception as e:\n if self.collection:\n collection_name = \" to collection %s\" % self.collection.name\n else:\n collection_name = \"\"\n self.log.warning(\n \"Error applying circulationdata%s: %s\", collection_name, e, exc_info=e\n )\n return self.failure(identifier, repr(e), transient=True)\n\n return identifier\n\n def set_presentation_ready(self, identifier):\n \"\"\"Set a Work presentation-ready.\"\"\"\n work = self.work(identifier)\n if isinstance(work, CoverageFailure):\n return work\n work.set_presentation_ready(exclude_search=self.EXCLUDE_SEARCH_INDEX)\n return identifier\n\n\nclass CollectionCoverageProviderJob(DatabaseJob):\n def __init__(self, collection, provider_class, progress, **provider_kwargs):\n self.collection = collection\n self.progress = progress\n self.provider_class = provider_class\n self.provider_kwargs = provider_kwargs\n\n def run(self, _db, **kwargs):\n collection = _db.merge(self.collection)\n provider = self.provider_class(collection, **self.provider_kwargs)\n provider.run_once(self.progress)\n provider.finalize_timestampdata(self.progress)\n\n\nclass CatalogCoverageProvider(CollectionCoverageProvider):\n \"\"\"Most CollectionCoverageProviders provide coverage to Identifiers\n that are licensed through a given Collection.\n\n A CatalogCoverageProvider provides coverage to Identifiers that\n are present in a given Collection's catalog.\n \"\"\"\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n \"\"\"Find all Identifiers in this Collection's catalog but lacking\n coverage through this CoverageProvider.\n \"\"\"\n qu = super(CollectionCoverageProvider, self).items_that_need_coverage(\n identifiers, **kwargs\n )\n qu = qu.join(Identifier.collections).filter(Collection.id == self.collection_id)\n return qu\n\n\nclass BibliographicCoverageProvider(CollectionCoverageProvider):\n \"\"\"Fill in bibliographic metadata for all books in a Collection.\n\n e.g. ensures that we get Overdrive coverage for all Overdrive IDs\n in a collection.\n\n Although a BibliographicCoverageProvider may gather\n CirculationData for a book, it cannot guarantee equal coverage for\n all Collections that contain that book. 
CirculationData should be\n limited to things like formats that don't vary between\n Collections, and you should use a CollectionMonitor to make sure\n your circulation information is up-to-date for each Collection.\n \"\"\"\n\n def handle_success(self, identifier):\n \"\"\"Once a book has bibliographic coverage, it can be given a\n work and made presentation ready.\n \"\"\"\n self.set_presentation_ready(identifier)\n\n\nclass WorkCoverageProvider(BaseCoverageProvider):\n\n \"\"\"Perform coverage operations on Works rather than Identifiers.\"\"\"\n\n @classmethod\n def register(cls, work, force=False):\n \"\"\"Registers a work for future coverage.\n\n This method is primarily for use with CoverageProviders that use the\n `registered_only` flag to process items. It's currently only in use\n on the Metadata Wrangler.\n\n :param force: Set to True to reset an existing CoverageRecord's status\n \"registered\", regardless of its current status.\n \"\"\"\n was_registered = True\n if not force:\n record = WorkCoverageRecord.lookup(work, cls.OPERATION)\n if record:\n was_registered = False\n return record, was_registered\n\n # WorkCoverageRecord.add_for overwrites the status already,\n # so it can be used to create and to force-register records.\n record, is_new = WorkCoverageRecord.add_for(\n work, cls.OPERATION, status=CoverageRecord.REGISTERED\n )\n return record, was_registered\n\n #\n # Implementation of BaseCoverageProvider virtual methods.\n #\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n \"\"\"Find all Works lacking coverage from this CoverageProvider.\n\n By default, all Works which don't already have coverage are\n chosen.\n\n :param: Only Works connected with one of the given identifiers\n are chosen.\n \"\"\"\n qu = Work.missing_coverage_from(\n self._db,\n operation=self.operation,\n count_as_missing_before=self.cutoff_time,\n **kwargs\n )\n if identifiers:\n ids = [x.id for x in identifiers]\n qu = qu.join(Work.license_pools).filter(LicensePool.identifier_id.in_(ids))\n\n if self.registered_only:\n # Return Identifiers that have been \"registered\" for coverage\n # or already have a failure from previous coverage attempts.\n qu = qu.filter(WorkCoverageRecord.id != None)\n\n return qu\n\n def failure(self, work, error, transient=True):\n \"\"\"Create a CoverageFailure object.\"\"\"\n return CoverageFailure(work, error, transient=transient)\n\n def failure_for_ignored_item(self, work):\n \"\"\"Create a CoverageFailure recording the WorkCoverageProvider's\n failure to even try to process a Work.\n \"\"\"\n return CoverageFailure(\n work, \"Was ignored by WorkCoverageProvider.\", transient=True\n )\n\n def add_coverage_records_for(self, works):\n \"\"\"Add WorkCoverageRecords for a group of works from a batch,\n each of which was successful.\n \"\"\"\n WorkCoverageRecord.bulk_add(works, operation=self.operation)\n\n # We can't return the specific WorkCoverageRecords that were\n # created, but it doesn't matter because they're not used except\n # in tests.\n return []\n\n def add_coverage_record_for(self, work):\n \"\"\"Record this CoverageProvider's coverage for the given\n Edition/Identifier, as a WorkCoverageRecord.\n \"\"\"\n return WorkCoverageRecord.add_for(work, operation=self.operation)\n\n def record_failure_as_coverage_record(self, failure):\n \"\"\"Turn a CoverageFailure into a WorkCoverageRecord object.\"\"\"\n return failure.to_work_coverage_record(operation=self.operation)\n\n\nclass PresentationReadyWorkCoverageProvider(WorkCoverageProvider):\n 
\"\"\"A WorkCoverageProvider that only covers presentation-ready works.\"\"\"\n\n def items_that_need_coverage(self, identifiers=None, **kwargs):\n qu = super(\n PresentationReadyWorkCoverageProvider, self\n ).items_that_need_coverage(identifiers, **kwargs)\n qu = qu.filter(Work.presentation_ready == True)\n return qu\n\n\nclass WorkPresentationProvider(PresentationReadyWorkCoverageProvider):\n \"\"\"Recalculate some part of presentation for works that are\n presentation-ready.\n\n A Work's presentation is set when it's made presentation-ready\n (thus the name). When that happens, a number of WorkCoverageRecords\n are set for that Work.\n\n A migration script may remove a coverage record if it knows a work\n needs to have some aspect of its presentation recalculated. These\n providers give back the 'missing' coverage.\n \"\"\"\n\n DEFAULT_BATCH_SIZE = 100\n\n\nclass OPDSEntryWorkCoverageProvider(WorkPresentationProvider):\n \"\"\"Make sure all presentation-ready works have an up-to-date OPDS\n entry.\n\n This is different from the OPDSEntryCacheMonitor, which sweeps\n over all presentation-ready works, even ones which are already\n covered.\n \"\"\"\n\n SERVICE_NAME = \"OPDS Entry Work Coverage Provider\"\n OPERATION = WorkCoverageRecord.GENERATE_OPDS_OPERATION\n DEFAULT_BATCH_SIZE = 1000\n\n def process_item(self, work):\n work.calculate_opds_entries()\n return work\n\n\nclass MARCRecordWorkCoverageProvider(WorkPresentationProvider):\n \"\"\"Make sure all presentation-ready works have an up-to-date MARC\n record.\n \"\"\"\n\n SERVICE_NAME = \"MARC Record Work Coverage Provider\"\n OPERATION = WorkCoverageRecord.GENERATE_MARC_OPERATION\n DEFAULT_BATCH_SIZE = 1000\n\n def process_item(self, work):\n work.calculate_marc_record()\n return work\n\n\nclass WorkPresentationEditionCoverageProvider(WorkPresentationProvider):\n \"\"\"Make sure each Work has an up-to-date presentation edition.\n\n This basically means comparing all the Editions associated with the\n Work and building a composite Edition.\n\n Expensive operations -- calculating work quality, summary, and genre\n classification -- are reserved for WorkClassificationCoverageProvider\n \"\"\"\n\n SERVICE_NAME = \"Calculated presentation coverage provider\"\n\n OPERATION = WorkCoverageRecord.CHOOSE_EDITION_OPERATION\n\n POLICY = PresentationCalculationPolicy(\n choose_edition=True,\n set_edition_metadata=True,\n verbose=True,\n # These are the expensive ones, and they're covered by\n # WorkSummaryQualityClassificationCoverageProvider.\n classify=False,\n choose_summary=False,\n calculate_quality=False,\n # It would be better if there were a separate class for this\n # operation (COVER_OPERATION), but it's a little complicated because\n # that's not a WorkCoverageRecord operation.\n choose_cover=True,\n # We do this even though it's redundant with\n # OPDSEntryWorkCoverageProvider. If you change a\n # Work's presentation edition but don't update its OPDS entry,\n # it effectively didn't happen.\n regenerate_opds_entries=True,\n # Same logic for the search index. This will flag the Work as\n # needing a search index update, and SearchIndexCoverageProvider\n # will take care of it.\n update_search_index=True,\n )\n\n def process_item(self, work):\n \"\"\"Recalculate the presentation for a Work.\"\"\"\n\n # Calling calculate_presentation_edition won't, on its own,\n # regenerate the OPDS feeds or update the search index. 
So we\n # call calculate_presentation with a policy that ensures the\n # presentation edition will be reevaluated, but nothing\n # expensive will happen.\n work.calculate_presentation(self.POLICY)\n return work\n\n\nclass WorkClassificationCoverageProvider(WorkPresentationEditionCoverageProvider):\n \"\"\"Calculates the 'expensive' parts of a work's presentation:\n classifications, summary, and quality.\n\n We do all three at once because these gathering together all\n equivalent identifiers for the work, which can be, by far, the\n most expensive part of the work.\n\n This is called 'classification' because that's the most likely use\n of this coverage provider. If you want to make sure a bunch of\n works get their summaries recalculated, you need to remember that\n the coverage record to delete is CLASSIFY_OPERATION.\n \"\"\"\n\n SERVICE_NAME = \"Work classification coverage provider\"\n\n DEFAULT_BATCH_SIZE = 20\n\n OPERATION = WorkCoverageRecord.CLASSIFY_OPERATION\n\n # This is going to be expensive -- we might as well recalculate\n # everything.\n POLICY = PresentationCalculationPolicy.recalculate_everything()\n", "id": "3859857", "language": "Python", "matching_score": 3.6382009983062744, "max_stars_count": 0, "path": "core/coverage.py" }, { "content": "import base64\nimport json\nimport os\nimport textwrap\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nfrom collections import Counter\nfrom io import BytesIO\n\nimport flask\nfrom flask import Response\nfrom flask_babel import lazy_gettext as _\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom api.admin.opds import AdminAnnotator\nfrom api.admin.problem_details import *\nfrom api.admin.validator import Validator\nfrom api.config import CannotLoadConfiguration\nfrom api.metadata_wrangler import MetadataWranglerCollectionRegistrar\nfrom core.classifier import NO_NUMBER, NO_VALUE, SimplifiedGenreClassifier, genres\nfrom core.lane import Lane\nfrom core.metadata_layer import LinkData, Metadata, ReplacementPolicy\nfrom core.mirror import MirrorUploader\nfrom core.model import (\n Classification,\n Contributor,\n CustomList,\n DataSource,\n Edition,\n Genre,\n Hyperlink,\n Measurement,\n PresentationCalculationPolicy,\n Representation,\n RightsStatus,\n Subject,\n create,\n get_one,\n get_one_or_create,\n)\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.opds import AcquisitionFeed\nfrom core.util import LanguageCodes\nfrom core.util.datetime_helpers import strptime_utc, utc_now\nfrom core.util.problem_detail import ProblemDetail\n\nfrom . import AdminCirculationManagerController\n\n\nclass WorkController(AdminCirculationManagerController):\n\n STAFF_WEIGHT = 1000\n\n def details(self, identifier_type, identifier):\n \"\"\"Return an OPDS entry with detailed information for admins.\n\n This includes relevant links for editing the book.\n\n :return: An OPDSEntryResponse\n \"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n annotator = AdminAnnotator(self.circulation, flask.request.library)\n\n # single_entry returns an OPDSEntryResponse that will not be\n # cached, which is perfect. 
We want the admin interface\n # to update immediately when an admin makes a change.\n return AcquisitionFeed.single_entry(self._db, work, annotator)\n\n def complaints(self, identifier_type, identifier):\n \"\"\"Return detailed complaint information for admins.\"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n counter = self._count_complaints_for_work(work)\n response = dict(\n {\n \"book\": {\"identifier_type\": identifier_type, \"identifier\": identifier},\n \"complaints\": counter,\n }\n )\n\n return response\n\n def roles(self):\n \"\"\"Return a mapping from MARC codes to contributor roles.\"\"\"\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role\n\n def languages(self):\n \"\"\"Return the supported language codes and their English names.\"\"\"\n return LanguageCodes.english_names\n\n def media(self):\n \"\"\"Return the supported media types for a work and their schema.org values.\"\"\"\n return Edition.additional_type_to_medium\n\n def rights_status(self):\n \"\"\"Return the supported rights status values with their names and whether\n they are open access.\"\"\"\n return {\n uri: dict(\n name=name,\n open_access=(uri in RightsStatus.OPEN_ACCESS),\n allows_derivatives=(uri in RightsStatus.ALLOWS_DERIVATIVES),\n )\n for uri, name in list(RightsStatus.NAMES.items())\n }\n\n def edit(self, identifier_type, identifier):\n \"\"\"Edit a work's metadata.\"\"\"\n self.require_librarian(flask.request.library)\n\n # TODO: It would be nice to use the metadata layer for this, but\n # this code handles empty values differently than other metadata\n # sources. When a staff member deletes a value, that indicates\n # they think it should be empty. 
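\n        # (Concretely: the code below stores the NO_VALUE sentinel -- or NO_NUMBER\n        # for a cleared series position -- rather than None when a field is\n        # cleared.)\n        # 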
This needs to be indicated in the\n # db so that it can overrule other data sources that set a value,\n # unlike other sources which set empty fields to None.\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n changed = False\n\n staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n primary_identifier = work.presentation_edition.primary_identifier\n staff_edition, is_new = get_one_or_create(\n self._db,\n Edition,\n primary_identifier_id=primary_identifier.id,\n data_source_id=staff_data_source.id,\n )\n self._db.expire(primary_identifier)\n\n new_title = flask.request.form.get(\"title\")\n if new_title and work.title != new_title:\n staff_edition.title = str(new_title)\n changed = True\n\n new_subtitle = flask.request.form.get(\"subtitle\")\n if work.subtitle != new_subtitle:\n if work.subtitle and not new_subtitle:\n new_subtitle = NO_VALUE\n staff_edition.subtitle = str(new_subtitle)\n changed = True\n\n # The form data includes roles and names for contributors in the same order.\n new_contributor_roles = flask.request.form.getlist(\"contributor-role\")\n new_contributor_names = [\n str(n) for n in flask.request.form.getlist(\"contributor-name\")\n ]\n # The first author in the form is considered the primary author, even\n # though there's no separate MARC code for that.\n for i, role in enumerate(new_contributor_roles):\n if role == Contributor.AUTHOR_ROLE:\n new_contributor_roles[i] = Contributor.PRIMARY_AUTHOR_ROLE\n break\n roles_and_names = list(zip(new_contributor_roles, new_contributor_names))\n\n # Remove any contributions that weren't in the form, and remove contributions\n # that already exist from the list so they won't be added again.\n deleted_contributions = False\n for contribution in staff_edition.contributions:\n if (\n contribution.role,\n contribution.contributor.display_name,\n ) not in roles_and_names:\n self._db.delete(contribution)\n deleted_contributions = True\n changed = True\n else:\n roles_and_names.remove(\n (contribution.role, contribution.contributor.display_name)\n )\n if deleted_contributions:\n # Ensure the staff edition's contributions are up-to-date when\n # calculating the presentation edition later.\n self._db.refresh(staff_edition)\n\n # Any remaining roles and names are new contributions.\n for role, name in roles_and_names:\n # There may be one extra role at the end from the input for\n # adding a contributor, in which case it will have no\n # corresponding name and can be ignored.\n if name:\n if role not in list(Contributor.MARC_ROLE_CODES.keys()):\n self._db.rollback()\n return UNKNOWN_ROLE.detailed(\n _(\n \"Role %(role)s is not one of the known contributor roles.\",\n role=role,\n )\n )\n contributor = staff_edition.add_contributor(name=name, roles=[role])\n contributor.display_name = name\n changed = True\n\n new_series = flask.request.form.get(\"series\")\n if work.series != new_series:\n if work.series and not new_series:\n new_series = NO_VALUE\n staff_edition.series = str(new_series)\n changed = True\n\n new_series_position = flask.request.form.get(\"series_position\")\n if new_series_position != None and new_series_position != \"\":\n try:\n new_series_position = int(new_series_position)\n except ValueError:\n self._db.rollback()\n return INVALID_SERIES_POSITION\n else:\n new_series_position = None\n if work.series_position != new_series_position:\n if work.series_position and new_series_position == None:\n new_series_position = 
NO_NUMBER\n staff_edition.series_position = new_series_position\n changed = True\n\n new_medium = flask.request.form.get(\"medium\")\n if new_medium:\n if new_medium not in list(Edition.medium_to_additional_type.keys()):\n self._db.rollback()\n return UNKNOWN_MEDIUM.detailed(\n _(\n \"Medium %(medium)s is not one of the known media.\",\n medium=new_medium,\n )\n )\n staff_edition.medium = new_medium\n changed = True\n\n new_language = flask.request.form.get(\"language\")\n if new_language != None and new_language != \"\":\n new_language = LanguageCodes.string_to_alpha_3(new_language)\n if not new_language:\n self._db.rollback()\n return UNKNOWN_LANGUAGE\n else:\n new_language = None\n if new_language != staff_edition.language:\n staff_edition.language = new_language\n changed = True\n\n new_publisher = flask.request.form.get(\"publisher\")\n if new_publisher != staff_edition.publisher:\n if staff_edition.publisher and not new_publisher:\n new_publisher = NO_VALUE\n staff_edition.publisher = str(new_publisher)\n changed = True\n\n new_imprint = flask.request.form.get(\"imprint\")\n if new_imprint != staff_edition.imprint:\n if staff_edition.imprint and not new_imprint:\n new_imprint = NO_VALUE\n staff_edition.imprint = str(new_imprint)\n changed = True\n\n new_issued = flask.request.form.get(\"issued\")\n if new_issued != None and new_issued != \"\":\n try:\n new_issued = strptime_utc(new_issued, \"%Y-%m-%d\")\n except ValueError:\n self._db.rollback()\n return INVALID_DATE_FORMAT\n else:\n new_issued = None\n if new_issued != staff_edition.issued:\n staff_edition.issued = new_issued\n changed = True\n\n # TODO: This lets library staff add a 1-5 rating, which is used in the\n # quality calculation. However, this doesn't work well if there are any\n # other measurements that contribute to the quality. The form will show\n # the calculated quality rather than the staff rating, which will be\n # confusing. 
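\n        # (As a worked example of the normalization below: assuming the\n        # LIBRARY_STAFF rating scale is (1, 5), a staff rating of 4 maps to a\n        # quality of (4 - 1) / (5 - 1) == 0.75.)\n        # 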
It might also be useful to make it more clear how this\n # relates to the quality threshold in the library settings.\n changed_rating = False\n new_rating = flask.request.form.get(\"rating\")\n if new_rating != None and new_rating != \"\":\n try:\n new_rating = float(new_rating)\n except ValueError:\n self._db.rollback()\n return INVALID_RATING\n scale = Measurement.RATING_SCALES[DataSource.LIBRARY_STAFF]\n if new_rating < scale[0] or new_rating > scale[1]:\n self._db.rollback()\n return INVALID_RATING.detailed(\n _(\n \"The rating must be a number between %(low)s and %(high)s.\",\n low=scale[0],\n high=scale[1],\n )\n )\n if (new_rating - scale[0]) / (scale[1] - scale[0]) != work.quality:\n primary_identifier.add_measurement(\n staff_data_source,\n Measurement.RATING,\n new_rating,\n weight=WorkController.STAFF_WEIGHT,\n )\n changed = True\n changed_rating = True\n\n changed_summary = False\n new_summary = flask.request.form.get(\"summary\") or \"\"\n if new_summary != work.summary_text:\n old_summary = None\n if work.summary and work.summary.data_source == staff_data_source:\n old_summary = work.summary\n\n work.presentation_edition.primary_identifier.add_link(\n Hyperlink.DESCRIPTION, None, staff_data_source, content=new_summary\n )\n\n # Delete previous staff summary\n if old_summary:\n for link in old_summary.links:\n self._db.delete(link)\n self._db.delete(old_summary)\n\n changed = True\n changed_summary = True\n\n if changed:\n # Even if the presentation doesn't visibly change, we want\n # to regenerate the OPDS entries and update the search\n # index for the work, because that might be the 'real'\n # problem the user is trying to fix.\n policy = PresentationCalculationPolicy(\n classify=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True,\n calculate_quality=changed_rating,\n choose_summary=changed_summary,\n )\n work.calculate_presentation(policy=policy)\n\n return Response(\"\", 200)\n\n def suppress(self, identifier_type, identifier):\n \"\"\"Suppress the license pool associated with a book.\"\"\"\n self.require_librarian(flask.request.library)\n\n # Turn source + identifier into a LicensePool\n pools = self.load_licensepools(\n flask.request.library, identifier_type, identifier\n )\n if isinstance(pools, ProblemDetail):\n # Something went wrong.\n return pools\n\n # Assume that the Work is being suppressed from the catalog, and\n # not just the LicensePool.\n # TODO: Suppress individual LicensePools when it's not that deep.\n for pool in pools:\n pool.suppressed = True\n return Response(\"\", 200)\n\n def unsuppress(self, identifier_type, identifier):\n \"\"\"Unsuppress all license pools associated with a book.\n\n TODO: This will need to be revisited when we distinguish\n between complaints about a work and complaints about a\n LicensePoool.\n \"\"\"\n self.require_librarian(flask.request.library)\n\n # Turn source + identifier into a group of LicensePools\n pools = self.load_licensepools(\n flask.request.library, identifier_type, identifier\n )\n if isinstance(pools, ProblemDetail):\n # Something went wrong.\n return pools\n\n # Unsuppress each pool.\n for pool in pools:\n pool.suppressed = False\n return Response(\"\", 200)\n\n def refresh_metadata(self, identifier_type, identifier, provider=None):\n \"\"\"Refresh the metadata for a book from the content server\"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n 
return work\n\n if not provider and work.license_pools:\n try:\n provider = MetadataWranglerCollectionRegistrar(\n work.license_pools[0].collection\n )\n except CannotLoadConfiguration:\n return METADATA_REFRESH_FAILURE\n\n identifier = work.presentation_edition.primary_identifier\n try:\n record = provider.ensure_coverage(identifier, force=True)\n except Exception:\n # The coverage provider may raise an HTTPIntegrationException.\n return REMOTE_INTEGRATION_FAILED\n\n if record.exception:\n # There was a coverage failure.\n if str(record.exception).startswith(\"201\") or str(\n record.exception\n ).startswith(\"202\"):\n # A 201/202 error means it's never looked up this work before\n # so it's started the resolution process or looking for sources.\n return METADATA_REFRESH_PENDING\n # Otherwise, it just doesn't know anything.\n return METADATA_REFRESH_FAILURE\n\n return Response(\"\", 200)\n\n def resolve_complaints(self, identifier_type, identifier):\n \"\"\"Resolve all complaints for a particular license pool and complaint type.\"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n resolved = False\n found = False\n\n requested_type = flask.request.form.get(\"type\")\n if requested_type:\n for complaint in work.complaints:\n if complaint.type == requested_type:\n found = True\n if complaint.resolved == None:\n complaint.resolve()\n resolved = True\n\n if not found:\n return UNRECOGNIZED_COMPLAINT\n elif not resolved:\n return COMPLAINT_ALREADY_RESOLVED\n return Response(\"\", 200)\n\n def classifications(self, identifier_type, identifier):\n \"\"\"Return list of this work's classifications.\"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n identifier_id = work.presentation_edition.primary_identifier.id\n results = (\n self._db.query(Classification)\n .join(Subject)\n .join(DataSource)\n .filter(Classification.identifier_id == identifier_id)\n .order_by(Classification.weight.desc())\n .all()\n )\n\n data = []\n for result in results:\n data.append(\n dict(\n {\n \"type\": result.subject.type,\n \"name\": result.subject.identifier,\n \"source\": result.data_source.name,\n \"weight\": result.weight,\n }\n )\n )\n\n return dict(\n {\n \"book\": {\"identifier_type\": identifier_type, \"identifier\": identifier},\n \"classifications\": data,\n }\n )\n\n def edit_classifications(self, identifier_type, identifier):\n \"\"\"Edit a work's audience, target age, fiction status, and genres.\"\"\"\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n\n # Previous staff classifications\n primary_identifier = work.presentation_edition.primary_identifier\n old_classifications = (\n self._db.query(Classification)\n .join(Subject)\n .filter(\n Classification.identifier == primary_identifier,\n Classification.data_source == staff_data_source,\n )\n )\n old_genre_classifications = old_classifications.filter(Subject.genre_id != None)\n old_staff_genres = [\n c.subject.genre.name for c in old_genre_classifications if c.subject.genre\n ]\n old_computed_genres = [work_genre.genre.name for work_genre in work.work_genres]\n\n # New genres should 
be compared to previously computed genres\n new_genres = flask.request.form.getlist(\"genres\")\n genres_changed = sorted(new_genres) != sorted(old_computed_genres)\n\n # Update audience\n new_audience = flask.request.form.get(\"audience\")\n if new_audience != work.audience:\n # Delete all previous staff audience classifications\n for c in old_classifications:\n if c.subject.type == Subject.FREEFORM_AUDIENCE:\n self._db.delete(c)\n\n # Create a new classification with a high weight\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.FREEFORM_AUDIENCE,\n subject_identifier=new_audience,\n weight=WorkController.STAFF_WEIGHT,\n )\n\n # Update target age if present\n new_target_age_min = flask.request.form.get(\"target_age_min\")\n new_target_age_min = int(new_target_age_min) if new_target_age_min else None\n new_target_age_max = flask.request.form.get(\"target_age_max\")\n new_target_age_max = int(new_target_age_max) if new_target_age_max else None\n if (\n new_target_age_max is not None\n and new_target_age_min is not None\n and new_target_age_max < new_target_age_min\n ):\n return INVALID_EDIT.detailed(\n _(\"Minimum target age must be less than maximum target age.\")\n )\n\n if work.target_age:\n old_target_age_min = work.target_age.lower\n old_target_age_max = work.target_age.upper\n else:\n old_target_age_min = None\n old_target_age_max = None\n if (\n new_target_age_min != old_target_age_min\n or new_target_age_max != old_target_age_max\n ):\n # Delete all previous staff target age classifications\n for c in old_classifications:\n if c.subject.type == Subject.AGE_RANGE:\n self._db.delete(c)\n\n # Create a new classification with a high weight - higher than audience\n if new_target_age_min and new_target_age_max:\n age_range_identifier = \"%s-%s\" % (\n new_target_age_min,\n new_target_age_max,\n )\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.AGE_RANGE,\n subject_identifier=age_range_identifier,\n weight=WorkController.STAFF_WEIGHT * 100,\n )\n\n # Update fiction status\n # If fiction status hasn't changed but genres have changed,\n # we still want to ensure that there's a staff classification\n new_fiction = True if flask.request.form.get(\"fiction\") == \"fiction\" else False\n if new_fiction != work.fiction or genres_changed:\n # Delete previous staff fiction classifications\n for c in old_classifications:\n if c.subject.type == Subject.SIMPLIFIED_FICTION_STATUS:\n self._db.delete(c)\n\n # Create a new classification with a high weight (higher than genre)\n fiction_term = \"Fiction\" if new_fiction else \"Nonfiction\"\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_FICTION_STATUS,\n subject_identifier=fiction_term,\n weight=WorkController.STAFF_WEIGHT,\n )\n classification.subject.fiction = new_fiction\n\n # Update genres\n # make sure all new genres are legit\n for name in new_genres:\n genre, is_new = Genre.lookup(self._db, name)\n if not isinstance(genre, Genre):\n return GENRE_NOT_FOUND\n if (\n genres[name].is_fiction is not None\n and genres[name].is_fiction != new_fiction\n ):\n return INCOMPATIBLE_GENRE\n if name == \"Erotica\" and new_audience != \"Adults Only\":\n return EROTICA_FOR_ADULTS_ONLY\n\n if genres_changed:\n # delete existing staff classifications for genres that aren't being kept\n for c in old_genre_classifications:\n if c.subject.genre.name not in new_genres:\n self._db.delete(c)\n\n # add new staff classifications for 
new genres\n for genre in new_genres:\n if genre not in old_staff_genres:\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=genre,\n weight=WorkController.STAFF_WEIGHT,\n )\n\n # add NONE genre classification if we aren't keeping any genres\n if len(new_genres) == 0:\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=SimplifiedGenreClassifier.NONE,\n weight=WorkController.STAFF_WEIGHT,\n )\n else:\n # otherwise delete existing NONE genre classification\n none_classifications = (\n self._db.query(Classification)\n .join(Subject)\n .filter(\n Classification.identifier == primary_identifier,\n Subject.identifier == SimplifiedGenreClassifier.NONE,\n )\n .all()\n )\n for c in none_classifications:\n self._db.delete(c)\n\n # Update presentation\n policy = PresentationCalculationPolicy(\n classify=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True,\n )\n work.calculate_presentation(policy=policy)\n\n return Response(\"\", 200)\n\n MINIMUM_COVER_WIDTH = 600\n MINIMUM_COVER_HEIGHT = 900\n TOP = \"top\"\n CENTER = \"center\"\n BOTTOM = \"bottom\"\n TITLE_POSITIONS = [TOP, CENTER, BOTTOM]\n\n def _validate_cover_image(self, image):\n image_width, image_height = image.size\n if (\n image_width < self.MINIMUM_COVER_WIDTH\n or image_height < self.MINIMUM_COVER_HEIGHT\n ):\n return INVALID_IMAGE.detailed(\n _(\n \"Cover image must be at least %(width)spx in width and %(height)spx in height.\",\n width=self.MINIMUM_COVER_WIDTH,\n height=self.MINIMUM_COVER_HEIGHT,\n )\n )\n return True\n\n def _process_cover_image(self, work, image, title_position):\n title = work.presentation_edition.title\n author = work.presentation_edition.author\n if author == Edition.UNKNOWN_AUTHOR:\n author = \"\"\n\n if title_position in self.TITLE_POSITIONS:\n # Convert image to 'RGB' mode if it's not already, so drawing on it works.\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n\n draw = ImageDraw.Draw(image)\n image_width, image_height = image.size\n\n admin_dir = os.path.dirname(os.path.split(__file__)[0])\n package_dir = os.path.join(admin_dir, \"../..\")\n bold_font_path = os.path.join(package_dir, \"resources/OpenSans-Bold.ttf\")\n regular_font_path = os.path.join(\n package_dir, \"resources/OpenSans-Regular.ttf\"\n )\n font_size = image_width // 20\n bold_font = ImageFont.truetype(bold_font_path, font_size)\n regular_font = ImageFont.truetype(regular_font_path, font_size)\n\n padding = image_width / 40\n\n max_line_width = 0\n bold_char_width = bold_font.getsize(\"n\")[0]\n bold_char_count = image_width / bold_char_width\n regular_char_width = regular_font.getsize(\"n\")[0]\n regular_char_count = image_width / regular_char_width\n title_lines = textwrap.wrap(title, bold_char_count)\n author_lines = textwrap.wrap(author, regular_char_count)\n for lines, font in [(title_lines, bold_font), (author_lines, regular_font)]:\n for line in lines:\n line_width, ignore = font.getsize(line)\n if line_width > max_line_width:\n max_line_width = line_width\n\n ascent, descent = bold_font.getmetrics()\n line_height = ascent + descent\n\n total_text_height = line_height * (len(title_lines) + len(author_lines))\n rectangle_height = total_text_height + line_height\n\n rectangle_width = max_line_width + 2 * padding\n\n start_x = (image_width - rectangle_width) / 2\n if title_position == self.BOTTOM:\n start_y = 
image_height - rectangle_height - image_height / 14\n elif title_position == self.CENTER:\n start_y = (image_height - rectangle_height) / 2\n else:\n start_y = image_height / 14\n\n draw.rectangle(\n [\n (start_x, start_y),\n (start_x + rectangle_width, start_y + rectangle_height),\n ],\n fill=(255, 255, 255, 255),\n )\n\n current_y = start_y + line_height / 2\n for lines, font in [(title_lines, bold_font), (author_lines, regular_font)]:\n for line in lines:\n line_width, ignore = font.getsize(line)\n draw.text(\n (start_x + (rectangle_width - line_width) / 2, current_y),\n line,\n font=font,\n fill=(0, 0, 0, 255),\n )\n current_y += line_height\n\n del draw\n\n return image\n\n def preview_book_cover(self, identifier_type, identifier):\n \"\"\"Return a preview of the submitted cover image information.\"\"\"\n self.require_librarian(flask.request.library)\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n image = self.generate_cover_image(work, identifier_type, identifier, True)\n if isinstance(image, ProblemDetail):\n return image\n\n buffer = BytesIO()\n image.save(buffer, format=\"PNG\")\n b64 = base64.b64encode(buffer.getvalue())\n value = \"data:image/png;base64,%s\" % b64\n\n return Response(value, 200)\n\n def generate_cover_image(self, work, identifier_type, identifier, preview=False):\n image_file = flask.request.files.get(\"cover_file\")\n image_url = flask.request.form.get(\"cover_url\")\n if not image_file and not image_url:\n return INVALID_IMAGE.detailed(_(\"Image file or image URL is required.\"))\n elif image_url and not Validator()._is_url(image_url, []):\n return INVALID_URL.detailed(\n _('\"%(url)s\" is not a valid URL.', url=image_url)\n )\n\n title_position = flask.request.form.get(\"title_position\")\n if image_url and not image_file:\n image_file = BytesIO(urllib.request.urlopen(image_url).read())\n\n image = Image.open(image_file)\n result = self._validate_cover_image(image)\n if isinstance(result, ProblemDetail):\n return result\n\n if preview:\n image = self._title_position(work, image)\n\n return image\n\n def _title_position(self, work, image):\n title_position = flask.request.form.get(\"title_position\")\n if title_position and title_position in self.TITLE_POSITIONS:\n return self._process_cover_image(work, image, title_position)\n return image\n\n def _original_cover_info(\n self, image, work, data_source, rights_uri, rights_explanation\n ):\n original, derivation_settings, cover_href = None, None, None\n cover_rights_explanation = rights_explanation\n title_position = flask.request.form.get(\"title_position\")\n cover_url = flask.request.form.get(\"cover_url\")\n if title_position in self.TITLE_POSITIONS:\n original_href = cover_url\n original_buffer = BytesIO()\n image.save(original_buffer, format=\"PNG\")\n original_content = original_buffer.getvalue()\n if not original_href:\n original_href = Hyperlink.generic_uri(\n data_source,\n work.presentation_edition.primary_identifier,\n Hyperlink.IMAGE,\n content=original_content,\n )\n\n image = self._process_cover_image(work, image, title_position)\n\n original_rights_explanation = None\n if rights_uri != RightsStatus.IN_COPYRIGHT:\n original_rights_explanation = rights_explanation\n original = LinkData(\n Hyperlink.IMAGE,\n original_href,\n rights_uri=rights_uri,\n rights_explanation=original_rights_explanation,\n content=original_content,\n )\n derivation_settings = dict(title_position=title_position)\n if rights_uri in 
RightsStatus.ALLOWS_DERIVATIVES:\n cover_rights_explanation = (\n \"The original image license allows derivatives.\"\n )\n else:\n cover_href = cover_url\n\n return original, derivation_settings, cover_href, cover_rights_explanation\n\n def _get_collection_from_pools(self, identifier_type, identifier):\n pools = self.load_licensepools(\n flask.request.library, identifier_type, identifier\n )\n if isinstance(pools, ProblemDetail):\n return pools\n if not pools:\n return NO_LICENSES\n collection = pools[0].collection\n return collection\n\n def change_book_cover(self, identifier_type, identifier, mirrors=None):\n \"\"\"Save a new book cover based on the submitted form.\"\"\"\n self.require_librarian(flask.request.library)\n\n data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n\n work = self.load_work(flask.request.library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n rights_uri = flask.request.form.get(\"rights_status\")\n rights_explanation = flask.request.form.get(\"rights_explanation\")\n\n if not rights_uri:\n return INVALID_IMAGE.detailed(_(\"You must specify the image's license.\"))\n\n collection = self._get_collection_from_pools(identifier_type, identifier)\n if isinstance(collection, ProblemDetail):\n return collection\n\n # Look for an appropriate mirror to store this cover image. Since the\n # mirror should be used for covers, we don't need a mirror for books.\n mirrors = mirrors or dict(\n covers_mirror=MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS\n ),\n books_mirror=None,\n )\n if not mirrors.get(ExternalIntegrationLink.COVERS):\n return INVALID_CONFIGURATION_OPTION.detailed(\n _(\"Could not find a storage integration for uploading the cover.\")\n )\n\n image = self.generate_cover_image(work, identifier_type, identifier)\n if isinstance(image, ProblemDetail):\n return image\n\n (\n original,\n derivation_settings,\n cover_href,\n cover_rights_explanation,\n ) = self._original_cover_info(\n image, work, data_source, rights_uri, rights_explanation\n )\n\n buffer = BytesIO()\n image.save(buffer, format=\"PNG\")\n content = buffer.getvalue()\n\n if not cover_href:\n cover_href = Hyperlink.generic_uri(\n data_source,\n work.presentation_edition.primary_identifier,\n Hyperlink.IMAGE,\n content=content,\n )\n\n cover_data = LinkData(\n Hyperlink.IMAGE,\n href=cover_href,\n media_type=Representation.PNG_MEDIA_TYPE,\n content=content,\n rights_uri=rights_uri,\n rights_explanation=cover_rights_explanation,\n original=original,\n transformation_settings=derivation_settings,\n )\n\n presentation_policy = PresentationCalculationPolicy(\n choose_edition=False,\n set_edition_metadata=False,\n classify=False,\n choose_summary=False,\n calculate_quality=False,\n choose_cover=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=False,\n )\n\n replacement_policy = ReplacementPolicy(\n links=True,\n # link_content is false because we already have the content.\n # We don't want the metadata layer to try to fetch it again.\n link_content=False,\n mirrors=mirrors,\n presentation_calculation_policy=presentation_policy,\n )\n\n metadata = Metadata(data_source, links=[cover_data])\n metadata.apply(\n work.presentation_edition, collection, replace=replacement_policy\n )\n\n # metadata.apply only updates the edition, so we also need\n # to update the work.\n work.calculate_presentation(policy=presentation_policy)\n\n return Response(_(\"Success\"), 200)\n\n def 
_count_complaints_for_work(self, work):\n complaint_types = [\n complaint.type for complaint in work.complaints if not complaint.resolved\n ]\n return Counter(complaint_types)\n\n def custom_lists(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n library = flask.request.library\n work = self.load_work(library, identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n\n if flask.request.method == \"GET\":\n lists = []\n for entry in work.custom_list_entries:\n list = entry.customlist\n lists.append(dict(id=list.id, name=list.name))\n return dict(custom_lists=lists)\n\n if flask.request.method == \"POST\":\n lists = flask.request.form.get(\"lists\")\n if lists:\n lists = json.loads(lists)\n else:\n lists = []\n\n affected_lanes = set()\n\n # Remove entries for lists that were not in the submitted form.\n submitted_ids = [l.get(\"id\") for l in lists if l.get(\"id\")]\n for entry in work.custom_list_entries:\n if entry.list_id not in submitted_ids:\n list = entry.customlist\n list.remove_entry(work)\n for lane in Lane.affected_by_customlist(list):\n affected_lanes.add(lane)\n\n # Add entries for any new lists.\n for list_info in lists:\n id = list_info.get(\"id\")\n name = list_info.get(\"name\")\n\n if id:\n is_new = False\n list = get_one(\n self._db,\n CustomList,\n id=int(id),\n name=name,\n library=library,\n data_source=staff_data_source,\n )\n if not list:\n self._db.rollback()\n return MISSING_CUSTOM_LIST.detailed(\n _('Could not find list \"%(list_name)s\"', list_name=name)\n )\n else:\n list, is_new = create(\n self._db,\n CustomList,\n name=name,\n data_source=staff_data_source,\n library=library,\n )\n list.created = utc_now()\n entry, was_new = list.add_entry(work, featured=True)\n if was_new:\n for lane in Lane.affected_by_customlist(list):\n affected_lanes.add(lane)\n\n # If any list changes affected lanes, update their sizes.\n # NOTE: This may not make a difference until the\n # works are actually re-indexed.\n for lane in affected_lanes:\n lane.update_size(self._db, self.search_engine)\n\n return Response(str(_(\"Success\")), 200)\n", "id": "11662104", "language": "Python", "matching_score": 7.43546724319458, "max_stars_count": 0, "path": "api/admin/controller/work_editor.py" }, { "content": "\"\"\"An abstract way of representing incoming metadata and applying it\nto Identifiers and Editions.\n\nThis acts as an intermediary between the third-party integrations\n(which have this information in idiosyncratic formats) and the\nmodel. 
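\n\nFor example, a cover image discovered at some external source might be\nexpressed and applied like this (an illustrative sketch: the URL and the\nvariable names are invented, but only classes defined in this module are\nused):\n\n    cover = LinkData(rel=Hyperlink.IMAGE, href='https://example.com/cover.png',\n                     media_type=Representation.PNG_MEDIA_TYPE)\n    metadata = Metadata(data_source, links=[cover])\n    metadata.apply(edition, collection,\n                   replace=ReplacementPolicy.from_metadata_source())\n\n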
Doing a third-party integration should be as simple as putting\nthe information into this format.\n\"\"\"\nimport csv\nimport datetime\nimport logging\nimport re\nfrom collections import defaultdict\nfrom typing import Optional\n\nfrom dateutil.parser import parse\nfrom pymarc import MARCReader\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.expression import and_, or_\n\nfrom .analytics import Analytics\nfrom .classifier import NO_NUMBER, NO_VALUE, Classifier\nfrom .model import (\n CirculationEvent,\n Classification,\n Collection,\n Contributor,\n CoverageRecord,\n DataSource,\n DeliveryMechanism,\n Edition,\n Hyperlink,\n Identifier,\n License,\n LicensePool,\n LicensePoolDeliveryMechanism,\n LinkRelations,\n PresentationCalculationPolicy,\n Representation,\n Resource,\n RightsStatus,\n Subject,\n Timestamp,\n get_one,\n get_one_or_create,\n)\nfrom .model.configuration import ExternalIntegrationLink\nfrom .model.licensing import LicenseFunctions, LicenseStatus\nfrom .util import LanguageCodes\nfrom .util.datetime_helpers import strptime_utc, to_utc, utc_now\nfrom .util.http import RemoteIntegrationException\nfrom .util.median import median\nfrom .util.personal_names import display_name_to_sort_name, name_tidy\n\n\nclass ReplacementPolicy(object):\n \"\"\"How serious should we be about overwriting old metadata with\n this new metadata?\n \"\"\"\n\n def __init__(\n self,\n identifiers=False,\n subjects=False,\n contributions=False,\n links=False,\n formats=False,\n rights=False,\n link_content=False,\n mirrors=None,\n content_modifier=None,\n analytics=None,\n http_get=None,\n even_if_not_apparently_updated=False,\n presentation_calculation_policy=None,\n ):\n self.identifiers = identifiers\n self.subjects = subjects\n self.contributions = contributions\n self.links = links\n self.rights = rights\n self.formats = formats\n self.link_content = link_content\n self.even_if_not_apparently_updated = even_if_not_apparently_updated\n self.mirrors = mirrors\n self.content_modifier = content_modifier\n self.analytics = analytics\n self.http_get = http_get\n self.presentation_calculation_policy = (\n presentation_calculation_policy or PresentationCalculationPolicy()\n )\n\n @classmethod\n def from_license_source(cls, _db, **args):\n \"\"\"When gathering data from the license source, overwrite all old data\n from this source with new data from the same source. Also\n overwrite an old rights status with an updated status and update\n the list of available formats. Log availability changes to the\n configured analytics services.\n \"\"\"\n return cls(\n identifiers=True,\n subjects=True,\n contributions=True,\n links=True,\n rights=True,\n formats=True,\n analytics=Analytics(_db),\n **args,\n )\n\n @classmethod\n def from_metadata_source(cls, **args):\n \"\"\"When gathering data from a metadata source, overwrite all old data\n from this source, but do not overwrite the rights status or\n the available formats. 
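\n        (Contrast from_license_source above, which does replace rights and\n        formats.)\n        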
License sources are the authority on rights\n and formats, and metadata sources have no say in the matter.\n \"\"\"\n return cls(\n identifiers=True,\n subjects=True,\n contributions=True,\n links=True,\n rights=False,\n formats=False,\n **args,\n )\n\n @classmethod\n def append_only(cls, **args):\n \"\"\"Don't overwrite any information, just append it.\n\n This should probably never be used.\n \"\"\"\n return cls(\n identifiers=False,\n subjects=False,\n contributions=False,\n links=False,\n rights=False,\n formats=False,\n **args,\n )\n\n\nclass SubjectData(object):\n def __init__(self, type, identifier, name=None, weight=1):\n self.type = type\n\n # Because subjects are sometimes evaluated according to keyword\n # matching, it's important that any leading or trailing white\n # space is removed during import.\n self.identifier = identifier\n if identifier:\n self.identifier = identifier.strip()\n\n self.name = name\n if name:\n self.name = name.strip()\n\n self.weight = weight\n\n @property\n def key(self):\n return self.type, self.identifier, self.name, self.weight\n\n def __repr__(self):\n return '<SubjectData type=\"%s\" identifier=\"%s\" name=\"%s\" weight=%d>' % (\n self.type,\n self.identifier,\n self.name,\n self.weight,\n )\n\n\nclass ContributorData(object):\n def __init__(\n self,\n sort_name=None,\n display_name=None,\n family_name=None,\n wikipedia_name=None,\n roles=None,\n lc=None,\n viaf=None,\n biography=None,\n aliases=None,\n extra=None,\n ):\n self.sort_name = sort_name\n self.display_name = display_name\n self.family_name = family_name\n self.wikipedia_name = wikipedia_name\n if roles is None:\n roles = Contributor.AUTHOR_ROLE\n if not isinstance(roles, list):\n roles = [roles]\n self.roles = roles\n self.lc = lc\n self.viaf = viaf\n self.biography = biography\n self.aliases = aliases or []\n # extra is a dictionary of stuff like birthdates\n self.extra = extra or dict()\n # TODO: consider if it's time for ContributorData to connect back to Contributions\n\n def __repr__(self):\n return '<ContributorData sort=\"%s\" display=\"%s\" family=\"%s\" wiki=\"%s\" roles=%r lc=%s viaf=%s>' % (\n self.sort_name,\n self.display_name,\n self.family_name,\n self.wikipedia_name,\n self.roles,\n self.lc,\n self.viaf,\n )\n\n @classmethod\n def from_contribution(cls, contribution):\n \"\"\"Create a ContributorData object from a data-model Contribution\n object.\n \"\"\"\n c = contribution.contributor\n return cls(\n sort_name=c.sort_name,\n display_name=c.display_name,\n family_name=c.family_name,\n wikipedia_name=c.wikipedia_name,\n lc=c.lc,\n viaf=c.viaf,\n biography=c.biography,\n aliases=c.aliases,\n roles=[contribution.role],\n )\n\n @classmethod\n def lookup(cls, _db, sort_name=None, display_name=None, lc=None, viaf=None):\n \"\"\"Create a (potentially synthetic) ContributorData based on\n the best available information in the database.\n\n :return: A ContributorData.\n \"\"\"\n clauses = []\n if sort_name:\n clauses.append(Contributor.sort_name == sort_name)\n if display_name:\n clauses.append(Contributor.display_name == display_name)\n if lc:\n clauses.append(Contributor.lc == lc)\n if viaf:\n clauses.append(Contributor.viaf == viaf)\n\n if not clauses:\n raise ValueError(\"No Contributor information provided!\")\n\n or_clause = or_(*clauses)\n contributors = _db.query(Contributor).filter(or_clause).all()\n if len(contributors) == 0:\n # We have no idea who this person is.\n return None\n\n # We found at least one matching Contributor. 
Let's try to\n # build a composite ContributorData for the person.\n values_by_field = defaultdict(set)\n\n # If all the people we found share (e.g.) a VIAF field, then\n # we can use that as a clue when doing a search -- anyone with\n # that VIAF number is probably this person, even if their display\n # name doesn't match.\n for c in contributors:\n if c.sort_name:\n values_by_field[\"sort_name\"].add(c.sort_name)\n if c.display_name:\n values_by_field[\"display_name\"].add(c.display_name)\n if c.lc:\n values_by_field[\"lc\"].add(c.lc)\n if c.viaf:\n values_by_field[\"viaf\"].add(c.viaf)\n\n # Use any passed-in values as default values for the\n # ContributorData. Below, missing values may be filled in and\n # inaccurate values may be replaced.\n kwargs = dict(sort_name=sort_name, display_name=display_name, lc=lc, viaf=viaf)\n for k, values in list(values_by_field.items()):\n if len(values) == 1:\n # All the Contributors we found have the same\n # value for this field. We can use it.\n kwargs[k] = list(values)[0]\n\n return ContributorData(roles=[], **kwargs)\n\n def apply(self, destination, replace=None):\n \"\"\"Update the passed-in Contributor-type object with this\n ContributorData's information.\n\n :param: destination -- the Contributor or ContributorData object to\n write this ContributorData object's metadata to.\n :param: replace -- Replacement policy (not currently used).\n\n :return: the possibly changed Contributor object and a flag of whether it's been changed.\n \"\"\"\n log = logging.getLogger(\"Abstract metadata layer\")\n log.debug(\n \"Applying %r (%s) into %r (%s)\",\n self,\n self.viaf,\n destination,\n destination.viaf,\n )\n\n made_changes = False\n\n if self.sort_name and self.sort_name != destination.sort_name:\n destination.sort_name = self.sort_name\n made_changes = True\n\n existing_aliases = set(destination.aliases)\n new_aliases = list(destination.aliases)\n for name in [self.sort_name] + self.aliases:\n if name != destination.sort_name and name not in existing_aliases:\n new_aliases.append(name)\n made_changes = True\n if new_aliases != destination.aliases:\n destination.aliases = new_aliases\n made_changes = True\n\n for k, v in list(self.extra.items()):\n if not k in destination.extra:\n destination.extra[k] = v\n\n if self.lc and self.lc != destination.lc:\n destination.lc = self.lc\n made_changes = True\n if self.viaf and self.viaf != destination.viaf:\n destination.viaf = self.viaf\n made_changes = True\n if self.family_name and self.family_name != destination.family_name:\n destination.family_name = self.family_name\n made_changes = True\n if self.display_name and self.display_name != destination.display_name:\n destination.display_name = self.display_name\n made_changes = True\n if self.wikipedia_name and self.wikipedia_name != destination.wikipedia_name:\n destination.wikipedia_name = self.wikipedia_name\n made_changes = True\n\n if self.biography and self.biography != destination.biography:\n destination.biography = self.biography\n made_changes = True\n\n # TODO: Contributor.merge_into also looks at\n # contributions. 
Could maybe extract contributions from roles,\n # but not sure it'd be useful.\n\n return destination, made_changes\n\n def find_sort_name(self, _db, identifiers, metadata_client):\n\n \"\"\"Try as hard as possible to find this person's sort name.\"\"\"\n log = logging.getLogger(\"Abstract metadata layer\")\n if self.sort_name:\n # log.debug(\n # \"%s already has a sort name: %s\",\n # self.display_name,\n # self.sort_name\n # )\n return True\n\n if not self.display_name:\n raise ValueError(\n \"Cannot find sort name for a contributor with no display name!\"\n )\n\n # Is there a contributor already in the database with this\n # exact sort name? If so, use their display name.\n # If not, take our best guess based on the display name.\n sort_name = self.display_name_to_sort_name_from_existing_contributor(\n _db, self.display_name\n )\n if sort_name:\n self.sort_name = sort_name\n return True\n\n # Time to break out the big guns. Ask the metadata wrangler\n # if it can find a sort name for this display name.\n if metadata_client:\n try:\n sort_name = self.display_name_to_sort_name_through_canonicalizer(\n _db, identifiers, metadata_client\n )\n except RemoteIntegrationException as e:\n # There was some kind of problem with the metadata\n # wrangler. Act as though no metadata wrangler had\n # been provided.\n log = logging.getLogger(\"Abstract metadata layer\")\n log.error(\n \"Metadata client exception while determining sort name for %s\",\n self.display_name,\n exc_info=e,\n )\n if sort_name:\n self.sort_name = sort_name\n return True\n\n # If there's still no sort name, take our best guess based\n # on the display name.\n self.sort_name = display_name_to_sort_name(self.display_name)\n\n return self.sort_name is not None\n\n @classmethod\n def display_name_to_sort_name_from_existing_contributor(self, _db, display_name):\n \"\"\"Find the sort name for this book's author, assuming it's easy.\n\n 'Easy' means we already have an established sort name for a\n Contributor with this exact display name.\n\n If it's not easy, this will be taken care of later with a call to\n the metadata wrangler's author canonicalization service.\n\n If we have a copy of this book in our collection (the only\n time an external list item is relevant), this will probably be\n easy.\n \"\"\"\n contributors = (\n _db.query(Contributor)\n .filter(Contributor.display_name == display_name)\n .filter(Contributor.sort_name != None)\n .all()\n )\n if contributors:\n log = logging.getLogger(\"Abstract metadata layer\")\n log.debug(\n \"Determined that sort name of %s is %s based on previously existing contributor\",\n display_name,\n contributors[0].sort_name,\n )\n return contributors[0].sort_name\n return None\n\n def _display_name_to_sort_name(self, _db, metadata_client, identifier_obj):\n response = metadata_client.canonicalize_author_name(\n identifier_obj, self.display_name\n )\n sort_name = None\n\n if isinstance(response, (bytes, str)):\n sort_name = response\n else:\n log = logging.getLogger(\"Abstract metadata layer\")\n if response.status_code == 200 and response.headers[\n \"Content-Type\"\n ].startswith(\"text/plain\"):\n sort_name = response.content\n log.info(\n \"Canonicalizer found sort name for %r: %s => %s\",\n identifier_obj,\n self.display_name,\n sort_name,\n )\n else:\n log.warning(\n \"Canonicalizer could not find sort name for %r/%s\",\n identifier_obj,\n self.display_name,\n )\n return sort_name\n\n def display_name_to_sort_name_through_canonicalizer(\n self, _db, identifiers, metadata_client\n ):\n 
sort_name = None\n for identifier in identifiers:\n if identifier.type != Identifier.ISBN:\n continue\n identifier_obj, ignore = identifier.load(_db)\n sort_name = self._display_name_to_sort_name(\n _db, metadata_client, identifier_obj\n )\n if sort_name:\n break\n\n if not sort_name:\n sort_name = self._display_name_to_sort_name(_db, metadata_client, None)\n return sort_name\n\n\nclass IdentifierData(object):\n def __init__(self, type, identifier, weight=1):\n self.type = type\n self.weight = weight\n self.identifier = identifier\n\n def __repr__(self):\n return '<IdentifierData type=\"%s\" identifier=\"%s\" weight=\"%s\">' % (\n self.type,\n self.identifier,\n self.weight,\n )\n\n def load(self, _db):\n return Identifier.for_foreign_id(_db, self.type, self.identifier)\n\n\nclass LinkData(object):\n def __init__(\n self,\n rel,\n href=None,\n media_type=None,\n content=None,\n thumbnail=None,\n rights_uri=None,\n rights_explanation=None,\n original=None,\n transformation_settings=None,\n ):\n if not rel:\n raise ValueError(\"rel is required\")\n\n if not href and not content:\n raise ValueError(\"Either href or content is required\")\n self.rel = rel\n self.href = href\n self.media_type = media_type\n self.content = content\n self.thumbnail = thumbnail\n # This handles content sources like unglue.it that have rights for each link\n # rather than each edition, and rights for cover images.\n self.rights_uri = rights_uri\n self.rights_explanation = rights_explanation\n # If this LinkData is a derivative, it may also contain the original link\n # and the settings used to transform the original into the derivative.\n self.original = original\n self.transformation_settings = transformation_settings or {}\n\n @property\n def guessed_media_type(self):\n \"\"\"If the media type of a link is unknown, take a guess.\"\"\"\n if self.media_type:\n # We know.\n return self.media_type\n\n if self.href:\n # Take a guess.\n return Representation.guess_url_media_type_from_path(self.href)\n\n # No idea.\n # TODO: We might be able to take a further guess based on the\n # content and the link relation.\n return None\n\n def __repr__(self):\n if self.content:\n content = \", %d bytes content\" % len(self.content)\n else:\n content = \"\"\n if self.thumbnail:\n thumbnail = \", has thumbnail\"\n else:\n thumbnail = \"\"\n return '<LinkData: rel=\"%s\" href=\"%s\" media_type=%r%s%s>' % (\n self.rel,\n self.href,\n self.media_type,\n thumbnail,\n content,\n )\n\n def mirror_type(self):\n \"\"\"Returns the type of mirror that should be used for the link.\"\"\"\n if self.rel in [Hyperlink.IMAGE, Hyperlink.THUMBNAIL_IMAGE]:\n return ExternalIntegrationLink.COVERS\n\n if self.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:\n return ExternalIntegrationLink.OPEN_ACCESS_BOOKS\n elif self.rel == Hyperlink.GENERIC_OPDS_ACQUISITION:\n return ExternalIntegrationLink.PROTECTED_ACCESS_BOOKS\n\n\nclass MeasurementData(object):\n def __init__(self, quantity_measured, value, weight=1, taken_at=None):\n if not quantity_measured:\n raise ValueError(\"quantity_measured is required.\")\n if value is None:\n raise ValueError(\"measurement value is required.\")\n self.quantity_measured = quantity_measured\n if not isinstance(value, float) and not isinstance(value, int):\n value = float(value)\n self.value = value\n self.weight = weight\n self.taken_at = taken_at or utc_now()\n\n def __repr__(self):\n return '<MeasurementData quantity=\"%s\" value=%f weight=%d taken=%s>' % (\n self.quantity_measured,\n self.value,\n self.weight,\n 
self.taken_at,\n )\n\n\nclass FormatData(object):\n def __init__(self, content_type, drm_scheme, link=None, rights_uri=None):\n self.content_type = content_type\n self.drm_scheme = drm_scheme\n if link and not isinstance(link, LinkData):\n raise TypeError(\"Expected LinkData object, got %s\" % type(link))\n self.link = link\n self.rights_uri = rights_uri\n if (not self.rights_uri) and self.link and self.link.rights_uri:\n self.rights_uri = self.link.rights_uri\n\n\nclass LicenseData(LicenseFunctions):\n def __init__(\n self,\n identifier: str,\n checkout_url: str,\n status_url: str,\n status: LicenseStatus,\n checkouts_available: int,\n expires: Optional[datetime.datetime] = None,\n checkouts_left: Optional[int] = None,\n terms_concurrency: Optional[int] = None,\n content_types: Optional[str] = None,\n ):\n self.identifier = identifier\n self.checkout_url = checkout_url\n self.status_url = status_url\n self.status = status\n self.expires = expires\n self.checkouts_left = checkouts_left\n self.checkouts_available = checkouts_available\n self.terms_concurrency = terms_concurrency\n self.content_types = content_types\n\n def add_to_pool(self, db: Session, pool: LicensePool):\n license_obj, _ = get_one_or_create(\n db,\n License,\n identifier=self.identifier,\n license_pool=pool,\n )\n for key, value in vars(self).items():\n if key != \"content_types\":\n setattr(license_obj, key, value)\n return license_obj\n\n\nclass TimestampData(object):\n\n CLEAR_VALUE = Timestamp.CLEAR_VALUE\n\n def __init__(\n self, start=None, finish=None, achievements=None, counter=None, exception=None\n ):\n \"\"\"A constructor intended to be used by a service to customize its\n eventual Timestamp.\n\n service, service_type, and collection cannot be set through\n this constructor, because they are generally not under the\n control of the code that runs the service. 
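\n\n        A service will typically pass only its own progress information here --\n        for instance TimestampData(achievements='Books processed: 12',\n        counter=12), with the values invented for illustration -- and leave\n        everything else unset.\n\n        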
They are set\n afterwards, in finalize().\n\n :param start: The time that the service should be considered to\n have started running.\n :param finish: The time that the service should be considered\n to have stopped running.\n :param achievements: A string describing what was achieved by the\n service.\n :param counter: A single integer item of state representing the point\n at which the service left off.\n :param exception: A traceback representing an exception that stopped\n the progress of the service.\n \"\"\"\n\n # These are set by finalize().\n self.service = None\n self.service_type = None\n self.collection_id = None\n\n self.start = start\n self.finish = finish\n self.achievements = achievements\n self.counter = counter\n self.exception = exception\n\n @property\n def is_failure(self):\n \"\"\"Does this TimestampData represent an unrecoverable failure?\"\"\"\n return self.exception not in (None, self.CLEAR_VALUE)\n\n @property\n def is_complete(self):\n \"\"\"Does this TimestampData represent an operation that has\n completed?\n\n An operation is completed if it has failed, or if the time of its\n completion is known.\n \"\"\"\n return self.is_failure or self.finish not in (None, self.CLEAR_VALUE)\n\n def finalize(\n self,\n service,\n service_type,\n collection,\n start=None,\n finish=None,\n achievements=None,\n counter=None,\n exception=None,\n ):\n \"\"\"Finalize any values that were not set during the constructor.\n\n This is intended to be run by the code that originally ran the\n service.\n\n The given values for `start`, `finish`, `achievements`,\n `counter`, and `exception` will be used only if the service\n did not specify its own values for those fields.\n \"\"\"\n self.service = service\n self.service_type = service_type\n if collection is None:\n self.collection_id = None\n else:\n self.collection_id = collection.id\n if self.start is None:\n self.start = start\n if self.finish is None:\n if finish is None:\n finish = utc_now()\n self.finish = finish\n if self.start is None:\n self.start = self.finish\n if self.counter is None:\n self.counter = counter\n if self.exception is None:\n self.exception = exception\n\n def collection(self, _db):\n return get_one(_db, Collection, id=self.collection_id)\n\n def apply(self, _db):\n if any(x is None for x in [self.service, self.service_type]):\n raise ValueError(\n \"Not enough information to write TimestampData to the database.\"\n )\n\n return Timestamp.stamp(\n _db,\n self.service,\n self.service_type,\n self.collection(_db),\n self.start,\n self.finish,\n self.achievements,\n self.counter,\n self.exception,\n )\n\n\nclass MetaToModelUtility(object):\n \"\"\"\n Contains functionality common to both CirculationData and Metadata.\n \"\"\"\n\n log = logging.getLogger(\"Abstract metadata layer - mirror code\")\n\n def mirror_link(self, model_object, data_source, link, link_obj, policy):\n \"\"\"Retrieve a copy of the given link and make sure it gets\n mirrored. 
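# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# The TimestampData lifecycle described above: the service fills in what it
# knows, the caller finalizes the rest, and apply() writes the Timestamp row.
# `_db`, `collection`, and the service name are hypothetical caller-supplied
# values; Timestamp.MONITOR_TYPE is assumed to be one of the model's
# service-type constants.
def _example_record_timestamp(_db, collection):
    progress = TimestampData(achievements="Processed 42 records", counter=42)

    # finalize() fills in the service identity and any values the service
    # itself did not set (finish defaults to utc_now()).
    progress.finalize(
        service="Example monitor",          # hypothetical service name
        service_type=Timestamp.MONITOR_TYPE,  # assumed constant
        collection=collection,
        start=utc_now(),
    )
    return progress.apply(_db)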
If it's a full-size image, create a thumbnail and\n mirror that too.\n\n The model_object can be either a pool or an edition.\n \"\"\"\n if link_obj.rel not in Hyperlink.MIRRORED:\n # we only host locally open-source epubs and cover images\n if link.href:\n # The log message only makes sense if the resource is\n # hosted elsewhere.\n self.log.info(\"Not mirroring %s: rel=%s\", link.href, link_obj.rel)\n return\n\n if link.rights_uri and link.rights_uri == RightsStatus.IN_COPYRIGHT:\n self.log.info(\n \"Not mirroring %s: rights status=%s\" % (link.href, link.rights_uri)\n )\n return\n\n mirror_type = link.mirror_type()\n\n if mirror_type in policy.mirrors:\n mirror = policy.mirrors[mirror_type]\n if not mirror:\n return\n else:\n self.log.info(\"No mirror uploader with key %s found\" % mirror_type)\n return\n\n http_get = policy.http_get\n\n _db = Session.object_session(link_obj)\n original_url = link.href\n\n self.log.info(\"About to mirror %s\" % original_url)\n pools = []\n edition = None\n title = None\n identifier = None\n if model_object:\n if isinstance(model_object, LicensePool):\n pools = [model_object]\n identifier = model_object.identifier\n\n if (\n identifier\n and identifier.primarily_identifies\n and identifier.primarily_identifies[0]\n ):\n edition = identifier.primarily_identifies[0]\n elif isinstance(model_object, Edition):\n pools = model_object.license_pools\n identifier = model_object.primary_identifier\n edition = model_object\n if edition and edition.title:\n title = edition.title\n else:\n title = getattr(self, \"title\", None) or None\n\n if (not identifier) or (\n link_obj.identifier and identifier != link_obj.identifier\n ):\n # insanity found\n self.log.warning(\n \"Tried to mirror a link with an invalid identifier %r\" % identifier\n )\n return\n\n max_age = None\n if policy.link_content:\n # We want to fetch the representation again, even if we\n # already have a recent usable copy. If we fetch it and it\n # hasn't changed, we'll keep using the one we have.\n max_age = 0\n\n # This will fetch a representation of the original and\n # store it in the database.\n representation, is_new = Representation.get(\n _db,\n link.href,\n do_get=http_get,\n presumed_media_type=link.media_type,\n max_age=max_age,\n )\n\n # Make sure the (potentially newly-fetched) representation is\n # associated with the resource.\n link_obj.resource.representation = representation\n\n # If we couldn't fetch this representation, don't mirror it,\n # and if this was an open/protected access link, then suppress the associated\n # license pool until someone fixes it manually.\n # The license pool to suppress will be either the passed-in model_object (if it's of type pool),\n # or the license pool associated with the passed-in model object (if it's of type edition).\n if representation.fetch_exception:\n if pools and link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:\n for pool in pools:\n pool.suppressed = True\n pool.license_exception = (\n \"Fetch exception: %s\" % representation.fetch_exception\n )\n self.log.error(pool.license_exception)\n return\n\n # If we fetched the representation and it hasn't changed,\n # the previously mirrored version is fine. 
Don't mirror it\n # again.\n if representation.status_code == 304 and representation.mirror_url:\n self.log.info(\n \"Representation has not changed, assuming mirror at %s is up to date.\",\n representation.mirror_url,\n )\n return\n\n if representation.status_code // 100 not in (2, 3):\n self.log.info(\n \"Representation %s gave %s status code, not mirroring.\",\n representation.url,\n representation.status_code,\n )\n return\n\n if policy.content_modifier:\n policy.content_modifier(representation)\n\n # The metadata may have some idea about the media type for this\n # LinkObject, but it could be wrong. If the representation we\n # actually just saw is a mirrorable media type, that takes\n # precedence. If we were expecting this link to be mirrorable\n # but we actually saw something that's not, assume our original\n # metadata was right and the server told us the wrong media type.\n if representation.media_type and representation.mirrorable_media_type:\n link.media_type = representation.media_type\n\n if not representation.mirrorable_media_type:\n if link.media_type:\n self.log.info(\n \"Saw unsupported media type for %s: %s. Assuming original media type %s is correct\",\n representation.url,\n representation.media_type,\n link.media_type,\n )\n representation.media_type = link.media_type\n else:\n self.log.info(\n \"Not mirroring %s: unsupported media type %s\",\n representation.url,\n representation.media_type,\n )\n return\n\n # Determine the best URL to use when mirroring this\n # representation.\n if (\n link.media_type in Representation.BOOK_MEDIA_TYPES\n or link.media_type in Representation.AUDIOBOOK_MEDIA_TYPES\n ):\n url_title = title or identifier.identifier\n extension = representation.extension()\n mirror_url = mirror.book_url(\n identifier,\n data_source=data_source,\n title=url_title,\n extension=extension,\n open_access=link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD,\n )\n else:\n filename = representation.default_filename(\n link_obj, representation.media_type\n )\n mirror_url = mirror.cover_image_url(data_source, identifier, filename)\n\n # Mirror it.\n collection = pools[0].collection if pools else None\n mirror.mirror_one(representation, mirror_to=mirror_url, collection=collection)\n\n # If we couldn't mirror an open/protected access link representation, suppress\n # the license pool until someone fixes it manually.\n if representation.mirror_exception:\n if pools and link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:\n for pool in pools:\n pool.suppressed = True\n pool.license_exception = (\n \"Mirror exception: %s\" % representation.mirror_exception\n )\n self.log.error(pool.license_exception)\n\n if link_obj.rel == Hyperlink.IMAGE:\n # Create and mirror a thumbnail.\n thumbnail_filename = representation.default_filename(\n link_obj, Representation.PNG_MEDIA_TYPE\n )\n thumbnail_url = mirror.cover_image_url(\n data_source,\n identifier,\n thumbnail_filename,\n Edition.MAX_THUMBNAIL_HEIGHT,\n )\n thumbnail, is_new = representation.scale(\n max_height=Edition.MAX_THUMBNAIL_HEIGHT,\n max_width=Edition.MAX_THUMBNAIL_WIDTH,\n destination_url=thumbnail_url,\n destination_media_type=Representation.PNG_MEDIA_TYPE,\n force=True,\n )\n if is_new:\n # A thumbnail was created distinct from the original\n # image. Mirror it as well.\n mirror.mirror_one(\n thumbnail, mirror_to=thumbnail_url, collection=collection\n )\n\n if link_obj.rel in Hyperlink.SELF_HOSTED_BOOKS:\n # If we mirrored book content successfully, remove it from\n # the database to save space. 
We do keep images in case we\n # ever need to resize them or mirror them elsewhere.\n if representation.mirrored_at and not representation.mirror_exception:\n representation.content = None\n\n\nclass CirculationData(MetaToModelUtility):\n \"\"\"Information about actual copies of a book that can be delivered to\n patrons.\n\n As distinct from Metadata, which is a container for information\n about a book.\n\n Basically,\n Metadata : Edition :: CirculationData : Licensepool\n \"\"\"\n\n log = logging.getLogger(\"Abstract metadata layer - Circulation data\")\n\n def __init__(\n self,\n data_source,\n primary_identifier,\n licenses_owned=None,\n licenses_available=None,\n licenses_reserved=None,\n patrons_in_hold_queue=None,\n formats=None,\n default_rights_uri=None,\n links=None,\n licenses=None,\n last_checked=None,\n ):\n \"\"\"Constructor.\n\n :param data_source: The authority providing the lending licenses.\n This may be a DataSource object or the name of the data source.\n :param primary_identifier: An Identifier or IdentifierData representing\n how the lending authority distinguishes this book from others.\n \"\"\"\n self._data_source = data_source\n\n if isinstance(self._data_source, DataSource):\n self.data_source_obj = self._data_source\n self.data_source_name = self.data_source_obj.name\n else:\n self.data_source_obj = None\n self.data_source_name = data_source\n if isinstance(primary_identifier, Identifier):\n self.primary_identifier_obj = primary_identifier\n self._primary_identifier = IdentifierData(\n primary_identifier.type, primary_identifier.identifier\n )\n else:\n self.primary_identifier_obj = None\n self._primary_identifier = primary_identifier\n self.licenses_owned = licenses_owned\n self.licenses_available = licenses_available\n self.licenses_reserved = licenses_reserved\n self.patrons_in_hold_queue = patrons_in_hold_queue\n\n # If no 'last checked' data was provided, assume the data was\n # just gathered.\n self.last_checked = last_checked or utc_now()\n\n # format contains pdf/epub, drm, link\n self.formats = formats or []\n\n self.default_rights_uri = None\n self.set_default_rights_uri(\n data_source_name=self.data_source_name,\n default_rights_uri=default_rights_uri,\n )\n\n self.__links = None\n self.links = links\n\n # Information about individual terms for each license in a pool. 
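# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# mirror_link() above reads policy.mirrors as a mapping keyed by the
# ExternalIntegrationLink purposes returned from LinkData.mirror_type(), plus
# policy.http_get for fetching. ReplacementPolicy's constructor is defined
# elsewhere in this module, so the mirror-related attributes are set directly
# here; the uploader objects and http_get callable are hypothetical
# caller-supplied values (e.g. MirrorUploader instances).
def _example_mirror_policy(covers_uploader, books_uploader, http_get):
    policy = ReplacementPolicy(links=True)
    policy.mirrors = {
        ExternalIntegrationLink.COVERS: covers_uploader,
        ExternalIntegrationLink.OPEN_ACCESS_BOOKS: books_uploader,
    }
    policy.http_get = http_get
    return policy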
If we are\n # given licenses then they are used to calculate values for the LicensePool\n # instead of directly using the values that are given to CirculationData.\n self.licenses = licenses\n\n @property\n def links(self):\n return self.__links\n\n @links.setter\n def links(self, arg_links):\n \"\"\"If got passed all links, indiscriminately, filter out to only those relevant to\n pools (the rights-related links).\n \"\"\"\n # start by deleting any old links\n self.__links = []\n\n if not arg_links:\n return\n\n for link in arg_links:\n if link.rel in Hyperlink.CIRCULATION_ALLOWED:\n # TODO: what about Hyperlink.SAMPLE?\n # only accept the types of links relevant to pools\n self.__links.append(link)\n\n # An open-access link or open-access rights implies a FormatData object.\n open_access_link = (\n link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD and link.href\n )\n # try to deduce if the link is open-access, even if it doesn't explicitly say it is\n rights_uri = link.rights_uri or self.default_rights_uri\n open_access_rights_link = (\n link.media_type in Representation.BOOK_MEDIA_TYPES\n and link.href\n and rights_uri in RightsStatus.OPEN_ACCESS\n )\n\n if open_access_link or open_access_rights_link:\n if (\n open_access_link\n and rights_uri != RightsStatus.IN_COPYRIGHT\n and not rights_uri in RightsStatus.OPEN_ACCESS\n ):\n # We don't know exactly what's going on here but\n # the link said it was an open-access book\n # and the rights URI doesn't contradict it,\n # so treat it as a generic open-access book.\n rights_uri = RightsStatus.GENERIC_OPEN_ACCESS\n format_found = False\n for format in self.formats:\n if format and format.link and format.link.href == link.href:\n if not format.rights_uri:\n format.rights_uri = rights_uri\n format_found = True\n break\n if not format_found:\n self.formats.append(\n FormatData(\n content_type=link.media_type,\n drm_scheme=DeliveryMechanism.NO_DRM,\n link=link,\n rights_uri=rights_uri,\n )\n )\n\n def __repr__(self):\n description_string = \"<CirculationData primary_identifier=%(primary_identifier)r| licenses_owned=%(licenses_owned)s|\"\n description_string += \" licenses_available=%(licenses_available)s| default_rights_uri=%(default_rights_uri)s|\"\n description_string += (\n \" links=%(links)r| formats=%(formats)r| data_source=%(data_source)s|>\"\n )\n\n description_data = {\"licenses_owned\": self.licenses_owned}\n if self._primary_identifier:\n description_data[\"primary_identifier\"] = self._primary_identifier\n else:\n description_data[\"primary_identifier\"] = self.primary_identifier_obj\n description_data[\"licenses_available\"] = self.licenses_available\n description_data[\"default_rights_uri\"] = self.default_rights_uri\n description_data[\"links\"] = self.links\n description_data[\"formats\"] = self.formats\n description_data[\"data_source\"] = self.data_source_name\n\n return description_string % description_data\n\n def data_source(self, _db):\n \"\"\"Find the DataSource associated with this circulation information.\"\"\"\n if not self.data_source_obj:\n if self._data_source:\n obj = DataSource.lookup(_db, self._data_source)\n if not obj:\n raise ValueError(\"Data source %s not found!\" % self._data_source)\n else:\n obj = None\n self.data_source_obj = obj\n return self.data_source_obj\n\n def primary_identifier(self, _db):\n \"\"\"Find the Identifier associated with this circulation information.\"\"\"\n if not self.primary_identifier_obj:\n if self._primary_identifier:\n obj, ignore = self._primary_identifier.load(_db)\n else:\n obj = 
None\n self.primary_identifier_obj = obj\n return self.primary_identifier_obj\n\n def license_pool(self, _db, collection, analytics=None):\n \"\"\"Find or create a LicensePool object for this CirculationData.\n\n :param collection: The LicensePool object will be associated with\n the given Collection.\n\n :param analytics: If the LicensePool is newly created, the event\n will be tracked with this.\n \"\"\"\n if not collection:\n raise ValueError(\"Cannot find license pool: no collection provided.\")\n identifier = self.primary_identifier(_db)\n if not identifier:\n raise ValueError(\n \"Cannot find license pool: CirculationData has no primary identifier.\"\n )\n\n data_source_obj = self.data_source(_db)\n license_pool, is_new = LicensePool.for_foreign_id(\n _db,\n data_source=data_source_obj,\n foreign_id_type=identifier.type,\n foreign_id=identifier.identifier,\n collection=collection,\n )\n\n if is_new:\n license_pool.open_access = self.has_open_access_link\n license_pool.availability_time = self.last_checked\n # This is our first time seeing this LicensePool. Log its\n # occurrence as a separate analytics event.\n if analytics:\n for library in collection.libraries:\n analytics.collect_event(\n library,\n license_pool,\n CirculationEvent.DISTRIBUTOR_TITLE_ADD,\n self.last_checked,\n old_value=0,\n new_value=1,\n )\n license_pool.last_checked = self.last_checked\n\n return license_pool, is_new\n\n @property\n def has_open_access_link(self):\n \"\"\"Does this Circulation object have an associated open-access link?\"\"\"\n return any(\n [\n x\n for x in self.links\n if x.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD\n and x.href\n and x.rights_uri != RightsStatus.IN_COPYRIGHT\n ]\n )\n\n def set_default_rights_uri(self, data_source_name, default_rights_uri=None):\n if default_rights_uri:\n self.default_rights_uri = default_rights_uri\n\n elif data_source_name:\n # We didn't get rights passed in, so use the default rights for the data source if any.\n default = RightsStatus.DATA_SOURCE_DEFAULT_RIGHTS_STATUS.get(\n data_source_name, None\n )\n if default:\n self.default_rights_uri = default\n\n if not self.default_rights_uri:\n # We still haven't determined rights, so it's unknown.\n self.default_rights_uri = RightsStatus.UNKNOWN\n\n def apply(self, _db, collection, replace=None):\n \"\"\"Update the title with this CirculationData's information.\n\n :param collection: A Collection representing actual copies of\n this title. Availability information (e.g. number of copies)\n will be associated with a LicensePool in this Collection. If\n this is not present, only delivery information (e.g. format\n information and open-access downloads) will be processed.\n\n \"\"\"\n # Immediately raise an exception if there is information that\n # can only be stored in a LicensePool, but we have no\n # Collection to tell us which LicensePool to use. 
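# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# How the links setter above turns a bare open-access download link into an
# implied FormatData with DeliveryMechanism.NO_DRM. The identifier and URL are
# sample values; DataSource.GUTENBERG is assumed to be a model-layer constant.
def _example_open_access_circulation():
    identifier = IdentifierData(Identifier.OVERDRIVE_ID, "sample-id")
    download = LinkData(
        rel=Hyperlink.OPEN_ACCESS_DOWNLOAD,
        href="https://example.org/free-book.epub",
        media_type="application/epub+zip",
    )
    circulation = CirculationData(
        data_source=DataSource.GUTENBERG,  # assumed constant
        primary_identifier=identifier,
        licenses_owned=1,
        licenses_available=1,
        licenses_reserved=0,
        patrons_in_hold_queue=0,
        links=[download],
    )
    # The setter kept the link (it is circulation-relevant) and synthesized a
    # FormatData for it, because it is an open-access download.
    assert circulation.has_open_access_link
    assert circulation.formats and circulation.formats[0].link is download
    return circulation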
This is\n # indicative of an error in programming.\n if not collection and (\n self.licenses_owned is not None\n or self.licenses_available is not None\n or self.licenses_reserved is not None\n or self.patrons_in_hold_queue is not None\n ):\n raise ValueError(\n \"Cannot store circulation information because no \"\n \"Collection was provided.\"\n )\n\n made_changes = False\n if replace is None:\n replace = ReplacementPolicy()\n\n analytics = replace.analytics or Analytics(_db)\n\n pool = None\n if collection:\n pool, ignore = self.license_pool(_db, collection, analytics)\n\n data_source = self.data_source(_db)\n identifier = self.primary_identifier(_db)\n # First, make sure all links in self.links are mirrored (if necessary)\n # and associated with the book's identifier.\n\n # TODO: be able to handle the case where the URL to a link changes or\n # a link disappears.\n link_objects = {}\n for link in self.links:\n if link.rel in Hyperlink.CIRCULATION_ALLOWED:\n link_obj, ignore = identifier.add_link(\n rel=link.rel,\n href=link.href,\n data_source=data_source,\n media_type=link.media_type,\n content=link.content,\n )\n link_objects[link] = link_obj\n\n for link in self.links:\n if link.rel in Hyperlink.CIRCULATION_ALLOWED:\n link_obj = link_objects[link]\n if replace.mirrors:\n # We need to mirror this resource.\n self.mirror_link(pool, data_source, link, link_obj, replace)\n\n # Next, make sure the DeliveryMechanisms associated\n # with the book reflect the formats in self.formats.\n old_lpdms = new_lpdms = []\n if pool:\n old_lpdms = list(pool.delivery_mechanisms)\n\n # Before setting and unsetting delivery mechanisms, which may\n # change the open-access status of the work, see what it the\n # status currently is.\n pools = identifier.licensed_through\n old_open_access = any(pool.open_access for pool in pools)\n\n for format in self.formats:\n if format and format.link:\n link = format.link\n if not format.content_type:\n format.content_type = link.media_type\n link_obj = link_objects[format.link]\n resource = link_obj.resource\n else:\n resource = None\n # This can cause a non-open-access LicensePool to go open-access.\n lpdm = LicensePoolDeliveryMechanism.set(\n data_source,\n identifier,\n format.content_type,\n format.drm_scheme,\n format.rights_uri or self.default_rights_uri,\n resource,\n )\n new_lpdms.append(lpdm)\n\n if replace.formats:\n # If any preexisting LicensePoolDeliveryMechanisms were\n # not mentioned in self.formats, remove the corresponding\n # LicensePoolDeliveryMechanisms.\n for lpdm in old_lpdms:\n if lpdm not in new_lpdms:\n for loan in lpdm.fulfills:\n self.log.info(\n \"Loan %i is associated with a format that is no longer available. Deleting its delivery mechanism.\"\n % loan.id\n )\n loan.fulfillment = None\n # This can cause an open-access LicensePool to go\n # non-open-access.\n lpdm.delete()\n\n new_open_access = any(pool.open_access for pool in pools)\n open_access_status_changed = old_open_access != new_open_access\n\n # Finally, if we have data for a specific Collection's license\n # for this book, find its LicensePool and update it.\n changed_availability = False\n if pool and self._availability_needs_update(pool):\n # Update availability information. 
This may result in\n # the issuance of additional circulation events.\n if self.licenses is not None:\n # If we have licenses set, use those to set our availability\n old_licenses = list(pool.licenses or [])\n new_licenses = [\n license.add_to_pool(_db, pool) for license in self.licenses\n ]\n for license in old_licenses:\n if license not in new_licenses:\n self.log.warning(\n f\"License {license.identifier} has been removed from feed.\"\n )\n changed_availability = pool.update_availability_from_licenses(\n analytics=analytics,\n as_of=self.last_checked,\n )\n else:\n # Otherwise update the availability directly\n changed_availability = pool.update_availability(\n new_licenses_owned=self.licenses_owned,\n new_licenses_available=self.licenses_available,\n new_licenses_reserved=self.licenses_reserved,\n new_patrons_in_hold_queue=self.patrons_in_hold_queue,\n analytics=analytics,\n as_of=self.last_checked,\n )\n\n # If this is the first time we've seen this pool, or we never\n # made a Work for it, make one now.\n work_changed = False\n if pool and not pool.work:\n work, work_changed = pool.calculate_work()\n if work:\n work.set_presentation_ready()\n work_changed = True\n\n made_changes = (\n made_changes\n or changed_availability\n or open_access_status_changed\n or work_changed\n )\n\n return pool, made_changes\n\n def _availability_needs_update(self, pool):\n \"\"\"Does this CirculationData represent information more recent than\n what we have for the given LicensePool?\n \"\"\"\n if not self.last_checked:\n # Assume that our data represents the state of affairs\n # right now.\n return True\n if not pool.last_checked:\n # It looks like the LicensePool has never been checked.\n return True\n return self.last_checked >= pool.last_checked\n\n\nclass Metadata(MetaToModelUtility):\n\n \"\"\"A (potentially partial) set of metadata for a published work.\"\"\"\n\n log = logging.getLogger(\"Abstract metadata layer\")\n\n BASIC_EDITION_FIELDS = [\n \"title\",\n \"sort_title\",\n \"subtitle\",\n \"language\",\n \"medium\",\n \"series\",\n \"series_position\",\n \"publisher\",\n \"imprint\",\n \"issued\",\n \"published\",\n ]\n\n def __init__(\n self,\n data_source,\n title=None,\n subtitle=None,\n sort_title=None,\n language=None,\n medium=None,\n series=None,\n series_position=None,\n publisher=None,\n imprint=None,\n issued=None,\n published=None,\n primary_identifier=None,\n identifiers=None,\n recommendations=None,\n subjects=None,\n contributors=None,\n measurements=None,\n links=None,\n data_source_last_updated=None,\n # Note: brought back to keep callers of bibliographic extraction process_one() methods simple.\n circulation=None,\n **kwargs,\n ):\n # data_source is where the data comes from (e.g. 
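# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# Applying a CirculationData to a Collection, as described in apply() above.
# `_db` is an open SQLAlchemy session and `collection` an existing Collection
# row; both are hypothetical caller-supplied objects. ReplacementPolicy is
# used with mostly default settings here.
def _example_apply_circulation(_db, collection, circulation_data):
    policy = ReplacementPolicy(formats=True)
    pool, changed = circulation_data.apply(_db, collection, replace=policy)
    if changed and pool is not None:
        logging.getLogger(__name__).info(
            "Updated availability for %r: %s owned / %s available",
            pool.identifier,
            pool.licenses_owned,
            pool.licenses_available,
        )
    return pool, changed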
overdrive, metadata wrangler, admin interface),\n # and not necessarily where the associated Identifier's LicencePool's lending licenses are coming from.\n self._data_source = data_source\n if isinstance(self._data_source, DataSource):\n self.data_source_obj = self._data_source\n else:\n self.data_source_obj = None\n\n self.title = title\n self.sort_title = sort_title\n self.subtitle = subtitle\n if language:\n language = LanguageCodes.string_to_alpha_3(language)\n self.language = language\n # medium is book/audio/video, etc.\n self.medium = medium\n self.series = series\n self.series_position = series_position\n self.publisher = publisher\n self.imprint = imprint\n self.issued = issued\n self.published = published\n\n self.primary_identifier = primary_identifier\n self.identifiers = identifiers or []\n self.permanent_work_id = None\n if self.primary_identifier and self.primary_identifier not in self.identifiers:\n self.identifiers.append(self.primary_identifier)\n self.recommendations = recommendations or []\n self.subjects = subjects or []\n self.contributors = contributors or []\n self.measurements = measurements or []\n\n self.circulation = circulation\n\n # renamed last_update_time to data_source_last_updated\n self.data_source_last_updated = data_source_last_updated\n\n self.__links = None\n self.links = links\n\n @property\n def links(self):\n return self.__links\n\n @links.setter\n def links(self, arg_links):\n \"\"\"If got passed all links, undiscriminately, filter out to only those relevant to\n editions (the image/cover/etc links).\n \"\"\"\n # start by deleting any old links\n self.__links = []\n\n if not arg_links:\n return\n\n for link in arg_links:\n if link.rel in Hyperlink.METADATA_ALLOWED:\n # only accept the types of links relevant to editions\n self.__links.append(link)\n\n @classmethod\n def from_edition(cls, edition):\n \"\"\"Create a basic Metadata object for the given Edition.\n\n This doesn't contain everything but it contains enough\n information to run guess_license_pools.\n \"\"\"\n kwargs = dict()\n for field in cls.BASIC_EDITION_FIELDS:\n kwargs[field] = getattr(edition, field)\n\n contributors = []\n for contribution in edition.contributions:\n contributor = ContributorData.from_contribution(contribution)\n contributors.append(contributor)\n\n if not edition.contributions:\n # This should only happen for low-quality data sources such as\n # the NYT best-seller API.\n if edition.sort_author and edition.sort_author != Edition.UNKNOWN_AUTHOR:\n contributors.append(\n ContributorData(\n sort_name=edition.sort_author,\n display_name=edition.author,\n roles=[Contributor.PRIMARY_AUTHOR_ROLE],\n )\n )\n\n i = edition.primary_identifier\n primary_identifier = IdentifierData(\n type=i.type, identifier=i.identifier, weight=1\n )\n\n links = []\n for link in i.links:\n link_data = LinkData(link.rel, link.resource.url)\n links.append(link_data)\n\n return Metadata(\n data_source=edition.data_source.name,\n primary_identifier=primary_identifier,\n contributors=contributors,\n links=links,\n **kwargs,\n )\n\n def normalize_contributors(self, metadata_client):\n \"\"\"Make sure that all contributors without a .sort_name get one.\"\"\"\n for contributor in contributors:\n if not contributor.sort_name:\n contributor.normalize(metadata_client)\n\n @property\n def primary_author(self):\n primary_author = None\n for tier in Contributor.author_contributor_tiers():\n for c in self.contributors:\n for role in tier:\n if role in c.roles:\n primary_author = c\n break\n if 
primary_author:\n break\n if primary_author:\n break\n return primary_author\n\n def update(self, metadata):\n \"\"\"Update this Metadata object with values from the given Metadata\n object.\n\n TODO: We might want to take a policy object as an argument.\n \"\"\"\n\n fields = self.BASIC_EDITION_FIELDS\n for field in fields:\n new_value = getattr(metadata, field)\n if new_value != None and new_value != \"\":\n setattr(self, field, new_value)\n\n new_value = getattr(metadata, \"contributors\")\n if new_value and isinstance(new_value, list):\n old_value = getattr(self, \"contributors\")\n # if we already have a better value, don't override it with a \"missing info\" placeholder value\n if not (old_value and new_value[0].sort_name == Edition.UNKNOWN_AUTHOR):\n setattr(self, \"contributors\", new_value)\n\n def calculate_permanent_work_id(self, _db, metadata_client):\n \"\"\"Try to calculate a permanent work ID from this metadata.\n\n This may require asking a metadata wrangler to turn a display name\n into a sort name--thus the `metadata_client` argument.\n \"\"\"\n primary_author = self.primary_author\n\n if not primary_author:\n return None, None\n\n if not primary_author.sort_name and metadata_client:\n primary_author.find_sort_name(_db, self.identifiers, metadata_client)\n\n sort_author = primary_author.sort_name\n pwid = Edition.calculate_permanent_work_id_for_title_and_author(\n self.title, sort_author, \"book\"\n )\n self.permanent_work_id = pwid\n return pwid\n\n def associate_with_identifiers_based_on_permanent_work_id(self, _db):\n \"\"\"Try to associate this object's primary identifier with\n the primary identifiers of Editions in the database which share\n a permanent work ID.\n \"\"\"\n if not self.primary_identifier or not self.permanent_work_id:\n # We don't have the information necessary to carry out this\n # task.\n return\n\n if not self.medium:\n # We don't know the medium of this item, and we only want\n # to associate it with other items of the same type.\n return\n\n primary_identifier_obj, ignore = self.primary_identifier.load(_db)\n\n # Try to find the primary identifiers of other Editions with\n # the same permanent work ID and the same medium, representing\n # books already in our collection.\n qu = (\n _db.query(Identifier)\n .join(Identifier.primarily_identifies)\n .filter(Edition.permanent_work_id == self.permanent_work_id)\n .filter(Identifier.type.in_(Identifier.LICENSE_PROVIDING_IDENTIFIER_TYPES))\n .filter(Edition.medium == self.medium)\n )\n identifiers_same_work_id = qu.all()\n for same_work_id in identifiers_same_work_id:\n if (\n same_work_id.type != self.primary_identifier.type\n or same_work_id.identifier != self.primary_identifier.identifier\n ):\n self.log.info(\n \"Discovered that %r is equivalent to %r because of matching permanent work ID %s\",\n same_work_id,\n primary_identifier_obj,\n self.permanent_work_id,\n )\n primary_identifier_obj.equivalent_to(\n self.data_source(_db), same_work_id, 0.85\n )\n\n def data_source(self, _db):\n if not self.data_source_obj:\n if not self._data_source:\n raise ValueError(\"No data source specified!\")\n self.data_source_obj = DataSource.lookup(_db, self._data_source)\n if not self.data_source_obj:\n raise ValueError(\"Data source %s not found!\" % self._data_source)\n return self.data_source_obj\n\n def edition(self, _db, create_if_not_exists=True):\n \"\"\"Find or create the edition described by this Metadata object.\"\"\"\n if not self.primary_identifier:\n raise ValueError(\"Cannot find edition: metadata has 
no primary identifier.\")\n\n data_source = self.data_source(_db)\n\n return Edition.for_foreign_id(\n _db,\n data_source,\n self.primary_identifier.type,\n self.primary_identifier.identifier,\n create_if_not_exists=create_if_not_exists,\n )\n\n def consolidate_identifiers(self):\n by_weight = defaultdict(list)\n for i in self.identifiers:\n by_weight[(i.type, i.identifier)].append(i.weight)\n new_identifiers = []\n for (type, identifier), weights in list(by_weight.items()):\n new_identifiers.append(\n IdentifierData(type=type, identifier=identifier, weight=median(weights))\n )\n self.identifiers = new_identifiers\n\n def guess_license_pools(self, _db, metadata_client):\n \"\"\"Try to find existing license pools for this Metadata.\"\"\"\n potentials = {}\n for contributor in self.contributors:\n if not any(\n x in contributor.roles\n for x in (Contributor.AUTHOR_ROLE, Contributor.PRIMARY_AUTHOR_ROLE)\n ):\n continue\n contributor.find_sort_name(_db, self.identifiers, metadata_client)\n confidence = 0\n\n base = (\n _db.query(Edition)\n .filter(Edition.title.ilike(self.title))\n .filter(Edition.medium == Edition.BOOK_MEDIUM)\n )\n success = False\n\n # A match based on work ID is the most reliable.\n pwid = self.calculate_permanent_work_id(_db, metadata_client)\n clause = and_(\n Edition.data_source_id == LicensePool.data_source_id,\n Edition.primary_identifier_id == LicensePool.identifier_id,\n )\n qu = base.filter(Edition.permanent_work_id == pwid).join(\n LicensePool, clause\n )\n success = self._run_query(qu, potentials, 0.95)\n if not success and contributor.sort_name:\n qu = base.filter(Edition.sort_author == contributor.sort_name)\n success = self._run_query(qu, potentials, 0.9)\n if not success and contributor.display_name:\n qu = base.filter(Edition.author == contributor.display_name)\n success = self._run_query(qu, potentials, 0.8)\n if not success:\n # Look for the book by an unknown author (our mistake)\n qu = base.filter(Edition.author == Edition.UNKNOWN_AUTHOR)\n success = self._run_query(qu, potentials, 0.45)\n if not success:\n # See if there is any book with this title at all.\n success = self._run_query(base, potentials, 0.3)\n return potentials\n\n def _run_query(self, qu, potentials, confidence):\n success = False\n for i in qu:\n pools = i.license_pools\n for lp in pools:\n if lp and lp.deliverable and potentials.get(lp, 0) < confidence:\n potentials[lp] = confidence\n success = True\n return success\n\n REL_REQUIRES_NEW_PRESENTATION_EDITION = [\n LinkRelations.IMAGE,\n LinkRelations.THUMBNAIL_IMAGE,\n ]\n REL_REQUIRES_FULL_RECALCULATION = [LinkRelations.DESCRIPTION]\n\n # TODO: We need to change all calls to apply() to use a ReplacementPolicy\n # instead of passing in individual `replace` arguments. 
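# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# consolidate_identifiers() above merges duplicate IdentifierData entries and
# assigns the median of their weights. The ISBN is a sample value.
def _example_consolidate_identifiers():
    metadata = Metadata(
        data_source=DataSource.OVERDRIVE,
        identifiers=[
            IdentifierData(Identifier.ISBN, "9780000000000", weight=0.5),
            IdentifierData(Identifier.ISBN, "9780000000000", weight=1),
            IdentifierData(Identifier.ISBN, "9780000000000", weight=0.75),
        ],
    )
    metadata.consolidate_identifiers()
    assert len(metadata.identifiers) == 1
    assert metadata.identifiers[0].weight == 0.75  # median of the three weights
    return metadata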
Once that's done,\n # we can get rid of the `replace` arguments.\n def apply(\n self,\n edition,\n collection,\n metadata_client=None,\n replace=None,\n replace_identifiers=False,\n replace_subjects=False,\n replace_contributions=False,\n replace_links=False,\n replace_formats=False,\n replace_rights=False,\n force=False,\n ):\n \"\"\"Apply this metadata to the given edition.\n\n :return: (edition, made_core_changes), where edition is the newly-updated object, and made_core_changes\n answers the question: were any edition core fields harmed in the making of this update?\n So, if title changed, return True.\n New: If contributors changed, this is now considered a core change,\n so work.simple_opds_feed refresh can be triggered.\n \"\"\"\n _db = Session.object_session(edition)\n\n # If summary, subjects, or measurements change, then any Work\n # associated with this edition will need a full presentation\n # recalculation.\n work_requires_full_recalculation = False\n\n # If any other data changes, then any Work associated with\n # this edition will need to have its presentation edition\n # regenerated, but we can do it on the cheap.\n work_requires_new_presentation_edition = False\n\n if replace is None:\n replace = ReplacementPolicy(\n identifiers=replace_identifiers,\n subjects=replace_subjects,\n contributions=replace_contributions,\n links=replace_links,\n formats=replace_formats,\n rights=replace_rights,\n even_if_not_apparently_updated=force,\n )\n\n # We were given an Edition, so either this metadata's\n # primary_identifier must be missing or it must match the\n # Edition's primary identifier.\n if self.primary_identifier:\n if (\n self.primary_identifier.type != edition.primary_identifier.type\n or self.primary_identifier.identifier\n != edition.primary_identifier.identifier\n ):\n raise ValueError(\n \"Metadata's primary identifier (%s/%s) does not match edition's primary identifier (%r)\"\n % (\n self.primary_identifier.type,\n self.primary_identifier.identifier,\n edition.primary_identifier,\n )\n )\n\n # Check whether we should do any work at all.\n data_source = self.data_source(_db)\n\n if self.data_source_last_updated and not replace.even_if_not_apparently_updated:\n coverage_record = CoverageRecord.lookup(edition, data_source)\n if coverage_record:\n check_time = coverage_record.timestamp\n last_time = self.data_source_last_updated\n if check_time >= last_time:\n # The metadata has not changed since last time. 
Do nothing.\n return edition, False\n\n if metadata_client and not self.permanent_work_id:\n self.calculate_permanent_work_id(_db, metadata_client)\n\n identifier = edition.primary_identifier\n\n self.log.info(\"APPLYING METADATA TO EDITION: %s\", self.title)\n fields = self.BASIC_EDITION_FIELDS + [\"permanent_work_id\"]\n for field in fields:\n old_edition_value = getattr(edition, field)\n new_metadata_value = getattr(self, field)\n if (\n new_metadata_value != None\n and new_metadata_value != \"\"\n and (new_metadata_value != old_edition_value)\n ):\n if new_metadata_value in [NO_VALUE, NO_NUMBER]:\n new_metadata_value = None\n setattr(edition, field, new_metadata_value)\n work_requires_new_presentation_edition = True\n\n # Create equivalencies between all given identifiers and\n # the edition's primary identifier.\n contributors_changed = self.update_contributions(\n _db, edition, metadata_client, replace.contributions\n )\n if contributors_changed:\n work_requires_new_presentation_edition = True\n\n # TODO: remove equivalencies when replace.identifiers is True.\n if self.identifiers is not None:\n for identifier_data in self.identifiers:\n if not identifier_data.identifier:\n continue\n if (\n identifier_data.identifier == identifier.identifier\n and identifier_data.type == identifier.type\n ):\n # These are the same identifier.\n continue\n new_identifier, ignore = Identifier.for_foreign_id(\n _db, identifier_data.type, identifier_data.identifier\n )\n identifier.equivalent_to(\n data_source, new_identifier, identifier_data.weight\n )\n\n new_subjects = {}\n if self.subjects:\n new_subjects = dict((subject.key, subject) for subject in self.subjects)\n if replace.subjects:\n # Remove any old Subjects from this data source, unless they\n # are also in the list of new subjects.\n surviving_classifications = []\n\n def _key(classification):\n s = classification.subject\n return s.type, s.identifier, s.name, classification.weight\n\n for classification in identifier.classifications:\n if classification.data_source == data_source:\n key = _key(classification)\n if not key in new_subjects:\n # The data source has stopped claiming that\n # this classification should exist.\n _db.delete(classification)\n work_requires_full_recalculation = True\n else:\n # The data source maintains that this\n # classification is a good idea. We don't have\n # to do anything.\n del new_subjects[key]\n surviving_classifications.append(classification)\n else:\n # This classification comes from some other data\n # source. 
Don't mess with it.\n surviving_classifications.append(classification)\n identifier.classifications = surviving_classifications\n\n # Apply all new subjects to the identifier.\n for subject in list(new_subjects.values()):\n identifier.classify(\n data_source,\n subject.type,\n subject.identifier,\n subject.name,\n weight=subject.weight,\n )\n work_requires_full_recalculation = True\n\n # Associate all links with the primary identifier.\n if replace.links and self.links is not None:\n surviving_hyperlinks = []\n dirty = False\n for hyperlink in identifier.links:\n if hyperlink.data_source == data_source:\n _db.delete(hyperlink)\n dirty = True\n else:\n surviving_hyperlinks.append(hyperlink)\n if dirty:\n identifier.links = surviving_hyperlinks\n\n link_objects = {}\n\n for link in self.links:\n if link.rel in Hyperlink.METADATA_ALLOWED:\n original_resource = None\n if link.original:\n rights_status = RightsStatus.lookup(_db, link.original.rights_uri)\n original_resource, ignore = get_one_or_create(\n _db,\n Resource,\n url=link.original.href,\n )\n if not original_resource.data_source:\n original_resource.data_source = data_source\n original_resource.rights_status = rights_status\n original_resource.rights_explanation = (\n link.original.rights_explanation\n )\n if link.original.content:\n original_resource.set_fetched_content(\n link.original.guessed_media_type,\n link.original.content,\n None,\n )\n\n link_obj, ignore = identifier.add_link(\n rel=link.rel,\n href=link.href,\n data_source=data_source,\n media_type=link.guessed_media_type,\n content=link.content,\n rights_status_uri=link.rights_uri,\n rights_explanation=link.rights_explanation,\n original_resource=original_resource,\n transformation_settings=link.transformation_settings,\n )\n if link.rel in self.REL_REQUIRES_NEW_PRESENTATION_EDITION:\n work_requires_new_presentation_edition = True\n elif link.rel in self.REL_REQUIRES_FULL_RECALCULATION:\n work_requires_full_recalculation = True\n\n link_objects[link] = link_obj\n if link.thumbnail:\n if link.thumbnail.rel == Hyperlink.THUMBNAIL_IMAGE:\n thumbnail = link.thumbnail\n thumbnail_obj, ignore = identifier.add_link(\n rel=thumbnail.rel,\n href=thumbnail.href,\n data_source=data_source,\n media_type=thumbnail.guessed_media_type,\n content=thumbnail.content,\n )\n work_requires_new_presentation_edition = True\n if thumbnail_obj.resource and thumbnail_obj.resource.representation:\n thumbnail_obj.resource.representation.thumbnail_of = (\n link_obj.resource.representation\n )\n else:\n self.log.error(\n \"Thumbnail link %r cannot be marked as a thumbnail of %r because it has no Representation, probably due to a missing media type.\"\n % (link.thumbnail, link)\n )\n else:\n self.log.error(\n \"Thumbnail link %r does not have the thumbnail link relation! 
Not acceptable as a thumbnail of %r.\"\n % (link.thumbnail, link)\n )\n link.thumbnail = None\n\n # Apply all measurements to the primary identifier\n for measurement in self.measurements:\n work_requires_full_recalculation = True\n identifier.add_measurement(\n data_source,\n measurement.quantity_measured,\n measurement.value,\n measurement.weight,\n measurement.taken_at,\n )\n\n if not edition.sort_author:\n # This may be a situation like the NYT best-seller list where\n # we know the display name of the author but weren't able\n # to normalize that name.\n primary_author = self.primary_author\n if primary_author:\n self.log.info(\n \"In the absence of Contributor objects, setting Edition author name to %s/%s\",\n primary_author.sort_name,\n primary_author.display_name,\n )\n edition.sort_author = primary_author.sort_name\n edition.display_author = primary_author.display_name\n work_requires_new_presentation_edition = True\n\n # The Metadata object may include a CirculationData object which\n # contains information about availability such as open-access\n # links. Make sure\n # that that Collection has a LicensePool for this book and that\n # its information is up-to-date.\n if self.circulation:\n self.circulation.apply(_db, collection, replace)\n\n # obtains a presentation_edition for the title, which will later be used to get a mirror link.\n has_image = any([link.rel == Hyperlink.IMAGE for link in self.links])\n for link in self.links:\n link_obj = link_objects[link]\n\n if link_obj.rel == Hyperlink.THUMBNAIL_IMAGE and has_image:\n # This is a thumbnail but we also have a full-sized image link,\n # so we don't need to separately mirror the thumbnail.\n continue\n\n if replace.mirrors:\n # We need to mirror this resource. If it's an image, a\n # thumbnail may be provided as a side effect.\n self.mirror_link(edition, data_source, link, link_obj, replace)\n elif link.thumbnail:\n # We don't need to mirror this image, but we do need\n # to make sure that its thumbnail exists locally and\n # is associated with the original image.\n self.make_thumbnail(data_source, link, link_obj)\n\n # Make sure the work we just did shows up. This needs to happen after mirroring\n # so mirror urls are available.\n made_changes = edition.calculate_presentation(\n policy=replace.presentation_calculation_policy\n )\n if made_changes:\n work_requires_new_presentation_edition = True\n\n # The metadata wrangler doesn't need information from these data sources.\n # We don't need to send it information it originally provided, and\n # Overdrive makes metadata accessible to everyone without buying licenses\n # for the book, so the metadata wrangler can obtain it directly from\n # Overdrive.\n # TODO: Remove Bibliotheca and Axis 360 from this list.\n METADATA_UPLOAD_BLACKLIST = [\n DataSource.METADATA_WRANGLER,\n DataSource.OVERDRIVE,\n DataSource.BIBLIOTHECA,\n DataSource.AXIS_360,\n ]\n if (\n work_requires_new_presentation_edition\n and (not data_source.integration_client)\n and (data_source.name not in METADATA_UPLOAD_BLACKLIST)\n ):\n # Create a transient failure CoverageRecord for this edition\n # so it will be processed by the MetadataUploadCoverageProvider.\n internal_processing = DataSource.lookup(_db, DataSource.INTERNAL_PROCESSING)\n\n # If there's already a CoverageRecord, don't change it to transient failure.\n # TODO: Once the metadata wrangler can handle it, we'd like to re-sync the\n # metadata every time there's a change. 
For now,\n cr = CoverageRecord.lookup(\n edition,\n internal_processing,\n operation=CoverageRecord.METADATA_UPLOAD_OPERATION,\n )\n if not cr:\n CoverageRecord.add_for(\n edition,\n internal_processing,\n operation=CoverageRecord.METADATA_UPLOAD_OPERATION,\n status=CoverageRecord.TRANSIENT_FAILURE,\n )\n\n # Update the coverage record for this edition and data\n # source. We omit the collection information, even if we know\n # which collection this is, because we only changed metadata.\n CoverageRecord.add_for(\n edition,\n data_source,\n timestamp=self.data_source_last_updated,\n collection=None,\n )\n\n if work_requires_full_recalculation or work_requires_new_presentation_edition:\n # If there is a Work associated with the Edition's primary\n # identifier, mark it for recalculation.\n\n # Any LicensePool will do here, since all LicensePools for\n # a given Identifier have the same Work.\n pool = get_one(\n _db,\n LicensePool,\n identifier=edition.primary_identifier,\n on_multiple=\"interchangeable\",\n )\n if pool and pool.work:\n work = pool.work\n if work_requires_full_recalculation:\n work.needs_full_presentation_recalculation()\n else:\n work.needs_new_presentation_edition()\n\n return edition, work_requires_new_presentation_edition\n\n def make_thumbnail(self, data_source, link, link_obj):\n \"\"\"Make sure a Hyperlink representing an image is connected\n to its thumbnail.\n \"\"\"\n thumbnail = link.thumbnail\n if not thumbnail:\n return None\n\n if thumbnail.href == link.href:\n # The image serves as its own thumbnail. This is a\n # hacky way to represent this in the database.\n if link_obj.resource.representation:\n link_obj.resource.representation.image_height = (\n Edition.MAX_THUMBNAIL_HEIGHT\n )\n return link_obj\n\n # The thumbnail and image are different. 
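# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# The typical call sequence for the apply() method above: look up (or create)
# the Edition for this Metadata's primary identifier, then apply the metadata
# under an explicit ReplacementPolicy. `_db` and `collection` are hypothetical
# caller-supplied objects, and edition() is assumed to return an
# (edition, is_new) pair like the other for_foreign_id helpers in this module.
def _example_apply_metadata(_db, collection, metadata):
    edition, is_new = metadata.edition(_db)
    edition, core_changes = metadata.apply(
        edition,
        collection,
        replace=ReplacementPolicy(
            identifiers=False,
            subjects=True,
            contributions=True,
            links=True,
            formats=True,
        ),
    )
    return edition, core_changes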
Make sure there's a\n # separate link to the thumbnail.\n thumbnail_obj, ignore = link_obj.identifier.add_link(\n rel=thumbnail.rel,\n href=thumbnail.href,\n data_source=data_source,\n media_type=thumbnail.media_type,\n content=thumbnail.content,\n )\n # And make sure the thumbnail knows it's a thumbnail of the main\n # image.\n if thumbnail_obj.resource.representation:\n thumbnail_obj.resource.representation.thumbnail_of = (\n link_obj.resource.representation\n )\n return thumbnail_obj\n\n def update_contributions(self, _db, edition, metadata_client=None, replace=True):\n contributors_changed = False\n old_contributors = []\n new_contributors = []\n\n if not replace and self.contributors:\n # we've chosen to append new contributors, which exist\n # this means the edition's contributor list will, indeed, change\n contributors_changed = True\n\n if replace and self.contributors:\n # Remove any old Contributions from this data source --\n # we're about to add a new set\n surviving_contributions = []\n for contribution in edition.contributions:\n old_contributors.append(contribution.contributor.id)\n _db.delete(contribution)\n edition.contributions = surviving_contributions\n\n for contributor_data in self.contributors:\n contributor_data.find_sort_name(_db, self.identifiers, metadata_client)\n if (\n contributor_data.sort_name\n or contributor_data.lc\n or contributor_data.viaf\n ):\n contributor = edition.add_contributor(\n name=contributor_data.sort_name,\n roles=contributor_data.roles,\n lc=contributor_data.lc,\n viaf=contributor_data.viaf,\n )\n new_contributors.append(contributor.id)\n if contributor_data.display_name:\n contributor.display_name = contributor_data.display_name\n if contributor_data.biography:\n contributor.biography = contributor_data.biography\n if contributor_data.aliases:\n contributor.aliases = contributor_data.aliases\n if contributor_data.lc:\n contributor.lc = contributor_data.lc\n if contributor_data.viaf:\n contributor.viaf = contributor_data.viaf\n if contributor_data.wikipedia_name:\n contributor.wikipedia_name = contributor_data.wikipedia_name\n else:\n self.log.info(\n \"Not registering %s because no sort name, LC, or VIAF\",\n contributor_data.display_name,\n )\n\n if sorted(old_contributors) != sorted(new_contributors):\n contributors_changed = True\n\n return contributors_changed\n\n def filter_recommendations(self, _db):\n \"\"\"Filters out recommended identifiers that don't exist in the db.\n Any IdentifierData objects will be replaced with Identifiers.\n \"\"\"\n\n by_type = defaultdict(list)\n for identifier in self.recommendations:\n by_type[identifier.type].append(identifier.identifier)\n\n self.recommendations = []\n for type, identifiers in list(by_type.items()):\n existing_identifiers = (\n _db.query(Identifier)\n .filter(Identifier.type == type)\n .filter(Identifier.identifier.in_(identifiers))\n )\n self.recommendations += existing_identifiers.all()\n\n if self.primary_identifier in self.recommendations:\n self.recommendations.remove(identifier_data)\n\n\nclass CSVFormatError(csv.Error):\n pass\n\n\nclass CSVMetadataImporter(object):\n\n \"\"\"Turn a CSV file into a list of Metadata objects.\"\"\"\n\n log = logging.getLogger(\"CSV metadata importer\")\n\n IDENTIFIER_PRECEDENCE = [\n Identifier.AXIS_360_ID,\n Identifier.OVERDRIVE_ID,\n Identifier.THREEM_ID,\n Identifier.ISBN,\n ]\n\n DEFAULT_IDENTIFIER_FIELD_NAMES = {\n Identifier.OVERDRIVE_ID: (\"overdrive id\", 0.75),\n Identifier.THREEM_ID: (\"3m id\", 0.75),\n Identifier.AXIS_360_ID: 
(\"axis 360 id\", 0.75),\n Identifier.ISBN: (\"isbn\", 0.75),\n }\n\n # When classifications are imported from a CSV file, we treat\n # them as though they came from a trusted distributor.\n DEFAULT_SUBJECT_FIELD_NAMES = {\n \"tags\": (Subject.TAG, Classification.TRUSTED_DISTRIBUTOR_WEIGHT),\n \"age\": (Subject.AGE_RANGE, Classification.TRUSTED_DISTRIBUTOR_WEIGHT),\n \"audience\": (\n Subject.FREEFORM_AUDIENCE,\n Classification.TRUSTED_DISTRIBUTOR_WEIGHT,\n ),\n }\n\n def __init__(\n self,\n data_source_name,\n title_field=\"title\",\n language_field=\"language\",\n default_language=\"eng\",\n medium_field=\"medium\",\n default_medium=Edition.BOOK_MEDIUM,\n series_field=\"series\",\n publisher_field=\"publisher\",\n imprint_field=\"imprint\",\n issued_field=\"issued\",\n published_field=[\"published\", \"publication year\"],\n identifier_fields=DEFAULT_IDENTIFIER_FIELD_NAMES,\n subject_fields=DEFAULT_SUBJECT_FIELD_NAMES,\n sort_author_field=\"file author as\",\n display_author_field=[\"author\", \"display author as\"],\n ):\n self.data_source_name = data_source_name\n self.title_field = title_field\n self.language_field = language_field\n self.default_language = default_language\n self.medium_field = medium_field\n self.default_medium = default_medium\n self.series_field = series_field\n self.publisher_field = publisher_field\n self.imprint_field = imprint_field\n self.issued_field = issued_field\n self.published_field = published_field\n self.identifier_fields = identifier_fields\n self.subject_fields = subject_fields\n self.sort_author_field = sort_author_field\n self.display_author_field = display_author_field\n\n def to_metadata(self, dictreader):\n \"\"\"Turn the CSV file in `dictreader` into a sequence of Metadata.\n\n :yield: A sequence of Metadata objects.\n \"\"\"\n fields = dictreader.fieldnames\n\n # Make sure this CSV file has some way of identifying books.\n found_identifier_field = False\n possibilities = []\n for field_name, weight in list(self.identifier_fields.values()):\n possibilities.append(field_name)\n if field_name in fields:\n found_identifier_field = True\n break\n if not found_identifier_field:\n raise CSVFormatError(\n \"Could not find a primary identifier field. Possibilities: %r. 
Actualities: %r.\"\n % (possibilities, fields)\n )\n\n for row in dictreader:\n yield self.row_to_metadata(row)\n\n def row_to_metadata(self, row):\n title = self._field(row, self.title_field)\n language = self._field(row, self.language_field, self.default_language)\n medium = self._field(row, self.medium_field, self.default_medium)\n if medium not in list(Edition.medium_to_additional_type.keys()):\n self.log.warning(\"Ignored unrecognized medium %s\" % medium)\n medium = Edition.BOOK_MEDIUM\n series = self._field(row, self.series_field)\n publisher = self._field(row, self.publisher_field)\n imprint = self._field(row, self.imprint_field)\n issued = self._date_field(row, self.issued_field)\n published = self._date_field(row, self.published_field)\n\n primary_identifier = None\n identifiers = []\n # TODO: This is annoying and could use some work.\n for identifier_type in self.IDENTIFIER_PRECEDENCE:\n correct_type = False\n for target_type, v in list(self.identifier_fields.items()):\n if isinstance(v, tuple):\n field_name, weight = v\n else:\n field_name = v\n weight = 1\n if target_type == identifier_type:\n correct_type = True\n break\n if not correct_type:\n continue\n\n if field_name in row:\n value = self._field(row, field_name)\n if value:\n identifier = IdentifierData(identifier_type, value, weight=weight)\n identifiers.append(identifier)\n if not primary_identifier:\n primary_identifier = identifier\n\n subjects = []\n for (field_name, (subject_type, weight)) in list(self.subject_fields.items()):\n values = self.list_field(row, field_name)\n for value in values:\n subjects.append(\n SubjectData(type=subject_type, identifier=value, weight=weight)\n )\n\n contributors = []\n sort_author = self._field(row, self.sort_author_field)\n display_author = self._field(row, self.display_author_field)\n if sort_author or display_author:\n contributors.append(\n ContributorData(\n sort_name=sort_author,\n display_name=display_author,\n roles=[Contributor.AUTHOR_ROLE],\n )\n )\n\n metadata = Metadata(\n data_source=self.data_source_name,\n title=title,\n language=language,\n medium=medium,\n series=series,\n publisher=publisher,\n imprint=imprint,\n issued=issued,\n published=published,\n primary_identifier=primary_identifier,\n identifiers=identifiers,\n subjects=subjects,\n contributors=contributors,\n )\n metadata.csv_row = row\n return metadata\n\n @property\n def identifier_field_names(self):\n \"\"\"All potential field names that would identify an identifier.\"\"\"\n for identifier_type in self.IDENTIFIER_PRECEDENCE:\n field_names = self.identifier_fields.get(identifier_type, [])\n if isinstance(field_names, (bytes, str)):\n field_names = [field_names]\n for field_name in field_names:\n yield field_name\n\n def list_field(self, row, names):\n \"\"\"Parse a string into a list by splitting on commas.\"\"\"\n value = self._field(row, names)\n if not value:\n return []\n return [item.strip() for item in value.split(\",\")]\n\n def _field(self, row, names, default=None):\n \"\"\"Get a value from one of the given fields and ensure it comes in as\n Unicode.\n \"\"\"\n if isinstance(names, (bytes, str)):\n return self.__field(row, names, default)\n if not names:\n return default\n for name in names:\n v = self.__field(row, name)\n if v:\n return v\n else:\n return default\n\n def __field(self, row, name, default=None):\n \"\"\"Get a value from the given field and ensure it comes in as\n Unicode.\n \"\"\"\n value = row.get(name, default)\n if isinstance(value, bytes):\n value = value.decode(\"utf8\")\n 
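# --- Illustrative usage sketch (hypothetical, not part of the original module). ---
# Feeding a CSV file through CSVMetadataImporter.to_metadata() as defined
# above. The path and data source name are sample values; the CSV must contain
# at least one of the identifier columns (e.g. "isbn", "overdrive id") or a
# CSVFormatError is raised. `csv` is already imported by this module.
def _example_import_csv(path="books.csv"):
    importer = CSVMetadataImporter(data_source_name="Example CSV Source")
    with open(path, newline="", encoding="utf-8") as fh:
        reader = csv.DictReader(fh)
        for metadata in importer.to_metadata(reader):
            yield metadata.title, metadata.primary_identifier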
return value\n\n def _date_field(self, row, field_name):\n \"\"\"Attempt to parse a field as a date.\"\"\"\n date = None\n value = self._field(row, field_name)\n if value:\n try:\n value = to_utc(parse(value))\n except ValueError:\n self.log.warning('Could not parse date \"%s\"' % value)\n value = None\n return value\n\n\nclass MARCExtractor(object):\n\n \"\"\"Transform a MARC file into a list of Metadata objects.\n\n This is not totally general, but it's a good start.\n \"\"\"\n\n # Common things found in a MARC record after the name of the author\n # which we sould like to remove.\n END_OF_AUTHOR_NAME_RES = [\n re.compile(r\",\\s+[0-9]+-\"), # Birth year\n re.compile(r\",\\s+active \"),\n re.compile(r\",\\s+graf,\"),\n re.compile(r\",\\s+author.\"),\n ]\n\n @classmethod\n def name_cleanup(cls, name):\n # Turn '<NAME>, 1265-1321, author.'\n # into '<NAME>'.\n for regex in cls.END_OF_AUTHOR_NAME_RES:\n match = regex.search(name)\n if match:\n name = name[: match.start()]\n break\n name = name_tidy(name)\n return name\n\n @classmethod\n def parse_year(cls, value):\n \"\"\"Handle a publication year that may not be in the right format.\"\"\"\n for format in (\"%Y\", \"%Y.\"):\n try:\n return strptime_utc(value, format)\n except ValueError:\n continue\n return None\n\n @classmethod\n def parse(cls, file, data_source_name, default_medium_type=None):\n reader = MARCReader(file)\n metadata_records = []\n\n for record in reader:\n title = record.title()\n if title.endswith(\" /\"):\n title = title[: -len(\" /\")]\n issued_year = cls.parse_year(record.pubyear())\n publisher = record.publisher()\n if publisher.endswith(\",\"):\n publisher = publisher[:-1]\n\n links = []\n summary = record.notes()[0][\"a\"]\n\n if summary:\n summary_link = LinkData(\n rel=Hyperlink.DESCRIPTION,\n media_type=Representation.TEXT_PLAIN,\n content=summary,\n )\n links.append(summary_link)\n\n isbn = record[\"020\"][\"a\"].split(\" \")[0]\n primary_identifier = IdentifierData(Identifier.ISBN, isbn)\n\n subjects = [\n SubjectData(\n Classifier.FAST,\n subject[\"a\"],\n )\n for subject in record.subjects()\n ]\n\n author = record.author()\n if author:\n author = cls.name_cleanup(author)\n author_names = [author]\n else:\n author_names = [\"Anonymous\"]\n contributors = [\n ContributorData(\n sort_name=author,\n roles=[Contributor.AUTHOR_ROLE],\n )\n for author in author_names\n ]\n\n metadata_records.append(\n Metadata(\n data_source=data_source_name,\n title=title,\n language=\"eng\",\n medium=Edition.BOOK_MEDIUM,\n publisher=publisher,\n issued=issued_year,\n primary_identifier=primary_identifier,\n subjects=subjects,\n contributors=contributors,\n links=links,\n )\n )\n return metadata_records\n", "id": "6441039", "language": "Python", "matching_score": 8.756113052368164, "max_stars_count": 0, "path": "core/metadata_layer.py" }, { "content": "import datetime\nimport logging\nimport traceback\nfrom contextlib import contextmanager\nfrom io import BytesIO\nfrom typing import Optional\nfrom urllib.parse import quote, urljoin, urlparse\n\nimport dateutil\nimport feedparser\nimport sqlalchemy\nfrom flask_babel import lazy_gettext as _\nfrom lxml import etree\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy.orm.session import Session\n\nfrom .classifier import Classifier\nfrom .config import CannotLoadConfiguration, IntegrationException\nfrom .coverage import CoverageFailure\nfrom .metadata_layer import (\n CirculationData,\n ContributorData,\n IdentifierData,\n LinkData,\n MeasurementData,\n Metadata,\n 
ReplacementPolicy,\n SubjectData,\n TimestampData,\n)\nfrom .mirror import MirrorUploader\nfrom .model import (\n Collection,\n CoverageRecord,\n DataSource,\n Edition,\n Equivalency,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n LicensePool,\n Measurement,\n Representation,\n RightsStatus,\n Subject,\n get_one,\n)\nfrom .model.configuration import (\n ConfigurationAttributeType,\n ConfigurationFactory,\n ConfigurationGrouping,\n ConfigurationMetadata,\n ConfigurationStorage,\n ExternalIntegrationLink,\n HasExternalIntegration,\n)\nfrom .monitor import CollectionMonitor\nfrom .selftest import HasSelfTests, SelfTestResult\nfrom .util.datetime_helpers import datetime_utc, to_utc, utc_now\nfrom .util.http import HTTP, BadResponseException\nfrom .util.opds_writer import OPDSFeed, OPDSMessage\nfrom .util.string_helpers import base64\nfrom .util.xmlparser import XMLParser\n\n\ndef parse_identifier(db, identifier):\n \"\"\"Parse the identifier and return an Identifier object representing it.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param identifier: String containing the identifier\n :type identifier: str\n\n :return: Identifier object\n :rtype: Optional[core.model.identifier.Identifier]\n \"\"\"\n parsed_identifier = None\n\n try:\n result = Identifier.parse_urn(db, identifier)\n\n if result is not None:\n parsed_identifier, _ = result\n except Exception:\n logging.error(\n f\"An unexpected exception occurred during parsing identifier {identifier}\"\n )\n\n return parsed_identifier\n\n\nclass ConnectionConfiguration(ConfigurationGrouping):\n max_retry_count = ConfigurationMetadata(\n key=\"max_retry_count\",\n label=_(\"Max retry count\"),\n description=_(\n \"The maximum number of times to retry a request for certain connection-related errors.\"\n ),\n type=ConfigurationAttributeType.NUMBER,\n required=False,\n default=3,\n )\n\n\nclass AccessNotAuthenticated(Exception):\n \"\"\"No authentication is configured for this service\"\"\"\n\n\nclass SimplifiedOPDSLookup(object):\n \"\"\"Tiny integration class for the Simplified 'lookup' protocol.\"\"\"\n\n LOOKUP_ENDPOINT = \"lookup\"\n\n @classmethod\n def check_content_type(cls, response):\n content_type = response.headers.get(\"content-type\")\n if content_type != OPDSFeed.ACQUISITION_FEED_TYPE:\n raise BadResponseException.from_response(\n response.url, \"Wrong media type: %s\" % content_type, response\n )\n\n @classmethod\n def from_protocol(\n cls, _db, protocol, goal=ExternalIntegration.LICENSE_GOAL, library=None\n ):\n integration = ExternalIntegration.lookup(_db, protocol, goal, library=library)\n if not integration or not integration.url:\n return None\n return cls(integration.url)\n\n def __init__(self, base_url):\n if not base_url.endswith(\"/\"):\n base_url += \"/\"\n self.base_url = base_url\n\n @property\n def lookup_endpoint(self):\n return self.LOOKUP_ENDPOINT\n\n def _get(self, url, **kwargs):\n \"\"\"Make an HTTP request. 
This method is overridden in the mock class.\"\"\"\n kwargs[\"timeout\"] = kwargs.get(\"timeout\", 300)\n kwargs[\"allowed_response_codes\"] = kwargs.get(\"allowed_response_codes\", [])\n kwargs[\"allowed_response_codes\"] += [\"2xx\", \"3xx\"]\n return HTTP.get_with_timeout(url, **kwargs)\n\n def urn_args(self, identifiers):\n return \"&\".join(set(\"urn=%s\" % i.urn for i in identifiers))\n\n def lookup(self, identifiers):\n \"\"\"Retrieve an OPDS feed with metadata for the given identifiers.\"\"\"\n args = self.urn_args(identifiers)\n url = self.base_url + self.lookup_endpoint + \"?\" + args\n logging.info(\"Lookup URL: %s\", url)\n return self._get(url)\n\n\nclass MetadataWranglerOPDSLookup(SimplifiedOPDSLookup, HasSelfTests):\n\n PROTOCOL = ExternalIntegration.METADATA_WRANGLER\n NAME = _(\"Library Simplified Metadata Wrangler\")\n CARDINALITY = 1\n\n SETTINGS = [\n {\n \"key\": ExternalIntegration.URL,\n \"label\": _(\"URL\"),\n \"default\": \"http://metadata.librarysimplified.org/\",\n \"required\": True,\n \"format\": \"url\",\n },\n ]\n\n SITEWIDE = True\n\n ADD_ENDPOINT = \"add\"\n ADD_WITH_METADATA_ENDPOINT = \"add_with_metadata\"\n METADATA_NEEDED_ENDPOINT = \"metadata_needed\"\n REMOVE_ENDPOINT = \"remove\"\n UPDATES_ENDPOINT = \"updates\"\n CANONICALIZE_ENDPOINT = \"canonical-author-name\"\n\n @classmethod\n def from_config(cls, _db, collection=None):\n integration = ExternalIntegration.lookup(\n _db,\n ExternalIntegration.METADATA_WRANGLER,\n ExternalIntegration.METADATA_GOAL,\n )\n\n if not integration:\n raise CannotLoadConfiguration(\n \"No ExternalIntegration found for the Metadata Wrangler.\"\n )\n\n if not integration.url:\n raise CannotLoadConfiguration(\"Metadata Wrangler improperly configured.\")\n\n return cls(\n integration.url, shared_secret=integration.password, collection=collection\n )\n\n @classmethod\n def external_integration(cls, _db):\n return ExternalIntegration.lookup(\n _db,\n ExternalIntegration.METADATA_WRANGLER,\n ExternalIntegration.METADATA_GOAL,\n )\n\n def _run_self_tests(self, _db, lookup_class=None):\n \"\"\"Run self-tests on every eligible Collection.\n\n :param _db: A database connection.\n :param lookup_class: Pass in a mock class to instantiate that\n class as needed instead of MetadataWranglerOPDSLookup.\n :return: A dictionary mapping Collection objects to lists of\n SelfTestResult objects.\n \"\"\"\n lookup_class = lookup_class or MetadataWranglerOPDSLookup\n results = dict()\n\n # Find all eligible Collections on the system, instantiate a\n # _new_ MetadataWranglerOPDSLookup for each, and call\n # its _run_collection_self_tests method.\n for c in _db.query(Collection):\n try:\n metadata_identifier = c.metadata_identifier\n except ValueError as e:\n continue\n\n lookup = lookup_class.from_config(_db, c)\n for i in lookup._run_collection_self_tests():\n yield i\n\n def _run_collection_self_tests(self):\n \"\"\"Run the self-test suite on the Collection associated with this\n MetadataWranglerOPDSLookup.\n \"\"\"\n if not self.collection:\n return\n metadata_identifier = None\n try:\n metadata_identifier = self.collection.metadata_identifier\n except ValueError as e:\n # This collection has no metadata identifier. It's\n # probably a \"Manual intervention\" collection. 
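# --- Illustrative sketch (not part of the repository source above) ---
# How SimplifiedOPDSLookup assembles its lookup URL from identifier URNs.
# "FakeIdentifier" and the URN value are made-up stand-ins; only the .urn
# attribute is read by urn_args(). The import path is an assumption based on
# the "core/..." file paths recorded in this dataset.
from collections import namedtuple

from core.opds_import import SimplifiedOPDSLookup

FakeIdentifier = namedtuple("FakeIdentifier", "urn")

lookup = SimplifiedOPDSLookup("http://metadata.librarysimplified.org")
identifiers = [FakeIdentifier("urn:isbn:9780123456789")]

# lookup.lookup(identifiers) would GET this URL; building it by hand keeps
# the sketch free of network traffic.
url = lookup.base_url + lookup.lookup_endpoint + "?" + lookup.urn_args(identifiers)
# -> "http://metadata.librarysimplified.org/lookup?urn=urn:isbn:9780123456789"
# Note that urn_args() de-duplicates URNs via a set and does not URL-escape them.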
It cannot\n # interact with the metadata wrangler and there's no need\n # to test it.\n return\n\n # Check various endpoints that yield OPDS feeds.\n one_day_ago = utc_now() - datetime.timedelta(days=1)\n for title, m, args in (\n (\"Metadata updates in last 24 hours\", self.updates, [one_day_ago]),\n (\n \"Titles where we could (but haven't) provide information to the metadata wrangler\",\n self.metadata_needed,\n [],\n ),\n ):\n yield self._feed_self_test(title, m, *args)\n\n def _feed_self_test(self, name, method, *args):\n \"\"\"Retrieve a feed from the metadata wrangler and\n turn it into a SelfTestResult.\n \"\"\"\n result = SelfTestResult(name)\n result.collection = self.collection\n\n # If the server returns a 500 error we don't want to raise an\n # exception -- we want to record it as part of the test\n # result.\n kwargs = dict(allowed_response_codes=[\"%sxx\" % f for f in range(1, 6)])\n\n response = method(*args, **kwargs)\n self._annotate_feed_response(result, response)\n\n # We're all done.\n result.end = utc_now()\n return result\n\n @classmethod\n def _annotate_feed_response(cls, result, response):\n \"\"\"Parse an OPDS feed and annotate a SelfTestResult with some\n information about it:\n\n * How the feed was requested.\n * What the response code was.\n * The number of items on the first page.\n * The title of each item on the page, if any.\n * The total number of items in the feed, if available.\n\n :param result: A SelfTestResult object.\n :param response: A requests Response object.\n \"\"\"\n lines = []\n lines.append(\"Request URL: %s\" % response.url)\n lines.append(\n \"Request authorization: %s\" % response.request.headers.get(\"Authorization\")\n )\n lines.append(\"Status code: %d\" % response.status_code)\n result.success = response.status_code == 200\n if result.success:\n feed = feedparser.parse(response.content)\n total_results = feed[\"feed\"].get(\"opensearch_totalresults\")\n if total_results is not None:\n lines.append(\n \"Total identifiers registered with this collection: %s\"\n % (total_results)\n )\n lines.append(\"Entries on this page: %d\" % len(feed[\"entries\"]))\n for i in feed[\"entries\"]:\n lines.append(\" \" + i[\"title\"])\n result.result = lines\n\n def __init__(self, url, shared_secret=None, collection=None):\n super(MetadataWranglerOPDSLookup, self).__init__(url)\n self.shared_secret = shared_secret\n self.collection = collection\n\n @property\n def authenticated(self):\n return bool(self.shared_secret)\n\n @property\n def authorization(self):\n if self.authenticated:\n token = \"Bearer \" + base64.b64encode(self.shared_secret)\n return {\"Authorization\": token}\n return None\n\n @property\n def lookup_endpoint(self):\n if not (self.authenticated and self.collection):\n return self.LOOKUP_ENDPOINT\n return self.collection.metadata_identifier + \"/\" + self.LOOKUP_ENDPOINT\n\n def _get(self, url, **kwargs):\n if self.authenticated:\n headers = kwargs.get(\"headers\", {})\n headers.update(self.authorization)\n kwargs[\"headers\"] = headers\n return super(MetadataWranglerOPDSLookup, self)._get(url, **kwargs)\n\n def _post(self, url, data=\"\", **kwargs):\n \"\"\"Make an HTTP request. 
This method is overridden in the mock class.\"\"\"\n if self.authenticated:\n headers = kwargs.get(\"headers\", {})\n headers.update(self.authorization)\n kwargs[\"headers\"] = headers\n kwargs[\"timeout\"] = kwargs.get(\"timeout\", 120)\n kwargs[\"allowed_response_codes\"] = kwargs.get(\"allowed_response_codes\", [])\n kwargs[\"allowed_response_codes\"] += [\"2xx\", \"3xx\"]\n return HTTP.post_with_timeout(url, data, **kwargs)\n\n def add_args(self, url, arg_string):\n joiner = \"?\"\n if joiner in url:\n # This URL already has an argument (namely: data_source), so\n # append the new arguments.\n joiner = \"&\"\n return url + joiner + arg_string\n\n def get_collection_url(self, endpoint):\n if not self.authenticated:\n raise AccessNotAuthenticated(\"Metadata Wrangler access not authenticated.\")\n if not self.collection:\n raise ValueError(\"No Collection provided.\")\n\n data_source = \"\"\n if self.collection.protocol == ExternalIntegration.OPDS_IMPORT:\n # Open access OPDS_IMPORT collections need to send a DataSource to\n # allow OPDS lookups on the Metadata Wrangler.\n data_source = \"?data_source=\" + quote(self.collection.data_source.name)\n\n return (\n self.base_url\n + self.collection.metadata_identifier\n + \"/\"\n + endpoint\n + data_source\n )\n\n def add(self, identifiers):\n \"\"\"Add items to an authenticated Metadata Wrangler Collection\"\"\"\n add_url = self.get_collection_url(self.ADD_ENDPOINT)\n url = self.add_args(add_url, self.urn_args(identifiers))\n\n logging.info(\"Metadata Wrangler Collection Addition URL: %s\", url)\n return self._post(url)\n\n def add_with_metadata(self, feed):\n \"\"\"Add a feed of items with metadata to an authenticated Metadata Wrangler Collection.\"\"\"\n add_with_metadata_url = self.get_collection_url(self.ADD_WITH_METADATA_ENDPOINT)\n return self._post(add_with_metadata_url, str(feed))\n\n def metadata_needed(self, **kwargs):\n \"\"\"Get a feed of items that need additional metadata to be processed\n by the Metadata Wrangler.\n \"\"\"\n metadata_needed_url = self.get_collection_url(self.METADATA_NEEDED_ENDPOINT)\n return self._get(metadata_needed_url, **kwargs)\n\n def remove(self, identifiers):\n \"\"\"Remove items from an authenticated Metadata Wrangler Collection\"\"\"\n remove_url = self.get_collection_url(self.REMOVE_ENDPOINT)\n url = self.add_args(remove_url, self.urn_args(identifiers))\n\n logging.info(\"Metadata Wrangler Collection Removal URL: %s\", url)\n return self._post(url)\n\n def updates(self, last_update_time, **kwargs):\n \"\"\"Retrieve updated items from an authenticated Metadata\n Wrangler Collection\n\n :param last_update_time: DateTime representing the last time\n an update was fetched. May be None.\n \"\"\"\n url = self.get_collection_url(self.UPDATES_ENDPOINT)\n if last_update_time:\n formatted_time = last_update_time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n url = self.add_args(url, (\"last_update_time=\" + formatted_time))\n logging.info(\"Metadata Wrangler Collection Updates URL: %s\", url)\n return self._get(url, **kwargs)\n\n def canonicalize_author_name(self, identifier, working_display_name):\n \"\"\"Attempt to find the canonical name for the author of a book.\n\n :param identifier: an ISBN-type Identifier.\n\n :param working_display_name: The display name of the author\n (i.e. 
the name format human being used as opposed to the name\n that goes into library records).\n \"\"\"\n args = \"display_name=%s\" % (quote(working_display_name.encode(\"utf8\")))\n if identifier:\n args += \"&urn=%s\" % quote(identifier.urn)\n url = self.base_url + self.CANONICALIZE_ENDPOINT + \"?\" + args\n logging.info(\"GET %s\", url)\n return self._get(url)\n\n\nclass MockSimplifiedOPDSLookup(SimplifiedOPDSLookup):\n def __init__(self, *args, **kwargs):\n self.requests = []\n self.responses = []\n super(MockSimplifiedOPDSLookup, self).__init__(*args, **kwargs)\n\n def queue_response(self, status_code, headers={}, content=None):\n from .testing import MockRequestsResponse\n\n self.responses.insert(0, MockRequestsResponse(status_code, headers, content))\n\n def _get(self, url, *args, **kwargs):\n self.requests.append((url, args, kwargs))\n response = self.responses.pop()\n return HTTP._process_response(\n url,\n response,\n kwargs.get(\"allowed_response_codes\"),\n kwargs.get(\"disallowed_response_codes\"),\n )\n\n\nclass MockMetadataWranglerOPDSLookup(\n MockSimplifiedOPDSLookup, MetadataWranglerOPDSLookup\n):\n def _post(self, url, *args, **kwargs):\n self.requests.append((url, args, kwargs))\n response = self.responses.pop()\n return HTTP._process_response(\n url,\n response,\n kwargs.get(\"allowed_response_codes\"),\n kwargs.get(\"disallowed_response_codes\"),\n )\n\n\nclass OPDSXMLParser(XMLParser):\n\n NAMESPACES = {\n \"simplified\": \"http://librarysimplified.org/terms/\",\n \"app\": \"http://www.w3.org/2007/app\",\n \"dcterms\": \"http://purl.org/dc/terms/\",\n \"dc\": \"http://purl.org/dc/elements/1.1/\",\n \"opds\": \"http://opds-spec.org/2010/catalog\",\n \"schema\": \"http://schema.org/\",\n \"atom\": \"http://www.w3.org/2005/Atom\",\n \"drm\": \"http://librarysimplified.org/terms/drm\",\n }\n\n\nclass OPDSImporter(object):\n \"\"\"Imports editions and license pools from an OPDS feed.\n Creates Edition, LicensePool and Work rows in the database, if those\n don't already exist.\n\n Should be used when a circulation server asks for data from\n our internal content server, and also when our content server asks for data\n from external content servers.\n \"\"\"\n\n COULD_NOT_CREATE_LICENSE_POOL = (\n \"No existing license pool for this identifier and no way of creating one.\"\n )\n\n NAME = ExternalIntegration.OPDS_IMPORT\n DESCRIPTION = _(\"Import books from a publicly-accessible OPDS feed.\")\n\n NO_DEFAULT_AUDIENCE = \"\"\n\n # These settings are used by all OPDS-derived import methods.\n BASE_SETTINGS = [\n {\n \"key\": Collection.EXTERNAL_ACCOUNT_ID_KEY,\n \"label\": _(\"URL\"),\n \"required\": True,\n \"format\": \"url\",\n },\n {\n \"key\": Collection.DATA_SOURCE_NAME_SETTING,\n \"label\": _(\"Data source name\"),\n \"required\": True,\n },\n {\n \"key\": Collection.DEFAULT_AUDIENCE_KEY,\n \"label\": _(\"Default audience\"),\n \"description\": _(\n \"If the vendor does not specify the target audience for their books, \"\n \"assume the books have this target audience.\"\n ),\n \"type\": \"select\",\n \"format\": \"narrow\",\n \"options\": [{\"key\": NO_DEFAULT_AUDIENCE, \"label\": _(\"No default audience\")}]\n + [\n {\"key\": audience, \"label\": audience}\n for audience in sorted(Classifier.AUDIENCES)\n ],\n \"default\": NO_DEFAULT_AUDIENCE,\n \"required\": False,\n \"readOnly\": True,\n },\n ]\n\n # These settings are used by 'regular' OPDS but not by OPDS For\n # Distributors, which has its own way of doing authentication.\n SETTINGS = BASE_SETTINGS + [\n {\n \"key\": 
ExternalIntegration.USERNAME,\n \"label\": _(\"Username\"),\n \"description\": _(\n \"If HTTP Basic authentication is required to access the OPDS feed (it usually isn't), enter the username here.\"\n ),\n },\n {\n \"key\": ExternalIntegration.PASSWORD,\n \"label\": _(\"Password\"),\n \"description\": _(\n \"If HTTP Basic authentication is required to access the OPDS feed (it usually isn't), enter the password here.\"\n ),\n },\n {\n \"key\": ExternalIntegration.CUSTOM_ACCEPT_HEADER,\n \"label\": _(\"Custom accept header\"),\n \"required\": False,\n \"description\": _(\n \"Some servers expect an accept header to decide which file to send. You can use */* if the server doesn't expect anything.\"\n ),\n \"default\": \",\".join(\n [\n OPDSFeed.ACQUISITION_FEED_TYPE,\n \"application/atom+xml;q=0.9\",\n \"application/xml;q=0.8\",\n \"*/*;q=0.1\",\n ]\n ),\n },\n {\n \"key\": ExternalIntegration.PRIMARY_IDENTIFIER_SOURCE,\n \"label\": _(\"Identifier\"),\n \"required\": False,\n \"description\": _(\"Which book identifier to use as ID.\"),\n \"type\": \"select\",\n \"options\": [\n {\"key\": \"\", \"label\": _(\"(Default) Use <id>\")},\n {\n \"key\": ExternalIntegration.DCTERMS_IDENTIFIER,\n \"label\": _(\"Use <dcterms:identifier> first; if it is not present, use <id>\"),\n },\n ],\n },\n ]\n\n # Subclasses of OPDSImporter may define a different parser class that's\n # a subclass of OPDSXMLParser. For example, a subclass may want to use\n # tags from an additional namespace.\n PARSER_CLASS = OPDSXMLParser\n\n # Subclasses of OPDSImporter may define a list of status codes\n # that should be treated as indicating success, rather than failure,\n # when they show up in <simplified:message> tags.\n SUCCESS_STATUS_CODES = None\n\n def __init__(\n self,\n _db,\n collection,\n data_source_name=None,\n identifier_mapping=None,\n http_get=None,\n metadata_client=None,\n content_modifier=None,\n map_from_collection=None,\n mirrors=None,\n ):\n \"\"\":param collection: LicensePools created by this OPDS import\n will be associated with the given Collection. If this is None,\n no LicensePools will be created -- only Editions.\n\n :param data_source_name: Name of the source of this OPDS feed.\n All Editions created by this import will be associated with\n this DataSource. If there is no DataSource with this name, one\n will be created. NOTE: If `collection` is provided, its\n .data_source will take precedence over any value provided\n here. This is only for use when you are importing OPDS\n metadata without any particular Collection in mind.\n\n :param mirrors: A dictionary of different MirrorUploader objects for\n different purposes.\n\n :param http_get: Use this method to make an HTTP GET request. 
This\n can be replaced with a stub method for testing purposes.\n\n :param metadata_client: A SimplifiedOPDSLookup object that is used\n to fill in missing metadata.\n\n :param content_modifier: A function that may modify-in-place\n representations (such as images and EPUB documents) as they\n come in from the network.\n\n :param map_from_collection\n\n :param mirrors\n \"\"\"\n self._db = _db\n self.log = logging.getLogger(\"OPDS Importer\")\n self._collection_id = collection.id if collection else None\n if self.collection and not data_source_name:\n # Use the Collection data_source for OPDS import.\n data_source = self.collection.data_source\n if data_source:\n data_source_name = data_source.name\n else:\n raise ValueError(\n \"Cannot perform an OPDS import on a Collection that has no associated DataSource!\"\n )\n else:\n # Use the given data_source or default to the Metadata\n # Wrangler.\n data_source_name = data_source_name or DataSource.METADATA_WRANGLER\n self.data_source_name = data_source_name\n self.identifier_mapping = identifier_mapping\n try:\n self.metadata_client = (\n metadata_client\n or MetadataWranglerOPDSLookup.from_config(_db, collection=collection)\n )\n except CannotLoadConfiguration:\n # The Metadata Wrangler isn't configured, but we can import without it.\n self.log.warning(\n \"Metadata Wrangler integration couldn't be loaded, importing without it.\"\n )\n self.metadata_client = None\n\n # Check to see if a mirror for each purpose was passed in.\n # If not, then attempt to create one.\n covers_mirror = (\n mirrors.get(ExternalIntegrationLink.COVERS, None) if mirrors else None\n )\n books_mirror = (\n mirrors.get(ExternalIntegrationLink.OPEN_ACCESS_BOOKS, None)\n if mirrors\n else None\n )\n self.primary_identifier_source = None\n if collection:\n if not covers_mirror:\n # If this Collection is configured to mirror the assets it\n # discovers, this will create a MirrorUploader for that\n # Collection for its purpose. Otherwise, this will return None.\n covers_mirror = MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS\n )\n if not books_mirror:\n books_mirror = MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.OPEN_ACCESS_BOOKS\n )\n self.primary_identifier_source = collection.primary_identifier_source\n\n self.mirrors = dict(covers_mirror=covers_mirror, books_mirror=books_mirror)\n self.content_modifier = content_modifier\n\n # In general, we are cautious when mirroring resources so that\n # we don't, e.g. 
accidentally get our IP banned from\n # gutenberg.org.\n self.http_get = http_get or Representation.cautious_http_get\n self.map_from_collection = map_from_collection\n\n @property\n def collection(self):\n \"\"\"Returns an associated Collection object\n\n :return: Associated Collection object\n :rtype: Optional[Collection]\n \"\"\"\n if self._collection_id:\n return Collection.by_id(self._db, id=self._collection_id)\n\n return None\n\n @property\n def data_source(self):\n \"\"\"Look up or create a DataSource object representing the\n source of this OPDS feed.\n \"\"\"\n offers_licenses = self.collection is not None\n return DataSource.lookup(\n self._db,\n self.data_source_name,\n autocreate=True,\n offers_licenses=offers_licenses,\n )\n\n def assert_importable_content(self, feed, feed_url, max_get_attempts=5):\n \"\"\"Raise an exception if the given feed contains nothing that can,\n even theoretically, be turned into a LicensePool.\n\n By default, this means the feed must link to open-access content\n that can actually be retrieved.\n \"\"\"\n metadata, failures = self.extract_feed_data(feed, feed_url)\n get_attempts = 0\n\n # Find an open-access link, and try to GET it just to make\n # sure OPDS feed isn't hiding non-open-access stuff behind an\n # open-access link.\n #\n # To avoid taking forever or antagonizing API providers, we'll\n # give up after `max_get_attempts` failures.\n for link in self._open_access_links(list(metadata.values())):\n url = link.href\n success = self._is_open_access_link(url, link.media_type)\n if success:\n return success\n get_attempts += 1\n if get_attempts >= max_get_attempts:\n error = (\n \"Was unable to GET supposedly open-access content such as %s (tried %s times)\"\n % (url, get_attempts)\n )\n explanation = \"This might be an OPDS For Distributors feed, or it might require different authentication credentials.\"\n raise IntegrationException(error, explanation)\n\n raise IntegrationException(\n \"No open-access links were found in the OPDS feed.\",\n \"This might be an OPDS for Distributors feed.\",\n )\n\n @classmethod\n def _open_access_links(cls, metadatas):\n \"\"\"Find all open-access links in a list of Metadata objects.\n\n :param metadatas: A list of Metadata objects.\n :yield: A sequence of `LinkData` objects.\n \"\"\"\n for item in metadatas:\n if not item.circulation:\n continue\n for link in item.circulation.links:\n if link.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:\n yield link\n\n def _is_open_access_link(self, url, type):\n \"\"\"Is `url` really an open-access link?\n\n That is, can we make a normal GET request and get something\n that looks like a book?\n \"\"\"\n headers = {}\n if type:\n headers[\"Accept\"] = type\n status, headers, body = self.http_get(url, headers=headers)\n if status == 200 and len(body) > 1024 * 10:\n # We could also check the media types, but this is good\n # enough for now.\n return \"Found a book-like thing at %s\" % url\n self.log.error(\n \"Supposedly open-access link %s didn't give us a book. Status=%s, body length=%s\",\n url,\n status,\n len(body),\n )\n return False\n\n def _parse_identifier(self, identifier):\n \"\"\"Parse the identifier and return an Identifier object representing it.\n\n :param identifier: String containing the identifier\n :type identifier: str\n\n :return: Identifier object\n :rtype: Identifier\n \"\"\"\n return parse_identifier(self._db, identifier)\n\n def import_from_feed(self, feed, feed_url=None):\n\n # Keep track of editions that were imported. 
Pools and works\n # for those editions may be looked up or created.\n imported_editions = {}\n pools = {}\n works = {}\n # CoverageFailures that note business logic errors and non-success download statuses\n failures = {}\n\n # If parsing the overall feed throws an exception, we should address that before\n # moving on. Let the exception propagate.\n metadata_objs, failures = self.extract_feed_data(feed, feed_url)\n # make editions. if have problem, make sure associated pool and work aren't created.\n for key, metadata in metadata_objs.items():\n # key is identifier.urn here\n\n # If there's a status message about this item, don't try to import it.\n if key in list(failures.keys()):\n continue\n\n try:\n # Create an edition. This will also create a pool if there's circulation data.\n edition = self.import_edition_from_metadata(metadata)\n if edition:\n imported_editions[key] = edition\n except Exception as e:\n # Rather than scratch the whole import, treat this as a failure that only applies\n # to this item.\n self.log.error(\"Error importing an OPDS item\", exc_info=e)\n identifier, ignore = Identifier.parse_urn(self._db, key)\n data_source = self.data_source\n failure = CoverageFailure(\n identifier,\n traceback.format_exc(),\n data_source=data_source,\n transient=False,\n )\n failures[key] = failure\n # clean up any edition might have created\n if key in imported_editions:\n del imported_editions[key]\n # Move on to the next item, don't create a work.\n continue\n\n try:\n pool, work = self.update_work_for_edition(edition)\n if pool:\n pools[key] = pool\n if work:\n works[key] = work\n except Exception as e:\n identifier, ignore = Identifier.parse_urn(self._db, key)\n data_source = self.data_source\n failure = CoverageFailure(\n identifier,\n traceback.format_exc(),\n data_source=data_source,\n transient=False,\n )\n failures[key] = failure\n\n return (\n list(imported_editions.values()),\n list(pools.values()),\n list(works.values()),\n failures,\n )\n\n def import_edition_from_metadata(self, metadata):\n \"\"\"For the passed-in Metadata object, see if can find or create an Edition\n in the database. Also create a LicensePool if the Metadata has\n CirculationData in it.\n \"\"\"\n # Locate or create an Edition for this book.\n edition, is_new_edition = metadata.edition(self._db)\n\n policy = ReplacementPolicy(\n subjects=True,\n links=True,\n contributions=True,\n rights=True,\n link_content=True,\n formats=True,\n even_if_not_apparently_updated=True,\n mirrors=self.mirrors,\n content_modifier=self.content_modifier,\n http_get=self.http_get,\n )\n metadata.apply(\n edition=edition,\n collection=self.collection,\n metadata_client=self.metadata_client,\n replace=policy,\n )\n\n return edition\n\n def update_work_for_edition(self, edition):\n \"\"\"If possible, ensure that there is a presentation-ready Work for the\n given edition's primary identifier.\n \"\"\"\n work = None\n\n # Find a LicensePool for the primary identifier. Any LicensePool will\n # do--the collection doesn't have to match, since all\n # LicensePools for a given identifier have the same Work.\n #\n # If we have CirculationData, a pool was created when we\n # imported the edition. 
If there was already a pool from a\n # different data source or a different collection, that's fine\n # too.\n pool = get_one(\n self._db,\n LicensePool,\n identifier=edition.primary_identifier,\n on_multiple=\"interchangeable\",\n )\n\n if pool:\n if not pool.work or not pool.work.presentation_ready:\n # There is no presentation-ready Work for this\n # LicensePool. Try to create one.\n work, ignore = pool.calculate_work()\n else:\n # There is a presentation-ready Work for this LicensePool.\n # Use it.\n work = pool.work\n\n # If a presentation-ready Work already exists, there's no\n # rush. We might have new metadata that will change the Work's\n # presentation, but when we called Metadata.apply() the work\n # was set up to have its presentation recalculated in the\n # background, and that's good enough.\n return pool, work\n\n @classmethod\n def extract_next_links(self, feed):\n if isinstance(feed, (bytes, str)):\n parsed = feedparser.parse(feed)\n else:\n parsed = feed\n feed = parsed[\"feed\"]\n next_links = []\n if feed and \"links\" in feed:\n next_links = [\n link[\"href\"] for link in feed[\"links\"] if link[\"rel\"] == \"next\"\n ]\n return next_links\n\n def extract_last_update_dates(self, feed):\n if isinstance(feed, (bytes, str)):\n parsed_feed = feedparser.parse(feed)\n else:\n parsed_feed = feed\n dates = [\n self.last_update_date_for_feedparser_entry(entry)\n for entry in parsed_feed[\"entries\"]\n ]\n return [x for x in dates if x and x[1]]\n\n def build_identifier_mapping(self, external_urns):\n \"\"\"Uses the given Collection and a list of URNs to reverse\n engineer an identifier mapping.\n\n NOTE: It would be better if .identifier_mapping weren't\n instance data, since a single OPDSImporter might import\n multiple pages of a feed. 
However, the code as written should\n work.\n \"\"\"\n if not self.collection:\n return\n\n mapping = dict()\n identifiers_by_urn, failures = Identifier.parse_urns(\n self._db, external_urns, autocreate=False\n )\n external_identifiers = list(identifiers_by_urn.values())\n\n internal_identifier = aliased(Identifier)\n qu = (\n self._db.query(Identifier, internal_identifier)\n .join(Identifier.inbound_equivalencies)\n .join(internal_identifier, Equivalency.input)\n .join(internal_identifier.licensed_through)\n .filter(\n Identifier.id.in_([x.id for x in external_identifiers]),\n LicensePool.collection == self.collection,\n )\n )\n\n for external_identifier, internal_identifier in qu:\n mapping[external_identifier] = internal_identifier\n\n self.identifier_mapping = mapping\n\n def extract_feed_data(self, feed, feed_url=None):\n \"\"\"Turn an OPDS feed into lists of Metadata and CirculationData objects,\n with associated messages and next_links.\n \"\"\"\n data_source = self.data_source\n fp_metadata, fp_failures = self.extract_data_from_feedparser(\n feed=feed, data_source=data_source\n )\n # gets: medium, measurements, links, contributors, etc.\n xml_data_meta, xml_failures = self.extract_metadata_from_elementtree(\n feed, data_source=data_source, feed_url=feed_url, do_get=self.http_get\n )\n\n if self.map_from_collection:\n # Build the identifier_mapping based on the Collection.\n self.build_identifier_mapping(\n list(fp_metadata.keys()) + list(fp_failures.keys())\n )\n\n # translate the id in failures to identifier.urn\n identified_failures = {}\n for urn, failure in list(fp_failures.items()) + list(xml_failures.items()):\n identifier, failure = self.handle_failure(urn, failure)\n identified_failures[identifier.urn] = failure\n\n # Use one loop for both, since the id will be the same for both dictionaries.\n metadata = {}\n circulationdata = {}\n for id, m_data_dict in list(fp_metadata.items()):\n xml_data_dict = xml_data_meta.get(id, {})\n\n external_identifier = None\n if self.primary_identifier_source == ExternalIntegration.DCTERMS_IDENTIFIER:\n # If it should use <dcterms:identifier> as the primary identifier, it must use the\n # first value from the dcterms identifier, that came from the metadata as an\n # IdentifierData object and it must be validated as a foreign_id before be used\n # as and external_identifier.\n dcterms_ids = xml_data_dict.get(\"dcterms_identifiers\", [])\n if len(dcterms_ids) > 0:\n external_identifier, ignore = Identifier.for_foreign_id(\n self._db, dcterms_ids[0].type, dcterms_ids[0].identifier\n )\n # the external identifier will be add later, so it must be removed at this point\n new_identifiers = dcterms_ids[1:]\n # Id must be in the identifiers with lower weight.\n id_type, id_identifier = Identifier.type_and_identifier_for_urn(id)\n id_weight = 1\n new_identifiers.append(\n IdentifierData(id_type, id_identifier, id_weight)\n )\n xml_data_dict[\"identifiers\"] = new_identifiers\n\n if external_identifier is None:\n external_identifier, ignore = Identifier.parse_urn(self._db, id)\n\n if self.identifier_mapping:\n internal_identifier = self.identifier_mapping.get(\n external_identifier, external_identifier\n )\n else:\n internal_identifier = external_identifier\n\n # Don't process this item if there was already an error\n if internal_identifier.urn in list(identified_failures.keys()):\n continue\n\n identifier_obj = IdentifierData(\n type=internal_identifier.type, identifier=internal_identifier.identifier\n )\n\n # form the Metadata object\n combined_meta = 
self.combine(m_data_dict, xml_data_dict)\n if combined_meta.get(\"data_source\") is None:\n combined_meta[\"data_source\"] = self.data_source_name\n\n combined_meta[\"primary_identifier\"] = identifier_obj\n\n metadata[internal_identifier.urn] = Metadata(**combined_meta)\n\n # Form the CirculationData that would correspond to this Metadata,\n # assuming there is a Collection to hold the LicensePool that\n # would result.\n c_data_dict = None\n if self.collection:\n c_circulation_dict = m_data_dict.get(\"circulation\")\n xml_circulation_dict = xml_data_dict.get(\"circulation\", {})\n c_data_dict = self.combine(c_circulation_dict, xml_circulation_dict)\n\n # Unless there's something useful in c_data_dict, we're\n # not going to put anything under metadata.circulation,\n # and any partial data that got added to\n # metadata.circulation is going to be removed.\n metadata[internal_identifier.urn].circulation = None\n if c_data_dict:\n circ_links_dict = {}\n # extract just the links to pass to CirculationData constructor\n if \"links\" in xml_data_dict:\n circ_links_dict[\"links\"] = xml_data_dict[\"links\"]\n combined_circ = self.combine(c_data_dict, circ_links_dict)\n if combined_circ.get(\"data_source\") is None:\n combined_circ[\"data_source\"] = self.data_source_name\n\n combined_circ[\"primary_identifier\"] = identifier_obj\n circulation = CirculationData(**combined_circ)\n\n self._add_format_data(circulation)\n\n if circulation.formats:\n metadata[internal_identifier.urn].circulation = circulation\n else:\n # If the CirculationData has no formats, it\n # doesn't really offer any way to actually get the\n # book, and we don't want to create a\n # LicensePool. All the circulation data is\n # useless.\n #\n # TODO: This will need to be revisited when we add\n # ODL support.\n pass\n return metadata, identified_failures\n\n def handle_failure(self, urn, failure):\n \"\"\"Convert a URN and a failure message that came in through\n an OPDS feed into an Identifier and a CoverageFailure object.\n\n The Identifier may not be the one designated by `urn` (if it's\n found in self.identifier_mapping) and the 'failure' may turn out not\n to be a CoverageFailure at all -- if it's an Identifier, that means\n that what a normal OPDSImporter would consider 'failure' is\n considered success.\n \"\"\"\n external_identifier, ignore = Identifier.parse_urn(self._db, urn)\n if self.identifier_mapping:\n # The identifier found in the OPDS feed is different from\n # the identifier we want to export.\n internal_identifier = self.identifier_mapping.get(\n external_identifier, external_identifier\n )\n else:\n internal_identifier = external_identifier\n if isinstance(failure, Identifier):\n # The OPDSImporter does not actually consider this a\n # failure. Signal success by returning the internal\n # identifier as the 'failure' object.\n failure = internal_identifier\n else:\n # This really is a failure. 
Associate the internal\n # identifier with the CoverageFailure object.\n failure.obj = internal_identifier\n return internal_identifier, failure\n\n @classmethod\n def _add_format_data(cls, circulation):\n \"\"\"Subclasses that specialize OPDS Import can implement this\n method to add formats to a CirculationData object with\n information that allows a patron to actually get a book\n that's not open access.\n \"\"\"\n\n @classmethod\n def combine(self, d1, d2):\n \"\"\"Combine two dictionaries that can be used as keyword arguments to\n the Metadata constructor.\n \"\"\"\n if not d1 and not d2:\n return dict()\n if not d1:\n return dict(d2)\n if not d2:\n return dict(d1)\n new_dict = dict(d1)\n for k, v in list(d2.items()):\n if k not in new_dict:\n # There is no value from d1. Even if the d2 value\n # is None, we want to set it.\n new_dict[k] = v\n elif v != None:\n # d1 provided a value, and d2 provided a value other\n # than None.\n if isinstance(v, list):\n # The values are lists. Merge them.\n new_dict[k].extend(v)\n elif isinstance(v, dict):\n # The values are dicts. Merge them by with\n # a recursive combine() call.\n new_dict[k] = self.combine(new_dict[k], v)\n else:\n # Overwrite d1's value with d2's value.\n new_dict[k] = v\n else:\n # d1 provided a value and d2 provided None. Do\n # nothing.\n pass\n return new_dict\n\n def extract_data_from_feedparser(self, feed, data_source):\n feedparser_parsed = feedparser.parse(feed)\n values = {}\n failures = {}\n for entry in feedparser_parsed[\"entries\"]:\n identifier, detail, failure = self.data_detail_for_feedparser_entry(\n entry=entry, data_source=data_source\n )\n\n if identifier:\n if failure:\n failures[identifier] = failure\n else:\n if detail:\n values[identifier] = detail\n else:\n # That's bad. Can't make an item-specific error message, but write to\n # log that something very wrong happened.\n logging.error(\n \"Tried to parse an element without a valid identifier. feed=%s\"\n % feed\n )\n return values, failures\n\n @classmethod\n def extract_metadata_from_elementtree(\n cls, feed, data_source, feed_url=None, do_get=None\n ):\n \"\"\"Parse the OPDS as XML and extract all author and subject\n information, as well as ratings and medium.\n\n All the stuff that Feedparser can't handle so we have to use lxml.\n\n :return: a dictionary mapping IDs to dictionaries. The inner\n dictionary can be used as keyword arguments to the Metadata\n constructor.\n \"\"\"\n values = {}\n failures = {}\n parser = cls.PARSER_CLASS()\n if isinstance(feed, bytes):\n inp = BytesIO(feed)\n else:\n inp = BytesIO(feed.encode(\"utf-8\"))\n root = etree.parse(inp)\n\n # Some OPDS feeds (eg Standard Ebooks) contain relative urls,\n # so we need the feed's self URL to extract links. 
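# --- Illustrative sketch (not part of the repository source above) ---
# OPDSImporter.combine() merges two keyword-argument dictionaries: d2 wins for
# scalar values, lists are concatenated, nested dicts are merged recursively,
# and a None in d2 never clobbers an existing d1 value. The dictionaries below
# are made up; the import path is assumed from the dataset's "core/..." layout.
from core.opds_import import OPDSImporter

d1 = {"title": "Old Title", "links": ["cover"], "circulation": {"data_source": "Feed A"}, "subtitle": "Kept"}
d2 = {"title": "New Title", "links": ["epub"], "circulation": {"default_rights_uri": "CC0"}, "subtitle": None, "language": "eng"}

merged = OPDSImporter.combine(d1, d2)
# merged == {
#     "title": "New Title",                # d2 overwrites d1's scalar value
#     "links": ["cover", "epub"],          # lists are extended (d1's list is mutated in place)
#     "circulation": {"data_source": "Feed A", "default_rights_uri": "CC0"},  # recursive merge
#     "subtitle": "Kept",                  # None in d2 does not overwrite d1
#     "language": "eng",                   # keys only in d2 are added
# }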
If none was\n # passed in, we still might be able to guess.\n #\n # TODO: Section 2 of RFC 4287 says we should check xml:base\n # for this, so if anyone actually uses that we'll get around\n # to checking it.\n if not feed_url:\n links = [child.attrib for child in root.getroot() if \"link\" in child.tag]\n self_links = [link[\"href\"] for link in links if link.get(\"rel\") == \"self\"]\n if self_links:\n feed_url = self_links[0]\n\n # First, turn Simplified <message> tags into CoverageFailure\n # objects.\n for failure in cls.coveragefailures_from_messages(data_source, parser, root):\n if isinstance(failure, Identifier):\n # The Simplified <message> tag does not actually\n # represent a failure -- it was turned into an\n # Identifier instead of a CoverageFailure.\n urn = failure.urn\n else:\n urn = failure.obj.urn\n failures[urn] = failure\n\n # Then turn Atom <entry> tags into Metadata objects.\n for entry in parser._xpath(root, \"/atom:feed/atom:entry\"):\n identifier, detail, failure = cls.detail_for_elementtree_entry(\n parser, entry, data_source, feed_url, do_get=do_get\n )\n if identifier:\n if failure:\n failures[identifier] = failure\n if detail:\n values[identifier] = detail\n return values, failures\n\n @classmethod\n def _datetime(cls, entry, key):\n value = entry.get(key, None)\n if not value:\n return value\n return datetime_utc(*value[:6])\n\n def last_update_date_for_feedparser_entry(self, entry):\n identifier = entry.get(\"id\")\n updated = self._datetime(entry, \"updated_parsed\")\n return (identifier, updated)\n\n @classmethod\n def data_detail_for_feedparser_entry(cls, entry, data_source):\n \"\"\"Turn an entry dictionary created by feedparser into dictionaries of data\n that can be used as keyword arguments to the Metadata and CirculationData constructors.\n\n :return: A 3-tuple (identifier, kwargs for Metadata constructor, failure)\n \"\"\"\n identifier = entry.get(\"id\")\n if not identifier:\n return None, None, None\n\n # At this point we can assume that we successfully got some\n # metadata, and possibly a link to the actual book.\n try:\n kwargs_meta = cls._data_detail_for_feedparser_entry(entry, data_source)\n return identifier, kwargs_meta, None\n except Exception as e:\n _db = Session.object_session(data_source)\n identifier_obj, ignore = Identifier.parse_urn(_db, identifier)\n failure = CoverageFailure(\n identifier_obj, traceback.format_exc(), data_source, transient=True\n )\n return identifier, None, failure\n\n @classmethod\n def _data_detail_for_feedparser_entry(cls, entry, metadata_data_source):\n \"\"\"Helper method that extracts metadata and circulation data from a feedparser\n entry. This method can be overridden in tests to check that callers handle things\n properly when it throws an exception.\n \"\"\"\n title = entry.get(\"title\", None)\n if title == OPDSFeed.NO_TITLE:\n title = None\n subtitle = entry.get(\"schema_alternativeheadline\", None)\n\n # Generally speaking, a data source will provide either\n # metadata (e.g. the Simplified metadata wrangler) or both\n # metadata and circulation data (e.g. a publisher's ODL feed).\n #\n # However there is at least one case (the Simplified\n # open-access content server) where one server provides\n # circulation data from a _different_ data source\n # (e.g. 
Project Gutenberg).\n #\n # In this case we want the data source of the LicensePool to\n # be Project Gutenberg, but the data source of the pool's\n # presentation to be the open-access content server.\n #\n # The open-access content server uses a\n # <bibframe:distribution> tag to keep track of which data\n # source provides the circulation data.\n circulation_data_source = metadata_data_source\n circulation_data_source_tag = entry.get(\"bibframe_distribution\")\n if circulation_data_source_tag:\n circulation_data_source_name = circulation_data_source_tag.get(\n \"bibframe:providername\"\n )\n if circulation_data_source_name:\n _db = Session.object_session(metadata_data_source)\n # We know this data source offers licenses because\n # that's what the <bibframe:distribution> is there\n # to say.\n circulation_data_source = DataSource.lookup(\n _db,\n circulation_data_source_name,\n autocreate=True,\n offers_licenses=True,\n )\n if not circulation_data_source:\n raise ValueError(\n \"Unrecognized circulation data source: %s\"\n % (circulation_data_source_name)\n )\n last_opds_update = cls._datetime(entry, \"updated_parsed\")\n\n publisher = entry.get(\"publisher\", None)\n if not publisher:\n publisher = entry.get(\"dcterms_publisher\", None)\n\n language = entry.get(\"language\", None)\n if not language:\n language = entry.get(\"dcterms_language\", None)\n\n links = []\n\n def summary_to_linkdata(detail):\n if not detail:\n return None\n if not \"value\" in detail or not detail[\"value\"]:\n return None\n\n content = detail[\"value\"]\n media_type = detail.get(\"type\", \"text/plain\")\n return cls.make_link_data(\n rel=Hyperlink.DESCRIPTION, media_type=media_type, content=content\n )\n\n summary_detail = entry.get(\"summary_detail\", None)\n link = summary_to_linkdata(summary_detail)\n if link:\n links.append(link)\n\n for content_detail in entry.get(\"content\", []):\n link = summary_to_linkdata(content_detail)\n if link:\n links.append(link)\n\n rights_uri = cls.rights_uri_from_feedparser_entry(entry)\n\n kwargs_meta = dict(\n title=title,\n subtitle=subtitle,\n language=language,\n publisher=publisher,\n links=links,\n # refers to when was updated in opds feed, not our db\n data_source_last_updated=last_opds_update,\n )\n\n # Although we always provide the CirculationData, it will only\n # be used if the OPDSImporter has a Collection to hold the\n # LicensePool that will result from importing it.\n kwargs_circ = dict(\n data_source=circulation_data_source.name,\n links=list(links),\n default_rights_uri=rights_uri,\n )\n kwargs_meta[\"circulation\"] = kwargs_circ\n return kwargs_meta\n\n @classmethod\n def rights_uri(cls, rights_string):\n \"\"\"Determine the URI that best encapsulates the rights status of\n the downloads associated with this book.\n \"\"\"\n return RightsStatus.rights_uri_from_string(rights_string)\n\n @classmethod\n def rights_uri_from_feedparser_entry(cls, entry):\n \"\"\"Extract a rights URI from a parsed feedparser entry.\n\n :return: A rights URI.\n \"\"\"\n rights = entry.get(\"rights\", \"\")\n return cls.rights_uri(rights)\n\n @classmethod\n def rights_uri_from_entry_tag(cls, entry):\n \"\"\"Extract a rights string from an lxml <entry> tag.\n\n :return: A rights URI.\n \"\"\"\n rights = cls.PARSER_CLASS._xpath1(entry, \"rights\")\n if rights:\n return cls.rights_uri(rights)\n\n @classmethod\n def extract_messages(cls, parser, feed_tag):\n \"\"\"Extract <simplified:message> tags from an OPDS feed and convert\n them into OPDSMessage objects.\n \"\"\"\n path = 
\"/atom:feed/simplified:message\"\n for message_tag in parser._xpath(feed_tag, path):\n\n # First thing to do is determine which Identifier we're\n # talking about.\n identifier_tag = parser._xpath1(message_tag, \"atom:id\")\n if identifier_tag is None:\n urn = None\n else:\n urn = identifier_tag.text\n\n # What status code is associated with the message?\n status_code_tag = parser._xpath1(message_tag, \"simplified:status_code\")\n if status_code_tag is None:\n status_code = None\n else:\n try:\n status_code = int(status_code_tag.text)\n except ValueError:\n status_code = None\n\n # What is the human-readable message?\n description_tag = parser._xpath1(message_tag, \"schema:description\")\n if description_tag is None:\n description = \"\"\n else:\n description = description_tag.text\n\n yield OPDSMessage(urn, status_code, description)\n\n @classmethod\n def coveragefailures_from_messages(cls, data_source, parser, feed_tag):\n \"\"\"Extract CoverageFailure objects from a parsed OPDS document. This\n allows us to determine the fate of books which could not\n become <entry> tags.\n \"\"\"\n for message in cls.extract_messages(parser, feed_tag):\n failure = cls.coveragefailure_from_message(data_source, message)\n if failure:\n yield failure\n\n @classmethod\n def coveragefailure_from_message(cls, data_source, message):\n \"\"\"Turn a <simplified:message> tag into a CoverageFailure.\"\"\"\n\n _db = Session.object_session(data_source)\n\n # First thing to do is determine which Identifier we're\n # talking about. If we can't do that, we can't create a\n # CoverageFailure object.\n urn = message.urn\n try:\n identifier, ignore = Identifier.parse_urn(_db, urn)\n except ValueError as e:\n identifier = None\n\n if not identifier:\n # We can't associate this message with any particular\n # Identifier so we can't turn it into a CoverageFailure.\n return None\n\n if cls.SUCCESS_STATUS_CODES and message.status_code in cls.SUCCESS_STATUS_CODES:\n # This message is telling us that nothing went wrong. 
It\n # should be treated as a success.\n return identifier\n\n if message.status_code == 200:\n # By default, we treat a message with a 200 status code\n # as though nothing had been returned at all.\n return None\n\n description = message.message\n status_code = message.status_code\n if description and status_code:\n exception = \"%s: %s\" % (status_code, description)\n elif status_code:\n exception = str(status_code)\n elif description:\n exception = description\n else:\n exception = \"No detail provided.\"\n\n # All these CoverageFailures are transient because ATM we can\n # only assume that the server will eventually have the data.\n return CoverageFailure(identifier, exception, data_source, transient=True)\n\n @classmethod\n def detail_for_elementtree_entry(\n cls, parser, entry_tag, data_source, feed_url=None, do_get=None\n ):\n\n \"\"\"Turn an <atom:entry> tag into a dictionary of metadata that can be\n used as keyword arguments to the Metadata contructor.\n\n :return: A 2-tuple (identifier, kwargs)\n \"\"\"\n identifier = parser._xpath1(entry_tag, \"atom:id\")\n if identifier is None or not identifier.text:\n # This <entry> tag doesn't identify a book so we\n # can't derive any information from it.\n return None, None, None\n identifier = identifier.text\n\n try:\n data = cls._detail_for_elementtree_entry(\n parser, entry_tag, feed_url, do_get=do_get\n )\n return identifier, data, None\n\n except Exception as e:\n _db = Session.object_session(data_source)\n identifier_obj, ignore = Identifier.parse_urn(_db, identifier)\n failure = CoverageFailure(\n identifier_obj, traceback.format_exc(), data_source, transient=True\n )\n return identifier, None, failure\n\n @classmethod\n def _detail_for_elementtree_entry(\n cls, parser, entry_tag, feed_url=None, do_get=None\n ):\n \"\"\"Helper method that extracts metadata and circulation data from an elementtree\n entry. 
This method can be overridden in tests to check that callers handle things\n properly when it throws an exception.\n \"\"\"\n # We will fill this dictionary with all the information\n # we can find.\n data = dict()\n\n alternate_identifiers = []\n for id_tag in parser._xpath(entry_tag, \"dcterms:identifier\"):\n v = cls.extract_identifier(id_tag)\n if v:\n alternate_identifiers.append(v)\n data[\"dcterms_identifiers\"] = alternate_identifiers\n\n # If another identifier exists, add it here\n data[\"identifiers\"] = data[\"dcterms_identifiers\"]\n\n data[\"contributors\"] = []\n for author_tag in parser._xpath(entry_tag, \"atom:author\"):\n contributor = cls.extract_contributor(parser, author_tag)\n if contributor is not None:\n data[\"contributors\"].append(contributor)\n\n data[\"subjects\"] = [\n cls.extract_subject(parser, category_tag)\n for category_tag in parser._xpath(entry_tag, \"atom:category\")\n ]\n\n ratings = []\n for rating_tag in parser._xpath(entry_tag, \"schema:Rating\"):\n v = cls.extract_measurement(rating_tag)\n if v:\n ratings.append(v)\n data[\"measurements\"] = ratings\n rights_uri = cls.rights_uri_from_entry_tag(entry_tag)\n\n data[\"links\"] = cls.consolidate_links(\n [\n cls.extract_link(link_tag, feed_url, rights_uri)\n for link_tag in parser._xpath(entry_tag, \"atom:link\")\n ]\n )\n\n derived_medium = cls.get_medium_from_links(data[\"links\"])\n data[\"medium\"] = cls.extract_medium(entry_tag, derived_medium)\n\n series_tag = parser._xpath(entry_tag, \"schema:Series\")\n if series_tag:\n data[\"series\"], data[\"series_position\"] = cls.extract_series(series_tag[0])\n\n issued_tag = parser._xpath(entry_tag, \"dcterms:issued\")\n if issued_tag:\n date_string = issued_tag[0].text\n # By default, the date for strings that only have a year will\n # be set to January 1 rather than the current date.\n default = datetime_utc(utc_now().year, 1, 1)\n try:\n data[\"published\"] = dateutil.parser.parse(date_string, default=default)\n except Exception as e:\n # This entry had an issued tag, but it was in a format we couldn't parse.\n pass\n\n return data\n\n @classmethod\n def get_medium_from_links(cls, links):\n \"\"\"Get medium if derivable from information in an acquisition link.\"\"\"\n derived = None\n for link in links:\n if (\n not link.rel\n or not link.media_type\n or not link.rel.startswith(\"http://opds-spec.org/acquisition/\")\n ):\n continue\n derived = Edition.medium_from_media_type(link.media_type)\n if derived:\n break\n return derived\n\n @classmethod\n def extract_identifier(cls, identifier_tag):\n \"\"\"Turn a <dcterms:identifier> tag into an IdentifierData object.\"\"\"\n try:\n type, identifier = Identifier.type_and_identifier_for_urn(\n identifier_tag.text.lower()\n )\n return IdentifierData(type, identifier)\n except ValueError:\n return None\n\n @classmethod\n def extract_medium(cls, entry_tag, default=Edition.BOOK_MEDIUM):\n \"\"\"Derive a value for Edition.medium from schema:additionalType or\n from a <dcterms:format> subtag.\n\n :param entry_tag: An <atom:entry> tag.\n :param default: The value to use if nothing is found.\n \"\"\"\n if entry_tag is None:\n return default\n\n medium = None\n additional_type = entry_tag.get(\"{http://schema.org/}additionalType\")\n if additional_type:\n medium = Edition.additional_type_to_medium.get(additional_type, None)\n if not medium:\n format_tag = entry_tag.find(\"{http://purl.org/dc/terms/}format\")\n if format_tag is not None:\n media_type = format_tag.text\n medium = Edition.medium_from_media_type(media_type)\n 
return medium or default\n\n @classmethod\n def extract_contributor(cls, parser, author_tag):\n \"\"\"Turn an <atom:author> tag into a ContributorData object.\"\"\"\n subtag = parser.text_of_optional_subtag\n sort_name = subtag(author_tag, \"simplified:sort_name\")\n display_name = subtag(author_tag, \"atom:name\")\n family_name = subtag(author_tag, \"simplified:family_name\")\n wikipedia_name = subtag(author_tag, \"simplified:wikipedia_name\")\n # TODO: we need a way of conveying roles. I believe Bibframe\n # has the answer.\n\n # TODO: Also collect VIAF and LC numbers if present. This\n # requires parsing the URIs. Only the metadata wrangler will\n # provide this information.\n\n viaf = None\n if sort_name or display_name or viaf:\n return ContributorData(\n sort_name=sort_name,\n display_name=display_name,\n family_name=family_name,\n wikipedia_name=wikipedia_name,\n roles=None,\n )\n\n logging.info(\n \"Refusing to create ContributorData for contributor with no sort name, display name, or VIAF.\"\n )\n return None\n\n @classmethod\n def extract_subject(cls, parser, category_tag):\n \"\"\"Turn an <atom:category> tag into a SubjectData object.\"\"\"\n attr = category_tag.attrib\n\n # Retrieve the type of this subject - FAST, Dewey Decimal,\n # etc.\n scheme = attr.get(\"scheme\")\n subject_type = Subject.by_uri.get(scheme)\n if not subject_type:\n # We can't represent this subject because we don't\n # know its scheme. Just treat it as a tag.\n subject_type = Subject.TAG\n\n # Retrieve the term (e.g. \"827\") and human-readable name\n # (e.g. \"English Satire & Humor\") for this subject.\n term = attr.get(\"term\")\n name = attr.get(\"label\")\n default_weight = 1\n\n weight = attr.get(\"{http://schema.org/}ratingValue\", default_weight)\n try:\n weight = int(weight)\n except ValueError as e:\n weight = default_weight\n\n return SubjectData(type=subject_type, identifier=term, name=name, weight=weight)\n\n @classmethod\n def extract_link(cls, link_tag, feed_url=None, entry_rights_uri=None):\n \"\"\"Convert a <link> tag into a LinkData object.\n\n :param feed_url: The URL to the enclosing feed, for use in resolving\n relative links.\n\n :param entry_rights_uri: A URI describing the rights advertised\n in the entry. 
Unless this specific link says otherwise, we\n will assume that the representation on the other end of the link\n if made available on these terms.\n \"\"\"\n attr = link_tag.attrib\n rel = attr.get(\"rel\")\n media_type = attr.get(\"type\")\n href = attr.get(\"href\")\n if not href or not rel:\n # The link exists but has no destination, or no specified\n # relationship to the entry.\n return None\n rights = attr.get(\"{%s}rights\" % OPDSXMLParser.NAMESPACES[\"dcterms\"])\n if rights:\n # Rights associated with the link override rights\n # associated with the entry.\n rights_uri = cls.rights_uri(rights)\n else:\n rights_uri = entry_rights_uri\n if feed_url and not urlparse(href).netloc:\n # This link is relative, so we need to get the absolute url\n href = urljoin(feed_url, href)\n return cls.make_link_data(rel, href, media_type, rights_uri)\n\n @classmethod\n def make_link_data(\n cls, rel, href=None, media_type=None, rights_uri=None, content=None\n ):\n \"\"\"Hook method for creating a LinkData object.\n\n Intended to be overridden in subclasses.\n \"\"\"\n return LinkData(\n rel=rel,\n href=href,\n media_type=media_type,\n rights_uri=rights_uri,\n content=content,\n )\n\n @classmethod\n def consolidate_links(cls, links):\n \"\"\"Try to match up links with their thumbnails.\n\n If link n is an image and link n+1 is a thumbnail, then the\n thumbnail is assumed to be the thumbnail of the image.\n\n Similarly if link n is a thumbnail and link n+1 is an image.\n \"\"\"\n # Strip out any links that didn't get turned into LinkData objects\n # due to missing `href` or whatever.\n new_links = [x for x in links if x]\n\n # Make a new list of links from that list, to iterate over --\n # we'll be modifying new_links in place so we can't iterate\n # over it.\n links = list(new_links)\n\n next_link_already_handled = False\n for i, link in enumerate(links):\n\n if link.rel not in (Hyperlink.THUMBNAIL_IMAGE, Hyperlink.IMAGE):\n # This is not any kind of image. Ignore it.\n continue\n\n if next_link_already_handled:\n # This link and the previous link were part of an\n # image-thumbnail pair.\n next_link_already_handled = False\n continue\n\n if i == len(links) - 1:\n # This is the last link. Since there is no next link\n # there's nothing to do here.\n continue\n\n # Peek at the next link.\n next_link = links[i + 1]\n\n if (\n link.rel == Hyperlink.THUMBNAIL_IMAGE\n and next_link.rel == Hyperlink.IMAGE\n ):\n # This link is a thumbnail and the next link is\n # (presumably) the corresponding image.\n thumbnail_link = link\n image_link = next_link\n elif (\n link.rel == Hyperlink.IMAGE\n and next_link.rel == Hyperlink.THUMBNAIL_IMAGE\n ):\n thumbnail_link = next_link\n image_link = link\n else:\n # This link and the next link do not form an\n # image-thumbnail pair. 
Do nothing.\n continue\n\n image_link.thumbnail = thumbnail_link\n new_links.remove(thumbnail_link)\n next_link_already_handled = True\n\n return new_links\n\n @classmethod\n def extract_measurement(cls, rating_tag):\n type = rating_tag.get(\"{http://schema.org/}additionalType\")\n value = rating_tag.get(\"{http://schema.org/}ratingValue\")\n if not value:\n value = rating_tag.attrib.get(\"{http://schema.org}ratingValue\")\n if not type:\n type = Measurement.RATING\n try:\n value = float(value)\n return MeasurementData(\n quantity_measured=type,\n value=value,\n )\n except ValueError:\n return None\n\n @classmethod\n def extract_series(cls, series_tag):\n attr = series_tag.attrib\n series_name = attr.get(\"{http://schema.org/}name\", None)\n series_position = attr.get(\"{http://schema.org/}position\", None)\n return series_name, series_position\n\n\nclass OPDSImportMonitor(CollectionMonitor, HasSelfTests, HasExternalIntegration):\n \"\"\"Periodically monitor a Collection's OPDS archive feed and import\n every title it mentions.\n \"\"\"\n\n SERVICE_NAME = \"OPDS Import Monitor\"\n\n # The first time this Monitor is invoked we want to get the\n # entire OPDS feed.\n DEFAULT_START_TIME = CollectionMonitor.NEVER\n\n # The protocol this Monitor works with. Subclasses that\n # specialize OPDS import should override this.\n PROTOCOL = ExternalIntegration.OPDS_IMPORT\n\n def __init__(\n self, _db, collection, import_class, force_reimport=False, **import_class_kwargs\n ):\n if not collection:\n raise ValueError(\n \"OPDSImportMonitor can only be run in the context of a Collection.\"\n )\n\n if collection.protocol != self.PROTOCOL:\n raise ValueError(\n \"Collection %s is configured for protocol %s, not %s.\"\n % (collection.name, collection.protocol, self.PROTOCOL)\n )\n\n data_source = self.data_source(collection)\n if not data_source:\n raise ValueError(\n \"Collection %s has no associated data source.\" % collection.name\n )\n\n self.external_integration_id = collection.external_integration.id\n self.feed_url = self.opds_url(collection)\n self.force_reimport = force_reimport\n self.username = collection.external_integration.username\n self.password = <PASSWORD>_<PASSWORD>\n self.custom_accept_header = collection.external_integration.custom_accept_header\n\n self.importer = import_class(_db, collection=collection, **import_class_kwargs)\n\n self._configuration_storage: ConfigurationStorage = ConfigurationStorage(self)\n self._configuration_factory: ConfigurationFactory = ConfigurationFactory()\n self._max_retry_count: Optional[int] = None\n\n with self._get_configuration(_db) as configuration:\n self._max_retry_count = (\n int(configuration.max_retry_count)\n if configuration.max_retry_count is not None\n else None\n )\n\n super(OPDSImportMonitor, self).__init__(_db, collection)\n\n @contextmanager\n def _get_configuration(\n self, db: sqlalchemy.orm.session.Session\n ) -> ConnectionConfiguration:\n \"\"\"Return the configuration object.\n\n :param db: Database session\n :return: Configuration object\n \"\"\"\n with self._configuration_factory.create(\n self._configuration_storage, db, ConnectionConfiguration\n ) as configuration:\n yield configuration\n\n def external_integration(self, _db):\n return get_one(_db, ExternalIntegration, id=self.external_integration_id)\n\n def _run_self_tests(self, _db):\n \"\"\"Retrieve the first page of the OPDS feed\"\"\"\n first_page = self.run_test(\n \"Retrieve the first page of the OPDS feed (%s)\" % self.feed_url,\n self.follow_one_link,\n self.feed_url,\n 
)\n yield first_page\n if not first_page.result:\n return\n\n # We got a page, but does it have anything the importer can\n # turn into a Work?\n #\n # By default, this means it must contain an open-access link.\n next_links, content = first_page.result\n yield self.run_test(\n \"Checking for importable content\",\n self.importer.assert_importable_content,\n content,\n self.feed_url,\n )\n\n def _get(self, url, headers):\n \"\"\"Make the sort of HTTP request that's normal for an OPDS feed.\n\n Long timeout, raise error on anything but 2xx or 3xx.\n \"\"\"\n\n headers = self._update_headers(headers)\n kwargs = dict(\n timeout=120,\n max_retry_count=self._max_retry_count,\n allowed_response_codes=[\"2xx\", \"3xx\"],\n )\n response = HTTP.get_with_timeout(url, headers=headers, **kwargs)\n return response.status_code, response.headers, response.content\n\n def _get_accept_header(self):\n return \",\".join(\n [\n OPDSFeed.ACQUISITION_FEED_TYPE,\n \"application/atom+xml;q=0.9\",\n \"application/xml;q=0.8\",\n \"*/*;q=0.1\",\n ]\n )\n\n def _update_headers(self, headers):\n headers = dict(headers) if headers else {}\n if self.username and self.password and not \"Authorization\" in headers:\n headers[\"Authorization\"] = \"Basic %s\" % base64.b64encode(\n \"%s:%s\" % (self.username, self.password)\n )\n\n if self.custom_accept_header:\n headers[\"Accept\"] = self.custom_accept_header\n elif not \"Accept\" in headers:\n headers[\"Accept\"] = self._get_accept_header()\n\n return headers\n\n def _parse_identifier(self, identifier):\n \"\"\"Extract the publication's identifier from its metadata.\n\n :param identifier: String containing the identifier\n :type identifier: str\n\n :return: Identifier object\n :rtype: Identifier\n \"\"\"\n return parse_identifier(self._db, identifier)\n\n def opds_url(self, collection):\n \"\"\"Returns the OPDS import URL for the given collection.\n\n By default, this URL is stored as the external account ID, but\n subclasses may override this.\n \"\"\"\n return collection.external_account_id\n\n def data_source(self, collection):\n \"\"\"Returns the data source name for the given collection.\n\n By default, this URL is stored as a setting on the collection, but\n subclasses may hard-code it.\n \"\"\"\n return collection.data_source\n\n def feed_contains_new_data(self, feed):\n \"\"\"Does the given feed contain any entries that haven't been imported\n yet?\n \"\"\"\n if self.force_reimport:\n # We don't even need to check. 
Always treat the feed as\n # though it contained new data.\n return True\n\n # For every item in the last page of the feed, check when that\n # item was last updated.\n last_update_dates = self.importer.extract_last_update_dates(feed)\n\n new_data = False\n for identifier, remote_updated in last_update_dates:\n\n identifier = self._parse_identifier(identifier)\n if not identifier:\n # Maybe this is new, maybe not, but we can't associate\n # the information with an Identifier, so we can't do\n # anything about it.\n self.log.info(\"Ignoring %s because unable to turn into an Identifier.\")\n continue\n\n if self.identifier_needs_import(identifier, remote_updated):\n new_data = True\n break\n return new_data\n\n def identifier_needs_import(self, identifier, last_updated_remote):\n \"\"\"Does the remote side have new information about this Identifier?\n\n :param identifier: An Identifier.\n :param last_update_remote: The last time the remote side updated\n the OPDS entry for this Identifier.\n \"\"\"\n if not identifier:\n return False\n\n record = CoverageRecord.lookup(\n identifier,\n self.importer.data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n )\n\n if not record:\n # We have no record of importing this Identifier. Import\n # it now.\n self.log.info(\n \"Counting %s as new because it has no CoverageRecord.\", identifier\n )\n return True\n\n # If there was a transient failure last time we tried to\n # import this book, try again regardless of whether the\n # feed has changed.\n if record.status == CoverageRecord.TRANSIENT_FAILURE:\n self.log.info(\n \"Counting %s as new because previous attempt resulted in transient failure: %s\",\n identifier,\n record.exception,\n )\n return True\n\n # If our last attempt was a success or a persistent\n # failure, we only want to import again if something\n # changed since then.\n\n if record.timestamp:\n # We've imported this entry before, so don't import it\n # again unless it's changed.\n\n if not last_updated_remote:\n # The remote isn't telling us whether the entry\n # has been updated. Import it again to be safe.\n self.log.info(\n \"Counting %s as new because remote has no information about when it was updated.\",\n identifier,\n )\n return True\n\n if to_utc(last_updated_remote) >= to_utc(record.timestamp):\n # This book has been updated.\n self.log.info(\n \"Counting %s as new because its coverage date is %s and remote has %s.\",\n identifier,\n record.timestamp,\n last_updated_remote,\n )\n return True\n\n def _verify_media_type(self, url, status_code, headers, feed):\n # Make sure we got an OPDS feed, and not an error page that was\n # sent with a 200 status code.\n media_type = headers.get(\"content-type\")\n if not media_type or not any(\n x in media_type for x in (OPDSFeed.ATOM_LIKE_TYPES)\n ):\n message = \"Expected Atom feed, got %s\" % media_type\n raise BadResponseException(\n url, message=message, debug_message=feed, status_code=status_code\n )\n\n def follow_one_link(self, url, do_get=None):\n \"\"\"Download a representation of a URL and extract the useful\n information.\n\n :return: A 2-tuple (next_links, feed). `next_links` is a list of\n additional links that need to be followed. 
`feed` is the content\n that needs to be imported.\n \"\"\"\n self.log.info(\"Following next link: %s\", url)\n get = do_get or self._get\n status_code, headers, feed = get(url, {})\n\n self._verify_media_type(url, status_code, headers, feed)\n\n new_data = self.feed_contains_new_data(feed)\n\n if new_data:\n # There's something new on this page, so we need to check\n # the next page as well.\n next_links = self.importer.extract_next_links(feed)\n return next_links, feed\n else:\n # There's nothing new, so we don't need to import this\n # feed or check the next page.\n self.log.info(\"No new data.\")\n return [], None\n\n def import_one_feed(self, feed):\n \"\"\"Import every book mentioned in an OPDS feed.\"\"\"\n\n # Because we are importing into a Collection, we will immediately\n # mark a book as presentation-ready if possible.\n imported_editions, pools, works, failures = self.importer.import_from_feed(\n feed, feed_url=self.opds_url(self.collection)\n )\n\n # Create CoverageRecords for the successful imports.\n for edition in imported_editions:\n record = CoverageRecord.add_for(\n edition,\n self.importer.data_source,\n CoverageRecord.IMPORT_OPERATION,\n status=CoverageRecord.SUCCESS,\n )\n\n # Create CoverageRecords for the failures.\n for urn, failure in list(failures.items()):\n if isinstance(failure, list):\n failure_items = failure\n else:\n failure_items = [failure]\n\n for failure_item in failure_items:\n failure_item.to_coverage_record(\n operation=CoverageRecord.IMPORT_OPERATION\n )\n\n return imported_editions, failures\n\n def _get_feeds(self):\n feeds = []\n queue = [self.feed_url]\n seen_links = set([])\n\n # First, follow the feed's next links until we reach a page with\n # nothing new. If any link raises an exception, nothing will be imported.\n\n while queue:\n new_queue = []\n\n for link in queue:\n if link in seen_links:\n continue\n next_links, feed = self.follow_one_link(link)\n new_queue.extend(next_links)\n if feed:\n feeds.append((link, feed))\n seen_links.add(link)\n\n queue = new_queue\n\n # Start importing at the end. If something fails, it will be easier to\n # pick up where we left off.\n feeds = reversed(feeds)\n\n return feeds\n\n def run_once(self, progress_ignore):\n feeds = self._get_feeds()\n total_imported = 0\n total_failures = 0\n\n for link, feed in feeds:\n self.log.info(\"Importing next feed: %s\", link)\n imported_editions, failures = self.import_one_feed(feed)\n total_imported += len(imported_editions)\n total_failures += len(failures)\n self._db.commit()\n\n achievements = \"Items imported: %d. 
Failures: %d.\" % (\n total_imported,\n total_failures,\n )\n\n return TimestampData(achievements=achievements)\n", "id": "10942011", "language": "Python", "matching_score": 7.228314399719238, "max_stars_count": 0, "path": "core/opds_import.py" }, { "content": "import datetime\nimport os\nfrom io import BytesIO\nfrom zipfile import ZipFile\n\nimport feedparser\nfrom flask_babel import lazy_gettext as _\n\nfrom core.model import (\n Collection,\n DataSource,\n ExternalIntegration,\n Hyperlink,\n Representation,\n RightsStatus,\n)\nfrom core.opds import OPDSFeed\nfrom core.opds_import import OPDSImporter, OPDSImportMonitor, OPDSXMLParser\nfrom core.util.epub import EpubAccessor\n\n\nclass FeedbooksOPDSImporter(OPDSImporter):\n\n REALLY_IMPORT_KEY = \"really_import\"\n REPLACEMENT_CSS_KEY = \"replacement_css\"\n\n NAME = ExternalIntegration.FEEDBOOKS\n DESCRIPTION = _(\"Import open-access books from FeedBooks.\")\n SETTINGS = [\n {\n \"key\": REALLY_IMPORT_KEY,\n \"type\": \"select\",\n \"label\": _(\"Really?\"),\n \"description\": _(\n \"Most libraries are better off importing free Feedbooks titles via an OPDS Import integration from NYPL's open-access content server or DPLA's Open Bookshelf. This setting makes sure you didn't create this collection by accident and really want to import directly from Feedbooks.\"\n ),\n \"options\": [\n {\n \"key\": \"false\",\n \"label\": _(\"Don't actually import directly from Feedbooks.\"),\n },\n {\n \"key\": \"true\",\n \"label\": _(\n \"I know what I'm doing; import directly from Feedbooks.\"\n ),\n },\n ],\n \"default\": \"false\",\n },\n {\n \"key\": Collection.EXTERNAL_ACCOUNT_ID_KEY,\n \"label\": _(\"Import books in this language\"),\n \"description\": _(\n \"Feedbooks offers separate feeds for different languages. Each one can be made into a separate collection.\"\n ),\n \"type\": \"select\",\n \"options\": [\n {\"key\": \"en\", \"label\": _(\"English\")},\n {\"key\": \"es\", \"label\": _(\"Spanish\")},\n {\"key\": \"fr\", \"label\": _(\"French\")},\n {\"key\": \"it\", \"label\": _(\"Italian\")},\n {\"key\": \"de\", \"label\": _(\"German\")},\n ],\n \"default\": \"en\",\n },\n {\n \"key\": REPLACEMENT_CSS_KEY,\n \"label\": _(\"Replacement stylesheet\"),\n \"description\": _(\n \"If you are mirroring the Feedbooks titles, you may replace the Feedbooks stylesheet with an alternate stylesheet in the mirrored copies. The default value is an accessibility-focused stylesheet produced by the DAISY consortium. 
If you mirror Feedbooks titles but leave this empty, the Feedbooks titles will be mirrored as-is.\"\n ),\n \"default\": \"http://www.daisy.org/z3986/2005/dtbook.2005.basic.css\",\n },\n ]\n\n BASE_OPDS_URL = \"http://www.feedbooks.com/books/recent.atom?lang=%(language)s\"\n\n THIRTY_DAYS = datetime.timedelta(days=30)\n\n def __init__(self, _db, collection, *args, **kwargs):\n integration = collection.external_integration\n new_css_url = integration.setting(self.REPLACEMENT_CSS_KEY).value\n if new_css_url:\n # We may need to modify incoming content to replace CSS.\n kwargs[\"content_modifier\"] = self.replace_css\n kwargs[\"data_source_name\"] = DataSource.FEEDBOOKS\n\n really_import = integration.setting(self.REALLY_IMPORT_KEY).bool_value\n if not really_import:\n raise Exception(\n \"Refusing to instantiate a Feedbooks importer because it's configured to not actually do an import.\"\n )\n\n self.language = collection.external_account_id\n\n super(FeedbooksOPDSImporter, self).__init__(_db, collection, **kwargs)\n\n self.new_css = None\n if new_css_url and self.http_get:\n status_code, headers, content = self.http_get(new_css_url, {})\n if status_code != 200:\n raise IOError(\n \"Replacement stylesheet URL returned %r response code.\"\n % status_code\n )\n content_type = headers.get(\"content-type\", \"\")\n if not content_type.startswith(\"text/css\"):\n raise IOError(\n \"Replacement stylesheet is %r, not a CSS document.\" % content_type\n )\n self.new_css = content\n\n def extract_feed_data(self, feed, feed_url=None):\n metadata, failures = super(FeedbooksOPDSImporter, self).extract_feed_data(\n feed, feed_url\n )\n for id, m in list(metadata.items()):\n self.improve_description(id, m)\n return metadata, failures\n\n @classmethod\n def rights_uri_from_feedparser_entry(cls, entry):\n \"\"\"(Refuse to) determine the URI that best encapsulates the rights\n status of the downloads associated with this book.\n\n We cannot answer this question from within feedparser code; we have\n to wait until we enter elementtree code.\n \"\"\"\n return None\n\n @classmethod\n def rights_uri_from_entry_tag(cls, entry):\n \"\"\"Determine the URI that best encapsulates the rights\n status of the downloads associated with this book.\n \"\"\"\n rights = OPDSXMLParser._xpath1(entry, \"atom:rights\")\n if rights is not None:\n rights = rights.text\n source = OPDSXMLParser._xpath1(entry, \"dcterms:source\")\n if source is not None:\n source = source.text\n publication_year = OPDSXMLParser._xpath1(entry, \"dcterms:issued\")\n if publication_year is not None:\n publication_year = publication_year.text\n return RehostingPolicy.rights_uri(rights, source, publication_year)\n\n @classmethod\n def _detail_for_elementtree_entry(\n cls, parser, entry_tag, feed_url=None, do_get=None\n ):\n \"\"\"Determine a more accurate value for this entry's default rights\n URI.\n\n We can't get it right within the Feedparser code, because\n dcterms:issued (which we use to determine whether a work is\n public domain in the United States) is not available through\n Feedparser.\n \"\"\"\n detail = super(FeedbooksOPDSImporter, cls)._detail_for_elementtree_entry(\n parser, entry_tag, feed_url, do_get=do_get\n )\n rights_uri = cls.rights_uri_from_entry_tag(entry_tag)\n circulation = detail.setdefault(\"circulation\", {})\n circulation[\"default_rights_uri\"] = rights_uri\n return detail\n\n @classmethod\n def make_link_data(\n cls, rel, href=None, media_type=None, rights_uri=None, content=None\n ):\n \"\"\"Turn basic link information into a 
LinkData object.\n\n FeedBooks puts open-access content behind generic\n 'acquisition' links. We want to treat the EPUBs as open-access\n links and (at the request of FeedBooks) ignore the other\n formats.\n \"\"\"\n if rel == Hyperlink.GENERIC_OPDS_ACQUISITION:\n if media_type and media_type.startswith(Representation.EPUB_MEDIA_TYPE):\n # Treat this generic acquisition link as an\n # open-access link.\n rel = Hyperlink.OPEN_ACCESS_DOWNLOAD\n else:\n # Feedbooks requests that we not mirror books in this format.\n # Act as if there was no link.\n return None\n\n return super(FeedbooksOPDSImporter, cls).make_link_data(\n rel, href, media_type, rights_uri, content\n )\n\n def improve_description(self, id, metadata):\n \"\"\"Improve the description associated with a book,\n if possible.\n\n This involves fetching an alternate OPDS entry that might\n contain more detailed descriptions than those available in the\n main feed.\n \"\"\"\n alternate_links = []\n existing_descriptions = []\n everything_except_descriptions = []\n for x in metadata.links:\n if (\n x.rel == Hyperlink.ALTERNATE\n and x.href\n and x.media_type == OPDSFeed.ENTRY_TYPE\n ):\n alternate_links.append(x)\n if x.rel == Hyperlink.DESCRIPTION:\n existing_descriptions.append((x.media_type, x.content))\n else:\n everything_except_descriptions.append(x)\n\n better_descriptions = []\n for alternate_link in alternate_links:\n # There should only be one alternate link, but we'll keep\n # processing them until we get a good description.\n\n # Fetch the alternate entry.\n representation, is_new = Representation.get(\n self._db,\n alternate_link.href,\n max_age=self.THIRTY_DAYS,\n do_get=self.http_get,\n )\n\n if representation.status_code != 200:\n continue\n\n # Parse the alternate entry with feedparser and run it through\n # data_detail_for_feedparser_entry().\n parsed = feedparser.parse(representation.content)\n if len(parsed[\"entries\"]) != 1:\n # This is supposed to be a single entry, and it's not.\n continue\n [entry] = parsed[\"entries\"]\n data_source = self.data_source\n detail_id, new_detail, failure = self.data_detail_for_feedparser_entry(\n entry, data_source\n )\n if failure:\n # There was a problem parsing the entry.\n self.log.error(failure.exception)\n continue\n\n # TODO: Ideally we could verify that detail_id == id, but\n # right now they are always different -- one is an HTTPS\n # URI and one is an HTTP URI. So we omit this step and\n # assume the documents at both ends of the 'alternate'\n # link identify the same resource.\n\n # Find any descriptions present in the alternate view which\n # are not present in the original.\n new_descriptions = [\n x\n for x in new_detail[\"links\"]\n if x.rel == Hyperlink.DESCRIPTION\n and (x.media_type, x.content) not in existing_descriptions\n ]\n\n if new_descriptions:\n # Replace old descriptions with new descriptions.\n metadata.links = everything_except_descriptions + new_descriptions\n break\n\n return metadata\n\n def replace_css(self, representation):\n \"\"\"This function will replace the content of every CSS file listed in an epub's\n manifest with the value in self.new_css. The rest of the file is not changed.\n \"\"\"\n if not (\n representation.media_type == Representation.EPUB_MEDIA_TYPE\n and representation.content\n ):\n return\n\n if not self.new_css:\n # There is no CSS to replace. 
Do nothing.\n return\n\n new_zip_content = BytesIO()\n with EpubAccessor.open_epub(\n representation.url, content=representation.content\n ) as (zip_file, package_path):\n try:\n manifest_element = EpubAccessor.get_element_from_package(\n zip_file, package_path, \"manifest\"\n )\n except ValueError as e:\n # Invalid EPUB\n self.log.warning(\"%s: %s\" % (representation.url, str(e)))\n return\n\n css_paths = []\n for child in manifest_element:\n if child.tag == (\"{%s}item\" % EpubAccessor.IDPF_NAMESPACE):\n if child.get(\"media-type\") == \"text/css\":\n href = package_path.replace(\n os.path.basename(package_path), child.get(\"href\")\n )\n css_paths.append(href)\n\n with ZipFile(new_zip_content, \"w\") as new_zip:\n for item in zip_file.infolist():\n if item.filename not in css_paths:\n new_zip.writestr(item, zip_file.read(item.filename))\n else:\n new_zip.writestr(item, self.new_css)\n\n representation.content = new_zip_content.getvalue()\n\n\nclass RehostingPolicy(object):\n \"\"\"Determining the precise copyright status of the underlying text\n is not directly useful, because Feedbooks has made derivative\n works and relicensed under CC-BY-NC. So that's going to be the\n license: CC-BY-NC.\n\n Except it's not that simple. There are two complications.\n\n 1. Feedbooks is located in France, and the NYPL/DPLA content\n servers are hosted in the US. We can't host a CC-BY-NC book if\n it's derived from a work that's still under US copyright. We must\n decide whether or not to accept a book in the first place based on\n the copyright status of the underlying text.\n\n 2. Some CC licenses are more restrictive (on the creators of\n derivative works) than CC-BY-NC. Feedbooks has no authority to\n relicense these books, so the old licenses need to be preserved.\n\n This class encapsulates the logic necessary to make this decision.\n\n \"\"\"\n\n PUBLIC_DOMAIN_CUTOFF = 1923\n\n # These are the licenses that need to be preserved.\n RIGHTS_DICT = {\n \"Attribution Share Alike (cc by-sa)\": RightsStatus.CC_BY_SA,\n \"Attribution Non-Commercial No Derivatives (cc by-nc-nd)\": RightsStatus.CC_BY_NC_ND,\n \"Attribution Non-Commercial Share Alike (cc by-nc-sa)\": RightsStatus.CC_BY_NC_SA,\n }\n\n # Feedbooks rights statuses indicating books that can be rehosted\n # in the US.\n CAN_REHOST_IN_US = set(\n [\n \"This work was published before 1923 and is in the public domain in the USA only.\",\n \"This work is available for countries where copyright is Life+70 and in the USA.\",\n \"This work is available for countries where copyright is Life+50 or in the USA (published before 1923).\",\n \"Attribution (cc by)\",\n \"Attribution Non-Commercial (cc by-nc)\",\n \"Attribution Share Alike (cc by-sa)\",\n \"Attribution Non-Commercial No Derivatives (cc by-nc-nd)\",\n \"Attribution Non-Commercial Share Alike (cc by-nc-sa)\",\n ]\n )\n\n RIGHTS_UNKNOWN = \"Please read the legal notice included in this e-book and/or check the copyright status in your country.\"\n\n # These websites are hosted in the US and specialize in\n # open-access content. 
We will accept all FeedBooks titles taken\n # from these sites, even post-1923 titles.\n US_SITES = set(\n [\n \"archive.org\",\n \"craphound.com\",\n \"en.wikipedia.org\",\n \"en.wikisource.org\",\n \"futurismic.com\",\n \"gutenberg.org\",\n \"project gutenberg\",\n \"shakespeare.mit.edu\",\n ]\n )\n\n @classmethod\n def rights_uri(cls, rights, source, publication_year):\n if publication_year and isinstance(publication_year, (bytes, str)):\n publication_year = int(publication_year)\n\n can_rehost = cls.can_rehost_us(rights, source, publication_year)\n if can_rehost is False:\n # We believe this book is still under copyright in the US\n # and we should not rehost it.\n return RightsStatus.IN_COPYRIGHT\n\n if can_rehost is None:\n # We don't have enough information to know whether the book\n # is under copyright in the US. We should not host it.\n return RightsStatus.UNKNOWN\n\n if rights in cls.RIGHTS_DICT:\n # The CC license of the underlying text means it cannot be\n # relicensed CC-BY-NC.\n return cls.RIGHTS_DICT[rights]\n\n # The default license as per our agreement with FeedBooks.\n return RightsStatus.CC_BY_NC\n\n @classmethod\n def can_rehost_us(cls, rights, source, publication_year):\n \"\"\"Can we rehost this book on a US server?\n\n :param rights: What FeedBooks says about the public domain status\n of the book.\n\n :param source: Where FeedBooks got the book.\n\n :param publication_year: When the text was originally published.\n\n :return: True if we can rehost in the US, False if we can't,\n None if we're not sure. The distinction between False and None\n is only useful when making lists of books that need to have\n their rights status manually investigated.\n \"\"\"\n if publication_year and publication_year < cls.PUBLIC_DOMAIN_CUTOFF:\n # We will rehost anything published prior to 1923, no\n # matter where it came from.\n return True\n\n if rights in cls.CAN_REHOST_IN_US:\n # This book's FeedBooks rights statement explicitly marks\n # it as one that can be rehosted in the US.\n return True\n\n # The rights statement isn't especially helpful, but maybe we\n # can make a determination based on where Feedbooks got the\n # book from.\n source = (source or \"\").lower()\n\n if any(site in source for site in cls.US_SITES):\n # This book originally came from a US-hosted site that\n # specializes in open-access books, so we must be able\n # to rehost it.\n return True\n\n if source in (\"wikisource\", \"gutenberg\"):\n # Presumably en.wikisource and Project Gutenberg US. We\n # special case these to avoid confusing the US versions of\n # these sites with other countries'.\n return True\n\n # And we special-case this one to avoid confusing Australian\n # Project Gutenberg with US Project Gutenberg.\n if \"gutenberg.net\" in source and not \"gutenberg.net.au\" in source:\n return True\n\n # Unless one of the above conditions is met, we must assume\n # the book cannot be rehosted in the US.\n if rights == cls.RIGHTS_UNKNOWN:\n # To be on the safe side we're not going to host this\n # book, but we actually don't know that it's unhostable.\n return None\n\n # In this case we're pretty sure. 
The rights status indicates\n # some kind of general incompatible restriction (such as\n # Life+70) and it's not a pre-1923 book.\n return False\n\n\nclass FeedbooksImportMonitor(OPDSImportMonitor):\n \"\"\"The same as OPDSImportMonitor, but uses FeedbooksOPDSImporter\n instead.\n \"\"\"\n\n PROTOCOL = ExternalIntegration.FEEDBOOKS\n\n def data_source(self, collection):\n \"\"\"The data source for all Feedbooks collections is Feedbooks.\"\"\"\n return ExternalIntegration.FEEDBOOKS\n\n def opds_url(self, collection):\n \"\"\"Returns the OPDS import URL for the given collection.\n\n This is the base URL plus the language setting.\n \"\"\"\n language = collection.external_account_id or \"en\"\n return FeedbooksOPDSImporter.BASE_OPDS_URL % dict(language=language)\n", "id": "1700376", "language": "Python", "matching_score": 2.671673536300659, "max_stars_count": 0, "path": "api/feedbooks.py" }, { "content": "import feedparser\n\nfrom api.admin.opds import AdminAnnotator, AdminFeed\nfrom api.opds import AcquisitionFeed\nfrom core.lane import Facets, Pagination\nfrom core.model import Complaint, DataSource, ExternalIntegration, Measurement\nfrom core.model.configuration import ExternalIntegrationLink\nfrom core.testing import DatabaseTest\n\n\nclass TestOPDS(DatabaseTest):\n def links(self, entry, rel=None):\n if \"feed\" in entry:\n entry = entry[\"feed\"]\n links = sorted(entry[\"links\"], key=lambda x: (x[\"rel\"], x.get(\"title\")))\n r = []\n for l in links:\n if (\n not rel\n or l[\"rel\"] == rel\n or (isinstance(rel, list) and l[\"rel\"] in rel)\n ):\n r.append(l)\n return r\n\n def test_feed_includes_staff_rating(self):\n work = self._work(with_open_access_download=True)\n lp = work.license_pools[0]\n staff_data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n lp.identifier.add_measurement(\n staff_data_source, Measurement.RATING, 3, weight=1000\n )\n\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n rating = entry[\"schema_rating\"]\n assert 3 == float(rating[\"schema:ratingvalue\"])\n assert Measurement.RATING == rating[\"additionaltype\"]\n\n def test_feed_includes_refresh_link(self):\n work = self._work(with_open_access_download=True)\n lp = work.license_pools[0]\n lp.suppressed = False\n self._db.commit()\n\n # If the metadata wrangler isn't configured, the link is left out.\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n assert [] == [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/refresh\"\n ]\n\n # If we configure a metadata wrangler integration, the link appears.\n integration = self._external_integration(\n ExternalIntegration.METADATA_WRANGLER,\n goal=ExternalIntegration.METADATA_GOAL,\n settings={ExternalIntegration.URL: \"http://metadata\"},\n password=\"pw\",\n )\n integration.collections += [self._default_collection]\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n [refresh_link] = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/refresh\"\n ]\n assert lp.identifier.identifier in refresh_link[\"href\"]\n\n def 
test_feed_includes_suppress_link(self):\n work = self._work(with_open_access_download=True)\n lp = work.license_pools[0]\n lp.suppressed = False\n self._db.commit()\n\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n [suppress_link] = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/hide\"\n ]\n assert lp.identifier.identifier in suppress_link[\"href\"]\n unsuppress_links = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/restore\"\n ]\n assert 0 == len(unsuppress_links)\n\n lp.suppressed = True\n self._db.commit()\n\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n [unsuppress_link] = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/restore\"\n ]\n assert lp.identifier.identifier in unsuppress_link[\"href\"]\n suppress_links = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/hide\"\n ]\n assert 0 == len(suppress_links)\n\n def test_feed_includes_edit_link(self):\n work = self._work(with_open_access_download=True)\n lp = work.license_pools[0]\n\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, self._default_library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n [edit_link] = [x for x in entry[\"links\"] if x[\"rel\"] == \"edit\"]\n assert lp.identifier.identifier in edit_link[\"href\"]\n\n def test_feed_includes_change_cover_link(self):\n work = self._work(with_open_access_download=True)\n lp = work.license_pools[0]\n library = self._default_library\n\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n\n # Since there's no storage integration, the change cover link isn't included.\n assert [] == [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/change_cover\"\n ]\n\n # There is now a covers storage integration that is linked to the external\n # integration for a collection that the work is in. 
It will use that\n # covers mirror and the change cover link is included.\n storage = self._external_integration(\n ExternalIntegration.S3, ExternalIntegration.STORAGE_GOAL\n )\n storage.username = \"user\"\n storage.password = \"<PASSWORD>\"\n\n collection = self._collection()\n purpose = ExternalIntegrationLink.COVERS\n external_integration_link = self._external_integration_link(\n integration=collection._external_integration,\n other_integration=storage,\n purpose=purpose,\n )\n library.collections.append(collection)\n work = self._work(with_open_access_download=True, collection=collection)\n lp = work.license_pools[0]\n feed = AcquisitionFeed(\n self._db,\n \"test\",\n \"url\",\n [work],\n AdminAnnotator(None, library, test_mode=True),\n )\n [entry] = feedparser.parse(str(feed))[\"entries\"]\n\n [change_cover_link] = [\n x\n for x in entry[\"links\"]\n if x[\"rel\"] == \"http://librarysimplified.org/terms/rel/change_cover\"\n ]\n assert lp.identifier.identifier in change_cover_link[\"href\"]\n\n def test_complaints_feed(self):\n \"\"\"Test the ability to show a paginated feed of works with complaints.\"\"\"\n\n type = iter(Complaint.VALID_TYPES)\n type1 = next(type)\n type2 = next(type)\n\n work1 = self._work(\n \"fiction work with complaint\",\n language=\"eng\",\n fiction=True,\n with_open_access_download=True,\n )\n work1_complaint1 = self._complaint(\n work1.license_pools[0],\n type1,\n \"work1 complaint1 source\",\n \"work1 complaint1 detail\",\n )\n work1_complaint2 = self._complaint(\n work1.license_pools[0],\n type1,\n \"work1 complaint2 source\",\n \"work1 complaint2 detail\",\n )\n work1_complaint3 = self._complaint(\n work1.license_pools[0],\n type2,\n \"work1 complaint3 source\",\n \"work1 complaint3 detail\",\n )\n work2 = self._work(\n \"nonfiction work with complaint\",\n language=\"eng\",\n fiction=False,\n with_open_access_download=True,\n )\n work2_complaint1 = self._complaint(\n work2.license_pools[0],\n type2,\n \"work2 complaint1 source\",\n \"work2 complaint1 detail\",\n )\n work3 = self._work(\n \"fiction work without complaint\",\n language=\"eng\",\n fiction=True,\n with_open_access_download=True,\n )\n work4 = self._work(\n \"nonfiction work without complaint\",\n language=\"eng\",\n fiction=False,\n with_open_access_download=True,\n )\n\n facets = Facets.default(self._default_library)\n pagination = Pagination(size=1)\n annotator = MockAnnotator(self._default_library)\n\n def make_page(pagination):\n return AdminFeed.complaints(\n library=self._default_library,\n title=\"Complaints\",\n url=self._url,\n annotator=annotator,\n pagination=pagination,\n )\n\n first_page = make_page(pagination)\n parsed = feedparser.parse(str(first_page))\n assert 1 == len(parsed[\"entries\"])\n assert work1.title == parsed[\"entries\"][0][\"title\"]\n # Verify that the entry has acquisition links.\n links = parsed[\"entries\"][0][\"links\"]\n open_access_links = [\n l\n for l in links\n if l[\"rel\"] == \"http://opds-spec.org/acquisition/open-access\"\n ]\n assert 1 == len(open_access_links)\n\n # Make sure the links are in place.\n [start] = self.links(parsed, \"start\")\n assert annotator.groups_url(None) == start[\"href\"]\n assert annotator.top_level_title() == start[\"title\"]\n\n [up] = self.links(parsed, \"up\")\n assert annotator.groups_url(None) == up[\"href\"]\n assert annotator.top_level_title() == up[\"title\"]\n\n [next_link] = self.links(parsed, \"next\")\n assert (\n annotator.complaints_url(facets, pagination.next_page) == next_link[\"href\"]\n )\n\n # This was 
the first page, so no previous link.\n assert [] == self.links(parsed, \"previous\")\n\n # Now get the second page and make sure it has a 'previous' link.\n second_page = make_page(pagination.next_page)\n parsed = feedparser.parse(str(second_page))\n [previous] = self.links(parsed, \"previous\")\n assert annotator.complaints_url(facets, pagination) == previous[\"href\"]\n assert 1 == len(parsed[\"entries\"])\n assert work2.title == parsed[\"entries\"][0][\"title\"]\n\n def test_suppressed_feed(self):\n # Test the ability to show a paginated feed of suppressed works.\n\n work1 = self._work(with_open_access_download=True)\n work1.license_pools[0].suppressed = True\n\n work2 = self._work(with_open_access_download=True)\n work2.license_pools[0].suppressed = True\n\n # This work won't be included in the feed since its\n # suppressed pool is superceded.\n work3 = self._work(with_open_access_download=True)\n work3.license_pools[0].suppressed = True\n work3.license_pools[0].superceded = True\n\n pagination = Pagination(size=1)\n annotator = MockAnnotator(self._default_library)\n titles = [work1.title, work2.title]\n\n def make_page(pagination):\n return AdminFeed.suppressed(\n _db=self._db,\n title=\"Hidden works\",\n url=self._url,\n annotator=annotator,\n pagination=pagination,\n )\n\n first_page = make_page(pagination)\n parsed = feedparser.parse(str(first_page))\n assert 1 == len(parsed[\"entries\"])\n assert parsed[\"entries\"][0].title in titles\n titles.remove(parsed[\"entries\"][0].title)\n [remaining_title] = titles\n\n # Make sure the links are in place.\n [start] = self.links(parsed, \"start\")\n assert annotator.groups_url(None) == start[\"href\"]\n assert annotator.top_level_title() == start[\"title\"]\n\n [up] = self.links(parsed, \"up\")\n assert annotator.groups_url(None) == up[\"href\"]\n assert annotator.top_level_title() == up[\"title\"]\n\n [next_link] = self.links(parsed, \"next\")\n assert annotator.suppressed_url(pagination.next_page) == next_link[\"href\"]\n\n # This was the first page, so no previous link.\n assert [] == self.links(parsed, \"previous\")\n\n # Now get the second page and make sure it has a 'previous' link.\n second_page = make_page(pagination.next_page)\n parsed = feedparser.parse(str(second_page))\n [previous] = self.links(parsed, \"previous\")\n assert annotator.suppressed_url(pagination) == previous[\"href\"]\n assert 1 == len(parsed[\"entries\"])\n assert remaining_title == parsed[\"entries\"][0][\"title\"]\n\n # The third page is empty.\n third_page = make_page(pagination.next_page.next_page)\n parsed = feedparser.parse(str(third_page))\n [previous] = self.links(parsed, \"previous\")\n assert annotator.suppressed_url(pagination.next_page) == previous[\"href\"]\n assert 0 == len(parsed[\"entries\"])\n\n\nclass MockAnnotator(AdminAnnotator):\n def __init__(self, library):\n super(MockAnnotator, self).__init__(None, library, test_mode=True)\n\n def groups_url(self, lane):\n if lane:\n name = lane.name\n else:\n name = \"\"\n return \"http://groups/%s\" % name\n\n def complaints_url(self, facets, pagination):\n base = \"http://complaints/\"\n sep = \"?\"\n if facets:\n base += sep + facets.query_string\n sep = \"&\"\n if pagination:\n base += sep + pagination.query_string\n return base\n\n def suppressed_url(self, pagination):\n base = \"http://complaints/\"\n sep = \"?\"\n if pagination:\n base += sep + pagination.query_string\n return base\n\n def annotate_feed(self, feed):\n super(MockAnnotator, self).annotate_feed(feed)\n", "id": "2653744", "language": 
"Python", "matching_score": 1.8812034130096436, "max_stars_count": 0, "path": "tests/api/admin/test_opds.py" }, { "content": "# coding=utf-8\n\nimport datetime\n\nfrom webpub_manifest_parser.core.ast import (\n CollectionList,\n CompactCollection,\n Link,\n LinkList,\n Node,\n PresentationMetadata,\n)\nfrom webpub_manifest_parser.core.parsers import TypeParser\nfrom webpub_manifest_parser.core.properties import BaseArrayProperty, PropertiesGrouping\nfrom webpub_manifest_parser.core.registry import RegistryItem\nfrom webpub_manifest_parser.opds2.ast import (\n OPDS2Feed,\n OPDS2FeedMetadata,\n OPDS2Group,\n OPDS2Publication,\n)\nfrom webpub_manifest_parser.opds2.registry import OPDS2LinkRelationsRegistry\nfrom webpub_manifest_parser.rwpm.registry import RWPMLinkRelationsRegistry\n\nfrom core.util.datetime_helpers import datetime_utc\n\n\ndef serialize(rwpm_item):\n \"\"\"Serialize RWPM AST node into a Python dictionary.\n\n :param rwpm_item: RWPM AST node\n :type rwpm_item: Node\n\n :return: Dictionary containing properties of the serialized RWPM AST node\n :rtype: dict\n \"\"\"\n if isinstance(rwpm_item, list):\n result = []\n\n for i in rwpm_item:\n result.append(serialize(i))\n\n return result\n\n result = {}\n\n if isinstance(rwpm_item, Node):\n\n required_properties = PropertiesGrouping.get_class_properties(\n rwpm_item.__class__\n )\n\n for (property_name, property_object) in required_properties:\n property_value = getattr(rwpm_item, property_name, None)\n\n if property_value is None and property_object.required:\n if property_object.default_value:\n property_value = property_object.default_value\n elif isinstance(property_object, BaseArrayProperty) or (\n isinstance(property_object.parser, TypeParser)\n and issubclass(property_object.parser.type, CompactCollection)\n ):\n property_value = []\n\n if isinstance(property_value, Node):\n property_value = serialize(property_value)\n elif isinstance(property_value, list):\n property_value = serialize(property_value)\n elif isinstance(property_value, datetime.datetime):\n property_value = property_value.isoformat() + \"Z\"\n if isinstance(rwpm_item, list):\n result.append(property_value)\n else:\n result[property_object.key] = property_value\n elif isinstance(rwpm_item, RegistryItem):\n result = rwpm_item.key\n\n return result\n\n\nPROQUEST_PUBLICATION_1 = OPDS2Publication(\n metadata=PresentationMetadata(\n identifier=\"urn:proquest.com/document-id/1\",\n title=\"Publićation # 1\",\n modified=datetime_utc(2020, 1, 31, 0, 0, 0),\n ),\n links=LinkList(\n [\n Link(\n href=\"https://feed.org/document-id/1\",\n rels=[OPDS2LinkRelationsRegistry.ACQUISITION],\n )\n ]\n ),\n)\n\nPROQUEST_PUBLICATION_2 = OPDS2Publication(\n metadata=PresentationMetadata(\n identifier=\"urn:proquest.com/document-id/2\",\n title=\"Publication # 2\",\n modified=datetime_utc(2020, 1, 30, 0, 0, 0),\n ),\n links=LinkList(\n [\n Link(\n href=\"https://feed.org/document-id/2\",\n rels=[OPDS2LinkRelationsRegistry.ACQUISITION],\n )\n ]\n ),\n)\n\nPROQUEST_PUBLICATION_3 = OPDS2Publication(\n metadata=PresentationMetadata(\n identifier=\"urn:proquest.com/document-id/3\",\n title=\"Publication # 3\",\n modified=datetime_utc(2020, 1, 29, 0, 0, 0),\n ),\n links=LinkList(\n [\n Link(\n href=\"https://feed.org/document-id/3\",\n rels=[OPDS2LinkRelationsRegistry.ACQUISITION],\n )\n ]\n ),\n)\n\nPROQUEST_PUBLICATION_4 = OPDS2Publication(\n metadata=PresentationMetadata(\n identifier=\"urn:proquest.com/document-id/4\",\n title=\"Publication # 4\",\n modified=datetime_utc(2020, 1, 28, 
0, 0, 0),\n ),\n links=LinkList(\n [\n Link(\n href=\"https://feed.org/document-id/4\",\n rels=[OPDS2LinkRelationsRegistry.ACQUISITION],\n )\n ]\n ),\n)\n\nPROQUEST_FEED_PAGE_1 = OPDS2Feed(\n metadata=OPDS2FeedMetadata(\n title=\"Page # 1\", current_page=1, items_per_page=10, number_of_items=20\n ),\n groups=CollectionList(\n [\n OPDS2Group(\n publications=CollectionList(\n [PROQUEST_PUBLICATION_1, PROQUEST_PUBLICATION_2]\n )\n )\n ]\n ),\n links=LinkList(\n [Link(href=\"https://feed.org/pages/1\", rels=[RWPMLinkRelationsRegistry.SELF])]\n ),\n)\n\nPROQUEST_FEED_PAGE_2 = OPDS2Feed(\n metadata=OPDS2FeedMetadata(\n title=\"Page # 2\", current_page=2, items_per_page=10, number_of_items=20\n ),\n groups=CollectionList(\n [\n OPDS2Group(\n publications=CollectionList(\n [PROQUEST_PUBLICATION_3, PROQUEST_PUBLICATION_4]\n )\n )\n ]\n ),\n links=LinkList(\n [Link(href=\"https://feed.org/pages/2\", rels=[RWPMLinkRelationsRegistry.SELF])]\n ),\n)\n\nPROQUEST_RAW_PUBLICATION_1_ID = \"12345\"\nPROQUEST_RAW_PUBLICATION_1_COVER_HREF = \"http://proquest.com/covers/12345-m.jpg\"\n\nPROQUEST_RAW_PUBLICATION_2_ID = \"12346\"\nPROQUEST_RAW_PUBLICATION_2_COVER_HREF = \"http://proquest.com/covers/12346-m.jpg\"\n\nPROQUEST_RAW_FEED = \"\"\"{{\n \"metadata\": {{\n \"title\": \"Test Feed\",\n \"itemsPerPage\": 1,\n \"numberOfItems\": 1\n }},\n \"links\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"publications\": [],\n \"navigation\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"title\": \"Test\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"facets\": [],\n \"groups\": [{{\n \"metadata\": {{\n \"title\": \"Test Group\"\n }},\n \"links\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"publications\": [{{\n \"metadata\": {{\n \"identifier\": \"urn:proquest.com/document-id/{0}\",\n \"@type\": \"http://schema.org/Book\",\n \"title\": \"Test Book 1\",\n \"modified\": \"2020-11-19T08:00:00.000Z\",\n \"published\": \"2020-01-15T08:06:00.000Z\",\n \"language\": [\n \"eng\"\n ],\n \"author\": [{{\n \"name\": \"<NAME>\",\n \"links\": [{{\n \"href\": \"https://catalog.feedbooks.com/catalog/index.json\",\n \"type\": \"application/opds+json\",\n \"alternate\": [],\n \"children\": []\n }}]\n }}],\n \"publisher\": {{\n \"name\": \"Test Publisher\",\n \"links\": []\n }},\n \"subject\": [],\n \"readingProgression\": \"ltr\"\n }},\n \"links\": [{{\n \"href\": \"https://proquest.com/lib/detail.action?docID={0}\",\n \"type\": \"application/vnd.adobe.adept+xml\",\n \"rel\": \"http://opds-spec.org/acquisition\",\n \"properties\": {{\n \"indirectAcquisition\": [{{\n \"type\": \"application/epub+zip\",\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}],\n \"images\": [{{\n \"href\": \"{1}\",\n \"type\": \"image/jpeg\",\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n {{\n \"metadata\": {{\n \"identifier\": \"urn:proquest.com/document-id/{2}\",\n \"@type\": \"http://schema.org/Book\",\n \"title\": \"Test Book 2\",\n \"modified\": \"2020-11-19T08:00:00.000Z\",\n \"published\": \"2020-01-15T08:06:00.000Z\",\n \"language\": [\n \"eng\"\n ],\n \"author\": [{{\n \"name\": 
\"<NAME>\",\n \"links\": [{{\n \"href\": \"https://catalog.feedbooks.com/catalog/index.json\",\n \"type\": \"application/opds+json\",\n \"alternate\": [],\n \"children\": []\n }}]\n }}],\n \"publisher\": {{\n \"name\": \"Test Publisher\",\n \"links\": []\n }},\n \"subject\": [],\n \"readingProgression\": \"ltr\"\n }},\n \"links\": [{{\n \"href\": \"https://proquest.com/lib/detail.action?docID={2}\",\n \"type\": \"application/vnd.adobe.adept+xml\",\n \"rel\": \"http://opds-spec.org/acquisition\",\n \"properties\": {{\n \"indirectAcquisition\": [{{\n \"type\": \"application/epub+zip\",\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}],\n \"images\": [{{\n \"href\": \"{3}\",\n \"type\": \"image/jpeg\",\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}]\n }}]\n }}]\n}}\n\"\"\".format(\n PROQUEST_RAW_PUBLICATION_1_ID,\n PROQUEST_RAW_PUBLICATION_1_COVER_HREF,\n PROQUEST_RAW_PUBLICATION_2_ID,\n PROQUEST_RAW_PUBLICATION_2_COVER_HREF,\n)\n\nPROQUEST_RAW_PUBLICATION_3_ID = \"12347\"\nPROQUEST_RAW_PUBLICATION_3_COVER_HREF = \"http://proquest.com/covers/12347-m.jpg\"\n\nPROQUEST_RAW_FEED_WITH_A_REMOVED_PUBLICATION = \"\"\"{{\n \"metadata\": {{\n \"title\": \"Test Feed\",\n \"itemsPerPage\": 1,\n \"numberOfItems\": 1\n }},\n \"links\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"publications\": [],\n \"navigation\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"title\": \"Test\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"facets\": [],\n \"groups\": [{{\n \"metadata\": {{\n \"title\": \"Test Group\"\n }},\n \"links\": [{{\n \"href\": \"https://drafts.opds.io/schema/feed.schema.json\",\n \"type\": \"application/opds+json\",\n \"rel\": \"self\",\n \"alternate\": [],\n \"children\": []\n }}],\n \"publications\": [{{\n \"metadata\": {{\n \"identifier\": \"urn:proquest.com/document-id/{0}\",\n \"@type\": \"http://schema.org/Book\",\n \"title\": \"Test Book 1\",\n \"modified\": \"2020-11-19T08:00:00.000Z\",\n \"published\": \"2020-01-15T08:06:00.000Z\",\n \"language\": [\n \"eng\"\n ],\n \"author\": [{{\n \"name\": \"<NAME>\",\n \"links\": [{{\n \"href\": \"https://catalog.feedbooks.com/catalog/index.json\",\n \"type\": \"application/opds+json\",\n \"alternate\": [],\n \"children\": []\n }}]\n }}],\n \"publisher\": {{\n \"name\": \"Test Publisher\",\n \"links\": []\n }},\n \"subject\": [],\n \"readingProgression\": \"ltr\"\n }},\n \"links\": [{{\n \"href\": \"https://proquest.com/lib/detail.action?docID={0}\",\n \"type\": \"application/vnd.adobe.adept+xml\",\n \"rel\": \"http://opds-spec.org/acquisition\",\n \"properties\": {{\n \"indirectAcquisition\": [{{\n \"type\": \"application/epub+zip\",\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}],\n \"images\": [{{\n \"href\": \"{1}\",\n \"type\": \"image/jpeg\",\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n {{\n \"metadata\": {{\n \"identifier\": \"urn:proquest.com/document-id/{2}\",\n \"@type\": \"http://schema.org/Book\",\n \"title\": \"Test Book 3\",\n \"modified\": \"2020-11-19T08:00:00.000Z\",\n \"published\": \"2020-01-15T08:06:00.000Z\",\n \"language\": [\n \"eng\"\n ],\n \"author\": [{{\n \"name\": 
\"<NAME>\",\n \"links\": [{{\n \"href\": \"https://catalog.feedbooks.com/catalog/index.json\",\n \"type\": \"application/opds+json\",\n \"alternate\": [],\n \"children\": []\n }}]\n }}],\n \"publisher\": {{\n \"name\": \"<NAME>\",\n \"links\": []\n }},\n \"subject\": [],\n \"readingProgression\": \"ltr\"\n }},\n \"links\": [{{\n \"href\": \"https://proquest.com/lib/detail.action?docID={2}\",\n \"type\": \"application/vnd.adobe.adept+xml\",\n \"rel\": \"http://opds-spec.org/acquisition\",\n \"properties\": {{\n \"indirectAcquisition\": [{{\n \"type\": \"application/epub+zip\",\n \"alternate\": [],\n \"children\": []\n }}]\n }},\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}],\n \"images\": [{{\n \"href\": \"{3}\",\n \"type\": \"image/jpeg\",\n \"language\": [\n \"eng\"\n ],\n \"alternate\": [],\n \"children\": []\n }}]\n }}]\n }}]\n}}\n\"\"\".format(\n PROQUEST_RAW_PUBLICATION_1_ID,\n PROQUEST_RAW_PUBLICATION_1_COVER_HREF,\n PROQUEST_RAW_PUBLICATION_3_ID,\n PROQUEST_RAW_PUBLICATION_3_COVER_HREF,\n)\n", "id": "3562458", "language": "Python", "matching_score": 0.8947838544845581, "max_stars_count": 0, "path": "tests/api/proquest/fixtures.py" }, { "content": "import json\n\nfrom core.facets import FacetConstants\nfrom core.lane import Facets\nfrom core.model import Library\nfrom core.testing import DatabaseTest\nfrom migartion_scripts import RandomSortOptionRemover\n\n\nclass TestRandomSortOptionRemover(DatabaseTest):\n \"\"\"Contains tests ensuring that RandomSortOptionRemover correctly removes `random` sort option from CM.\"\"\"\n\n def _get_library(self) -> Library:\n \"\"\"Return a library with randomly sorted facets.\n\n :return: Library with randomly sorted facets\n \"\"\"\n library: Library = self._default_library\n\n # Set the library's default sort option to `random`.\n library.default_facet_setting(\n Facets.ORDER_FACET_GROUP_NAME\n ).value = Facets.ORDER_RANDOM\n assert (\n library.default_facet(Facets.ORDER_FACET_GROUP_NAME) == Facets.ORDER_RANDOM\n )\n\n # Include `random` into the list of the library's available sort options.\n available_sort_options = FacetConstants.DEFAULT_ENABLED_FACETS.get(\n Facets.ORDER_FACET_GROUP_NAME, []\n )\n library.enabled_facets_setting(\n Facets.ORDER_FACET_GROUP_NAME\n ).value = json.dumps(available_sort_options + [Facets.ORDER_RANDOM])\n assert Facets.ORDER_RANDOM in library.enabled_facets(\n Facets.ORDER_FACET_GROUP_NAME\n )\n\n return library\n\n def test_random_sort_option_remover_removes_sort_options(self):\n \"\"\"Ensure that RandomSortOptionRemover correctly removes `random` sort options from CM.\"\"\"\n # Prepare a library with `random` set as the default sort option and part of the available sort options list.\n library = self._get_library()\n default_facet_order = FacetConstants.DEFAULT_FACET.get(\n Facets.ORDER_FACET_GROUP_NAME\n )\n\n # Run the script to remove `random` sort options.\n remover = RandomSortOptionRemover()\n remover.run(self._db)\n\n # Ensure that the default sort option changed and it's not `random` any more.\n assert (\n library.default_facet(Facets.ORDER_FACET_GROUP_NAME) == default_facet_order\n )\n assert (\n library.default_facet(Facets.ORDER_FACET_GROUP_NAME) != Facets.ORDER_RANDOM\n )\n\n # Ensure that `random` is not in the list of available sort options.\n assert Facets.ORDER_RANDOM not in library.enabled_facets(\n Facets.ORDER_FACET_GROUP_NAME\n )\n", "id": "11705427", "language": "Python", "matching_score": 3.2675952911376953, "max_stars_count": 0, "path": 
"tests/api/test_migration_scripts.py" }, { "content": "import json\nimport logging\n\nimport sqlalchemy.orm\n\nfrom core.facets import FacetConstants\nfrom core.lane import Facets\nfrom core.model import Library\n\n\nclass RandomSortOptionRemover:\n \"\"\"Class designed to remove `random` sort options from Circulation Manager's library configuration.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new instance of RandomSortOptionRemover class.\"\"\"\n self._logger: logging.Logger = logging.getLogger(__name__)\n\n def _process_library_default_sort_option(self, library: Library) -> None:\n \"\"\"Check the library's default sort option and, if it's `random`, replace it.\n\n :param library: Library object\n \"\"\"\n default_facet_setting = library.default_facet_setting(\n Facets.ORDER_FACET_GROUP_NAME\n )\n\n self._logger.info(\n f\"Library {library}'s default sort option: {default_facet_setting.value if default_facet_setting else None}\"\n )\n\n if default_facet_setting and default_facet_setting.value == Facets.ORDER_RANDOM:\n default_facet_setting.value = FacetConstants.DEFAULT_FACET.get(\n Facets.ORDER_FACET_GROUP_NAME\n )\n\n self._logger.info(\n f\"Library {library}'s new default sort option: {default_facet_setting.value}\"\n )\n\n def _process_library_available_sort_options(self, library: Library) -> None:\n \"\"\"Exclude `random` sort option from the library's available sort options.\n\n :param library: Library object\n \"\"\"\n enabled_facets = library.enabled_facets(Facets.ORDER_FACET_GROUP_NAME)\n\n self._logger.info(\n f\"Library {library}'s available sort options: {enabled_facets}\"\n )\n\n if isinstance(enabled_facets, list) and Facets.ORDER_RANDOM in enabled_facets:\n library.enabled_facets_setting(\n Facets.ORDER_FACET_GROUP_NAME\n ).value = json.dumps(list(set(enabled_facets) - {Facets.ORDER_RANDOM}))\n\n enabled_facets = library.enabled_facets(Facets.ORDER_FACET_GROUP_NAME)\n\n self._logger.info(\n f\"Library {library}'s updated available sort options: {enabled_facets}\"\n )\n\n def run(self, db: sqlalchemy.orm.session.Session) -> None:\n \"\"\"remove `random` sort options from Circulation Manager's library configuration.\n\n :param db: Database connection\n \"\"\"\n libraries = db.query(Library).all()\n\n for library in libraries:\n self._logger.info(f\"Started processing {library}\")\n\n self._process_library_default_sort_option(library)\n self._process_library_available_sort_options(library)\n db.commit()\n\n self._logger.info(f\"Finished processing {library}\")\n", "id": "5535329", "language": "Python", "matching_score": 0.8833779692649841, "max_stars_count": 0, "path": "migartion_scripts.py" }, { "content": "# encoding: utf-8\nimport random\nimport time\nfrom threading import Thread\nfrom urllib.parse import quote, urlencode\n\nimport numpy\nimport requests\n\n\nclass QueryTimingThread(Thread):\n def __init__(self, urls):\n Thread.__init__(self)\n self.urls = urls\n\n def run(self):\n self.elapsed = []\n self.exceptions = []\n for url in self.urls:\n a = time.time()\n exception = self.do_query(url)\n self.elapsed.append(time.time() - a)\n if exception:\n self.exceptions.append((url, exception))\n\n def do_query(self, url):\n print(url)\n try:\n response = requests.get(url)\n return None\n except Exception as e:\n return e\n\n def report(self):\n print(\"\")\n print(\"Timing results for %s\" % self.urls[0])\n print(\"------------------\")\n # print \"Total time elapsed: %s\" % numpy.sum(self.elapsed)\n print(\"Mean time elapsed: %.2f\" % 
numpy.mean(self.elapsed))\n print(\"Median time elapsed: %.2f\" % numpy.median(self.elapsed))\n m = numpy.argmax(self.elapsed)\n print(\"Max time elapsed: %.2f\" % self.elapsed[m])\n print(\"Max url: %s\" % self.urls[m])\n print(\"Raw data:\")\n for i, url in enumerate(self.urls):\n print(\"(%.2f) %s\" % (self.elapsed[i], url))\n for (url, e) in self.exceptions:\n print(\"Exception: %s: %s\" % (url, e))\n print(\"\")\n\n\nsize = 50\npages = 10\nthread_count = 10\nbase_url = \"http://qa.circulation.librarysimplified.org\"\n\nqueries = [\n {\n \"language\": \"eng\",\n \"category\": \"Adult Fiction\",\n \"params\": {\"order\": \"author\", \"available\": \"now\", \"collection\": \"full\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Adult Fiction\",\n \"params\": {\"order\": \"title\", \"available\": \"all\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Adult Nonfiction\",\n \"params\": {\"order\": \"author\", \"available\": \"now\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Adult Nonfiction\",\n \"params\": {\"order\": \"title\", \"available\": \"all\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"English Best Sellers\",\n \"params\": {\"order\": \"author\", \"available\": \"all\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Young Adult Fiction\",\n \"params\": {\"order\": \"added\", \"available\": \"all\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Children and Middle Grade\",\n \"params\": {\"order\": \"author\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Adventure\",\n \"params\": {\"order\": \"author\", \"available\": \"main\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Classics\",\n \"params\": {\"order\": \"title\", \"available\": \"now\", \"collection\": \"full\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Police Procedural\",\n \"params\": {\"order\": \"title\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Biography & Memoir\",\n \"params\": {\"order\": \"author\", \"available\": \"always\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Business\",\n \"params\": {\"order\": \"added\", \"available\": \"now\", \"collection\": \"full\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Parenting & Family\",\n \"params\": {\"order\": \"author\", \"available\": \"all\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Cooking\",\n \"params\": {\"order\": \"title\", \"available\": \"all\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Latin American History\",\n \"params\": {\"order\": \"author\", \"available\": \"all\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Pets\",\n \"params\": {\"order\": \"title\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Photography\",\n \"params\": {\"order\": \"author\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Music\",\n \"params\": {\"order\": \"added\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Life Strategies\",\n \"params\": {\"order\": \"title\", \"available\": 
\"all\", \"collection\": \"main\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Buddhism\",\n \"params\": {\"order\": \"author\", \"available\": \"all\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Computers\",\n \"params\": {\"order\": \"added\", \"available\": \"now\", \"collection\": \"featured\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"Self Help\",\n \"params\": {\"order\": \"author\", \"available\": \"all\", \"collection\": \"full\"},\n },\n {\n \"language\": \"eng\",\n \"category\": \"True Crime\",\n \"params\": {\"order\": \"title\", \"available\": \"all\", \"collection\": \"full\"},\n },\n]\n\n\ndef urls_from_query(query, pages, size):\n urls = []\n for i in range(pages):\n if i > 0:\n query[\"params\"][\"after\"] = i * size\n url = quote(\n \"%s/feed/%s/%s?%s\"\n % (\n base_url,\n query[\"language\"],\n query[\"category\"],\n urlencode(query[\"params\"]),\n ),\n safe=\":/?=&\",\n )\n urls.append(url)\n return urls\n\n\nthreads = [\n QueryTimingThread(urls=urls_from_query(random.choice(queries), pages, size))\n for i in range(thread_count)\n]\n\nfor t in threads:\n t.start()\nfor t in threads:\n t.join()\nfor t in threads:\n t.report()\n", "id": "8443379", "language": "Python", "matching_score": 1.3081334829330444, "max_stars_count": 0, "path": "integration_tests/benchmark_feed_queries.py" }, { "content": "from urllib.parse import ParseResult, urlencode, urlparse\n\n\nclass URLUtility(object):\n \"\"\"Contains different helper methods simplifying URL construction.\"\"\"\n\n @staticmethod\n def build_url(base_url, query_parameters):\n \"\"\"Construct a URL with specified query parameters.\n\n :param base_url: Base URL\n :type base_url: str\n\n :param query_parameters: Dictionary containing query parameters\n :type query_parameters: Dict\n\n :return: Constructed URL\n :rtype: str\n \"\"\"\n result = urlparse(base_url)\n result = ParseResult(\n result.scheme,\n result.netloc,\n result.path,\n result.params,\n urlencode(query_parameters),\n result.fragment,\n )\n\n return result.geturl()\n", "id": "8142384", "language": "Python", "matching_score": 0.044248636811971664, "max_stars_count": 0, "path": "api/util/url.py" }, { "content": "import json\nfrom datetime import date, datetime, timedelta\n\nfrom api.admin.announcement_list_validator import AnnouncementListValidator\nfrom api.announcements import Announcement\nfrom api.testing import AnnouncementTest\nfrom core.problem_details import INVALID_INPUT\nfrom core.util.problem_detail import ProblemDetail\n\n\nclass TestAnnouncementListValidator(AnnouncementTest):\n def assert_invalid(self, x, detail):\n assert isinstance(x, ProblemDetail)\n assert INVALID_INPUT.uri == x.uri\n assert detail == x.detail\n\n def test_defaults(self):\n validator = AnnouncementListValidator()\n assert 3 == validator.maximum_announcements\n assert 15 == validator.minimum_announcement_length\n assert 350 == validator.maximum_announcement_length\n assert 60 == validator.default_duration_days\n\n def test_validate_announcements(self):\n # validate_announcement succeeds if every individual announcment succeeds,\n # and if some additional checks pass on the announcement list as a whole.\n\n class AlwaysAcceptValidator(AnnouncementListValidator):\n def validate_announcement(self, announcement):\n announcement[\"validated\"] = True\n return announcement\n\n validator = AlwaysAcceptValidator(maximum_announcements=2)\n m = validator.validate_announcements\n\n # validate_announcements calls validate_announcement 
on every\n # announcement in a list, so this...\n before = [\n {\"id\": \"announcement1\"},\n {\"id\": \"announcement2\"},\n ]\n\n # ...should become this.\n after = [\n {\"id\": \"announcement1\", \"validated\": True},\n {\"id\": \"announcement2\", \"validated\": True},\n ]\n validated = m(before)\n assert validated == after\n\n # If a JSON string is passed in, it will be decoded before\n # processing.\n assert m(json.dumps(before)) == after\n\n # If you pass in something other than a list or JSON-encoded\n # list, you get a ProblemDetail.\n for invalid in dict(), json.dumps(dict()), \"non-json string\":\n self.assert_invalid(\n m(invalid),\n \"Invalid announcement list format: %(announcements)r\"\n % dict(announcements=invalid),\n )\n\n # validate_announcements runs some checks on the list of announcements.\n # Each validator has a maximum length it will accept.\n too_many = [\n {\"id\": \"announcement1\"},\n {\"id\": \"announcement2\"},\n {\"id\": \"announcement3\"},\n ]\n self.assert_invalid(m(too_many), \"Too many announcements: maximum is 2\")\n\n # A list of announcements will be rejected if it contains duplicate IDs.\n duplicate_ids = [\n {\"id\": \"announcement1\"},\n {\"id\": \"announcement1\"},\n ]\n self.assert_invalid(\n m(duplicate_ids), \"Duplicate announcement ID: announcement1\"\n )\n\n # In addition, if validate_announcement ever rejects an\n # announcement, validate_announcements will fail with whatever\n # problem detail validate_announcement returned.\n class AlwaysRejectValidator(AnnouncementListValidator):\n def validate_announcement(self, announcement):\n return INVALID_INPUT.detailed(\"Rejected!\")\n\n validator = AlwaysRejectValidator()\n self.assert_invalid(\n validator.validate_announcements([\"an announcement\"]), \"Rejected!\"\n )\n\n def test_validate_announcement_success(self):\n # End-to-end test of validate_announcement in successful scenarios.\n validator = AnnouncementListValidator()\n m = validator.validate_announcement\n\n # Simulate the creation of a new announcement -- no incoming ID.\n today = date.today()\n in_a_week = today + timedelta(days=7)\n valid = dict(\n start=today.strftime(\"%Y-%m-%d\"),\n finish=in_a_week.strftime(\"%Y-%m-%d\"),\n content=\"This is a test of announcement validation.\",\n )\n\n validated = m(valid)\n\n # A UUID has been added in the 'id' field.\n id = validated.pop(\"id\")\n assert 36 == len(id)\n for position in 8, 13, 18, 23:\n assert \"-\" == id[position]\n\n # Date strings have been converted to date objects.\n assert today == validated[\"start\"]\n assert in_a_week == validated[\"finish\"]\n\n # Now simulate an edit, where an ID is provided.\n validated[\"id\"] = \"an existing id\"\n\n # Now the incoming data is validated but not changed at all.\n assert validated == m(validated)\n\n # If no start date is specified, today's date is used. 
If no\n # finish date is specified, a default associated with the\n # validator is used.\n no_finish_date = dict(content=\"This is a test of announcment validation\")\n validated = m(no_finish_date)\n assert today == validated[\"start\"]\n assert (\n today + timedelta(days=validator.default_duration_days)\n == validated[\"finish\"]\n )\n\n def test_validate_announcement_failure(self):\n # End-to-end tests of validation failures for a single\n # announcement.\n validator = AnnouncementListValidator()\n m = validator.validate_announcement\n\n # Totally bogus format\n for invalid in '{\"a\": \"string\"}', [\"a list\"]:\n self.assert_invalid(\n m(invalid),\n \"Invalid announcement format: %(announcement)r\"\n % dict(announcement=invalid),\n )\n\n # Some baseline valid value to use in tests where _some_ of the data is valid.\n today = date.today()\n tomorrow = today + timedelta(days=1)\n message = \"An important message to all patrons: reading is FUN-damental!\"\n\n # Missing a required field\n no_content = dict(start=today)\n self.assert_invalid(m(no_content), \"Missing required field: content\")\n\n # Bad content -- tested at greater length in another test.\n bad_content = dict(start=today, content=\"short\")\n self.assert_invalid(\n m(bad_content), \"Value too short (5 versus 15 characters): short\"\n )\n\n # Bad start date -- tested at greater length in another test.\n bad_start_date = dict(start=\"not-a-date\", content=message)\n self.assert_invalid(\n m(bad_start_date), \"Value for start is not a date: not-a-date\"\n )\n\n # Bad finish date.\n yesterday = today - timedelta(days=1)\n for bad_finish_date in (today, yesterday):\n bad_data = dict(start=today, finish=bad_finish_date, content=message)\n self.assert_invalid(\n m(bad_data),\n \"Value for finish must be no earlier than %s\"\n % (tomorrow.strftime(validator.DATE_FORMAT)),\n )\n\n def test_validate_length(self):\n # Test the validate_length helper method in more detail than\n # it's tested in validate_announcement.\n m = AnnouncementListValidator.validate_length\n value = \"four\"\n assert value == m(value, 3, 5)\n\n self.assert_invalid(\n m(value, 10, 20), \"Value too short (4 versus 10 characters): four\"\n )\n\n self.assert_invalid(\n m(value, 1, 3), \"Value too long (4 versus 3 characters): four\"\n )\n\n def test_validate_date(self):\n # Test the validate_date helper method in more detail than\n # it's tested in validate_announcement.\n m = AnnouncementListValidator.validate_date\n\n february_1 = date(2020, 2, 1)\n\n # The incoming date can be either a string, date, or datetime.\n # The output is always a date.\n assert february_1 == m(\"somedate\", \"2020-2-1\")\n assert february_1 == m(\"somedate\", february_1)\n assert february_1 == m(\"somedate\", datetime(2020, 2, 1))\n\n # But if a string is used, it must be in a specific format.\n self.assert_invalid(\n m(\"somedate\", \"not-a-date\"), \"Value for somedate is not a date: not-a-date\"\n )\n\n # If a minimum (date or datetime) is provided, the selection\n # must be on or after that date.\n\n january_1 = date(2020, 1, 1)\n january_1_datetime = datetime(2020, 1, 1)\n assert february_1 == m(\"somedate\", february_1, minimum=january_1)\n assert february_1 == m(\"somedate\", february_1, minimum=january_1_datetime)\n\n self.assert_invalid(\n m(\"somedate\", january_1, minimum=february_1),\n \"Value for somedate must be no earlier than 2020-02-01\",\n )\n\n def test_format(self):\n # Test our ability to format the output of validate_announcements for storage\n # in the 
database.\n\n validator = AnnouncementListValidator()\n announcements = [self.active, self.forthcoming]\n\n # Convert the announcements into a single JSON string.\n ready_for_storage = validator.format_as_string(announcements)\n\n # Now examine the string by converting it back from JSON to a list.\n as_list = json.loads(ready_for_storage)\n\n # The list contains dictionary representations of self.active\n # and self.forthcoming. But they're not exactly the same as\n # self.active and self.forthcoming -- they were converted into\n # Announcement objects and then back to dictionaries using\n # Announcement.json_ready.\n assert [Announcement(**x).json_ready for x in announcements] == as_list\n", "id": "12193552", "language": "Python", "matching_score": 4.249340057373047, "max_stars_count": 0, "path": "tests/api/admin/test_announcement_list_validator.py" }, { "content": "import datetime\n\nfrom core.util.problem_detail import ProblemDetail\n\nfrom .admin.announcement_list_validator import AnnouncementListValidator\n\n\nclass Announcements(object):\n \"\"\"Data model class for a library's announcements.\n\n This entire list is stored as a single\n ConfigurationSetting, which is why this isn't in core/model.\n \"\"\"\n\n SETTING_NAME = \"announcements\"\n\n @classmethod\n def for_library(cls, library):\n \"\"\"Load an Announcements object for the given Library.\n\n :param library: A Library\n \"\"\"\n announcements = library.setting(cls.SETTING_NAME).json_value or []\n return cls(announcements)\n\n def __init__(self, announcements):\n \"\"\"Instantiate an Announcements object from a (potentially serialised)\n list.\n\n :param announcements: A value for the ANNOUNCEMENTS ConfigurationSetting,\n either serialized or un-.\n :return: A list of Announcement objects. The list will be empty if\n there are validation errors in `announcements`.\n \"\"\"\n validator = AnnouncementListValidator()\n validated = validator.validate_announcements(announcements)\n if isinstance(validated, ProblemDetail):\n # There's a problem with the way the announcements were\n # serialized to the database. 
Treat this as an empty list.\n validated = []\n\n self.announcements = [Announcement(**data) for data in validated]\n\n @property\n def active(self):\n \"\"\"Yield only the active announcements.\"\"\"\n for a in self.announcements:\n if a.is_active:\n yield a\n\n\nclass Announcement(object):\n \"\"\"Data model class for a single library-wide announcement.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Instantiate an Announcement from a dictionary of data.\n\n It's assumed that the data is present and valid.\n\n :param id: Globally unique ID for the Announcement.\n :param content: Textual content of the announcement.\n :param start: The date (relative to the time zone of the server)\n on which the announcement should start being published.\n :param finish: The date (relative to the time zone of the server)\n on which the announcement should stop being published.\n \"\"\"\n self.id = kwargs.pop(\"id\")\n self.content = kwargs.pop(\"content\")\n self.start = AnnouncementListValidator.validate_date(\"\", kwargs.pop(\"start\"))\n self.finish = AnnouncementListValidator.validate_date(\"\", kwargs.pop(\"finish\"))\n\n @property\n def json_ready(self):\n format = AnnouncementListValidator.DATE_FORMAT\n return dict(\n id=self.id,\n content=self.content,\n start=datetime.datetime.strftime(self.start, format),\n finish=datetime.datetime.strftime(self.finish, format),\n )\n\n @property\n def is_active(self):\n \"\"\"Should this announcement be displayed now?\"\"\"\n today_local = datetime.date.today()\n return self.start <= today_local and self.finish >= today_local\n\n @property\n def for_authentication_document(self):\n \"\"\"The publishable representation of this announcement,\n for use in an authentication document.\n\n Basically just the ID and the content.\n \"\"\"\n return dict(id=self.id, content=self.content)\n", "id": "397370", "language": "Python", "matching_score": 0.8359487056732178, "max_stars_count": 0, "path": "api/announcements.py" }, { "content": "import json\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\nimport pypostalcode\nimport uszipcode\n\nfrom api.admin.geographic_validator import GeographicValidator\nfrom api.admin.problem_details import *\nfrom api.registration.registry import RemoteRegistry\nfrom core.model import ExternalIntegration, create\nfrom core.testing import MockRequestsResponse\nfrom tests.api.admin.controller.test_controller import SettingsControllerTest\n\n\nclass TestGeographicValidator(SettingsControllerTest):\n def test_validate_geographic_areas(self):\n original_validator = GeographicValidator\n db = self._db\n\n class Mock(GeographicValidator):\n def __init__(self):\n self._db = db\n self.value = None\n\n def mock_find_location_through_registry(self, value, db):\n self.value = value\n\n def mock_find_location_through_registry_with_error(self, value, db):\n self.value = value\n return REMOTE_INTEGRATION_FAILED\n\n def mock_find_location_through_registry_success(self, value, db):\n self.value = value\n return \"CA\"\n\n mock = Mock()\n mock.find_location_through_registry = mock.mock_find_location_through_registry\n\n # Invalid US zipcode\n response = mock.validate_geographic_areas('[\"00000\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert response.detail == '\"00000\" is not a valid U.S. 
zipcode.'\n assert response.status_code == 400\n # The validator should have returned the problem detail without bothering to ask the registry.\n assert mock.value == None\n\n # Invalid Canadian zipcode\n response = mock.validate_geographic_areas('[\"X1Y\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert response.detail == '\"X1Y\" is not a valid Canadian zipcode.'\n # The validator should have returned the problem detail without bothering to ask the registry.\n assert mock.value == None\n\n # Invalid 2-letter abbreviation\n response = mock.validate_geographic_areas('[\"ZZ\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert (\n response.detail\n == '\"ZZ\" is not a valid U.S. state or Canadian province abbreviation.'\n )\n # The validator should have returned the problem detail without bothering to ask the registry.\n assert mock.value == None\n\n # Validator converts Canadian 2-letter abbreviations into province names, without needing to ask the registry.\n response = mock.validate_geographic_areas('[\"NL\"]', self._db)\n assert response == {\"CA\": [\"Newfoundland and Labrador\"], \"US\": []}\n assert mock.value == None\n\n # County with wrong state\n response = mock.validate_geographic_areas('[\"Fairfield County, FL\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert response.detail == 'Unable to locate \"Fairfield County, FL\".'\n # The validator should go ahead and call find_location_through_registry\n assert mock.value == \"Fairfield County, FL\"\n\n # City with wrong state\n response = mock.validate_geographic_areas('[\"Albany, NJ\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert response.detail == 'Unable to locate \"Albany, NJ\".'\n # The validator should go ahead and call find_location_through_registry\n assert mock.value == \"Albany, NJ\"\n\n # The Canadian zip code is valid, but it corresponds to a place too small for the registry to know about it.\n response = mock.validate_geographic_areas('[\"J5J\"]', self._db)\n assert response.uri == UNKNOWN_LOCATION.uri\n assert (\n response.detail\n == 'Unable to locate \"J5J\" (Saint-Sophie, Quebec). 
Try entering the name of a larger area.'\n )\n assert mock.value == \"Saint-Sophie, Quebec\"\n\n # Can't connect to registry\n mock.find_location_through_registry = (\n mock.mock_find_location_through_registry_with_error\n )\n response = mock.validate_geographic_areas('[\"Victoria, BC\"]', self._db)\n # The controller goes ahead and calls find_location_through_registry, but it can't connect to the registry.\n assert response.uri == REMOTE_INTEGRATION_FAILED.uri\n\n # The registry successfully finds the place\n mock.find_location_through_registry = (\n mock.mock_find_location_through_registry_success\n )\n response = mock.validate_geographic_areas('[\"Victoria, BC\"]', self._db)\n assert response == {\"CA\": [\"Victoria, BC\"], \"US\": []}\n\n def test_format_as_string(self):\n # GeographicValidator.format_as_string just turns its output into JSON.\n value = {\"CA\": [\"Victoria, BC\"], \"US\": []}\n as_string = GeographicValidator().format_as_string(value)\n assert as_string == json.dumps(value)\n\n def test_find_location_through_registry(self):\n get = self.do_request\n test = self\n original_ask_registry = GeographicValidator().ask_registry\n\n class Mock(GeographicValidator):\n called_with = []\n\n def mock_ask_registry(self, service_area_object, db):\n places = {\"US\": [\"Chicago\"], \"CA\": [\"Victoria, BC\"]}\n service_area_info = json.loads(\n urllib.parse.unquote(service_area_object)\n )\n nation = list(service_area_info.keys())[0]\n city_or_county = list(service_area_info.values())[0]\n if city_or_county == \"ERROR\":\n test.responses.append(MockRequestsResponse(502))\n elif city_or_county in places[nation]:\n self.called_with.append(service_area_info)\n test.responses.append(\n MockRequestsResponse(\n 200, content=json.dumps(dict(unknown=None, ambiguous=None))\n )\n )\n else:\n self.called_with.append(service_area_info)\n test.responses.append(\n MockRequestsResponse(\n 200, content=json.dumps(dict(unknown=[city_or_county]))\n )\n )\n return original_ask_registry(service_area_object, db, get)\n\n mock = Mock()\n mock.ask_registry = mock.mock_ask_registry\n\n self._registry(\"https://registry_url\")\n\n us_response = mock.find_location_through_registry(\"Chicago\", self._db)\n assert len(mock.called_with) == 1\n assert {\"US\": \"Chicago\"} == mock.called_with[0]\n assert us_response == \"US\"\n\n mock.called_with = []\n\n ca_response = mock.find_location_through_registry(\"Victoria, BC\", self._db)\n assert len(mock.called_with) == 2\n assert {\"US\": \"Victoria, BC\"} == mock.called_with[0]\n assert {\"CA\": \"Victoria, BC\"} == mock.called_with[1]\n assert ca_response == \"CA\"\n\n mock.called_with = []\n\n nowhere_response = mock.find_location_through_registry(\n \"Not a real place\", self._db\n )\n assert len(mock.called_with) == 2\n assert {\"US\": \"Not a real place\"} == mock.called_with[0]\n assert {\"CA\": \"Not a real place\"} == mock.called_with[1]\n assert nowhere_response == None\n\n error_response = mock.find_location_through_registry(\"ERROR\", self._db)\n assert (\n error_response.detail\n == \"Unable to contact the registry at https://registry_url.\"\n )\n assert error_response.status_code == 502\n\n def test_ask_registry(self, monkeypatch):\n validator = GeographicValidator()\n\n registry_1 = \"https://registry_1_url\"\n registry_2 = \"https://registry_2_url\"\n registry_3 = \"https://registry_3_url\"\n registries = self._registries([registry_1, registry_2, registry_3], monkeypatch)\n\n true_response = MockRequestsResponse(200, content=\"{}\")\n 
unknown_response = MockRequestsResponse(200, content='{\"unknown\": \"place\"}')\n ambiguous_response = MockRequestsResponse(200, content='{\"ambiguous\": \"place\"}')\n problem_response = MockRequestsResponse(404)\n\n # Registry 1 knows about the place\n self.responses.append(true_response)\n response_1 = validator.ask_registry(\n json.dumps({\"CA\": \"Victoria, BC\"}), self._db, self.do_request\n )\n assert response_1 == True\n assert len(self.requests) == 1\n request_1 = self.requests.pop()\n assert (\n request_1[0]\n == 'https://registry_1_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n\n # Registry 1 says the place is unknown, but Registry 2 finds it.\n self.responses.append(true_response)\n self.responses.append(unknown_response)\n response_2 = validator.ask_registry(\n json.dumps({\"CA\": \"Victoria, BC\"}), self._db, self.do_request\n )\n assert response_2 == True\n assert len(self.requests) == 2\n request_2 = self.requests.pop()\n assert (\n request_2[0]\n == 'https://registry_2_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_1 = self.requests.pop()\n assert (\n request_1[0]\n == 'https://registry_1_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n\n # Registry_1 says the place is ambiguous and Registry_2 says it's unknown, but Registry_3 finds it.\n self.responses.append(true_response)\n self.responses.append(unknown_response)\n self.responses.append(ambiguous_response)\n response_3 = validator.ask_registry(\n json.dumps({\"CA\": \"Victoria, BC\"}), self._db, self.do_request\n )\n assert response_3 == True\n assert len(self.requests) == 3\n request_3 = self.requests.pop()\n assert (\n request_3[0]\n == 'https://registry_3_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_2 = self.requests.pop()\n assert (\n request_2[0]\n == 'https://registry_2_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_1 = self.requests.pop()\n assert (\n request_1[0]\n == 'https://registry_1_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n\n # Registry 1 returns a problem detail, but Registry 2 finds the place\n self.responses.append(true_response)\n self.responses.append(problem_response)\n response_4 = validator.ask_registry(\n json.dumps({\"CA\": \"Victoria, BC\"}), self._db, self.do_request\n )\n assert response_4 == True\n assert len(self.requests) == 2\n request_2 = self.requests.pop()\n assert (\n request_2[0]\n == 'https://registry_2_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_1 = self.requests.pop()\n assert (\n request_1[0]\n == 'https://registry_1_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n\n # Registry 1 returns a problem detail and the other two registries can't find the place\n self.responses.append(unknown_response)\n self.responses.append(ambiguous_response)\n self.responses.append(problem_response)\n response_5 = validator.ask_registry(\n json.dumps({\"CA\": \"Victoria, BC\"}), self._db, self.do_request\n )\n assert response_5.status_code == 502\n assert (\n response_5.detail\n == \"Unable to contact the registry at https://registry_1_url.\"\n )\n assert len(self.requests) == 3\n request_3 = self.requests.pop()\n assert (\n request_3[0]\n == 'https://registry_3_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_2 = self.requests.pop()\n assert (\n request_2[0]\n == 'https://registry_2_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n request_1 = self.requests.pop()\n assert (\n request_1[0]\n == 'https://registry_1_url/coverage?coverage={\"CA\": \"Victoria, BC\"}'\n )\n\n 
def _registry(self, url):\n integration, is_new = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.OPDS_REGISTRATION,\n goal=ExternalIntegration.DISCOVERY_GOAL,\n )\n integration.url = url\n return RemoteRegistry(integration)\n\n def _registries(self, urls, monkeypatch):\n \"\"\"Create and mock the `for_protocol_and_goal` function from\n RemoteRegistry. Instead of relying on getting the newly created\n integrations from the database in a specific order, we return them\n in the order they were created.\n \"\"\"\n integrations = []\n for url in urls:\n integration, is_new = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.OPDS_REGISTRATION,\n goal=ExternalIntegration.DISCOVERY_GOAL,\n )\n integration.url = url\n integrations.append(integration)\n\n def mock_for_protocol_and_goal(_db, protocol, goal):\n for integration in integrations:\n yield RemoteRegistry(integration)\n\n monkeypatch.setattr(\n RemoteRegistry, \"for_protocol_and_goal\", mock_for_protocol_and_goal\n )\n\n def test_is_zip(self):\n validator = GeographicValidator()\n assert validator.is_zip(\"06759\", \"US\") == True\n assert validator.is_zip(\"J2S\", \"US\") == False\n assert validator.is_zip(\"1234\", \"US\") == False\n assert validator.is_zip(\"1a234\", \"US\") == False\n\n assert validator.is_zip(\"J2S\", \"CA\") == True\n assert validator.is_zip(\"06759\", \"CA\") == False\n assert validator.is_zip(\"12S\", \"CA\") == False\n # \"J2S 0A1\" is a legit Canadian zipcode, but pypostalcode, which we use for looking up Canadian zipcodes,\n # only takes the FSA (the first three characters).\n assert validator.is_zip(\"J2S 0A1\", \"CA\") == False\n\n def test_look_up_zip(self):\n validator = GeographicValidator()\n us_zip_unformatted = validator.look_up_zip(\"06759\", \"US\")\n assert isinstance(us_zip_unformatted, uszipcode.SimpleZipcode)\n us_zip_formatted = validator.look_up_zip(\"06759\", \"US\", True)\n assert us_zip_formatted == {\"06759\": \"Litchfield, CT\"}\n\n ca_zip_unformatted = validator.look_up_zip(\"R2V\", \"CA\")\n assert isinstance(ca_zip_unformatted, pypostalcode.PostalCode)\n ca_zip_formatted = validator.look_up_zip(\"R2V\", \"CA\", True)\n assert ca_zip_formatted == {\"R2V\": \"Winnipeg (Seven Oaks East), Manitoba\"}\n", "id": "11920638", "language": "Python", "matching_score": 5.776493549346924, "max_stars_count": 0, "path": "tests/api/admin/test_geographic_validator.py" }, { "content": "import json\nimport os\nimport re\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\nimport uszipcode\nfrom flask_babel import lazy_gettext as _\nfrom pypostalcode import PostalCodeDatabase\n\nfrom api.admin.exceptions import *\nfrom api.admin.validator import Validator\nfrom api.problem_details import *\nfrom api.registration.registry import RemoteRegistry\nfrom core.model import ExternalIntegration\nfrom core.util.http import HTTP\nfrom core.util.problem_detail import ProblemDetail\n\n\nclass GeographicValidator(Validator):\n @staticmethod\n def get_us_search():\n # Use a known path for the uszipcode db_file_dir that already contains the DB that the\n # library would otherwise download. This is done because the host for this file can\n # be flaky. 
There is an issue for this in the underlying library here:\n # https://github.com/MacHu-GWU/uszipcode-project/issues/40\n db_file_path = os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"data\", \"uszipcode\"\n )\n return uszipcode.SearchEngine(simple_zipcode=True, db_file_dir=db_file_path)\n\n def validate_geographic_areas(self, values, db):\n # Note: the validator does not recognize data from US territories other than Puerto Rico.\n\n us_search = self.get_us_search()\n ca_search = PostalCodeDatabase()\n CA_PROVINCES = {\n \"AB\": \"Alberta\",\n \"BC\": \"British Columbia\",\n \"MB\": \"Manitoba\",\n \"NB\": \"New Brunswick\",\n \"NL\": \"Newfoundland and Labrador\",\n \"NT\": \"Northwest Territories\",\n \"NS\": \"Nova Scotia\",\n \"NU\": \"Nunavut\",\n \"ON\": \"Ontario\",\n \"PE\": \"Prince Edward Island\",\n \"QC\": \"Quebec\",\n \"SK\": \"Saskatchewan\",\n \"YT\": \"Yukon Territories\",\n }\n\n locations = {\"US\": [], \"CA\": []}\n\n for value in json.loads(values):\n flagged = False\n if value == \"everywhere\":\n locations[\"US\"].append(value)\n elif len(value) and isinstance(value, str):\n if len(value) == 2:\n # Is it a US state or Canadian province abbreviation?\n if value in CA_PROVINCES:\n locations[\"CA\"].append(CA_PROVINCES[value])\n elif len(us_search.query(state=value)):\n locations[\"US\"].append(value)\n else:\n return UNKNOWN_LOCATION.detailed(\n _(\n '\"%(value)s\" is not a valid U.S. state or Canadian province abbreviation.',\n value=value,\n )\n )\n elif value in list(CA_PROVINCES.values()):\n locations[\"CA\"].append(value)\n elif self.is_zip(value, \"CA\"):\n # Is it a Canadian zipcode?\n try:\n info = self.look_up_zip(value, \"CA\")\n formatted = \"%s, %s\" % (info.city, info.province)\n # In some cases--mainly involving very small towns--even if the zip code is valid,\n # the registry won't recognize the name of the place to which it corresponds.\n registry_response = self.find_location_through_registry(\n formatted, db\n )\n if registry_response:\n locations[\"CA\"].append(formatted)\n else:\n return UNKNOWN_LOCATION.detailed(\n _(\n 'Unable to locate \"%(value)s\" (%(formatted)s). Try entering the name of a larger area.',\n value=value,\n formatted=formatted,\n )\n )\n except:\n return UNKNOWN_LOCATION.detailed(\n _(\n '\"%(value)s\" is not a valid Canadian zipcode.',\n value=value,\n )\n )\n elif len(value.split(\", \")) == 2:\n # Is it in the format \"[city], [state abbreviation]\" or \"[county], [state abbreviation]\"?\n city_or_county, state = value.split(\", \")\n if us_search.by_city_and_state(city_or_county, state):\n locations[\"US\"].append(value)\n elif len(\n [\n x\n for x in us_search.query(state=state, returns=None)\n if x.county == city_or_county\n ]\n ):\n locations[\"US\"].append(value)\n else:\n # Flag this as needing to be checked with the registry\n flagged = True\n elif self.is_zip(value, \"US\"):\n # Is it a US zipcode?\n info = self.look_up_zip(value, \"US\")\n if not info:\n return UNKNOWN_LOCATION.detailed(\n _('\"%(value)s\" is not a valid U.S. 
zipcode.', value=value)\n )\n locations[\"US\"].append(value)\n else:\n flagged = True\n\n if flagged:\n registry_response = self.find_location_through_registry(value, db)\n if registry_response and isinstance(\n registry_response, ProblemDetail\n ):\n return registry_response\n elif registry_response:\n locations[registry_response].append(value)\n else:\n return UNKNOWN_LOCATION.detailed(\n _('Unable to locate \"%(value)s\".', value=value)\n )\n return locations\n\n def is_zip(self, value, country):\n if country == \"US\":\n return len(value) == 5 and value.isdigit()\n elif country == \"CA\":\n return len(value) == 3 and bool(re.search(\"^[A-Za-z]\\\\d[A-Za-z]\", value))\n\n def look_up_zip(self, zip, country, formatted=False):\n if country == \"US\":\n info = self.get_us_search().by_zipcode(zip)\n if formatted:\n info = self.format_place(zip, info.major_city, info.state)\n elif country == \"CA\":\n info = PostalCodeDatabase()[zip]\n if formatted:\n info = self.format_place(zip, info.city, info.province)\n return info\n\n def format_place(self, zip, city, state_or_province):\n details = \"%s, %s\" % (city, state_or_province)\n return {zip: details}\n\n def find_location_through_registry(self, value, db):\n for nation in [\"US\", \"CA\"]:\n service_area_object = urllib.parse.quote('{\"%s\": \"%s\"}' % (nation, value))\n registry_check = self.ask_registry(service_area_object, db)\n if registry_check and isinstance(registry_check, ProblemDetail):\n return registry_check\n elif registry_check:\n # If the registry has established that this is a US location, don't bother also trying to find it in Canada\n return nation\n\n def ask_registry(self, service_area_object, db, do_get=HTTP.debuggable_get):\n # If the circulation manager doesn't know about this location, check whether the Library Registry does.\n result = None\n for registry in RemoteRegistry.for_protocol_and_goal(\n db,\n ExternalIntegration.OPDS_REGISTRATION,\n ExternalIntegration.DISCOVERY_GOAL,\n ):\n base_url = registry.integration.url + \"/coverage?coverage=\"\n\n response = do_get(base_url + service_area_object)\n if not response.status_code == 200:\n result = REMOTE_INTEGRATION_FAILED.detailed(\n _(\n \"Unable to contact the registry at %(url)s.\",\n url=registry.integration.url,\n )\n )\n\n if hasattr(response, \"content\"):\n content = json.loads(response.content)\n found_place = not (content.get(\"unknown\") or content.get(\"ambiguous\"))\n if found_place:\n return True\n\n return result\n\n def format_as_string(self, value):\n \"\"\"Format the output of validate_geographic_areas for storage in ConfigurationSetting.value.\"\"\"\n return json.dumps(value)\n", "id": "3155231", "language": "Python", "matching_score": 2.135747194290161, "max_stars_count": 0, "path": "api/admin/geographic_validator.py" }, { "content": "import flask\nfrom flask import Response\nfrom flask_babel import lazy_gettext as _\n\nfrom api.admin.problem_details import *\nfrom api.odl import SharedODLAPI\nfrom api.registration.registry import Registration, RemoteRegistry\nfrom core.model import Collection, ConfigurationSetting, Library, get_one\nfrom core.util.http import HTTP\nfrom core.util.problem_detail import ProblemDetail\n\nfrom . 
import SettingsController\n\n\nclass CollectionLibraryRegistrationsController(SettingsController):\n \"\"\"Use the OPDS Directory Registration Protocol to register a\n Collection with its remote source of truth.\n\n :param registration_class: Mock class to use instead of Registration.\"\"\"\n\n # TODO: This controller can share some code with DiscoveryServiceLibraryRegistrationsController.\n\n def __init__(self, manager):\n super(CollectionLibraryRegistrationsController, self).__init__(manager)\n self.shared_collection_provider_apis = [SharedODLAPI]\n\n def process_collection_library_registrations(\n self,\n do_get=HTTP.debuggable_get,\n do_post=HTTP.debuggable_post,\n key=None,\n registration_class=Registration,\n ):\n\n registration_class = registration_class or Registration\n self.require_system_admin()\n if flask.request.method == \"GET\":\n return self.process_get()\n else:\n return self.process_post(registration_class, do_get, do_post)\n\n def get_library_info(self, library, collection):\n \"\"\"Find the relevant information about the library which the user\n is trying to register\"\"\"\n\n library_info = dict(short_name=library.short_name)\n status = ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n Registration.LIBRARY_REGISTRATION_STATUS,\n library,\n collection.external_integration,\n ).value\n if status:\n library_info[\"status\"] = status\n return library_info\n\n def process_get(self):\n collections = []\n for collection in self._db.query(Collection):\n libraries = []\n for library in collection.libraries:\n library_info = self.get_library_info(library, collection)\n if library_info:\n libraries.append(library_info)\n\n collections.append(\n dict(\n id=collection.id,\n libraries=libraries,\n )\n )\n return dict(library_registrations=collections)\n\n def process_post(self, registration_class, do_get, do_post):\n collection_id = flask.request.form.get(\"collection_id\")\n library_short_name = flask.request.form.get(\"library_short_name\")\n\n collection = self.look_up_collection(collection_id)\n if isinstance(collection, ProblemDetail):\n return collection\n\n library = self.look_up_library(library_short_name)\n if isinstance(library, ProblemDetail):\n return library\n\n registry = self.look_up_registry(collection.external_integration)\n if isinstance(registry, ProblemDetail):\n return registry\n\n registration = registration_class(registry, library)\n registered = registration.push(\n Registration.PRODUCTION_STAGE,\n self.url_for,\n catalog_url=collection.external_account_id,\n do_get=do_get,\n do_post=do_post,\n )\n\n if isinstance(registered, ProblemDetail):\n return registered\n\n return Response(str(_(\"Success\")), 200)\n\n def look_up_collection(self, collection_id):\n \"\"\"Find the collection that the user is trying to register the library with,\n and check that it actually exists.\"\"\"\n\n collection = get_one(self._db, Collection, id=collection_id)\n if not collection:\n return MISSING_COLLECTION\n if collection.protocol not in [\n api.NAME for api in self.shared_collection_provider_apis\n ]:\n return COLLECTION_DOES_NOT_SUPPORT_REGISTRATION\n return collection\n\n def look_up_library(self, library_short_name):\n \"\"\"Find the library the user is trying to register, and check that it actually exists.\"\"\"\n\n library = get_one(self._db, Library, short_name=library_short_name)\n if not library:\n return NO_SUCH_LIBRARY\n return library\n\n def look_up_registry(self, external_integration):\n \"\"\"Find the remote registry that the user is trying 
to register the collection with, and\n check that it is in the list of recognized protocols (currently just SharedODLAPI)\"\"\"\n\n registry = RemoteRegistry(external_integration)\n if not registry:\n return MISSING_SERVICE\n return registry\n", "id": "83919", "language": "Python", "matching_score": 5.558228015899658, "max_stars_count": 0, "path": "api/admin/controller/collection_library_registrations.py" }, { "content": "import flask\nimport pytest\nfrom werkzeug.datastructures import MultiDict\n\nfrom api.admin.exceptions import *\nfrom api.odl import SharedODLAPI\nfrom api.registration.registry import Registration\nfrom core.model import AdminRole, ConfigurationSetting, Library, create\nfrom core.util.http import HTTP\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestCollectionRegistration(SettingsControllerTest):\n \"\"\"Test the process of registering a specific collection with\n a RemoteRegistry.\n \"\"\"\n\n def test_collection_library_registrations_get(self):\n collection = self._default_collection\n succeeded, ignore = create(\n self._db,\n Library,\n name=\"Library 1\",\n short_name=\"L1\",\n )\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n \"library-registration-status\",\n succeeded,\n collection.external_integration,\n ).value = \"success\"\n failed, ignore = create(\n self._db,\n Library,\n name=\"Library 2\",\n short_name=\"L2\",\n )\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n \"library-registration-status\",\n failed,\n collection.external_integration,\n ).value = \"failure\"\n unregistered, ignore = create(\n self._db,\n Library,\n name=\"Library 3\",\n short_name=\"L3\",\n )\n collection.libraries = [succeeded, failed, unregistered]\n\n with self.request_context_with_admin(\"/\", method=\"GET\"):\n response = (\n self.manager.admin_collection_library_registrations_controller.process_collection_library_registrations()\n )\n\n serviceInfo = response.get(\"library_registrations\")\n assert 1 == len(serviceInfo)\n assert collection.id == serviceInfo[0].get(\"id\")\n\n libraryInfo = serviceInfo[0].get(\"libraries\")\n expected = [\n dict(short_name=succeeded.short_name, status=\"success\"),\n dict(short_name=failed.short_name, status=\"failure\"),\n ]\n assert expected == libraryInfo\n\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n self._db.flush()\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_collection_library_registrations_controller.process_collection_library_registrations,\n )\n\n def test_collection_library_registrations_post(self):\n \"\"\"Test what might happen POSTing to collection_library_registrations.\"\"\"\n # First test the failure cases.\n\n m = (\n self.manager.admin_collection_library_registrations_controller.process_collection_library_registrations\n )\n\n # Here, the user doesn't have permission to start the\n # registration process.\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n pytest.raises(AdminNotAuthorized, m)\n self.admin.add_role(AdminRole.SYSTEM_ADMIN)\n\n # The collection ID doesn't correspond to any real collection.\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict([(\"collection_id\", \"1234\")])\n response = m()\n assert MISSING_COLLECTION == response\n\n # Pass in a collection ID so that doesn't happen again.\n collection = self._collection()\n collection.external_account_id = \"collection url\"\n\n # Oops, the collection doesn't actually 
support registration.\n form = MultiDict(\n [\n (\"collection_id\", collection.id),\n (\"library_short_name\", \"not-a-library\"),\n ]\n )\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = form\n response = m()\n assert COLLECTION_DOES_NOT_SUPPORT_REGISTRATION == response\n\n # Change the protocol to one that supports registration.\n collection.protocol = SharedODLAPI.NAME\n\n # Now the problem is the library doesn't correspond to a real\n # library.\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = form\n response = m()\n assert NO_SUCH_LIBRARY == response\n\n # The push() implementation might return a ProblemDetail for any\n # number of reasons.\n library = self._default_library\n form = MultiDict(\n [\n (\"collection_id\", collection.id),\n (\"library_short_name\", library.short_name),\n ]\n )\n\n class Mock(Registration):\n def push(self, *args, **kwargs):\n return REMOTE_INTEGRATION_FAILED\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = form\n assert REMOTE_INTEGRATION_FAILED == m(registration_class=Mock)\n\n # But if that doesn't happen, success!\n class Mock(Registration):\n \"\"\"When asked to push a registration, do nothing and say it\n worked.\n \"\"\"\n\n called_with = None\n\n def push(self, *args, **kwargs):\n Mock.called_with = (args, kwargs)\n return True\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = form\n result = m(registration_class=Mock)\n assert 200 == result.status_code\n\n # push() was called with the arguments we would expect.\n args, kwargs = Mock.called_with\n assert (Registration.PRODUCTION_STAGE, self.manager.url_for) == args\n\n # We would have made real HTTP requests.\n assert HTTP.debuggable_post == kwargs.pop(\"do_post\")\n assert HTTP.debuggable_get == kwargs.pop(\"do_get\")\n # And passed the collection URL over to the shared collection.\n assert collection.external_account_id == kwargs.pop(\"catalog_url\")\n # No other weird keyword arguments were passed in.\n assert {} == kwargs\n", "id": "8430132", "language": "Python", "matching_score": 2.8746089935302734, "max_stars_count": 0, "path": "tests/api/admin/controller/test_collection_registrations.py" }, { "content": "class RegistrationConstants:\n \"\"\"Constants used for library registration.\"\"\"\n\n # A library registration attempt may succeed or fail.\n LIBRARY_REGISTRATION_STATUS = \"library-registration-status\"\n SUCCESS_STATUS = \"success\"\n FAILURE_STATUS = \"failure\"\n\n # A library may be registered in a 'testing' stage or a\n # 'production' stage. This represents the _library's_ opinion\n # about whether the integration is ready for production. The\n # library won't actually be in production (whatever that means for\n # a given integration) until the _remote_ also thinks it should.\n LIBRARY_REGISTRATION_STAGE = \"library-registration-stage\"\n TESTING_STAGE = \"testing\"\n PRODUCTION_STAGE = \"production\"\n VALID_REGISTRATION_STAGES = [TESTING_STAGE, PRODUCTION_STAGE]\n\n # A registry may provide access to a web client. 
If so, we'll store\n # the URL so we can enable CORS headers in requests from that client,\n # and use it in MARC records so the library's main catalog can link\n # to it.\n LIBRARY_REGISTRATION_WEB_CLIENT = \"library-registration-web-client\"\n", "id": "10535923", "language": "Python", "matching_score": 1.8824917078018188, "max_stars_count": 0, "path": "api/registration/constants.py" }, { "content": "import urllib.error\nimport urllib.parse\nimport urllib.request\n\nfrom pymarc import Field\n\nfrom core.config import Configuration\nfrom core.marc import Annotator, MARCExporter\nfrom core.model import ConfigurationSetting, Session\n\n\nclass LibraryAnnotator(Annotator):\n def __init__(self, library):\n super(LibraryAnnotator, self).__init__()\n self.library = library\n _db = Session.object_session(library)\n self.base_url = ConfigurationSetting.sitewide(\n _db, Configuration.BASE_URL_KEY\n ).value\n\n def value(self, key, integration):\n _db = Session.object_session(integration)\n return ConfigurationSetting.for_library_and_externalintegration(\n _db, key, self.library, integration\n ).value\n\n def annotate_work_record(\n self,\n work,\n active_license_pool,\n edition,\n identifier,\n record,\n integration=None,\n updated=None,\n ):\n super(LibraryAnnotator, self).annotate_work_record(\n work, active_license_pool, edition, identifier, record, integration, updated\n )\n\n if integration:\n marc_org = self.value(MARCExporter.MARC_ORGANIZATION_CODE, integration)\n include_summary = (\n self.value(MARCExporter.INCLUDE_SUMMARY, integration) == \"true\"\n )\n include_genres = (\n self.value(MARCExporter.INCLUDE_SIMPLIFIED_GENRES, integration)\n == \"true\"\n )\n\n if marc_org:\n self.add_marc_organization_code(record, marc_org)\n\n if include_summary:\n self.add_summary(record, work)\n\n if include_genres:\n self.add_simplified_genres(record, work)\n\n self.add_web_client_urls(record, self.library, identifier, integration)\n\n def add_web_client_urls(self, record, library, identifier, integration=None):\n _db = Session.object_session(library)\n settings = []\n\n if integration:\n marc_setting = self.value(MARCExporter.WEB_CLIENT_URL, integration)\n if marc_setting:\n settings.append(marc_setting)\n\n from api.registration.registry import Registration\n\n settings += [\n s.value\n for s in _db.query(ConfigurationSetting).filter(\n ConfigurationSetting.key\n == Registration.LIBRARY_REGISTRATION_WEB_CLIENT,\n ConfigurationSetting.library_id == library.id,\n )\n if s.value\n ]\n\n qualified_identifier = urllib.parse.quote(\n identifier.type + \"/\" + identifier.identifier, safe=\"\"\n )\n\n for web_client_base_url in settings:\n link = \"{}/{}/works/{}\".format(\n self.base_url,\n library.short_name,\n qualified_identifier,\n )\n encoded_link = urllib.parse.quote(link, safe=\"\")\n url = \"{}/book/{}\".format(web_client_base_url, encoded_link)\n record.add_field(\n Field(\n tag=\"856\",\n indicators=[\"4\", \"0\"],\n subfields=[\"u\", url],\n )\n )\n", "id": "7893118", "language": "Python", "matching_score": 0.6483442783355713, "max_stars_count": 0, "path": "api/marc.py" }, { "content": "import os\nfrom typing import Optional\n\nfrom aws_xray_sdk.core import AWSXRayRecorder\nfrom aws_xray_sdk.core import patch as xray_patch\nfrom aws_xray_sdk.core.models.segment import Segment\nfrom aws_xray_sdk.ext.flask.middleware import XRayMiddleware\nfrom aws_xray_sdk.ext.httplib import add_ignored as httplib_add_ignored\nfrom flask import Flask, Response, request, session\n\nfrom core.config import 
Configuration\n\n\nclass PalaceXrayMiddleware(XRayMiddleware):\n XRAY_ENV_NAME = \"PALACE_XRAY_NAME\"\n XRAY_ENV_ANNOTATE = \"PALACE_XRAY_ANNOTATE_\"\n XRAY_ENV_PATRON_BARCODE = \"PALACE_XRAY_INCLUDE_BARCODE\"\n\n @classmethod\n def put_annotations(\n cls, segment: Optional[Segment], seg_type: Optional[str] = None\n ):\n if seg_type is not None:\n segment.put_annotation(\"type\", seg_type)\n\n for env, value in os.environ.items():\n if env.startswith(cls.XRAY_ENV_ANNOTATE):\n name = env.replace(cls.XRAY_ENV_ANNOTATE, \"\").lower()\n segment.put_annotation(name, value)\n\n if Configuration.app_version() != Configuration.NO_APP_VERSION_FOUND:\n segment.put_annotation(\"version\", Configuration.app_version())\n\n @classmethod\n def setup_xray(cls, xray_recorder):\n name = os.environ.get(cls.XRAY_ENV_NAME, \"Palace\")\n xray_recorder.configure(\n service=name,\n streaming_threshold=5,\n context_missing=\"LOG_ERROR\",\n plugins=[\"EC2Plugin\"],\n )\n xray_patch((\"httplib\", \"sqlalchemy_core\", \"requests\"))\n httplib_add_ignored(hostname=\"logs.*.amazonaws.com\")\n\n @classmethod\n def include_barcode(cls):\n include_barcode = os.environ.get(cls.XRAY_ENV_PATRON_BARCODE, \"true\")\n return include_barcode.lower() == \"true\"\n\n def __init__(self, app: Flask, recorder: AWSXRayRecorder):\n super().__init__(app, recorder)\n\n # Add an additional hook to before first request\n self.app.before_first_request(self._before_first_request)\n\n def _before_first_request(self):\n self._before_request()\n segment = self._recorder.current_segment()\n\n # Add an annotation for the first request, since it does extra caching work.\n segment.put_annotation(\"request\", \"first\")\n request._palace_first_request = True\n\n def _before_request(self):\n if getattr(request, \"_palace_first_request\", None) is not None:\n # If we are in the first request this work is already done\n return\n super()._before_request()\n self.put_annotations(self._recorder.current_segment(), \"web\")\n\n def _after_request(self, response: Response):\n super()._after_request(response)\n\n segment = self._recorder.current_segment()\n\n # Add library shortname\n if hasattr(request, \"library\") and hasattr(request.library, \"short_name\"):\n segment.put_annotation(\"library\", str(request.library.short_name))\n\n # Add patron data\n if (\n self.include_barcode()\n and hasattr(request, \"patron\")\n and hasattr(request.patron, \"authorization_identifier\")\n ):\n segment.set_user(str(request.patron.authorization_identifier))\n segment.put_annotation(\n \"barcode\", str(request.patron.authorization_identifier)\n )\n\n # Add admin UI username\n if \"admin_email\" in session:\n segment.set_user(session[\"admin_email\"])\n\n return response\n", "id": "9722405", "language": "Python", "matching_score": 3.3729028701782227, "max_stars_count": 0, "path": "api/util/xray.py" }, { "content": "from unittest.mock import MagicMock, call\n\nfrom api.util.xray import PalaceXrayMiddleware\nfrom core.config import Configuration\n\n\nclass TestPalaceXrayMiddleware:\n def test_put_annotations_none(self):\n # If no segment is passed in nothing is returned\n value = PalaceXrayMiddleware.put_annotations(None)\n assert value is None\n\n def test_put_annotations(self):\n # Type annotation set based on seg_type passed into put_annotation\n segment = MagicMock()\n PalaceXrayMiddleware.put_annotations(segment, \"test\")\n segment.put_annotation.assert_called_once_with(\"type\", \"test\")\n\n def test_put_annotations_env(self, monkeypatch):\n # Annotations are made 
based on environment variables\n segment = MagicMock()\n monkeypatch.setenv(f\"{PalaceXrayMiddleware.XRAY_ENV_ANNOTATE}TEST\", \"test\")\n monkeypatch.setenv(\n f\"{PalaceXrayMiddleware.XRAY_ENV_ANNOTATE}ANOTHER_TEST\", \"test123\"\n )\n PalaceXrayMiddleware.put_annotations(segment)\n assert segment.put_annotation.called is True\n assert segment.put_annotation.call_count == 2\n assert segment.put_annotation.call_args_list == [\n call(\"test\", \"test\"),\n call(\"another_test\", \"test123\"),\n ]\n\n def test_put_annotations_version(self, monkeypatch):\n # The version number is added as an annotation\n segment = MagicMock()\n monkeypatch.setattr(Configuration, \"app_version\", lambda: \"foo\")\n PalaceXrayMiddleware.put_annotations(segment)\n segment.put_annotation.assert_called_once_with(\"version\", \"foo\")\n", "id": "6307050", "language": "Python", "matching_score": 1.2326527833938599, "max_stars_count": 0, "path": "tests/api/util/test_xray.py" }, { "content": "import logging\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom flask import Flask, g, request\n\n\nclass PalaceProfiler:\n ENVIRONMENT_VARIABLE: str\n FILENAME_TEMPLATE = \"{time:.0f}.{method}.{path}.{elapsed:.0f}ms\"\n\n @classmethod\n def enabled(cls) -> bool:\n return os.environ.get(cls.ENVIRONMENT_VARIABLE, None) is not None\n\n @classmethod\n def create_profile_dir(cls) -> Optional[Path]:\n if not cls.enabled():\n return None\n\n profile_dir = Path(os.environ.get(cls.ENVIRONMENT_VARIABLE))\n if not profile_dir.exists():\n profile_dir.mkdir(parents=True)\n\n return profile_dir\n\n @classmethod\n def configure(cls, app: Flask):\n raise NotImplementedError\n\n\nclass PalacePyInstrumentProfiler(PalaceProfiler):\n ENVIRONMENT_VARIABLE = \"PALACE_PYINSTRUMENT\"\n\n @classmethod\n def configure(cls, app: Flask):\n profile_dir = cls.create_profile_dir()\n if profile_dir is None:\n # We are not configured\n return\n\n # Don't import if we are not profiling\n from pyinstrument import Profiler\n\n @app.before_first_request\n @app.before_request\n def before_request():\n if \"profiler\" not in g:\n g.profiler = Profiler()\n g.profiler_starttime = time.time()\n g.profiler.start()\n\n @app.after_request\n def after_request(response):\n if \"profiler\" in g:\n session = g.profiler.stop()\n elapsed = (time.time() - g.profiler_starttime) * 1000.0\n request_path = request.path.strip(\"/\").replace(\"/\", \".\") or \"root\"\n filename = cls.FILENAME_TEMPLATE.format(\n time=time.time(),\n method=request.method,\n path=request_path,\n elapsed=elapsed,\n )\n filename += \".pyisession\"\n session.save(profile_dir / filename)\n return response\n\n\nclass PalaceCProfileProfiler(PalaceProfiler):\n ENVIRONMENT_VARIABLE = \"PALACE_CPROFILE\"\n\n @classmethod\n def configure(cls, app: Flask):\n profile_dir = cls.create_profile_dir()\n if profile_dir is None:\n # We are not configured\n return\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n\n filename = cls.FILENAME_TEMPLATE + \".prof\"\n app.config[\"PROFILE\"] = True\n app.wsgi_app = ProfilerMiddleware(\n app.wsgi_app, profile_dir=str(profile_dir), filename_format=filename\n )\n\n\nclass PalaceXrayProfiler(PalaceProfiler):\n ENVIRONMENT_VARIABLE = \"PALACE_XRAY\"\n\n @classmethod\n def configure(cls, app: Flask):\n if not cls.enabled():\n return\n\n from aws_xray_sdk.core import xray_recorder\n\n from api.util.xray import PalaceXrayMiddleware\n\n logging.getLogger(cls.__name__).info(\"Configuring app with AWS XRAY.\")\n 
PalaceXrayMiddleware.setup_xray(xray_recorder)\n PalaceXrayMiddleware(app, xray_recorder)\n", "id": "4988392", "language": "Python", "matching_score": 2.813823938369751, "max_stars_count": 0, "path": "api/util/profilers.py" }, { "content": "import logging\nimport os\nimport urllib.parse\n\nfrom flask import Flask\nfrom flask_babel import Babel\nfrom flask_sqlalchemy_session import flask_scoped_session\n\nfrom api.config import Configuration\nfrom core.log import LogConfiguration\nfrom core.model import SessionManager\nfrom core.util import LanguageCodes\n\nfrom .util.profilers import (\n PalaceCProfileProfiler,\n PalacePyInstrumentProfiler,\n PalaceXrayProfiler,\n)\n\napp = Flask(__name__)\napp._db = None\napp.config[\"BABEL_DEFAULT_LOCALE\"] = LanguageCodes.three_to_two[\n Configuration.localization_languages()[0]\n]\napp.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = \"../translations\"\nbabel = Babel(app)\n\n# We use URIs as identifiers throughout the application, meaning that\n# we never want werkzeug's merge_slashes feature.\napp.url_map.merge_slashes = False\n\n# Optionally setup any profilers that are enabled\nPalacePyInstrumentProfiler.configure(app)\nPalaceCProfileProfiler.configure(app)\nPalaceXrayProfiler.configure(app)\n\n\n@app.before_first_request\ndef initialize_database(autoinitialize=True):\n testing = \"TESTING\" in os.environ\n\n db_url = Configuration.database_url()\n if autoinitialize:\n SessionManager.initialize(db_url)\n session_factory = SessionManager.sessionmaker(db_url)\n _db = flask_scoped_session(session_factory, app)\n app._db = _db\n\n log_level = LogConfiguration.initialize(_db, testing=testing)\n debug = log_level == \"DEBUG\"\n app.config[\"DEBUG\"] = debug\n app.debug = debug\n _db.commit()\n logging.getLogger().info(\"Application debug mode==%r\" % app.debug)\n\n\nfrom . 
import routes # noqa\nfrom .admin import routes # noqa\n\n\ndef run(url=None):\n base_url = url or \"http://localhost:6500/\"\n scheme, netloc, path, parameters, query, fragment = urllib.parse.urlparse(base_url)\n if \":\" in netloc:\n host, port = netloc.split(\":\")\n port = int(port)\n else:\n host = netloc\n port = 80\n\n # Required for subdomain support.\n app.config[\"SERVER_NAME\"] = netloc\n\n debug = True\n\n # Workaround for a \"Resource temporarily unavailable\" error when\n # running in debug mode with the global socket timeout set by isbnlib\n if debug:\n import socket\n\n socket.setdefaulttimeout(None)\n\n logging.info(\"Starting app on %s:%s\", host, port)\n sslContext = \"adhoc\" if scheme == \"https\" else None\n app.run(debug=debug, host=host, port=port, threaded=True, ssl_context=sslContext)\n", "id": "12822438", "language": "Python", "matching_score": 0.45774045586586, "max_stars_count": 0, "path": "api/app.py" }, { "content": "import json\nfrom collections import defaultdict\n\nfrom flask_babel import lazy_gettext as _\nfrom oauth2client import client as GoogleClient\n\nfrom api.admin.template_styles import *\nfrom core.model import (\n Admin,\n AdminRole,\n ConfigurationSetting,\n ExternalIntegration,\n Session,\n get_one,\n)\n\nfrom .admin_authentication_provider import AdminAuthenticationProvider\nfrom .problem_details import GOOGLE_OAUTH_FAILURE, INVALID_ADMIN_CREDENTIALS\n\n\nclass GoogleOAuthAdminAuthenticationProvider(AdminAuthenticationProvider):\n\n NAME = ExternalIntegration.GOOGLE_OAUTH\n DESCRIPTION = _(\"How to Configure a Google OAuth Integration\")\n DOMAINS = \"domains\"\n\n INSTRUCTIONS = _(\n \"<p>Configuring a Google OAuth integration in the Circulation Manager \"\n + \"will allow admins to sign into the Admin interface with their Google/GMail credentials.</p>\"\n + \"<p>Configure the Google OAuth Service: </p>\"\n + \"<ol><li>To use this integration, visit the \"\n + \"<a href='https://console.developers.google.com/apis/dashboard?pli=1' rel='noopener' \"\n + \"rel='noreferer' target='_blank'>Google developer console.</a> \"\n + \"Create a project, click 'Create Credentials' in the left sidebar, and select 'OAuth client ID'. \"\n + \"If you get a warning about the consent screen, click 'Configure consent screen' and enter your \"\n + \"library name as the product name. Save the consent screen information.</li>\"\n + \"<li>Choose 'Web Application' as the application type.</li>\"\n + \"<li>Leave 'Authorized JavaScript origins' blank, but under 'Authorized redirect URIs', add the url \"\n + \"of your circulation manager followed by '/admin/GoogleAuth/callback', e.g. \"\n + \"'http://mycircmanager.org/admin/GoogleAuth/callback'.</li>\"\n \"<li>Click create, and you'll get a popup with your new client ID and secret. \"\n + \"Copy these values and enter them in the form below.</li></ol>\"\n )\n\n SETTINGS = [\n {\n \"key\": ExternalIntegration.URL,\n \"label\": _(\"Authentication URI\"),\n \"default\": \"https://accounts.google.com/o/oauth2/auth\",\n \"required\": True,\n \"format\": \"url\",\n },\n {\n \"key\": ExternalIntegration.USERNAME,\n \"label\": _(\"Client ID\"),\n \"required\": True,\n },\n {\n \"key\": ExternalIntegration.PASSWORD,\n \"label\": _(\"Client Secret\"),\n \"required\": True,\n },\n ]\n\n LIBRARY_SETTINGS = [\n {\n \"key\": DOMAINS,\n \"label\": _(\"Allowed Domains\"),\n \"description\": _(\n \"Anyone who logs in with an email address from one of these domains will automatically have librarian-level access to this library. 
Library manager roles must still be granted individually by other admins. If you want to set up admins individually but still allow them to log in with Google, you can create the admin authentication service without adding any libraries.\"\n ),\n \"type\": \"list\",\n },\n ]\n SITEWIDE = True\n\n TEMPLATE = \"\"\"\n <a style='{}' href=%(auth_uri)s>Sign in with Google</a>\n \"\"\".format(\n link_style\n )\n\n def __init__(self, integration, redirect_uri, test_mode=False):\n super(GoogleOAuthAdminAuthenticationProvider, self).__init__(integration)\n self.redirect_uri = redirect_uri\n self.test_mode = test_mode\n if self.test_mode:\n self.dummy_client = DummyGoogleClient()\n\n @property\n def client(self):\n if self.test_mode:\n return self.dummy_client\n\n config = dict()\n config[\"auth_uri\"] = self.integration.url\n config[\"client_id\"] = self.integration.username\n config[\"client_secret\"] = self.integration.password\n config[\"redirect_uri\"] = self.redirect_uri\n config[\"scope\"] = \"https://www.googleapis.com/auth/userinfo.email\"\n return GoogleClient.OAuth2WebServerFlow(**config)\n\n @property\n def domains(self):\n domains = defaultdict(list)\n if self.integration:\n _db = Session.object_session(self.integration)\n for library in self.integration.libraries:\n setting = ConfigurationSetting.for_library_and_externalintegration(\n _db, self.DOMAINS, library, self.integration\n )\n if setting.json_value:\n for domain in setting.json_value:\n domains[domain.lower()].append(library)\n return domains\n\n def sign_in_template(self, redirect_url):\n return self.TEMPLATE % dict(auth_uri=self.auth_uri(redirect_url))\n\n def auth_uri(self, redirect_url):\n return self.client.step1_get_authorize_url(state=redirect_url)\n\n def callback(self, _db, request={}):\n \"\"\"Google OAuth sign-in flow\"\"\"\n\n # The Google OAuth client sometimes hits the callback with an error.\n # These will be returned as a problem detail.\n error = request.get(\"error\")\n if error:\n return self.google_error_problem_detail(error), None\n auth_code = request.get(\"code\")\n if auth_code:\n redirect_url = request.get(\"state\")\n try:\n credentials = self.client.step2_exchange(auth_code)\n except GoogleClient.FlowExchangeError as e:\n return self.google_error_problem_detail(str(e)), None\n email = credentials.id_token.get(\"email\")\n if not self.staff_email(_db, email):\n return INVALID_ADMIN_CREDENTIALS, None\n domain = email[email.index(\"@\") + 1 :].lower()\n roles = []\n for library in self.domains[domain]:\n roles.append(\n {\"role\": AdminRole.LIBRARIAN, \"library\": library.short_name}\n )\n return (\n dict(\n email=email,\n credentials=credentials.to_json(),\n type=self.NAME,\n roles=roles,\n ),\n redirect_url,\n )\n\n def google_error_problem_detail(self, error):\n error_detail = _(\"Error: %(error)s\", error=error)\n\n # ProblemDetail.detailed requires the detail to be an internationalized\n # string, so pass the combined string through _ as well even though the\n # components were translated already. 
Space is a variable so it doesn't\n # end up in the translation template.\n space = \" \"\n error_detail = _(str(GOOGLE_OAUTH_FAILURE.detail) + space + str(error_detail))\n\n return GOOGLE_OAUTH_FAILURE.detailed(error_detail)\n\n def active_credentials(self, admin):\n \"\"\"Check that existing credentials aren't expired\"\"\"\n\n if admin.credential:\n oauth_credentials = GoogleClient.OAuth2Credentials.from_json(\n admin.credential\n )\n return not oauth_credentials.access_token_expired\n return False\n\n def staff_email(self, _db, email):\n # If the admin already exists in the database, they can log in regardless of\n # whether their domain has been whitelisted for a library.\n admin = get_one(_db, Admin, email=email)\n if admin:\n return True\n\n # Otherwise, their email must match one of the configured domains.\n staff_domains = list(self.domains.keys())\n domain = email[email.index(\"@\") + 1 :]\n return domain.lower() in [\n staff_domain.lower() for staff_domain in staff_domains\n ]\n\n\nclass DummyGoogleClient(object):\n \"\"\"Mock Google OAuth client for testing\"\"\"\n\n expired = False\n\n class Credentials(object):\n \"\"\"Mock OAuth2Credentials object for testing\"\"\"\n\n access_token_expired = False\n\n def __init__(self, email):\n domain = email[email.index(\"@\") + 1 :]\n self.id_token = {\"hd\": domain, \"email\": email}\n\n def to_json(self):\n return json.dumps(dict(id_token=self.id_token))\n\n def from_json(self, credentials):\n return self\n\n def __init__(self, email=\"<EMAIL>\"):\n self.credentials = self.Credentials(email=email)\n self.OAuth2Credentials = self.credentials\n\n def flow_from_client_secrets(self, config, scope=None, redirect_uri=None):\n return self\n\n def step2_exchange(self, auth_code):\n return self.credentials\n\n def step1_get_authorize_url(self, state):\n return \"GOOGLE REDIRECT\"\n", "id": "7205530", "language": "Python", "matching_score": 5.38858699798584, "max_stars_count": 0, "path": "api/admin/google_oauth_admin_authentication_provider.py" }, { "content": "import json\n\nfrom oauth2client import client as GoogleClient\n\nfrom api.admin.google_oauth_admin_authentication_provider import (\n DummyGoogleClient,\n GoogleOAuthAdminAuthenticationProvider,\n)\nfrom api.admin.problem_details import INVALID_ADMIN_CREDENTIALS\nfrom core.model import (\n Admin,\n AdminRole,\n ConfigurationSetting,\n ExternalIntegration,\n create,\n)\nfrom core.testing import DatabaseTest\nfrom core.util.problem_detail import ProblemDetail\n\n\nclass TestGoogleOAuthAdminAuthenticationProvider(DatabaseTest):\n def test_callback(self):\n super(TestGoogleOAuthAdminAuthenticationProvider, self).setup_method()\n auth_integration, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.GOOGLE_OAUTH,\n goal=ExternalIntegration.ADMIN_AUTH_GOAL,\n )\n self.google = GoogleOAuthAdminAuthenticationProvider(\n auth_integration, \"\", test_mode=True\n )\n auth_integration.libraries += [self._default_library]\n ConfigurationSetting.for_library_and_externalintegration(\n self._db, \"domains\", self._default_library, auth_integration\n ).value = json.dumps([\"nypl.org\"])\n\n # Returns a problem detail when Google returns an error.\n error_response, redirect = self.google.callback(\n self._db, {\"error\": \"access_denied\"}\n )\n assert True == isinstance(error_response, ProblemDetail)\n assert 400 == error_response.status_code\n assert True == error_response.detail.endswith(\"access_denied\")\n assert None == redirect\n\n # Successful case creates a dict of 
admin details\n success, redirect = self.google.callback(self._db, {\"code\": \"abc\"})\n assert \"<EMAIL>\" == success[\"email\"]\n default_credentials = json.dumps(\n {\"id_token\": {\"hd\": \"nypl.org\", \"email\": \"<EMAIL>\"}}\n )\n assert default_credentials == success[\"credentials\"]\n assert GoogleOAuthAdminAuthenticationProvider.NAME == success[\"type\"]\n [role] = success.get(\"roles\")\n assert AdminRole.LIBRARIAN == role.get(\"role\")\n assert self._default_library.short_name == role.get(\"library\")\n\n # If domains are set, the admin's domain must match one of the domains.\n setting = ConfigurationSetting.for_library_and_externalintegration(\n self._db, \"domains\", self._default_library, auth_integration\n )\n setting.value = json.dumps([\"otherlibrary.org\"])\n failure, ignore = self.google.callback(self._db, {\"code\": \"abc\"})\n assert INVALID_ADMIN_CREDENTIALS == failure\n setting.value = json.dumps([\"nypl.org\"])\n\n # Returns a problem detail when the oauth client library\n # raises an exception.\n class ExceptionRaisingClient(DummyGoogleClient):\n def step2_exchange(self, auth_code):\n raise GoogleClient.FlowExchangeError(\"mock error\")\n\n self.google.dummy_client = ExceptionRaisingClient()\n error_response, redirect = self.google.callback(self._db, {\"code\": \"abc\"})\n assert True == isinstance(error_response, ProblemDetail)\n assert 400 == error_response.status_code\n assert True == error_response.detail.endswith(\"mock error\")\n assert None == redirect\n\n def test_domains(self):\n super(TestGoogleOAuthAdminAuthenticationProvider, self).setup_method()\n auth_integration, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.GOOGLE_OAUTH,\n goal=ExternalIntegration.ADMIN_AUTH_GOAL,\n )\n auth_integration.libraries += [self._default_library]\n ConfigurationSetting.for_library_and_externalintegration(\n self._db, \"domains\", self._default_library, auth_integration\n ).value = json.dumps([\"nypl.org\"])\n\n google = GoogleOAuthAdminAuthenticationProvider(\n auth_integration, \"\", test_mode=True\n )\n\n assert [\"nypl.org\"] == list(google.domains.keys())\n assert [self._default_library] == google.domains[\"nypl.org\"]\n\n l2 = self._library()\n auth_integration.libraries += [l2]\n ConfigurationSetting.for_library_and_externalintegration(\n self._db, \"domains\", l2, auth_integration\n ).value = json.dumps([\"nypl.org\", \"l2.org\"])\n\n assert set([self._default_library, l2]) == set(google.domains[\"nypl.org\"])\n assert [l2] == google.domains[\"l2.org\"]\n\n def test_staff_email(self):\n super(TestGoogleOAuthAdminAuthenticationProvider, self).setup_method()\n auth_integration, ignore = create(\n self._db,\n ExternalIntegration,\n protocol=ExternalIntegration.GOOGLE_OAUTH,\n goal=ExternalIntegration.ADMIN_AUTH_GOAL,\n )\n\n nypl_admin = create(self._db, Admin, email=\"<EMAIL>\")\n bpl_admin = create(self._db, Admin, email=\"<EMAIL>\")\n\n # If no domains are set, the admin must already exist in the db\n # to be considered library staff.\n google = GoogleOAuthAdminAuthenticationProvider(\n auth_integration, \"\", test_mode=True\n )\n\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert False == google.staff_email(self._db, \"<EMAIL>\")\n\n # If domains are set, the admin's domain can match one of the domains\n # if the admin doesn't exist yet.\n auth_integration.libraries += [self._default_library]\n setting = 
ConfigurationSetting.for_library_and_externalintegration(\n self._db, \"domains\", self._default_library, auth_integration\n )\n setting.value = json.dumps([\"nypl.org\"])\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert False == google.staff_email(self._db, \"<EMAIL>\")\n\n setting.value = json.dumps([\"nypl.org\", \"bklynlibrary.org\"])\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n assert True == google.staff_email(self._db, \"<EMAIL>\")\n", "id": "1033539", "language": "Python", "matching_score": 2.269444704055786, "max_stars_count": 0, "path": "tests/api/admin/test_google_oauth_admin_authentication_provider.py" }, { "content": "import json\n\nimport flask\nimport pytest\nfrom werkzeug.datastructures import MultiDict\n\nfrom api.admin.exceptions import *\nfrom api.admin.problem_details import *\nfrom core.model import Admin, AdminRole, create, get_one\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestIndividualAdmins(SettingsControllerTest):\n def test_individual_admins_get(self):\n for admin in self._db.query(Admin):\n self._db.delete(admin)\n\n # There are two admins that can sign in with passwords, with different roles.\n admin1, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin1.password = \"<PASSWORD>\"\n admin1.add_role(AdminRole.SYSTEM_ADMIN)\n admin2, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin2.password = \"<PASSWORD>\"\n admin2.add_role(AdminRole.LIBRARY_MANAGER, self._default_library)\n admin2.add_role(AdminRole.SITEWIDE_LIBRARIAN)\n\n # These admins don't have passwords.\n admin3, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin3.add_role(AdminRole.LIBRARIAN, self._default_library)\n library2 = self._library()\n admin4, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin4.add_role(AdminRole.LIBRARY_MANAGER, library2)\n admin5, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n admin5.add_role(AdminRole.LIBRARIAN, library2)\n\n with self.request_context_with_admin(\"/\", admin=admin1):\n # A system admin can see all other admins' roles.\n response = (\n self.manager.admin_individual_admin_settings_controller.process_get()\n )\n admins = response.get(\"individualAdmins\")\n assert (\n sorted(\n [\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SYSTEM_ADMIN}],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n },\n {\"role\": AdminRole.SITEWIDE_LIBRARIAN},\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": self._default_library.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": library2.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": library2.short_name,\n }\n ],\n },\n ],\n key=lambda x: x[\"email\"],\n )\n == sorted(admins, key=lambda x: x[\"email\"])\n )\n\n with self.request_context_with_admin(\"/\", admin=admin2):\n # A sitewide librarian or library manager can also see all admins' roles.\n response = (\n self.manager.admin_individual_admin_settings_controller.process_get()\n )\n admins = response.get(\"individualAdmins\")\n 
assert (\n sorted(\n [\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SYSTEM_ADMIN}],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n },\n {\"role\": AdminRole.SITEWIDE_LIBRARIAN},\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": self._default_library.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": library2.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": library2.short_name,\n }\n ],\n },\n ],\n key=lambda x: x[\"email\"],\n )\n == sorted(admins, key=lambda x: x[\"email\"])\n )\n\n with self.request_context_with_admin(\"/\", admin=admin3):\n # A librarian or library manager of a specific library can see all admins, but only\n # roles that affect their libraries.\n response = (\n self.manager.admin_individual_admin_settings_controller.process_get()\n )\n admins = response.get(\"individualAdmins\")\n assert (\n sorted(\n [\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SYSTEM_ADMIN}],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n },\n {\"role\": AdminRole.SITEWIDE_LIBRARIAN},\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": self._default_library.short_name,\n }\n ],\n },\n {\"email\": \"<EMAIL>\", \"roles\": []},\n {\"email\": \"<EMAIL>\", \"roles\": []},\n ],\n key=lambda x: x[\"email\"],\n )\n == sorted(admins, key=lambda x: x[\"email\"])\n )\n\n with self.request_context_with_admin(\"/\", admin=admin4):\n response = (\n self.manager.admin_individual_admin_settings_controller.process_get()\n )\n admins = response.get(\"individualAdmins\")\n assert (\n sorted(\n [\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SYSTEM_ADMIN}],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SITEWIDE_LIBRARIAN}],\n },\n {\"email\": \"<EMAIL>\", \"roles\": []},\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": library2.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": library2.short_name,\n }\n ],\n },\n ],\n key=lambda x: x[\"email\"],\n )\n == sorted(admins, key=lambda x: x[\"email\"])\n )\n\n with self.request_context_with_admin(\"/\", admin=admin5):\n response = (\n self.manager.admin_individual_admin_settings_controller.process_get()\n )\n admins = response.get(\"individualAdmins\")\n assert (\n sorted(\n [\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SYSTEM_ADMIN}],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [{\"role\": AdminRole.SITEWIDE_LIBRARIAN}],\n },\n {\"email\": \"<EMAIL>\", \"roles\": []},\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": library2.short_name,\n }\n ],\n },\n {\n \"email\": \"<EMAIL>\",\n \"roles\": [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": library2.short_name,\n }\n ],\n },\n ],\n key=lambda x: x[\"email\"],\n )\n == sorted(admins, key=lambda x: x[\"email\"])\n )\n\n def test_individual_admins_post_errors(self):\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict([])\n response = (\n 
self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.uri == INCOMPLETE_CONFIGURATION.uri\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\n \"roles\",\n json.dumps(\n [{\"role\": AdminRole.LIBRARIAN, \"library\": \"notalibrary\"}]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.uri == LIBRARY_NOT_FOUND.uri\n\n library = self._library()\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\n \"roles\",\n json.dumps(\n [{\"role\": \"notarole\", \"library\": library.short_name}]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.uri == UNKNOWN_ROLE.uri\n\n def test_individual_admins_post_permissions(self):\n l1 = self._library()\n l2 = self._library()\n system, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n system.add_role(AdminRole.SYSTEM_ADMIN)\n sitewide_manager, ignore = create(\n self._db, Admin, email=\"<EMAIL>\"\n )\n sitewide_manager.add_role(AdminRole.SITEWIDE_LIBRARY_MANAGER)\n sitewide_librarian, ignore = create(\n self._db, Admin, email=\"<EMAIL>\"\n )\n sitewide_librarian.add_role(AdminRole.SITEWIDE_LIBRARIAN)\n manager1, ignore = create(\n self._db, Admin, email=\"<EMAIL>\"\n )\n manager1.add_role(AdminRole.LIBRARY_MANAGER, l1)\n librarian1, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n librarian1.add_role(AdminRole.LIBRARIAN, l1)\n l2 = self._library()\n manager2, ignore = create(\n self._db, Admin, email=\"<EMAIL>\"\n )\n manager2.add_role(AdminRole.LIBRARY_MANAGER, l2)\n librarian2, ignore = create(self._db, Admin, email=\"<EMAIL>\")\n librarian2.add_role(AdminRole.LIBRARIAN, l2)\n\n def test_changing_roles(\n admin_making_request, target_admin, roles=None, allowed=False\n ):\n with self.request_context_with_admin(\n \"/\", method=\"POST\", admin=admin_making_request\n ):\n flask.request.form = MultiDict(\n [\n (\"email\", target_admin.email),\n (\"roles\", json.dumps(roles or [])),\n ]\n )\n if allowed:\n self.manager.admin_individual_admin_settings_controller.process_post()\n self._db.rollback()\n else:\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_individual_admin_settings_controller.process_post,\n )\n\n # Various types of user trying to change a system admin's roles\n test_changing_roles(system, system, allowed=True)\n test_changing_roles(sitewide_manager, system)\n test_changing_roles(sitewide_librarian, system)\n test_changing_roles(manager1, system)\n test_changing_roles(librarian1, system)\n test_changing_roles(manager2, system)\n test_changing_roles(librarian2, system)\n\n # Various types of user trying to change a sitewide manager's roles\n test_changing_roles(system, sitewide_manager, allowed=True)\n test_changing_roles(sitewide_manager, sitewide_manager, allowed=True)\n test_changing_roles(sitewide_librarian, sitewide_manager)\n test_changing_roles(manager1, sitewide_manager)\n test_changing_roles(librarian1, sitewide_manager)\n test_changing_roles(manager2, sitewide_manager)\n test_changing_roles(librarian2, sitewide_manager)\n\n # Various types of user trying to change a sitewide librarian's roles\n test_changing_roles(system, sitewide_librarian, allowed=True)\n test_changing_roles(sitewide_manager, sitewide_librarian, allowed=True)\n test_changing_roles(sitewide_librarian, 
sitewide_librarian)\n test_changing_roles(manager1, sitewide_librarian)\n test_changing_roles(librarian1, sitewide_librarian)\n test_changing_roles(manager2, sitewide_librarian)\n test_changing_roles(librarian2, sitewide_librarian)\n\n test_changing_roles(manager1, manager1, allowed=True)\n test_changing_roles(\n manager1,\n sitewide_librarian,\n roles=[\n {\"role\": AdminRole.SITEWIDE_LIBRARIAN},\n {\"role\": AdminRole.LIBRARY_MANAGER, \"library\": l1.short_name},\n ],\n allowed=True,\n )\n test_changing_roles(manager1, librarian1, allowed=True)\n test_changing_roles(\n manager2,\n librarian2,\n roles=[{\"role\": AdminRole.LIBRARIAN, \"library\": l1.short_name}],\n )\n test_changing_roles(\n manager2,\n librarian1,\n roles=[{\"role\": AdminRole.LIBRARY_MANAGER, \"library\": l1.short_name}],\n )\n\n test_changing_roles(sitewide_librarian, librarian1)\n\n test_changing_roles(\n sitewide_manager, sitewide_manager, roles=[{\"role\": AdminRole.SYSTEM_ADMIN}]\n )\n test_changing_roles(\n sitewide_librarian,\n manager1,\n roles=[{\"role\": AdminRole.SITEWIDE_LIBRARY_MANAGER}],\n )\n\n def test_changing_password(admin_making_request, target_admin, allowed=False):\n with self.request_context_with_admin(\n \"/\", method=\"POST\", admin=admin_making_request\n ):\n flask.request.form = MultiDict(\n [\n (\"email\", target_admin.email),\n (\"password\", \"<PASSWORD>\"),\n (\n \"roles\",\n json.dumps([role.to_dict() for role in target_admin.roles]),\n ),\n ]\n )\n if allowed:\n self.manager.admin_individual_admin_settings_controller.process_post()\n self._db.rollback()\n else:\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_individual_admin_settings_controller.process_post,\n )\n\n # Various types of user trying to change a system admin's password\n test_changing_password(system, system, allowed=True)\n test_changing_password(sitewide_manager, system)\n test_changing_password(sitewide_librarian, system)\n test_changing_password(manager1, system)\n test_changing_password(librarian1, system)\n test_changing_password(manager2, system)\n test_changing_password(librarian2, system)\n\n # Various types of user trying to change a sitewide manager's password\n test_changing_password(system, sitewide_manager, allowed=True)\n test_changing_password(sitewide_manager, sitewide_manager, allowed=True)\n test_changing_password(sitewide_librarian, sitewide_manager)\n test_changing_password(manager1, sitewide_manager)\n test_changing_password(librarian1, sitewide_manager)\n test_changing_password(manager2, sitewide_manager)\n test_changing_password(librarian2, sitewide_manager)\n\n # Various types of user trying to change a sitewide librarian's password\n test_changing_password(system, sitewide_librarian, allowed=True)\n test_changing_password(sitewide_manager, sitewide_librarian, allowed=True)\n test_changing_password(manager1, sitewide_librarian, allowed=True)\n test_changing_password(manager2, sitewide_librarian, allowed=True)\n test_changing_password(sitewide_librarian, sitewide_librarian)\n test_changing_password(librarian1, sitewide_librarian)\n test_changing_password(librarian2, sitewide_librarian)\n\n # Various types of user trying to change a manager's password\n # Manager 1\n test_changing_password(system, manager1, allowed=True)\n test_changing_password(sitewide_manager, manager1, allowed=True)\n test_changing_password(manager1, manager1, allowed=True)\n test_changing_password(sitewide_librarian, manager1)\n test_changing_password(manager2, manager1)\n test_changing_password(librarian2, manager1)\n # Manager 2\n 
test_changing_password(system, manager2, allowed=True)\n test_changing_password(sitewide_manager, manager2, allowed=True)\n test_changing_password(manager2, manager2, allowed=True)\n test_changing_password(sitewide_librarian, manager2)\n test_changing_password(manager1, manager2)\n test_changing_password(librarian1, manager2)\n\n # Various types of user trying to change a librarian's password\n # Librarian 1\n test_changing_password(system, librarian1, allowed=True)\n test_changing_password(sitewide_manager, librarian1, allowed=True)\n test_changing_password(manager1, librarian1, allowed=True)\n test_changing_password(sitewide_librarian, librarian1)\n test_changing_password(manager2, librarian1)\n test_changing_password(librarian2, librarian1)\n # Librarian 2\n test_changing_password(system, librarian2, allowed=True)\n test_changing_password(sitewide_manager, librarian2, allowed=True)\n test_changing_password(manager2, librarian2, allowed=True)\n test_changing_password(sitewide_librarian, librarian2)\n test_changing_password(manager1, librarian2)\n test_changing_password(librarian1, librarian2)\n\n def test_individual_admins_post_create(self):\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"password\", \"<PASSWORD>\"),\n (\n \"roles\",\n json.dumps(\n [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n }\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.status_code == 201\n\n # The admin was created.\n admin_match = Admin.authenticate(self._db, \"<EMAIL>\", \"<PASSWORD>\")\n assert admin_match.email == response.get_data(as_text=True)\n assert admin_match\n assert admin_match.has_password(\"<PASSWORD>\")\n\n [role] = admin_match.roles\n assert AdminRole.LIBRARY_MANAGER == role.role\n assert self._default_library == role.library\n\n # The new admin is a library manager, so they can create librarians.\n with self.request_context_with_admin(\"/\", method=\"POST\", admin=admin_match):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"password\", \"<PASSWORD>\"),\n (\n \"roles\",\n json.dumps(\n [\n {\n \"role\": AdminRole.LIBRARIAN,\n \"library\": self._default_library.short_name,\n }\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.status_code == 201\n\n admin_match = Admin.authenticate(self._db, \"<EMAIL>\", \"<PASSWORD>\")\n assert admin_match.email == response.get_data(as_text=True)\n assert admin_match\n assert admin_match.has_password(\"<PASSWORD>\")\n\n [role] = admin_match.roles\n assert AdminRole.LIBRARIAN == role.role\n assert self._default_library == role.library\n\n def test_individual_admins_post_edit(self):\n # An admin exists.\n admin, ignore = create(\n self._db,\n Admin,\n email=\"<EMAIL>\",\n )\n admin.password = \"password\"\n admin.add_role(AdminRole.SYSTEM_ADMIN)\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"password\", \"<PASSWORD>\"),\n (\n \"roles\",\n json.dumps(\n [\n {\"role\": AdminRole.SITEWIDE_LIBRARIAN},\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n },\n ]\n ),\n ),\n ]\n )\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert response.status_code == 200\n\n assert 
admin.email == response.get_data(as_text=True)\n\n # The password was changed.\n old_password_match = Admin.authenticate(self._db, \"<EMAIL>\", \"password\")\n assert None == old_password_match\n\n new_password_match = Admin.authenticate(\n self._db, \"<EMAIL>\", \"<PASSWORD>\"\n )\n assert admin == new_password_match\n\n # The roles were changed.\n assert False == admin.is_system_admin()\n [librarian_all, manager] = sorted(admin.roles, key=lambda x: x.role)\n assert AdminRole.SITEWIDE_LIBRARIAN == librarian_all.role\n assert None == librarian_all.library\n assert AdminRole.LIBRARY_MANAGER == manager.role\n assert self._default_library == manager.library\n\n def test_individual_admin_delete(self):\n librarian, ignore = create(self._db, Admin, email=self._str)\n librarian.password = \"password\"\n librarian.add_role(AdminRole.LIBRARIAN, self._default_library)\n\n sitewide_manager, ignore = create(self._db, Admin, email=self._str)\n sitewide_manager.add_role(AdminRole.SITEWIDE_LIBRARY_MANAGER)\n\n system_admin, ignore = create(self._db, Admin, email=self._str)\n system_admin.add_role(AdminRole.SYSTEM_ADMIN)\n\n with self.request_context_with_admin(\"/\", method=\"DELETE\", admin=librarian):\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_individual_admin_settings_controller.process_delete,\n librarian.email,\n )\n\n with self.request_context_with_admin(\n \"/\", method=\"DELETE\", admin=sitewide_manager\n ):\n response = (\n self.manager.admin_individual_admin_settings_controller.process_delete(\n librarian.email\n )\n )\n assert response.status_code == 200\n\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_individual_admin_settings_controller.process_delete,\n system_admin.email,\n )\n\n with self.request_context_with_admin(\"/\", method=\"DELETE\", admin=system_admin):\n response = (\n self.manager.admin_individual_admin_settings_controller.process_delete(\n system_admin.email\n )\n )\n assert response.status_code == 200\n\n admin = get_one(self._db, Admin, id=librarian.id)\n assert None == admin\n\n admin = get_one(self._db, Admin, id=system_admin.id)\n assert None == admin\n\n def test_individual_admins_post_create_on_setup(self):\n for admin in self._db.query(Admin):\n self._db.delete(admin)\n\n # Creating an admin that's not a system admin will fail.\n with self.app.test_request_context(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"password\", \"<PASSWORD>\"),\n (\n \"roles\",\n json.dumps(\n [\n {\n \"role\": AdminRole.LIBRARY_MANAGER,\n \"library\": self._default_library.short_name,\n }\n ]\n ),\n ),\n ]\n )\n flask.request.files = {}\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_individual_admin_settings_controller.process_post,\n )\n self._db.rollback()\n\n # The password is required.\n with self.app.test_request_context(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"roles\", json.dumps([{\"role\": AdminRole.SYSTEM_ADMIN}])),\n ]\n )\n flask.request.files = {}\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert 400 == response.status_code\n assert response.uri == INCOMPLETE_CONFIGURATION.uri\n\n # Creating a system admin with a password works.\n with self.app.test_request_context(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"email\", \"<EMAIL>\"),\n (\"password\", \"<PASSWORD>\"),\n (\"roles\", json.dumps([{\"role\": AdminRole.SYSTEM_ADMIN}])),\n ]\n )\n flask.request.files = 
{}\n response = (\n self.manager.admin_individual_admin_settings_controller.process_post()\n )\n assert 201 == response.status_code\n\n # The admin was created.\n admin_match = Admin.authenticate(self._db, \"<EMAIL>\", \"<PASSWORD>\")\n assert admin_match.email == response.get_data(as_text=True)\n assert admin_match\n assert admin_match.has_password(\"<PASSWORD>\")\n\n [role] = admin_match.roles\n assert AdminRole.SYSTEM_ADMIN == role.role\n", "id": "2717430", "language": "Python", "matching_score": 4.339568614959717, "max_stars_count": 0, "path": "tests/api/admin/controller/test_individual_admins.py" }, { "content": "import flask\nimport pytest\nfrom werkzeug.datastructures import MultiDict\n\nfrom api.admin.exceptions import *\nfrom api.config import Configuration\nfrom core.model import AdminRole, ConfigurationSetting\n\nfrom .test_controller import SettingsControllerTest\n\n\nclass TestSitewideSettings(SettingsControllerTest):\n def test_sitewide_settings_get(self):\n with self.request_context_with_admin(\"/\"):\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_get()\n )\n settings = response.get(\"settings\")\n all_settings = response.get(\"all_settings\")\n\n assert [] == settings\n keys = [s.get(\"key\") for s in all_settings]\n assert Configuration.LOG_LEVEL in keys\n assert Configuration.DATABASE_LOG_LEVEL in keys\n assert Configuration.SECRET_KEY in keys\n\n ConfigurationSetting.sitewide(\n self._db, Configuration.DATABASE_LOG_LEVEL\n ).value = \"INFO\"\n ConfigurationSetting.sitewide(\n self._db, Configuration.SECRET_KEY\n ).value = \"secret\"\n self._db.flush()\n\n with self.request_context_with_admin(\"/\"):\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_get()\n )\n settings = response.get(\"settings\")\n all_settings = response.get(\"all_settings\")\n\n assert 2 == len(settings)\n settings_by_key = {s.get(\"key\"): s.get(\"value\") for s in settings}\n assert \"INFO\" == settings_by_key.get(Configuration.DATABASE_LOG_LEVEL)\n assert \"secret\" == settings_by_key.get(Configuration.SECRET_KEY)\n keys = [s.get(\"key\") for s in all_settings]\n assert Configuration.LOG_LEVEL in keys\n assert Configuration.DATABASE_LOG_LEVEL in keys\n assert Configuration.SECRET_KEY in keys\n\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n self._db.flush()\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_sitewide_configuration_settings_controller.process_get,\n )\n\n def test_sitewide_settings_post_errors(self):\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict([(\"key\", None)])\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_post()\n )\n assert response == MISSING_SITEWIDE_SETTING_KEY\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [(\"key\", Configuration.SECRET_KEY), (\"value\", None)]\n )\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_post()\n )\n assert response == MISSING_SITEWIDE_SETTING_VALUE\n\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"key\", Configuration.SECRET_KEY),\n (\"value\", \"secret\"),\n ]\n )\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_sitewide_configuration_settings_controller.process_post,\n )\n\n def test_sitewide_settings_post_create(self):\n with 
self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"key\", Configuration.DATABASE_LOG_LEVEL),\n (\"value\", \"10\"),\n ]\n )\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_post()\n )\n assert response.status_code == 200\n\n # The setting was created.\n setting = ConfigurationSetting.sitewide(\n self._db, Configuration.DATABASE_LOG_LEVEL\n )\n assert setting.key == response.get_data(as_text=True)\n assert \"10\" == setting.value\n\n def test_sitewide_settings_post_edit(self):\n setting = ConfigurationSetting.sitewide(\n self._db, Configuration.DATABASE_LOG_LEVEL\n )\n setting.value = \"WARN\"\n\n with self.request_context_with_admin(\"/\", method=\"POST\"):\n flask.request.form = MultiDict(\n [\n (\"key\", Configuration.DATABASE_LOG_LEVEL),\n (\"value\", \"ERROR\"),\n ]\n )\n response = (\n self.manager.admin_sitewide_configuration_settings_controller.process_post()\n )\n assert response.status_code == 200\n\n # The setting was changed.\n assert setting.key == response.get_data(as_text=True)\n assert \"ERROR\" == setting.value\n\n def test_sitewide_setting_delete(self):\n setting = ConfigurationSetting.sitewide(\n self._db, Configuration.DATABASE_LOG_LEVEL\n )\n setting.value = \"WARN\"\n\n with self.request_context_with_admin(\"/\", method=\"DELETE\"):\n self.admin.remove_role(AdminRole.SYSTEM_ADMIN)\n pytest.raises(\n AdminNotAuthorized,\n self.manager.admin_sitewide_configuration_settings_controller.process_delete,\n setting.key,\n )\n\n self.admin.add_role(AdminRole.SYSTEM_ADMIN)\n response = self.manager.admin_sitewide_configuration_settings_controller.process_delete(\n setting.key\n )\n assert response.status_code == 200\n\n assert None == setting.value\n", "id": "8071400", "language": "Python", "matching_score": 0.7217866778373718, "max_stars_count": 0, "path": "tests/api/admin/controller/test_sitewide_settings.py" }, { "content": "import os\nfrom typing import Optional\n\nimport pytest\n\nfrom api.admin.config import Configuration as AdminConfig\nfrom api.admin.config import OperationalMode\n\n\nclass TestAdminUI(object):\n @staticmethod\n def _set_env(monkeypatch, key: str, value: Optional[str]):\n if value:\n monkeypatch.setenv(key, value)\n elif key in os.environ:\n monkeypatch.delenv(key)\n\n @pytest.mark.parametrize(\n \"package_name, package_version, mode, expected_result_startswith\",\n [\n [\n None,\n None,\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/@thepalaceproject/circulation-admin@\",\n ],\n [\n \"@some-scope/some-package\",\n \"1.0.0\",\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/@some-scope/some-package@1.0.0\",\n ],\n [\n \"some-package\",\n \"1.0.0\",\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/some-package@1.0.0\",\n ],\n [None, None, OperationalMode.development, \"/\"],\n [None, \"1.0.0\", OperationalMode.development, \"/\"],\n [\"some-package\", \"1.0.0\", OperationalMode.development, \"/\"],\n ],\n )\n def test_package_url(\n self,\n monkeypatch,\n package_name: Optional[str],\n package_version: Optional[str],\n mode: OperationalMode,\n expected_result_startswith: str,\n ):\n self._set_env(monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_NAME\", package_name)\n self._set_env(\n monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_VERSION\", package_version\n )\n result = AdminConfig.package_url(_operational_mode=mode)\n assert result.startswith(expected_result_startswith)\n\n @pytest.mark.parametrize(\n \"package_name, 
package_version, expected_result\",\n [\n [\n None,\n None,\n \"/my-base-dir/node_modules/@thepalaceproject/circulation-admin\",\n ],\n [\n None,\n \"1.0.0\",\n \"/my-base-dir/node_modules/@thepalaceproject/circulation-admin\",\n ],\n [\"some-package\", \"1.0.0\", \"/my-base-dir/node_modules/some-package\"],\n ],\n )\n def test_package_development_directory(\n self,\n monkeypatch,\n package_name: Optional[str],\n package_version: Optional[str],\n expected_result: str,\n ):\n self._set_env(monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_NAME\", package_name)\n self._set_env(\n monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_VERSION\", package_version\n )\n result = AdminConfig.package_development_directory(_base_dir=\"/my-base-dir\")\n assert result == expected_result\n\n @pytest.mark.parametrize(\n \"asset_key, operational_mode, expected_result\",\n [\n [\n \"admin_css\",\n OperationalMode.development,\n \"/admin/static/circulation-admin.css\",\n ],\n [\n \"admin_css\",\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/known-package-name@1.0.0/dist/circulation-admin.css\",\n ],\n [\n \"admin_js\",\n OperationalMode.development,\n \"/admin/static/circulation-admin.js\",\n ],\n [\n \"admin_js\",\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/known-package-name@1.0.0/dist/circulation-admin.js\",\n ],\n [\n \"another-asset.jpg\",\n OperationalMode.development,\n \"/admin/static/another-asset.jpg\",\n ],\n [\n \"another-asset.jpg\",\n OperationalMode.production,\n \"https://cdn.jsdelivr.net/npm/known-package-name@1.0.0/dist/another-asset.jpg\",\n ],\n ],\n )\n def test_lookup_asset_url(\n self,\n monkeypatch,\n asset_key: str,\n operational_mode: OperationalMode,\n expected_result: str,\n ):\n self._set_env(\n monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_NAME\", \"known-package-name\"\n )\n self._set_env(monkeypatch, \"TPP_CIRCULATION_ADMIN_PACKAGE_VERSION\", \"1.0.0\")\n result = AdminConfig.lookup_asset_url(\n key=asset_key, _operational_mode=operational_mode\n )\n assert result == expected_result\n", "id": "967836", "language": "Python", "matching_score": 4.571423053741455, "max_stars_count": 0, "path": "tests/api/admin/test_config.py" }, { "content": "import os\nfrom enum import Enum\nfrom urllib.parse import urljoin\n\n\nclass OperationalMode(str, Enum):\n production = \"production\"\n development = \"development\"\n\n\nclass Configuration:\n\n APP_NAME = \"Palace Collection Manager\"\n PACKAGE_NAME = \"@thepalaceproject/circulation-admin\"\n PACKAGE_VERSION = \"0.0.7\"\n\n STATIC_ASSETS = {\n \"admin_js\": \"circulation-admin.js\",\n \"admin_css\": \"circulation-admin.css\",\n \"admin_logo\": \"PalaceCollectionManagerLogo.svg\",\n }\n\n # For proper operation, `package_url` MUST end with a slash ('/') and\n # `asset_rel_url` MUST NOT begin with one.\n PACKAGE_TEMPLATES = {\n OperationalMode.production: {\n \"package_url\": \"https://cdn.jsdelivr.net/npm/{name}@{version}/\",\n \"asset_rel_url\": \"dist/{filename}\",\n },\n OperationalMode.development: {\n \"package_url\": \"/admin/\",\n \"asset_rel_url\": \"static/{filename}\",\n },\n }\n\n DEVELOPMENT_MODE_PACKAGE_TEMPLATE = \"node_modules/{name}\"\n STATIC_ASSETS_REL_PATH = \"dist\"\n\n ADMIN_DIRECTORY = os.path.abspath(os.path.dirname(__file__))\n\n # Environment variables that contain admin client package information.\n ENV_ADMIN_UI_PACKAGE_NAME = \"TPP_CIRCULATION_ADMIN_PACKAGE_NAME\"\n ENV_ADMIN_UI_PACKAGE_VERSION = \"TPP_CIRCULATION_ADMIN_PACKAGE_VERSION\"\n\n @classmethod\n def operational_mode(cls) -> 
OperationalMode:\n return (\n OperationalMode.development\n if os.path.isdir(cls.package_development_directory())\n else OperationalMode.production\n )\n\n @classmethod\n def _package_name(cls) -> str:\n \"\"\"Get the effective package name.\n\n :return: A package name.\n :rtype: str\n \"\"\"\n return os.environ.get(cls.ENV_ADMIN_UI_PACKAGE_NAME) or cls.PACKAGE_NAME\n\n @classmethod\n def lookup_asset_url(\n cls, key: str, *, _operational_mode: OperationalMode = None\n ) -> str:\n \"\"\"Get the URL for the asset_type.\n\n :param key: The key used to lookup an asset's filename. If the key is\n not found in the asset list, then the key itself is used as the asset.\n :type key: str\n :param _operational_mode: Provided for testing purposes. The operational\n mode is normally determined by local state\n :type _operational_mode: OperationalMode\n :return: A URL string.\n :rtype: str\n \"\"\"\n operational_mode = _operational_mode or cls.operational_mode()\n filename = cls.STATIC_ASSETS.get(key, key)\n return urljoin(\n cls.package_url(_operational_mode=operational_mode),\n cls.PACKAGE_TEMPLATES[operational_mode][\"asset_rel_url\"].format(\n filename=filename\n ),\n )\n\n @classmethod\n def package_url(cls, *, _operational_mode: OperationalMode = None) -> str:\n \"\"\"Compute the URL for the admin UI package.\n\n :param _operational_mode: For testing. The operational mode is\n normally determined by local state.\n :type _operational_mode: OperationalMode\n :return: String representation of the URL/path for either the asset\n of the given type or, if no type is specified, the base path\n of the package.\n :rtype: str\n \"\"\"\n operational_mode = _operational_mode or cls.operational_mode()\n version = (\n os.environ.get(cls.ENV_ADMIN_UI_PACKAGE_VERSION) or cls.PACKAGE_VERSION\n )\n template = cls.PACKAGE_TEMPLATES[operational_mode][\"package_url\"]\n url = template.format(name=cls._package_name(), version=version)\n if not url.endswith(\"/\"):\n url += \"/\"\n return url\n\n @classmethod\n def package_development_directory(cls, *, _base_dir: str = None) -> str:\n \"\"\"Absolute path for the admin UI package when in development mode.\n\n :param _base_dir: For testing purposes. Not used in normal operation.\n :type _base_dir: str\n :returns: String containing absolute path to the admin UI package.\n :rtype: str\n \"\"\"\n base_dir = _base_dir or cls.ADMIN_DIRECTORY\n return os.path.join(\n base_dir,\n cls.DEVELOPMENT_MODE_PACKAGE_TEMPLATE.format(name=cls._package_name()),\n )\n\n @classmethod\n def static_files_directory(cls, *, _base_dir: str = None) -> str:\n \"\"\"Absolute path for the admin UI static files.\n\n :param _base_dir: For testing purposes. 
Not used in normal operation.\n :type _base_dir: str\n :returns: String containing absolute path to the admin UI package.\n :rtype: str\n \"\"\"\n package_dir = cls.package_development_directory(_base_dir=_base_dir)\n return os.path.join(package_dir, cls.STATIC_ASSETS_REL_PATH)\n", "id": "2707510", "language": "Python", "matching_score": 1.3905326128005981, "max_stars_count": 0, "path": "api/admin/config.py" }, { "content": "admin = \"\"\"\n<!doctype html>\n<html>\n<head>\n<title>{{ app_name }}</title>\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n<link href=\"{{ admin_css }}\" rel=\"stylesheet\" />\n</head>\n<body>\n <script src=\"{{ admin_js }}\"></script>\n <script>\n var circulationAdmin = new CirculationAdmin({\n csrfToken: \"{{ csrf_token }}\",\n tos_link_href: \"{{ sitewide_tos_href }}\",\n tos_link_text: \"{{ sitewide_tos_text }}\",\n showCircEventsDownload: {{ \"true\" if show_circ_events_download else \"false\" }},\n settingUp: {{ \"true\" if setting_up else \"false\" }},\n email: \"{{ email }}\",\n roles: [{% for role in roles %}{\"role\": \"{{role.role}}\"{% if role.library %}, \"library\": \"{{role.library.short_name}}\"{% endif %} },{% endfor %}]\n });\n </script>\n</body>\n</html>\n\"\"\"\n\nadmin_sign_in_again = \"\"\"\n<!doctype html>\n<html>\n<head><title>{{ app_name }}</title></head>\n<body>\n <p>You are now logged in. You may close this window and try your request again.\n</body>\n</html>\n\"\"\"\n", "id": "7561347", "language": "Python", "matching_score": 0.9193667769432068, "max_stars_count": 0, "path": "api/admin/templates.py" }, { "content": "import json\nimport logging\nimport socket\n\nfrom boto3.session import Session as AwsSession\nfrom flask_babel import lazy_gettext as _\nfrom loggly.handlers import HTTPSHandler as LogglyHandler\nfrom watchtower import CloudWatchLogHandler\n\nfrom .config import CannotLoadConfiguration, Configuration\nfrom .model import ConfigurationSetting, ExternalIntegration\nfrom .util.datetime_helpers import utc_now\n\n\nclass JSONFormatter(logging.Formatter):\n hostname = socket.gethostname()\n fqdn = socket.getfqdn()\n if len(fqdn) > len(hostname):\n hostname = fqdn\n\n def __init__(self, app_name):\n super(JSONFormatter, self).__init__()\n self.app_name = app_name or LogConfiguration.DEFAULT_APP_NAME\n\n def format(self, record):\n def ensure_str(s):\n \"\"\"Ensure that unicode strings are used for a record's message.\n We don't want to try to interpolate an incompatible byte type; it\n could lead to a UnicodeDecodeError.\n \"\"\"\n if isinstance(s, bytes):\n s = s.decode(\"utf-8\")\n return s\n\n message = ensure_str(record.msg)\n\n if record.args:\n record_args = tuple([ensure_str(arg) for arg in record.args])\n try:\n message = message % record_args\n except Exception as e:\n # There was a problem formatting the log message,\n # which points to a bug. A problem with the logging\n # code shouldn't break the code that actually does the\n # work, but we can't just let this slide -- we need to\n # report the problem so it can be fixed.\n message = (\n \"Log message could not be formatted. Exception: %r. 
Original message: message=%r args=%r\"\n % (e, message, record_args)\n )\n data = dict(\n host=self.hostname,\n app=self.app_name,\n name=record.name,\n level=record.levelname,\n filename=record.filename,\n message=message,\n timestamp=utc_now().isoformat(),\n )\n if record.exc_info:\n data[\"traceback\"] = self.formatException(record.exc_info)\n return json.dumps(data)\n\n\nclass StringFormatter(logging.Formatter):\n \"\"\"Encode all output as a string.\"\"\"\n\n def format(self, record):\n data = super(StringFormatter, self).format(record)\n return str(data)\n\n\nclass Logger(object):\n \"\"\"Abstract base class for logging\"\"\"\n\n DEFAULT_APP_NAME = \"simplified\"\n\n JSON_LOG_FORMAT = \"json\"\n TEXT_LOG_FORMAT = \"text\"\n DEFAULT_MESSAGE_TEMPLATE = (\n \"%(asctime)s:%(name)s:%(levelname)s:%(filename)s:%(message)s\"\n )\n\n @classmethod\n def set_formatter(\n cls, handler, app_name=None, log_format=None, message_template=None\n ):\n \"\"\"Tell the given `handler` to format its log messages in a\n certain way.\n \"\"\"\n # Initialize defaults\n if log_format is None:\n log_format = cls.JSON_LOG_FORMAT\n if message_template is None:\n message_template = cls.DEFAULT_MESSAGE_TEMPLATE\n\n if log_format == cls.JSON_LOG_FORMAT:\n formatter = JSONFormatter(app_name)\n else:\n formatter = StringFormatter(message_template)\n handler.setFormatter(formatter)\n\n @classmethod\n def from_configuration(cls, _db, testing=False):\n \"\"\"Should be implemented in each logging class.\"\"\"\n raise NotImplementedError()\n\n\nclass SysLogger(Logger):\n\n NAME = \"sysLog\"\n\n # Settings for the integration with protocol=INTERNAL_LOGGING\n LOG_FORMAT = \"log_format\"\n LOG_MESSAGE_TEMPLATE = \"message_template\"\n\n SETTINGS = [\n {\n \"key\": LOG_FORMAT,\n \"label\": _(\"Log Format\"),\n \"type\": \"select\",\n \"options\": [\n {\"key\": Logger.JSON_LOG_FORMAT, \"label\": _(\"json\")},\n {\"key\": Logger.TEXT_LOG_FORMAT, \"label\": _(\"text\")},\n ],\n },\n {\n \"key\": LOG_MESSAGE_TEMPLATE,\n \"label\": _(\"template\"),\n \"default\": Logger.DEFAULT_MESSAGE_TEMPLATE,\n \"required\": True,\n },\n ]\n\n SITEWIDE = True\n\n @classmethod\n def _defaults(cls, testing=False):\n \"\"\"Return default log configuration values.\"\"\"\n if testing:\n internal_log_format = cls.TEXT_LOG_FORMAT\n else:\n internal_log_format = cls.JSON_LOG_FORMAT\n message_template = cls.DEFAULT_MESSAGE_TEMPLATE\n return internal_log_format, message_template\n\n @classmethod\n def from_configuration(cls, _db, testing=False):\n (internal_log_format, message_template) = cls._defaults(testing)\n app_name = cls.DEFAULT_APP_NAME\n\n if _db and not testing:\n goal = ExternalIntegration.LOGGING_GOAL\n internal = ExternalIntegration.lookup(\n _db, ExternalIntegration.INTERNAL_LOGGING, goal\n )\n\n if internal:\n internal_log_format = (\n internal.setting(cls.LOG_FORMAT).value or internal_log_format\n )\n message_template = (\n internal.setting(cls.LOG_MESSAGE_TEMPLATE).value or message_template\n )\n app_name = (\n ConfigurationSetting.sitewide(_db, Configuration.LOG_APP_NAME).value\n or app_name\n )\n\n handler = logging.StreamHandler()\n cls.set_formatter(\n handler,\n log_format=internal_log_format,\n message_template=message_template,\n app_name=app_name,\n )\n return handler\n\n\nclass Loggly(Logger):\n\n NAME = \"Loggly\"\n DEFAULT_LOGGLY_URL = \"https://logs-01.loggly.com/inputs/%(token)s/tag/python/\"\n\n USER = \"user\"\n PASSWORD = \"password\"\n URL = \"url\"\n\n SETTINGS = [\n {\"key\": USER, \"label\": _(\"Username\"), 
\"required\": True},\n {\"key\": PASSWORD, \"label\": _(\"Password\"), \"required\": True},\n {\"key\": URL, \"label\": _(\"URL\"), \"required\": True, \"format\": \"url\"},\n ]\n\n SITEWIDE = True\n\n @classmethod\n def from_configuration(cls, _db, testing=False):\n loggly = None\n from .model import ConfigurationSetting, ExternalIntegration\n\n app_name = cls.DEFAULT_APP_NAME\n if _db and not testing:\n goal = ExternalIntegration.LOGGING_GOAL\n loggly = ExternalIntegration.lookup(_db, ExternalIntegration.LOGGLY, goal)\n app_name = (\n ConfigurationSetting.sitewide(_db, Configuration.LOG_APP_NAME).value\n or app_name\n )\n\n if loggly:\n loggly = Loggly.loggly_handler(loggly)\n cls.set_formatter(loggly, app_name)\n\n return loggly\n\n @classmethod\n def loggly_handler(cls, externalintegration):\n \"\"\"Turn a Loggly ExternalIntegration into a log handler.\"\"\"\n token = <PASSWORD>integration.password\n url = externalintegration.url or cls.DEFAULT_LOGGLY_URL\n if not url:\n raise CannotLoadConfiguration(\n \"Loggly integration configured but no URL provided.\"\n )\n try:\n url = cls._interpolate_loggly_url(url, token)\n except (TypeError, KeyError) as e:\n raise CannotLoadConfiguration(\n \"Cannot interpolate token %s into loggly URL %s\"\n % (\n token,\n url,\n )\n )\n return LogglyHandler(url)\n\n @classmethod\n def _interpolate_loggly_url(cls, url, token):\n if \"%s\" in url:\n return url % token\n if \"%(\" in url:\n return url % dict(token=token)\n\n # Assume the token is already in the URL.\n return url\n\n @classmethod\n def set_formatter(cls, handler, app_name):\n \"\"\"Tell the given `handler` to format its log messages in a\n certain way.\n \"\"\"\n formatter = JSONFormatter(app_name)\n handler.setFormatter(formatter)\n\n\nclass CloudwatchLogs(Logger):\n\n NAME = \"AWS Cloudwatch Logs\"\n GROUP = \"group\"\n STREAM = \"stream\"\n INTERVAL = \"interval\"\n CREATE_GROUP = \"create_group\"\n REGION = \"region\"\n DEFAULT_REGION = \"us-west-2\"\n DEFAULT_INTERVAL = 60\n DEFAULT_CREATE_GROUP = \"TRUE\"\n\n # https://docs.aws.amazon.com/general/latest/gr/rande.html#cwl_region\n REGIONS = [\n {\"key\": \"us-east-2\", \"label\": _(\"US East (Ohio)\")},\n {\"key\": \"us-east-1\", \"label\": _(\"US East (N. Virginia)\")},\n {\"key\": \"us-west-1\", \"label\": _(\"US West (N. 
California)\")},\n {\"key\": \"us-west-2\", \"label\": _(\"US West (Oregon)\")},\n {\"key\": \"ap-south-1\", \"label\": _(\"Asia Pacific (Mumbai)\")},\n {\"key\": \"ap-northeast-3\", \"label\": _(\"Asia Pacific (Osaka-Local)\")},\n {\"key\": \"ap-northeast-2\", \"label\": _(\"Asia Pacific (Seoul)\")},\n {\"key\": \"ap-southeast-1\", \"label\": _(\"Asia Pacific (Singapore)\")},\n {\"key\": \"ap-southeast-2\", \"label\": _(\"Asia Pacific (Sydney)\")},\n {\"key\": \"ap-northeast-1\", \"label\": _(\"Asia Pacific (Tokyo)\")},\n {\"key\": \"ca-central-1\", \"label\": _(\"Canada (Central)\")},\n {\"key\": \"cn-north-1\", \"label\": _(\"China (Beijing)\")},\n {\"key\": \"cn-northwest-1\", \"label\": _(\"China (Ningxia)\")},\n {\"key\": \"eu-central-1\", \"label\": _(\"EU (Frankfurt)\")},\n {\"key\": \"eu-west-1\", \"label\": _(\"EU (Ireland)\")},\n {\"key\": \"eu-west-2\", \"label\": _(\"EU (London)\")},\n {\"key\": \"eu-west-3\", \"label\": _(\"EU (Paris)\")},\n {\"key\": \"sa-east-1\", \"label\": _(\"South America (Sao Paulo)\")},\n ]\n\n SETTINGS = [\n {\n \"key\": GROUP,\n \"label\": _(\"Log Group\"),\n \"default\": Logger.DEFAULT_APP_NAME,\n \"required\": True,\n },\n {\n \"key\": STREAM,\n \"label\": _(\"Log Stream\"),\n \"default\": Logger.DEFAULT_APP_NAME,\n \"required\": True,\n },\n {\n \"key\": INTERVAL,\n \"label\": _(\"Update Interval Seconds\"),\n \"default\": DEFAULT_INTERVAL,\n \"required\": True,\n },\n {\n \"key\": REGION,\n \"label\": _(\"AWS Region\"),\n \"type\": \"select\",\n \"options\": REGIONS,\n \"default\": DEFAULT_REGION,\n \"required\": True,\n },\n {\n \"key\": CREATE_GROUP,\n \"label\": _(\"Automatically Create Log Group\"),\n \"type\": \"select\",\n \"options\": [\n {\"key\": \"TRUE\", \"label\": _(\"Yes\")},\n {\"key\": \"FALSE\", \"label\": _(\"No\")},\n ],\n \"default\": True,\n \"required\": True,\n },\n ]\n\n SITEWIDE = True\n\n @classmethod\n def from_configuration(cls, _db, testing=False):\n settings = None\n cloudwatch = None\n\n app_name = cls.DEFAULT_APP_NAME\n if _db and not testing:\n goal = ExternalIntegration.LOGGING_GOAL\n settings = ExternalIntegration.lookup(\n _db, ExternalIntegration.CLOUDWATCH, goal\n )\n app_name = (\n ConfigurationSetting.sitewide(_db, Configuration.LOG_APP_NAME).value\n or app_name\n )\n\n if settings:\n cloudwatch = cls.get_handler(settings, testing)\n cls.set_formatter(cloudwatch, app_name)\n\n return cloudwatch\n\n @classmethod\n def get_handler(cls, settings, testing=False):\n \"\"\"Turn ExternalIntegration into a log handler.\"\"\"\n group = settings.setting(cls.GROUP).value or cls.DEFAULT_APP_NAME\n stream = settings.setting(cls.STREAM).value or cls.DEFAULT_APP_NAME\n interval = settings.setting(cls.INTERVAL).value or cls.DEFAULT_INTERVAL\n region = settings.setting(cls.REGION).value or cls.DEFAULT_REGION\n create_group = (\n settings.setting(cls.CREATE_GROUP).value or cls.DEFAULT_CREATE_GROUP\n )\n\n try:\n interval = int(interval)\n if interval <= 0:\n raise CannotLoadConfiguration(\n \"AWS Cloudwatch Logs interval must be a positive integer.\"\n )\n except ValueError:\n raise CannotLoadConfiguration(\n \"AWS Cloudwatch Logs interval configuration must be an integer.\"\n )\n session = AwsSession(region_name=region)\n handler = CloudWatchLogHandler(\n log_group=group,\n stream_name=stream,\n send_interval=interval,\n boto3_session=session,\n create_log_group=create_group == \"TRUE\",\n )\n # Add a filter that makes sure no messages from botocore are processed by\n # the cloudwatch logs integration, as these messages can 
lead to an infinite loop.\n class BotoFilter(logging.Filter):\n def filter(self, record):\n return not record.name.startswith(\"botocore\")\n\n handler.addFilter(BotoFilter())\n return handler\n\n\nclass LogConfiguration(object):\n \"\"\"Configures the active Python logging handlers based on logging\n configuration from the database.\n \"\"\"\n\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARN = \"WARN\"\n ERROR = \"ERROR\"\n\n # The default value to put into the 'app' field of JSON-format logs,\n # unless LOG_APP_NAME overrides it.\n DEFAULT_APP_NAME = \"simplified\"\n LOG_APP_NAME = \"log_app\"\n\n DEFAULT_LOG_LEVEL = INFO\n DEFAULT_DATABASE_LOG_LEVEL = WARN\n\n # Settings for the integration with protocol=INTERNAL_LOGGING\n LOG_LEVEL = \"log_level\"\n DATABASE_LOG_LEVEL = \"database_log_level\"\n LOG_LEVEL_UI = [\n {\"key\": DEBUG, \"value\": _(\"Debug\")},\n {\"key\": INFO, \"value\": _(\"Info\")},\n {\"key\": WARN, \"value\": _(\"Warn\")},\n {\"key\": ERROR, \"value\": _(\"Error\")},\n ]\n\n SITEWIDE_SETTINGS = [\n {\n \"key\": LOG_LEVEL,\n \"label\": _(\"Log Level\"),\n \"type\": \"select\",\n \"options\": LOG_LEVEL_UI,\n \"default\": INFO,\n },\n {\n \"key\": LOG_APP_NAME,\n \"label\": _(\"Log Application name\"),\n \"description\": _(\n \"Log messages originating from this application will be tagged with this name. If you run multiple instances, giving each one a different application name will help you determine which instance is having problems.\"\n ),\n \"default\": DEFAULT_APP_NAME,\n },\n {\n \"key\": DATABASE_LOG_LEVEL,\n \"label\": _(\"Database Log Level\"),\n \"type\": \"select\",\n \"options\": LOG_LEVEL_UI,\n \"description\": _(\n \"Database logs are extremely verbose, so unless you're diagnosing a database-related problem, it's a good idea to set a higher log level for database messages.\"\n ),\n \"default\": WARN,\n },\n ]\n\n @classmethod\n def initialize(cls, _db, testing=False):\n \"\"\"Make the logging handlers reflect the current logging rules\n as configured in the database.\n\n :param _db: A database connection. If this is None, the default logging\n configuration will be used.\n\n :param testing: True if unit tests are currently running; otherwise False.\n \"\"\"\n log_level, database_log_level, new_handlers, errors = cls.from_configuration(\n _db, testing\n )\n\n # Replace the set of handlers associated with the root logger.\n logger = logging.getLogger()\n logger.setLevel(log_level)\n old_handlers = list(logger.handlers)\n for handler in new_handlers:\n logger.addHandler(handler)\n handler.setLevel(log_level)\n for handler in old_handlers:\n logger.removeHandler(handler)\n\n # Set the loggers for various verbose libraries to the database\n # log level, which is probably higher than the normal log level.\n for logger in (\n \"sqlalchemy.engine\",\n \"elasticsearch\",\n \"requests.packages.urllib3.connectionpool\",\n \"botocore\",\n ):\n logging.getLogger(logger).setLevel(database_log_level)\n\n # These loggers can cause infinite loops if they're set to\n # DEBUG, because their log is triggered during the process of\n # logging something to Loggly. 
These loggers will never have their\n # log level set lower than WARN.\n if database_log_level == cls.ERROR:\n loop_prevention_log_level = cls.ERROR\n else:\n loop_prevention_log_level = cls.WARN\n for logger in [\"urllib3.connectionpool\"]:\n logging.getLogger(logger).setLevel(loop_prevention_log_level)\n\n # If we had an error creating any log handlers report it\n for error in errors:\n logging.getLogger().error(error)\n\n return log_level\n\n @classmethod\n def from_configuration(cls, _db, testing=False):\n \"\"\"Return the logging policy as configured in the database.\n\n :param _db: A database connection. If None, the default\n logging policy will be used.\n\n :param testing: A boolean indicating whether a unit test is\n happening right now. If True, the database configuration will\n be ignored in favor of a known test-friendly policy. (It's\n okay to pass in False during a test *of this method*.)\n\n :return: A 3-tuple (internal_log_level, database_log_level,\n handlers). `internal_log_level` is the log level to be used\n for most log messages. `database_log_level` is the log level\n to be applied to the loggers for the database connector and\n other verbose third-party libraries. `handlers` is a list of\n Handler objects that will be associated with the top-level\n logger.\n \"\"\"\n log_level = cls.DEFAULT_LOG_LEVEL\n database_log_level = cls.DEFAULT_DATABASE_LOG_LEVEL\n\n if _db and not testing:\n log_level = (\n ConfigurationSetting.sitewide(_db, Configuration.LOG_LEVEL).value\n or log_level\n )\n database_log_level = (\n ConfigurationSetting.sitewide(\n _db, Configuration.DATABASE_LOG_LEVEL\n ).value\n or database_log_level\n )\n\n loggers = [SysLogger, Loggly, CloudwatchLogs]\n handlers = []\n errors = []\n\n for logger in loggers:\n try:\n handler = logger.from_configuration(_db, testing)\n if handler:\n handlers.append(handler)\n except Exception as e:\n errors.append(\"Error creating logger %s %s\" % (logger.NAME, str(e)))\n\n return log_level, database_log_level, handlers, errors\n", "id": "6134428", "language": "Python", "matching_score": 2.1150293350219727, "max_stars_count": 0, "path": "core/log.py" }, { "content": "\"\"\"A custom patron catalog annotates a library's authentication\ndocument to describe an unusual setup.\n\"\"\"\n\nfrom flask_babel import lazy_gettext as _\nfrom sqlalchemy.orm.session import Session\n\nfrom core.lane import Lane\nfrom core.model import ConfigurationSetting, ExternalIntegration, get_one\nfrom core.util.opds_writer import OPDSFeed\n\nfrom .config import CannotLoadConfiguration\n\n\nclass CustomPatronCatalog(object):\n \"\"\"An annotator for a library's authentication document.\n\n Any subclass of this class must define PROTOCOL and must be\n passed into a CustomPatronCatalog.register() call after the class\n definition is complete.\n\n A subclass of this class will be stored in the\n LibraryAuthenticator. 
CustomPatronCatalogs should not store\n any objects obtained from the database without disconnecting them\n from their session.\n \"\"\"\n\n BY_PROTOCOL = {}\n\n GOAL = \"custom_patron_catalog\"\n\n @classmethod\n def register(self, view_class):\n protocol = view_class.PROTOCOL\n if protocol in self.BY_PROTOCOL:\n raise ValueError(\"Duplicate patron catalog for protocol: %s\" % protocol)\n self.BY_PROTOCOL[protocol] = view_class\n\n @classmethod\n def unregister(self, view_class):\n \"\"\"Remove a CustomPatronCatalog from consideration.\n Only used in tests.\n \"\"\"\n del self.BY_PROTOCOL[view_class.PROTOCOL]\n\n @classmethod\n def for_library(cls, library):\n \"\"\"Find the appropriate CustomPatronCatalog for the given library.\"\"\"\n _db = Session.object_session(library)\n integration = ExternalIntegration.one_for_library_and_goal(\n _db, library, cls.GOAL\n )\n if not integration:\n return None\n protocol = integration.protocol\n if not protocol in cls.BY_PROTOCOL:\n raise CannotLoadConfiguration(\n \"Unregistered custom patron catalog protocol: %s\" % protocol\n )\n view_class = cls.BY_PROTOCOL[protocol]\n return view_class(library, integration)\n\n def __init__(self, library, integration):\n raise NotImplementedError()\n\n def annotate_authentication_document(self, library, doc, url_for):\n \"\"\"Modify the library's authentication document.\n\n :param library: A Library\n :param doc: A dictionary representing the library's\n default authentication document.\n :param url_for: An implementation of Flask url_for,\n used to generate URLs.\n :return: A dictionary representing the library's\n default authentication document. It's okay to modify\n `doc` and return the modified version.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _load_lane(cls, library, lane_id):\n \"\"\"Make sure the Lane with the given ID actually exists and is\n associated with the given Library.\n \"\"\"\n _db = Session.object_session(library)\n lane = get_one(_db, Lane, id=lane_id)\n if not lane:\n raise CannotLoadConfiguration(\"No lane with ID: %s\" % lane_id)\n if lane.library != library:\n raise CannotLoadConfiguration(\n \"Lane %d is for the wrong library (%s, I need %s)\"\n % (lane.id, lane.library.name, library.name)\n )\n return lane\n\n @classmethod\n def replace_link(cls, doc, rel, **kwargs):\n \"\"\"Remove all links with the given relation and replace them\n with the given link.\n\n :param doc: An authentication document. 
Will be modified in place.\n :param rel: Remove links with this relation.\n :param kwargs: Add a new link with these attributes.\n :return: A modified authentication document.\n \"\"\"\n links = [x for x in doc[\"links\"] if x[\"rel\"] != rel]\n links.append(dict(rel=rel, **kwargs))\n doc[\"links\"] = links\n return doc\n\n\nclass CustomRootLane(CustomPatronCatalog):\n \"\"\"Send library patrons to a lane other than the root lane.\"\"\"\n\n PROTOCOL = \"Custom Root Lane\"\n\n LANE = \"lane\"\n\n SETTINGS = [\n {\n \"key\": LANE,\n \"label\": _(\"Send patrons to the lane with this ID.\"),\n },\n ]\n\n def __init__(self, library, integration):\n _db = Session.object_session(library)\n m = ConfigurationSetting.for_library_and_externalintegration\n lane_id = m(_db, self.LANE, library, integration)\n\n # We don't want to store the Lane objects long-term, but we do need\n # to make sure the lane ID corresponds to a real lane for the\n # right library.\n self.lane_id = lane_id.int_value\n lane = self._load_lane(library, self.lane_id)\n\n def annotate_authentication_document(self, library, doc, url_for):\n \"\"\"Replace the 'start' link with a link to the configured Lane.\"\"\"\n root_url = url_for(\n \"acquisition_groups\",\n library_short_name=library.short_name,\n lane_identifier=self.lane_id,\n _external=True,\n )\n self.replace_link(\n doc, \"start\", href=root_url, type=OPDSFeed.ACQUISITION_FEED_TYPE\n )\n return doc\n\n\nCustomPatronCatalog.register(CustomRootLane)\n\n\nclass COPPAGate(CustomPatronCatalog):\n\n PROTOCOL = \"COPPA Age Gate\"\n\n AUTHENTICATION_TYPE = \"http://librarysimplified.org/terms/authentication/gate/coppa\"\n AUTHENTICATION_YES_REL = (\n \"http://librarysimplified.org/terms/rel/authentication/restriction-met\"\n )\n AUTHENTICATION_NO_REL = (\n \"http://librarysimplified.org/terms/rel/authentication/restriction-not-met\"\n )\n\n REQUIREMENT_MET_LANE = \"requirement_met_lane\"\n REQUIREMENT_NOT_MET_LANE = \"requirement_not_met_lane\"\n\n SETTINGS = [\n {\n \"key\": REQUIREMENT_MET_LANE,\n \"label\": _(\"ID of lane for patrons who are 13 or older\"),\n },\n {\n \"key\": REQUIREMENT_NOT_MET_LANE,\n \"label\": _(\"ID of lane for patrons who are under 13\"),\n },\n ]\n\n def __init__(self, library, integration):\n _db = Session.object_session(library)\n m = ConfigurationSetting.for_library_and_externalintegration\n yes_lane_id = m(_db, self.REQUIREMENT_MET_LANE, library, integration)\n no_lane_id = m(_db, self.REQUIREMENT_NOT_MET_LANE, library, integration)\n\n # We don't want to store the Lane objects long-term, but we do need\n # to make sure the lane IDs correspond to real lanes for the\n # right library.\n self.yes_lane_id = yes_lane_id.int_value\n self.no_lane_id = no_lane_id.int_value\n yes_lane = self._load_lane(library, self.yes_lane_id)\n no_lane = self._load_lane(library, self.no_lane_id)\n\n def annotate_authentication_document(self, library, doc, url_for):\n \"\"\"Replace the 'start' link and add a custom authentication\n mechanism.\n \"\"\"\n\n # A lane for grown-ups.\n yes_url = url_for(\n \"acquisition_groups\",\n library_short_name=library.short_name,\n lane_identifier=self.yes_lane_id,\n _external=True,\n )\n\n # A lane for children.\n no_url = url_for(\n \"acquisition_groups\",\n library_short_name=library.short_name,\n lane_identifier=self.no_lane_id,\n _external=True,\n )\n\n # Replace the 'start' link with the childrens link. 
Any client\n # that doesn't understand the extensions will be safe from\n # grown-up content.\n feed = OPDSFeed.ACQUISITION_FEED_TYPE\n self.replace_link(doc, \"start\", href=no_url, type=feed)\n\n # Add a custom authentication technique that\n # explains the COPPA gate.\n links = [\n dict(rel=self.AUTHENTICATION_YES_REL, href=yes_url, type=feed),\n dict(rel=self.AUTHENTICATION_NO_REL, href=no_url, type=feed),\n ]\n\n authentication = dict(type=self.AUTHENTICATION_TYPE, links=links)\n\n # It's an academic question whether this is replacing the existing\n # auth mechanisms or just adding another one, but for the moment\n # let's go with \"adding another one\".\n doc.setdefault(\"authentication\", []).append(authentication)\n return doc\n\n\nCustomPatronCatalog.register(COPPAGate)\n", "id": "10555805", "language": "Python", "matching_score": 2.9445202350616455, "max_stars_count": 0, "path": "api/custom_patron_catalog.py" }, { "content": "\"\"\"Test the base authentication framework: that is, the classes that\ndon't interact with any particular source of truth.\n\"\"\"\nimport datetime\nimport json\nimport os\nimport re\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nfrom decimal import Decimal\n\nimport flask\nimport pytest\nfrom flask import url_for\nfrom flask_babel import lazy_gettext as _\nfrom money import Money\n\nfrom api.annotations import AnnotationWriter\nfrom api.announcements import Announcements\nfrom api.authenticator import (\n AuthenticationProvider,\n Authenticator,\n BasicAuthenticationProvider,\n CirculationPatronProfileStorage,\n LibraryAuthenticator,\n OAuthAuthenticationProvider,\n OAuthController,\n PatronData,\n)\nfrom api.clever import CleverAuthenticationAPI\nfrom api.config import CannotLoadConfiguration, Configuration\nfrom api.firstbook import FirstBookAuthenticationAPI\nfrom api.millenium_patron import MilleniumPatronAPI\nfrom api.opds import LibraryAnnotator\nfrom api.problem_details import *\nfrom api.problem_details import PATRON_OF_ANOTHER_LIBRARY\nfrom api.simple_authentication import SimpleAuthenticationProvider\nfrom api.util.patron import PatronUtility\nfrom core.mock_analytics_provider import MockAnalyticsProvider\nfrom core.model import (\n CirculationEvent,\n ConfigurationSetting,\n Credential,\n DataSource,\n ExternalIntegration,\n Library,\n Patron,\n Session,\n create,\n)\nfrom core.opds import OPDSFeed\nfrom core.testing import DatabaseTest\nfrom core.user_profile import ProfileController\nfrom core.util.authentication_for_opds import AuthenticationForOPDSDocument\nfrom core.util.datetime_helpers import utc_now\nfrom core.util.http import IntegrationException\n\nfrom .test_controller import ControllerTest\n\n\nclass MockAuthenticationProvider(object):\n \"\"\"An AuthenticationProvider that always authenticates requests for\n the given Patron and always returns the given PatronData when\n asked to look up data.\n \"\"\"\n\n def __init__(self, patron=None, patrondata=None):\n self.patron = patron\n self.patrondata = patrondata\n\n def authenticate(self, _db, header):\n return self.patron\n\n\nclass MockBasicAuthenticationProvider(\n BasicAuthenticationProvider, MockAuthenticationProvider\n):\n \"\"\"A mock basic authentication provider for use in testing the overall\n authentication process.\n \"\"\"\n\n def __init__(\n self,\n library,\n integration,\n analytics=None,\n patron=None,\n patrondata=None,\n *args,\n **kwargs\n ):\n super(MockBasicAuthenticationProvider, self).__init__(\n library, integration, analytics, *args, 
**kwargs\n )\n self.patron = patron\n self.patrondata = patrondata\n\n def authenticate(self, _db, header):\n return self.patron\n\n def remote_authenticate(self, username, password):\n return self.patrondata\n\n def remote_patron_lookup(self, patrondata):\n return self.patrondata\n\n\nclass MockBasic(BasicAuthenticationProvider):\n \"\"\"A second mock basic authentication provider for use in testing\n the workflow around Basic Auth.\n \"\"\"\n\n NAME = \"Mock Basic Auth provider\"\n LOGIN_BUTTON_IMAGE = \"BasicButton.png\"\n\n def __init__(\n self,\n library,\n integration,\n analytics=None,\n patrondata=None,\n remote_patron_lookup_patrondata=None,\n *args,\n **kwargs\n ):\n super(MockBasic, self).__init__(library, integration, analytics)\n self.patrondata = patrondata\n self.remote_patron_lookup_patrondata = remote_patron_lookup_patrondata\n\n def remote_authenticate(self, username, password):\n return self.patrondata\n\n def remote_patron_lookup(self, patrondata):\n return self.remote_patron_lookup_patrondata\n\n\nclass MockOAuthAuthenticationProvider(\n OAuthAuthenticationProvider, MockAuthenticationProvider\n):\n \"\"\"A mock OAuth authentication provider for use in testing the overall\n authentication process.\n \"\"\"\n\n def __init__(self, library, provider_name, patron=None, patrondata=None):\n self.library_id = library.id\n self.NAME = provider_name\n self.patron = patron\n self.patrondata = patrondata\n\n def authenticated_patron(self, _db, provider_token):\n return self.patron\n\n\nclass MockOAuth(OAuthAuthenticationProvider):\n \"\"\"A second mock basic authentication provider for use in testing\n the workflow around OAuth.\n \"\"\"\n\n URI = \"http://example.org/\"\n NAME = \"Mock provider\"\n TOKEN_TYPE = \"test token\"\n TOKEN_DATA_SOURCE_NAME = DataSource.MANUAL\n LOGIN_BUTTON_IMAGE = \"OAuthButton.png\"\n\n def __init__(self, library, name=\"Mock OAuth\", integration=None, analytics=None):\n _db = Session.object_session(library)\n integration = integration or self._mock_integration(_db, name)\n super(MockOAuth, self).__init__(library, integration, analytics)\n\n @classmethod\n def _mock_integration(self, _db, name):\n integration, ignore = create(\n _db,\n ExternalIntegration,\n protocol=\"OAuth\",\n goal=ExternalIntegration.PATRON_AUTH_GOAL,\n )\n integration.username = name\n integration.password = \"\"\n integration.setting(self.OAUTH_TOKEN_EXPIRATION_DAYS).value = 20\n return integration\n\n\nclass AuthenticatorTest(DatabaseTest):\n def mock_basic(self, *args, **kwargs):\n \"\"\"Convenience method to instantiate a MockBasic object with the\n default library.\n \"\"\"\n self.mock_basic_integration = self._external_integration(\n self._str, ExternalIntegration.PATRON_AUTH_GOAL\n )\n return MockBasic(\n self._default_library, self.mock_basic_integration, *args, **kwargs\n )\n\n\nclass TestPatronData(AuthenticatorTest):\n def setup_method(self):\n super(TestPatronData, self).setup_method()\n self.expiration_time = utc_now()\n self.data = PatronData(\n permanent_id=\"1\",\n authorization_identifier=\"2\",\n username=\"3\",\n personal_name=\"4\",\n email_address=\"5\",\n authorization_expires=self.expiration_time,\n fines=Money(6, \"USD\"),\n block_reason=PatronData.NO_VALUE,\n )\n\n def test_to_dict(self):\n data = self.data.to_dict\n expect = dict(\n permanent_id=\"1\",\n authorization_identifier=\"2\",\n authorization_identifiers=[\"2\"],\n external_type=None,\n username=\"3\",\n personal_name=\"4\",\n email_address=\"5\",\n 
authorization_expires=self.expiration_time.strftime(\"%Y-%m-%d\"),\n fines=\"6\",\n block_reason=None,\n )\n assert data == expect\n\n # Test with an empty fines field\n self.data.fines = PatronData.NO_VALUE\n data = self.data.to_dict\n expect[\"fines\"] = None\n assert data == expect\n\n # Test with a zeroed-out fines field\n self.data.fines = Decimal(0.0)\n data = self.data.to_dict\n expect[\"fines\"] = \"0\"\n assert data == expect\n\n # Test with an empty expiration time\n self.data.authorization_expires = PatronData.NO_VALUE\n data = self.data.to_dict\n expect[\"authorization_expires\"] = None\n assert data == expect\n\n def test_apply(self):\n patron = self._patron()\n self.data.cached_neighborhood = \"Little Homeworld\"\n\n self.data.apply(patron)\n assert self.data.permanent_id == patron.external_identifier\n assert self.data.authorization_identifier == patron.authorization_identifier\n assert self.data.username == patron.username\n assert self.data.authorization_expires == patron.authorization_expires\n assert self.data.fines == patron.fines\n assert None == patron.block_reason\n assert \"Little Homeworld\" == patron.cached_neighborhood\n\n # This data is stored in PatronData but not applied to Patron.\n assert \"4\" == self.data.personal_name\n assert False == hasattr(patron, \"personal_name\")\n assert \"5\" == self.data.email_address\n assert False == hasattr(patron, \"email_address\")\n\n # This data is stored on the Patron object as a convenience,\n # but it's not stored in the database.\n assert \"Little Homeworld\" == patron.neighborhood\n\n def test_apply_block_reason(self):\n \"\"\"If the PatronData has a reason why a patron is blocked,\n the reason is put into the Patron record.\n \"\"\"\n self.data.block_reason = PatronData.UNKNOWN_BLOCK\n patron = self._patron()\n self.data.apply(patron)\n assert PatronData.UNKNOWN_BLOCK == patron.block_reason\n\n def test_apply_multiple_authorization_identifiers(self):\n \"\"\"If there are multiple authorization identifiers, the first\n one is chosen.\n \"\"\"\n patron = self._patron()\n patron.authorization_identifier = None\n data = PatronData(authorization_identifier=[\"2\", \"3\"], complete=True)\n data.apply(patron)\n assert \"2\" == patron.authorization_identifier\n\n # If Patron.authorization_identifier is already set, it will\n # not be changed, so long as its current value is acceptable.\n data = PatronData(authorization_identifier=[\"3\", \"2\"], complete=True)\n data.apply(patron)\n assert \"2\" == patron.authorization_identifier\n\n # If Patron.authorization_identifier ever turns out not to be\n # an acceptable value, it will be changed.\n data = PatronData(authorization_identifier=[\"3\", \"4\"], complete=True)\n data.apply(patron)\n assert \"3\" == patron.authorization_identifier\n\n def test_apply_sets_last_external_sync_if_data_is_complete(self):\n \"\"\"Patron.last_external_sync is only updated when apply() is called on\n a PatronData object that represents a full set of metadata.\n What constitutes a 'full set' depends on the authentication\n provider.\n \"\"\"\n patron = self._patron()\n self.data.complete = False\n self.data.apply(patron)\n assert None == patron.last_external_sync\n self.data.complete = True\n self.data.apply(patron)\n assert None != patron.last_external_sync\n\n def test_apply_sets_first_valid_authorization_identifier(self):\n \"\"\"If the ILS has multiple authorization identifiers for a patron, the\n first one is used.\n \"\"\"\n patron = self._patron()\n patron.authorization_identifier = None\n 
self.data.set_authorization_identifier([\"identifier 1\", \"identifier 2\"])\n self.data.apply(patron)\n assert \"identifier 1\" == patron.authorization_identifier\n\n def test_apply_leaves_valid_authorization_identifier_alone(self):\n \"\"\"If the ILS says a patron has a new preferred authorization\n identifier, but our Patron record shows them using an\n authorization identifier that still works, we don't change it.\n \"\"\"\n patron = self._patron()\n patron.authorization_identifier = \"old identifier\"\n self.data.set_authorization_identifier(\n [\"new identifier\", patron.authorization_identifier]\n )\n self.data.apply(patron)\n assert \"old identifier\" == patron.authorization_identifier\n\n def test_apply_overwrites_invalid_authorization_identifier(self):\n \"\"\"If the ILS says a patron has a new preferred authorization\n identifier, and our Patron record shows them using an\n authorization identifier that no longer works, we change it.\n \"\"\"\n patron = self._patron()\n self.data.set_authorization_identifier([\"identifier 1\", \"identifier 2\"])\n self.data.apply(patron)\n assert \"identifier 1\" == patron.authorization_identifier\n\n def test_apply_on_incomplete_information(self):\n \"\"\"When we call apply() based on incomplete information (most\n commonly, the fact that a given string was successfully used\n to authenticate a patron), we are very careful about modifying\n data already in the database.\n \"\"\"\n now = utc_now()\n\n # If the only thing we know about a patron is that a certain\n # string authenticated them, we set\n # Patron.authorization_identifier to that string but we also\n # indicate that we need to perform an external sync on them\n # ASAP.\n authenticated = PatronData(authorization_identifier=\"1234\", complete=False)\n patron = self._patron()\n patron.authorization_identifier = None\n patron.last_external_sync = now\n authenticated.apply(patron)\n assert \"1234\" == patron.authorization_identifier\n assert None == patron.last_external_sync\n\n # If a patron authenticates by username, we leave their Patron\n # record alone.\n patron = self._patron()\n patron.authorization_identifier = \"1234\"\n patron.username = \"user\"\n patron.last_external_sync = now\n patron.fines = Money(10, \"USD\")\n authenticated_by_username = PatronData(\n authorization_identifier=\"user\", complete=False\n )\n authenticated_by_username.apply(patron)\n assert now == patron.last_external_sync\n\n # If a patron authenticates with a string that is neither\n # their authorization identifier nor their username, we leave\n # their Patron record alone, except that we indicate that we\n # need to perform an external sync on them ASAP.\n patron.last_external_sync = now\n authenticated_by_weird_identifier = PatronData(\n authorization_identifier=\"5678\", complete=False\n )\n authenticated_by_weird_identifier.apply(patron)\n assert \"1234\" == patron.authorization_identifier\n assert None == patron.last_external_sync\n\n def test_get_or_create_patron(self):\n analytics = MockAnalyticsProvider()\n\n # The patron didn't exist yet, so it was created\n # and an analytics event was sent.\n patron, is_new = self.data.get_or_create_patron(\n self._db, self._default_library.id, analytics\n )\n assert \"2\" == patron.authorization_identifier\n assert self._default_library == patron.library\n assert True == is_new\n assert CirculationEvent.NEW_PATRON == analytics.event_type\n assert 1 == analytics.count\n\n # Patron.neighborhood was set, even though there is no\n # value and that's not a database 
field.\n assert None == patron.neighborhood\n\n # Set a neighborhood and try again.\n self.data.neighborhood = \"Achewood\"\n\n # The same patron is returned, and no analytics\n # event was sent.\n patron, is_new = self.data.get_or_create_patron(\n self._db, self._default_library.id, analytics\n )\n assert \"2\" == patron.authorization_identifier\n assert False == is_new\n assert \"Achewood\" == patron.neighborhood\n assert 1 == analytics.count\n\n def test_to_response_parameters(self):\n\n params = self.data.to_response_parameters\n assert dict(name=\"4\") == params\n\n self.data.personal_name = None\n params = self.data.to_response_parameters\n assert dict() == params\n\n\nclass TestCirculationPatronProfileStorage(ControllerTest):\n def test_profile_document(self):\n def mock_url_for(endpoint, library_short_name, _external=True):\n return (\n \"http://host/\"\n + endpoint\n + \"?\"\n + \"library_short_name=\"\n + library_short_name\n )\n\n patron = self._patron()\n storage = CirculationPatronProfileStorage(patron, mock_url_for)\n doc = storage.profile_document\n assert \"settings\" in doc\n # Since there's no authdata configured, the DRM fields are not present\n assert \"drm:vendor\" not in doc\n assert \"drm:clientToken\" not in doc\n assert \"drm:scheme\" not in doc\n assert \"links\" not in doc\n\n # Now there's authdata configured, and the DRM fields are populated with\n # the vendor ID and a short client token\n self.initialize_adobe(patron.library)\n\n doc = storage.profile_document\n [adobe] = doc[\"drm\"]\n assert adobe[\"drm:vendor\"] == \"vendor id\"\n assert adobe[\"drm:clientToken\"].startswith(\n patron.library.short_name.upper() + \"TOKEN\"\n )\n assert (\n adobe[\"drm:scheme\"] == \"http://librarysimplified.org/terms/drm/scheme/ACS\"\n )\n [device_link, annotations_link] = doc[\"links\"]\n assert (\n device_link[\"rel\"] == \"http://librarysimplified.org/terms/drm/rel/devices\"\n )\n assert (\n device_link[\"href\"]\n == \"http://host/adobe_drm_devices?library_short_name=default\"\n )\n assert annotations_link[\"rel\"] == \"http://www.w3.org/ns/oa#annotationService\"\n assert (\n annotations_link[\"href\"]\n == \"http://host/annotations?library_short_name=default\"\n )\n assert annotations_link[\"type\"] == AnnotationWriter.CONTENT_TYPE\n\n\nclass MockAuthenticator(Authenticator):\n \"\"\"Allows testing Authenticator methods outside of a request context.\"\"\"\n\n def __init__(self, current_library, authenticators, analytics=None):\n _db = Session.object_session(current_library)\n super(MockAuthenticator, self).__init__(_db, analytics)\n self.current_library_name = current_library.short_name\n self.library_authenticators = authenticators\n\n def populate_authenticators(self, *args, **kwargs):\n \"\"\"Do nothing -- authenticators were set in the constructor.\"\"\"\n\n @property\n def current_library_short_name(self):\n return self.current_library_name\n\n\nclass TestAuthenticator(ControllerTest):\n def test_init(self):\n # The default library has already been configured to use the\n # SimpleAuthenticationProvider for its basic auth.\n l1 = self._default_library\n l1.short_name = \"l1\"\n\n # This library uses Millenium Patron.\n l2, ignore = create(self._db, Library, short_name=\"l2\")\n integration = self._external_integration(\n \"api.millenium_patron\", goal=ExternalIntegration.PATRON_AUTH_GOAL\n )\n integration.url = \"http://url/\"\n l2.integrations.append(integration)\n\n self._db.commit()\n\n analytics = MockAnalyticsProvider()\n\n auth = Authenticator(self._db, 
self._db.query(Library), analytics)\n\n # A LibraryAuthenticator has been created for each Library.\n assert \"l1\" in auth.library_authenticators\n assert \"l2\" in auth.library_authenticators\n assert isinstance(auth.library_authenticators[\"l1\"], LibraryAuthenticator)\n assert isinstance(auth.library_authenticators[\"l2\"], LibraryAuthenticator)\n\n # Each LibraryAuthenticator has been associated with an\n # appropriate AuthenticationProvider.\n\n assert isinstance(\n auth.library_authenticators[\"l1\"].basic_auth_provider,\n SimpleAuthenticationProvider,\n )\n assert isinstance(\n auth.library_authenticators[\"l2\"].basic_auth_provider, MilleniumPatronAPI\n )\n\n # Each provider has the analytics set.\n assert (\n analytics == auth.library_authenticators[\"l1\"].basic_auth_provider.analytics\n )\n assert (\n analytics == auth.library_authenticators[\"l2\"].basic_auth_provider.analytics\n )\n\n def test_methods_call_library_authenticators(self):\n class MockLibraryAuthenticator(LibraryAuthenticator):\n def __init__(self, name):\n self.name = name\n\n def authenticated_patron(self, _db, header):\n return \"authenticated patron for %s\" % self.name\n\n def create_authentication_document(self):\n return \"authentication document for %s\" % self.name\n\n def create_authentication_headers(self):\n return \"authentication headers for %s\" % self.name\n\n def get_credential_from_header(self, header):\n return \"credential for %s\" % self.name\n\n def create_bearer_token(self, *args, **kwargs):\n return \"bearer token for %s\" % self.name\n\n def oauth_provider_lookup(self, *args, **kwargs):\n return \"oauth provider for %s\" % self.name\n\n def decode_bearer_token(self, *args, **kwargs):\n return \"decoded bearer token for %s\" % self.name\n\n l1, ignore = create(self._db, Library, short_name=\"l1\")\n l2, ignore = create(self._db, Library, short_name=\"l2\")\n\n auth = Authenticator(self._db, self._db.query(Library))\n auth.library_authenticators[\"l1\"] = MockLibraryAuthenticator(\"l1\")\n auth.library_authenticators[\"l2\"] = MockLibraryAuthenticator(\"l2\")\n\n # This new library isn't in the authenticator.\n l3, ignore = create(self._db, Library, short_name=\"l3\")\n\n with self.app.test_request_context(\"/\"):\n flask.request.library = l3\n assert LIBRARY_NOT_FOUND == auth.authenticated_patron(self._db, {})\n assert LIBRARY_NOT_FOUND == auth.create_authentication_document()\n assert LIBRARY_NOT_FOUND == auth.create_authentication_headers()\n assert LIBRARY_NOT_FOUND == auth.get_credential_from_header({})\n assert LIBRARY_NOT_FOUND == auth.create_bearer_token()\n assert LIBRARY_NOT_FOUND == auth.oauth_provider_lookup()\n\n # The other libraries are in the authenticator.\n with self.app.test_request_context(\"/\"):\n flask.request.library = l1\n assert \"authenticated patron for l1\" == auth.authenticated_patron(\n self._db, {}\n )\n assert (\n \"authentication document for l1\"\n == auth.create_authentication_document()\n )\n assert (\n \"authentication headers for l1\" == auth.create_authentication_headers()\n )\n assert \"credential for l1\" == auth.get_credential_from_header({})\n assert \"bearer token for l1\" == auth.create_bearer_token()\n assert \"oauth provider for l1\" == auth.oauth_provider_lookup()\n assert \"decoded bearer token for l1\" == auth.decode_bearer_token()\n\n with self.app.test_request_context(\"/\"):\n flask.request.library = l2\n assert \"authenticated patron for l2\" == auth.authenticated_patron(\n self._db, {}\n )\n assert (\n \"authentication document for 
l2\"\n == auth.create_authentication_document()\n )\n assert (\n \"authentication headers for l2\" == auth.create_authentication_headers()\n )\n assert \"credential for l2\" == auth.get_credential_from_header({})\n assert \"bearer token for l2\" == auth.create_bearer_token()\n assert \"oauth provider for l2\" == auth.oauth_provider_lookup()\n assert \"decoded bearer token for l2\" == auth.decode_bearer_token()\n\n\nclass TestLibraryAuthenticator(AuthenticatorTest):\n def test_from_config_basic_auth_only(self):\n # Only a basic auth provider.\n millenium = self._external_integration(\n \"api.millenium_patron\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[self._default_library],\n )\n millenium.url = \"http://url/\"\n auth = LibraryAuthenticator.from_config(self._db, self._default_library)\n\n assert auth.basic_auth_provider != None\n assert isinstance(auth.basic_auth_provider, MilleniumPatronAPI)\n assert {} == auth.oauth_providers_by_name\n\n def test_from_config_basic_auth_and_oauth(self):\n library = self._default_library\n # A basic auth provider and an oauth provider.\n firstbook = self._external_integration(\n \"api.firstbook\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n firstbook.url = \"http://url/\"\n firstbook.password = \"<PASSWORD>\"\n library.integrations.append(firstbook)\n\n oauth = self._external_integration(\n \"api.clever\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n oauth.username = \"client_id\"\n oauth.password = \"<PASSWORD>\"\n library.integrations.append(oauth)\n\n analytics = MockAnalyticsProvider()\n auth = LibraryAuthenticator.from_config(self._db, library, analytics)\n\n assert auth.basic_auth_provider != None\n assert isinstance(auth.basic_auth_provider, FirstBookAuthenticationAPI)\n assert analytics == auth.basic_auth_provider.analytics\n\n assert 1 == len(auth.oauth_providers_by_name)\n clever = auth.oauth_providers_by_name[CleverAuthenticationAPI.NAME]\n assert isinstance(clever, CleverAuthenticationAPI)\n assert analytics == clever.analytics\n\n def test_with_custom_patron_catalog(self):\n \"\"\"Instantiation of a LibraryAuthenticator may\n include instantiation of a CustomPatronCatalog.\n \"\"\"\n mock_catalog = object()\n\n class MockCustomPatronCatalog(object):\n @classmethod\n def for_library(self, library):\n self.called_with = library\n return mock_catalog\n\n authenticator = LibraryAuthenticator.from_config(\n self._db,\n self._default_library,\n custom_catalog_source=MockCustomPatronCatalog,\n )\n assert self._default_library == MockCustomPatronCatalog.called_with\n\n # The custom patron catalog is stored as\n # authentication_document_annotator.\n assert mock_catalog == authenticator.authentication_document_annotator\n\n def test_config_succeeds_when_no_providers_configured(self):\n # You can call from_config even when there are no authentication\n # providers configured.\n\n # This should not happen in normal usage, but there will be an\n # interim period immediately after a library is created where\n # this will be its configuration.\n\n authenticator = LibraryAuthenticator.from_config(\n self._db, self._default_library\n )\n assert [] == list(authenticator.providers)\n\n def test_configuration_exception_during_from_config_stored(self):\n # If the initialization of an AuthenticationProvider from config\n # raises CannotLoadConfiguration or ImportError, the exception\n # is stored with the LibraryAuthenticator rather than being\n # propagated.\n\n # Create an integration destined to raise CannotLoadConfiguration..\n misconfigured = 
self._external_integration(\n \"api.firstbook\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n\n # ... and one destined to raise ImportError.\n unknown = self._external_integration(\n \"unknown protocol\", ExternalIntegration.PATRON_AUTH_GOAL\n )\n for integration in [misconfigured, unknown]:\n self._default_library.integrations.append(integration)\n auth = LibraryAuthenticator.from_config(self._db, self._default_library)\n\n # The LibraryAuthenticator exists but has no AuthenticationProviders.\n assert None == auth.basic_auth_provider\n assert {} == auth.oauth_providers_by_name\n\n # Both integrations have left their trace in\n # initialization_exceptions.\n not_configured = auth.initialization_exceptions[misconfigured.id]\n assert isinstance(not_configured, CannotLoadConfiguration)\n assert \"First Book server not configured.\" == str(not_configured)\n\n not_found = auth.initialization_exceptions[unknown.id]\n assert isinstance(not_found, ImportError)\n assert \"No module named 'unknown protocol'\" == str(not_found)\n\n def test_register_fails_when_integration_has_wrong_goal(self):\n integration = self._external_integration(\"protocol\", \"some other goal\")\n auth = LibraryAuthenticator(_db=self._db, library=self._default_library)\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n auth.register_provider(integration)\n assert (\n \"Was asked to register an integration with goal=some other goal as though it were a way of authenticating patrons.\"\n in str(excinfo.value)\n )\n\n def test_register_fails_when_integration_not_associated_with_library(self):\n integration = self._external_integration(\n \"protocol\", ExternalIntegration.PATRON_AUTH_GOAL\n )\n auth = LibraryAuthenticator(_db=self._db, library=self._default_library)\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n auth.register_provider(integration)\n assert \"Was asked to register an integration with library {}, which doesn't use it.\".format(\n self._default_library.name\n ) in str(\n excinfo.value\n )\n\n def test_register_fails_when_integration_module_does_not_contain_provider_class(\n self,\n ):\n library = self._default_library\n integration = self._external_integration(\n \"api.lanes\", ExternalIntegration.PATRON_AUTH_GOAL\n )\n library.integrations.append(integration)\n auth = LibraryAuthenticator(_db=self._db, library=library)\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n auth.register_provider(integration)\n assert (\n \"Loaded module api.lanes but could not find a class called AuthenticationProvider inside.\"\n in str(excinfo.value)\n )\n\n def test_register_provider_fails_but_does_not_explode_on_remote_integration_error(\n self,\n ):\n library = self._default_library\n # We're going to instantiate the a mock authentication provider that\n # immediately raises a RemoteIntegrationException, which will become\n # a CannotLoadConfiguration.\n integration = self._external_integration(\n \"tests.api.mock_authentication_provider\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n library.integrations.append(integration)\n auth = LibraryAuthenticator(_db=self._db, library=library)\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n auth.register_provider(integration)\n assert \"Could not instantiate\" in str(excinfo.value)\n assert \"authentication provider for library {}, possibly due to a network connection problem.\".format(\n self._default_library.name\n ) in str(\n excinfo.value\n )\n\n def test_register_provider_basic_auth(self):\n firstbook = self._external_integration(\n 
\"api.firstbook\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n firstbook.url = \"http://url/\"\n firstbook.password = \"<PASSWORD>\"\n self._default_library.integrations.append(firstbook)\n auth = LibraryAuthenticator(_db=self._db, library=self._default_library)\n auth.register_provider(firstbook)\n assert isinstance(auth.basic_auth_provider, FirstBookAuthenticationAPI)\n\n def test_register_oauth_provider(self):\n oauth = self._external_integration(\n \"api.clever\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n )\n oauth.username = \"client_id\"\n oauth.password = \"<PASSWORD>\"\n self._default_library.integrations.append(oauth)\n auth = LibraryAuthenticator(_db=self._db, library=self._default_library)\n auth.register_provider(oauth)\n assert 1 == len(auth.oauth_providers_by_name)\n clever = auth.oauth_providers_by_name[CleverAuthenticationAPI.NAME]\n assert isinstance(clever, CleverAuthenticationAPI)\n\n def test_oauth_provider_requires_secret(self):\n integration = self._external_integration(self._str)\n\n basic = MockBasicAuthenticationProvider(self._default_library, integration)\n oauth = MockOAuthAuthenticationProvider(self._default_library, \"provider1\")\n\n # You can create an Authenticator that only uses Basic Auth\n # without providing a secret.\n LibraryAuthenticator(\n _db=self._db, library=self._default_library, basic_auth_provider=basic\n )\n\n # You can create an Authenticator that uses OAuth if you\n # provide a secret.\n LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"foo\",\n )\n\n # But you can't create an Authenticator that uses OAuth\n # without providing a secret.\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n LibraryAuthenticator(\n _db=self._db, library=self._default_library, oauth_providers=[oauth]\n )\n assert (\n \"OAuth providers are configured, but secret for signing bearer tokens is not.\"\n in str(excinfo.value)\n )\n\n def test_supports_patron_authentication(self):\n authenticator = LibraryAuthenticator.from_config(\n self._db, self._default_library\n )\n\n # This LibraryAuthenticator does not actually support patron\n # authentication because it has no auth providers.\n #\n # (This isn't necessarily a deal breaker, but most libraries\n # do authenticate their patrons.)\n assert False == authenticator.supports_patron_authentication\n\n # Adding a basic auth provider will make it start supporting\n # patron authentication.\n authenticator.basic_auth_provider = object()\n assert True == authenticator.supports_patron_authentication\n authenticator.basic_auth_provider = None\n\n # So will adding an OAuth provider.\n authenticator.oauth_providers_by_name[object()] = object()\n assert True == authenticator.supports_patron_authentication\n\n def test_identifies_individuals(self):\n # This LibraryAuthenticator does not authenticate patrons at\n # all, so it does not identify patrons as individuals.\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n )\n\n # This LibraryAuthenticator has two Authenticators, but\n # neither of them identify patrons as individuals.\n class MockAuthenticator(object):\n NAME = \"mock\"\n IDENTIFIES_INDIVIDUALS = False\n\n basic = MockAuthenticator()\n oauth = MockAuthenticator()\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n basic_auth_provider=basic,\n oauth_providers=[oauth],\n bearer_token_signing_secret=self._str,\n )\n assert False == 
authenticator.identifies_individuals\n\n # If some Authenticators identify individuals and some do not,\n # the library as a whole does not (necessarily) identify\n # individuals.\n basic.IDENTIFIES_INDIVIDUALS = True\n assert False == authenticator.identifies_individuals\n\n # If every Authenticator identifies individuals, then so does\n # the library as a whole.\n oauth.IDENTIFIES_INDIVIDUALS = True\n assert True == authenticator.identifies_individuals\n\n def test_providers(self):\n integration = self._external_integration(self._str)\n basic = MockBasicAuthenticationProvider(self._default_library, integration)\n oauth1 = MockOAuthAuthenticationProvider(self._default_library, \"provider1\")\n oauth2 = MockOAuthAuthenticationProvider(self._default_library, \"provider2\")\n\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n basic_auth_provider=basic,\n oauth_providers=[oauth1, oauth2],\n bearer_token_signing_secret=\"foo\",\n )\n assert [basic, oauth1, oauth2] == list(authenticator.providers)\n\n def test_provider_registration(self):\n \"\"\"You can register the same provider multiple times,\n but you can't register two different basic auth providers,\n and you can't register two different OAuth providers\n with the same .NAME.\n \"\"\"\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n bearer_token_signing_secret=\"foo\",\n )\n integration = self._external_integration(self._str)\n basic1 = MockBasicAuthenticationProvider(self._default_library, integration)\n basic2 = MockBasicAuthenticationProvider(self._default_library, integration)\n oauth1 = MockOAuthAuthenticationProvider(self._default_library, \"provider1\")\n oauth2 = MockOAuthAuthenticationProvider(self._default_library, \"provider2\")\n oauth1_dupe = MockOAuthAuthenticationProvider(\n self._default_library, \"provider1\"\n )\n\n authenticator.register_basic_auth_provider(basic1)\n authenticator.register_basic_auth_provider(basic1)\n\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n authenticator.register_basic_auth_provider(basic2)\n assert \"Two basic auth providers configured\" in str(excinfo.value)\n\n authenticator.register_oauth_provider(oauth1)\n authenticator.register_oauth_provider(oauth1)\n authenticator.register_oauth_provider(oauth2)\n\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n authenticator.register_oauth_provider(oauth1_dupe)\n assert 'Two different OAuth providers claim the name \"provider1\"' in str(\n excinfo.value\n )\n\n def test_oauth_provider_lookup(self):\n\n # If there are no OAuth providers we cannot look one up.\n integration = self._external_integration(self._str)\n basic = MockBasicAuthenticationProvider(self._default_library, integration)\n authenticator = LibraryAuthenticator(\n _db=self._db, library=self._default_library, basic_auth_provider=basic\n )\n problem = authenticator.oauth_provider_lookup(\"provider1\")\n assert problem.uri == UNKNOWN_OAUTH_PROVIDER.uri\n assert _(\"No OAuth providers are configured.\") == problem.detail\n\n # We can look up registered providers but not unregistered providers.\n oauth1 = MockOAuthAuthenticationProvider(self._default_library, \"provider1\")\n oauth2 = MockOAuthAuthenticationProvider(self._default_library, \"provider2\")\n oauth3 = MockOAuthAuthenticationProvider(self._default_library, \"provider3\")\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n oauth_providers=[oauth1, oauth2],\n 
bearer_token_signing_secret=\"foo\",\n )\n\n provider = authenticator.oauth_provider_lookup(\"provider1\")\n assert oauth1 == provider\n\n problem = authenticator.oauth_provider_lookup(\"provider3\")\n assert problem.uri == UNKNOWN_OAUTH_PROVIDER.uri\n assert (\n _(\n \"The specified OAuth provider name isn't one of the known providers. The known providers are: provider1, provider2\"\n )\n == problem.detail\n )\n\n def test_authenticated_patron_basic(self):\n patron = self._patron()\n patrondata = PatronData(\n permanent_id=patron.external_identifier,\n authorization_identifier=patron.authorization_identifier,\n username=patron.username,\n neighborhood=\"Achewood\",\n )\n integration = self._external_integration(self._str)\n basic = MockBasicAuthenticationProvider(\n self._default_library, integration, patron=patron, patrondata=patrondata\n )\n authenticator = LibraryAuthenticator(\n _db=self._db, library=self._default_library, basic_auth_provider=basic\n )\n assert patron == authenticator.authenticated_patron(\n self._db, dict(username=\"foo\", password=\"<PASSWORD>\")\n )\n\n # Neighborhood information is being temporarily stored in the\n # Patron object for use elsewhere in request processing. It\n # won't be written to the database because there's no field in\n # `patrons` to store it.\n assert \"Achewood\" == patron.neighborhood\n\n # OAuth doesn't work.\n problem = authenticator.authenticated_patron(self._db, \"Bearer abcd\")\n assert UNSUPPORTED_AUTHENTICATION_MECHANISM == problem\n\n def test_authenticated_patron_oauth(self):\n patron1 = self._patron()\n patron2 = self._patron()\n oauth1 = MockOAuthAuthenticationProvider(\n self._default_library, \"oauth1\", patron=patron1\n )\n oauth2 = MockOAuthAuthenticationProvider(\n self._default_library, \"oauth2\", patron=patron2\n )\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n oauth_providers=[oauth1, oauth2],\n bearer_token_signing_secret=\"foo\",\n )\n\n # Ask oauth1 to create a bearer token.\n token = authenticator.create_bearer_token(oauth1.NAME, \"some token\")\n\n # The authenticator will decode the bearer token into a\n # provider and a provider token. 
It will look up the oauth1\n # provider (as opposed to oauth2) and ask it to authenticate\n # the provider token.\n #\n # This gives us patron1, as opposed to patron2.\n authenticated = authenticator.authenticated_patron(self._db, \"Bearer \" + token)\n assert patron1 == authenticated\n\n # Basic auth doesn't work.\n problem = authenticator.authenticated_patron(\n self._db, dict(username=\"foo\", password=\"<PASSWORD>\")\n )\n assert UNSUPPORTED_AUTHENTICATION_MECHANISM == problem\n\n def test_authenticated_patron_unsupported_mechanism(self):\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n )\n problem = authenticator.authenticated_patron(self._db, object())\n assert UNSUPPORTED_AUTHENTICATION_MECHANISM == problem\n\n def test_get_credential_from_header(self):\n integration = self._external_integration(self._str)\n basic = MockBasicAuthenticationProvider(self._default_library, integration)\n oauth = MockOAuthAuthenticationProvider(self._default_library, \"oauth1\")\n\n # We can pull the password out of a Basic Auth credential\n # if a Basic Auth authentication provider is configured.\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n basic_auth_provider=basic,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"secret\",\n )\n credential = dict(password=\"<PASSWORD>\")\n assert \"foo\" == authenticator.get_credential_from_header(credential)\n\n # We can't pull the password out if only OAuth authentication\n # providers are configured.\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n basic_auth_provider=None,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"secret\",\n )\n assert None == authenticator.get_credential_from_header(credential)\n\n def test_create_bearer_token(self):\n oauth1 = MockOAuthAuthenticationProvider(self._default_library, \"oauth1\")\n oauth2 = MockOAuthAuthenticationProvider(self._default_library, \"oauth2\")\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n oauth_providers=[oauth1, oauth2],\n bearer_token_signing_secret=\"foo\",\n )\n\n # A token is created and signed with the bearer token.\n token1 = authenticator.create_bearer_token(oauth1.NAME, \"some token\")\n assert (\n \"<KEY>\"\n == token1\n )\n\n # Varying the name of the OAuth provider varies the bearer\n # token.\n token2 = authenticator.create_bearer_token(oauth2.NAME, \"some token\")\n assert token1 != token2\n\n # Varying the token sent by the OAuth provider varies the\n # bearer token.\n token3 = authenticator.create_bearer_token(oauth1.NAME, \"some other token\")\n assert token3 != token1\n\n # Varying the secret used to sign the token varies the bearer\n # token.\n authenticator.bearer_token_signing_secret = \"a different secret\"\n token4 = authenticator.create_bearer_token(oauth1.NAME, \"some token\")\n assert token4 != token1\n\n def test_decode_bearer_token(self):\n oauth = MockOAuthAuthenticationProvider(self._default_library, \"oauth\")\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"secret\",\n )\n\n # A token is created and signed with the secret.\n token_value = (oauth.NAME, \"some token\")\n encoded = authenticator.create_bearer_token(*token_value)\n decoded = authenticator.decode_bearer_token(encoded)\n assert token_value == decoded\n\n decoded = authenticator.decode_bearer_token_from_header(\"Bearer \" + 
encoded)\n assert token_value == decoded\n\n def test_create_authentication_document(self):\n class MockAuthenticator(LibraryAuthenticator):\n \"\"\"Mock the _geographic_areas method.\"\"\"\n\n AREAS = [\"focus area\", \"service area\"]\n\n @classmethod\n def _geographic_areas(cls, library):\n return cls.AREAS\n\n integration = self._external_integration(self._str)\n library = self._default_library\n basic = MockBasicAuthenticationProvider(library, integration)\n oauth = MockOAuthAuthenticationProvider(library, \"oauth\")\n oauth.URI = \"http://example.org/\"\n library.name = \"A Fabulous Library\"\n authenticator = MockAuthenticator(\n _db=self._db,\n library=library,\n basic_auth_provider=basic,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"secret\",\n )\n\n class MockAuthenticationDocumentAnnotator(object):\n def annotate_authentication_document(self, library, doc, url_for):\n self.called_with = library, doc, url_for\n doc[\"modified\"] = \"Kilroy was here\"\n return doc\n\n annotator = MockAuthenticationDocumentAnnotator()\n authenticator.authentication_document_annotator = annotator\n\n # We're about to call url_for, so we must create an\n # application context.\n os.environ[\"AUTOINITIALIZE\"] = \"False\"\n from api.app import app\n\n self.app = app\n del os.environ[\"AUTOINITIALIZE\"]\n\n # Set up configuration settings for links.\n link_config = {\n LibraryAnnotator.TERMS_OF_SERVICE: \"http://terms\",\n LibraryAnnotator.PRIVACY_POLICY: \"http://privacy\",\n LibraryAnnotator.COPYRIGHT: \"http://copyright\",\n LibraryAnnotator.ABOUT: \"http://about\",\n LibraryAnnotator.LICENSE: \"http://license/\",\n LibraryAnnotator.REGISTER: \"custom-registration-hook://library/\",\n Configuration.LOGO: \"image data\",\n Configuration.WEB_CSS_FILE: \"http://style.css\",\n }\n\n for rel, value in link_config.items():\n ConfigurationSetting.for_library(rel, self._default_library).value = value\n\n ConfigurationSetting.for_library(\n Configuration.LIBRARY_DESCRIPTION, library\n ).value = \"Just the best.\"\n\n # Set the URL to the library's web page.\n ConfigurationSetting.for_library(\n Configuration.WEBSITE_URL, library\n ).value = \"http://library/\"\n\n # Set the color scheme a mobile client should use.\n ConfigurationSetting.for_library(\n Configuration.COLOR_SCHEME, library\n ).value = \"plaid\"\n\n # Set the colors a web client should use.\n ConfigurationSetting.for_library(\n Configuration.WEB_PRIMARY_COLOR, library\n ).value = \"#012345\"\n ConfigurationSetting.for_library(\n Configuration.WEB_SECONDARY_COLOR, library\n ).value = \"#abcdef\"\n\n # Configure the various ways a patron can get help.\n ConfigurationSetting.for_library(\n Configuration.HELP_EMAIL, library\n ).value = \"help@library\"\n ConfigurationSetting.for_library(\n Configuration.HELP_WEB, library\n ).value = \"http://library.help/\"\n ConfigurationSetting.for_library(\n Configuration.HELP_URI, library\n ).value = \"custom:uri\"\n\n base_url = ConfigurationSetting.sitewide(self._db, Configuration.BASE_URL_KEY)\n base_url.value = \"http://circulation-manager/\"\n\n # Configure three announcements: two active and one\n # inactive.\n format = \"%Y-%m-%d\"\n today = datetime.date.today()\n tomorrow = (today + datetime.timedelta(days=1)).strftime(format)\n yesterday = (today - datetime.timedelta(days=1)).strftime(format)\n two_days_ago = (today - datetime.timedelta(days=2)).strftime(format)\n today = today.strftime(format)\n announcements = [\n dict(\n id=\"a1\",\n content=\"this is announcement 1\",\n 
start=yesterday,\n finish=today,\n ),\n dict(\n id=\"a2\",\n content=\"this is announcement 2\",\n start=two_days_ago,\n finish=yesterday,\n ),\n dict(\n id=\"a3\",\n content=\"this is announcement 3\",\n start=yesterday,\n finish=today,\n ),\n ]\n announcement_setting = ConfigurationSetting.for_library(\n Announcements.SETTING_NAME, library\n )\n announcement_setting.value = json.dumps(announcements)\n\n with self.app.test_request_context(\"/\"):\n url = authenticator.authentication_document_url(library)\n assert url.endswith(\"/%s/authentication_document\" % library.short_name)\n\n doc = json.loads(authenticator.create_authentication_document())\n # The main thing we need to test is that the\n # authentication sub-documents are assembled properly and\n # placed in the right position.\n flows = doc[\"authentication\"]\n oauth_doc, basic_doc = sorted(flows, key=lambda x: x[\"type\"])\n\n expect_basic = basic.authentication_flow_document(self._db)\n assert expect_basic == basic_doc\n\n expect_oauth = oauth.authentication_flow_document(self._db)\n assert expect_oauth == oauth_doc\n\n # We also need to test that the library's name and ID\n # were placed in the document.\n assert \"A Fabulous Library\" == doc[\"title\"]\n assert \"Just the best.\" == doc[\"service_description\"]\n assert url == doc[\"id\"]\n\n # The mobile color scheme and web colors are correctly reported.\n assert \"plaid\" == doc[\"color_scheme\"]\n assert \"#012345\" == doc[\"web_color_scheme\"][\"primary\"]\n assert \"#abcdef\" == doc[\"web_color_scheme\"][\"secondary\"]\n\n # _geographic_areas was called and provided the library's\n # focus area and service area.\n assert \"focus area\" == doc[\"focus_area\"]\n assert \"service area\" == doc[\"service_area\"]\n\n # We also need to test that the links got pulled in\n # from the configuration.\n (\n about,\n alternate,\n copyright,\n help_uri,\n help_web,\n help_email,\n copyright_agent,\n profile,\n loans,\n license,\n logo,\n privacy_policy,\n register,\n start,\n stylesheet,\n terms_of_service,\n ) = sorted(doc[\"links\"], key=lambda x: (x[\"rel\"], x[\"href\"]))\n assert \"http://terms\" == terms_of_service[\"href\"]\n assert \"http://privacy\" == privacy_policy[\"href\"]\n assert \"http://copyright\" == copyright[\"href\"]\n assert \"http://about\" == about[\"href\"]\n assert \"http://license/\" == license[\"href\"]\n assert \"image data\" == logo[\"href\"]\n assert \"http://style.css\" == stylesheet[\"href\"]\n\n assert \"/loans\" in loans[\"href\"]\n assert \"http://opds-spec.org/shelf\" == loans[\"rel\"]\n assert OPDSFeed.ACQUISITION_FEED_TYPE == loans[\"type\"]\n\n assert \"/patrons/me\" in profile[\"href\"]\n assert ProfileController.LINK_RELATION == profile[\"rel\"]\n assert ProfileController.MEDIA_TYPE == profile[\"type\"]\n\n expect_start = url_for(\n \"index\",\n library_short_name=self._default_library.short_name,\n _external=True,\n )\n assert expect_start == start[\"href\"]\n\n # The start link points to an OPDS feed.\n assert OPDSFeed.ACQUISITION_FEED_TYPE == start[\"type\"]\n\n # Most of the other links have type='text/html'\n assert \"text/html\" == about[\"type\"]\n\n # The registration link doesn't have a type, because it\n # uses a non-HTTP URI scheme.\n assert \"type\" not in register\n assert \"custom-registration-hook://library/\" == register[\"href\"]\n\n # The logo link has type \"image/png\".\n assert \"image/png\" == logo[\"type\"]\n\n # We have three help links.\n assert \"custom:uri\" == help_uri[\"href\"]\n assert \"http://library.help/\" 
== help_web[\"href\"]\n assert \"text/html\" == help_web[\"type\"]\n assert \"mailto:help@library\" == help_email[\"href\"]\n\n # Since no special address was given for the copyright\n # designated agent, the help address was reused.\n copyright_rel = (\n \"http://librarysimplified.org/rel/designated-agent/copyright\"\n )\n assert copyright_rel == copyright_agent[\"rel\"]\n assert \"mailto:help@library\" == copyright_agent[\"href\"]\n\n # The public key is correct.\n assert authenticator.public_key == doc[\"public_key\"][\"value\"]\n assert \"RSA\" == doc[\"public_key\"][\"type\"]\n\n # The library's web page shows up as an HTML alternate\n # to the OPDS server.\n assert (\n dict(rel=\"alternate\", type=\"text/html\", href=\"http://library/\")\n == alternate\n )\n\n # Active announcements are published; inactive announcements are not.\n a1, a3 = doc[\"announcements\"]\n assert dict(id=\"a1\", content=\"this is announcement 1\") == a1\n assert dict(id=\"a3\", content=\"this is announcement 3\") == a3\n\n # Features that are enabled for this library are communicated\n # through the 'features' item.\n features = doc[\"features\"]\n assert [] == features[\"disabled\"]\n assert [Configuration.RESERVATIONS_FEATURE] == features[\"enabled\"]\n\n # If a separate copyright designated agent is configured,\n # that email address is used instead of the default\n # patron support address.\n ConfigurationSetting.for_library(\n Configuration.COPYRIGHT_DESIGNATED_AGENT_EMAIL, library\n ).value = \"mailto:<EMAIL>\"\n doc = json.loads(authenticator.create_authentication_document())\n [agent] = [x for x in doc[\"links\"] if x[\"rel\"] == copyright_rel]\n assert \"mailto:<EMAIL>\" == agent[\"href\"]\n\n # If no focus area or service area are provided, those fields\n # are not added to the document.\n MockAuthenticator.AREAS = [None, None]\n doc = json.loads(authenticator.create_authentication_document())\n for key in (\"focus_area\", \"service_area\"):\n assert key not in doc\n\n # If there are no announcements, the list of announcements is present\n # but empty.\n announcement_setting.value = None\n doc = json.loads(authenticator.create_authentication_document())\n assert [] == doc[\"announcements\"]\n\n # The annotator's annotate_authentication_document method\n # was called and successfully modified the authentication\n # document.\n assert (library, doc, url_for) == annotator.called_with\n assert \"Kilroy was here\" == doc[\"modified\"]\n\n # While we're in this context, let's also test\n # create_authentication_headers.\n\n # So long as the authenticator includes a basic auth\n # provider, that provider's .authentication_header is used\n # for WWW-Authenticate.\n headers = authenticator.create_authentication_headers()\n assert AuthenticationForOPDSDocument.MEDIA_TYPE == headers[\"Content-Type\"]\n assert basic.authentication_header == headers[\"WWW-Authenticate\"]\n\n # The response contains a Link header pointing to the authentication\n # document\n expect = \"<%s>; rel=%s\" % (\n authenticator.authentication_document_url(self._default_library),\n AuthenticationForOPDSDocument.LINK_RELATION,\n )\n assert expect == headers[\"Link\"]\n\n # If the authenticator does not include a basic auth provider,\n # no WWW-Authenticate header is provided.\n authenticator = LibraryAuthenticator(\n _db=self._db,\n library=library,\n oauth_providers=[oauth],\n bearer_token_signing_secret=\"secret\",\n )\n headers = authenticator.create_authentication_headers()\n assert \"WWW-Authenticate\" not in headers\n\n def 
test_key_pair(self):\n \"\"\"Test the public/private key pair associated with a library.\"\"\"\n\n library = self._default_library\n\n # Initially, the KEY_PAIR setting is not set.\n def keys():\n return ConfigurationSetting.for_library(\n Configuration.KEY_PAIR, library\n ).json_value\n\n assert None == keys()\n\n # Instantiating a LibraryAuthenticator for a library automatically\n # generates a public/private key pair.\n auth = LibraryAuthenticator.from_config(self._db, library)\n public, private = keys()\n assert \"BEGIN PUBLIC KEY\" in public\n assert \"BEGIN RSA PRIVATE KEY\" in private\n\n # The public key is stored in the\n # LibraryAuthenticator.public_key property.\n assert public == auth.public_key\n\n # The private key is not stored in the LibraryAuthenticator\n # object, but it can be obtained from the database by\n # using the key_pair property.\n assert not hasattr(auth, \"private_key\")\n assert (public, private) == auth.key_pair\n\n # Each library has its own key pair.\n library2 = self._library()\n auth2 = LibraryAuthenticator.from_config(self._db, library2)\n assert auth.public_key != auth2.public_key\n\n def test__geographic_areas(self):\n \"\"\"Test the _geographic_areas helper method.\"\"\"\n\n class Mock(LibraryAuthenticator):\n values = {\n Configuration.LIBRARY_FOCUS_AREA: \"focus\",\n Configuration.LIBRARY_SERVICE_AREA: \"service\",\n }\n\n @classmethod\n def _geographic_area(cls, key, library):\n cls.called_with = library\n return cls.values.get(key)\n\n # _geographic_areas calls _geographic_area twice and\n # returns the results in a 2-tuple.\n m = Mock._geographic_areas\n library = object()\n assert (\"focus\", \"service\") == m(library)\n assert library == Mock.called_with\n\n # If only one value is provided, the same value is given for both\n # areas.\n del Mock.values[Configuration.LIBRARY_FOCUS_AREA]\n assert (\"service\", \"service\") == m(library)\n\n Mock.values[Configuration.LIBRARY_FOCUS_AREA] = \"focus\"\n del Mock.values[Configuration.LIBRARY_SERVICE_AREA]\n assert (\"focus\", \"focus\") == m(library)\n\n def test__geographic_area(self):\n \"\"\"Test the _geographic_area helper method.\"\"\"\n library = self._default_library\n key = \"a key\"\n setting = ConfigurationSetting.for_library(key, library)\n\n def m():\n return LibraryAuthenticator._geographic_area(key, library)\n\n # A missing value is returned as None.\n assert None == m()\n\n # The literal string \"everywhere\" is returned as is.\n setting.value = \"everywhere\"\n assert \"everywhere\" == m()\n\n # A string that makes sense as JSON is returned as its JSON\n # equivalent.\n two_states = [\"NY\", \"NJ\"]\n setting.value = json.dumps(two_states)\n assert two_states == m()\n\n # A string that does not make sense as JSON is put in a\n # single-element list.\n setting.value = \"Arvin, CA\"\n assert [\"Arvin, CA\"] == m()\n\n\nclass TestAuthenticationProvider(AuthenticatorTest):\n\n credentials = dict(username=\"user\", password=\"\")\n\n def test_external_integration(self):\n provider = self.mock_basic(patrondata=None)\n assert self.mock_basic_integration == provider.external_integration(self._db)\n\n def test_authenticated_patron_passes_on_none(self):\n provider = self.mock_basic(patrondata=None)\n patron = provider.authenticated_patron(self._db, self.credentials)\n assert None == patron\n\n def test_authenticated_patron_passes_on_problem_detail(self):\n provider = self.mock_basic(patrondata=UNSUPPORTED_AUTHENTICATION_MECHANISM)\n patron = provider.authenticated_patron(self._db, 
self.credentials)\n assert UNSUPPORTED_AUTHENTICATION_MECHANISM == patron\n\n def test_authenticated_patron_allows_access_to_expired_credentials(self):\n \"\"\"Even if your card has expired, you can log in -- you just can't\n borrow books.\n \"\"\"\n yesterday = utc_now() - datetime.timedelta(days=1)\n\n expired = PatronData(\n permanent_id=\"1\",\n authorization_identifier=\"2\",\n authorization_expires=yesterday,\n )\n provider = self.mock_basic(\n patrondata=expired, remote_patron_lookup_patrondata=expired\n )\n patron = provider.authenticated_patron(self._db, self.credentials)\n assert \"1\" == patron.external_identifier\n assert \"2\" == patron.authorization_identifier\n\n def test_authenticated_patron_updates_metadata_if_necessary(self):\n patron = self._patron()\n assert True == PatronUtility.needs_external_sync(patron)\n\n # If we authenticate this patron by username we find out their\n # permanent ID but not any other information about them.\n username = \"user\"\n barcode = \"1234\"\n incomplete_data = PatronData(\n permanent_id=patron.external_identifier,\n authorization_identifier=username,\n complete=False,\n )\n\n # If we do a lookup for this patron we will get more complete\n # information.\n complete_data = PatronData(\n permanent_id=patron.external_identifier,\n authorization_identifier=barcode,\n username=username,\n cached_neighborhood=\"Little Homeworld\",\n complete=True,\n )\n\n provider = self.mock_basic(\n patrondata=incomplete_data, remote_patron_lookup_patrondata=complete_data\n )\n patron2 = provider.authenticated_patron(self._db, self.credentials)\n\n # We found the right patron.\n assert patron == patron2\n\n # We updated their metadata.\n assert \"user\" == patron.username\n assert barcode == patron.authorization_identifier\n assert \"Little Homeworld\" == patron.cached_neighborhood\n\n # .cached_neighborhood (stored in the database) was reused as\n # .neighborhood (destroyed at the end of the request)\n assert \"Little Homeworld\" == patron.neighborhood\n\n # We did a patron lookup, which means we updated\n # .last_external_sync.\n assert patron.last_external_sync != None\n assert barcode == patron.authorization_identifier\n assert username == patron.username\n\n # Looking up the patron a second time does not cause another\n # metadata refresh, because we just did a refresh and the\n # patron has borrowing privileges.\n last_sync = patron.last_external_sync\n assert False == PatronUtility.needs_external_sync(patron)\n patron = provider.authenticated_patron(self._db, dict(username=username))\n assert last_sync == patron.last_external_sync\n assert barcode == patron.authorization_identifier\n assert username == patron.username\n\n # Here, patron.neighborhood was copied over from\n # patron.cached_neighborhood. 
It couldn't have been set by a\n # metadata refresh, because there was no refresh.\n assert \"Little Homeworld\" == patron.neighborhood\n\n # If we somehow authenticate with an identifier other than\n # the ones in the Patron record, we trigger another metadata\n # refresh to see if anything has changed.\n incomplete_data = PatronData(\n permanent_id=patron.external_identifier,\n authorization_identifier=\"some other identifier\",\n complete=False,\n )\n provider.patrondata = incomplete_data\n patron = provider.authenticated_patron(\n self._db, dict(username=\"someotheridentifier\")\n )\n assert patron.last_external_sync > last_sync\n\n # But Patron.authorization_identifier doesn't actually change\n # to \"some other identifier\", because when we do the metadata\n # refresh we get the same data as before.\n assert barcode == patron.authorization_identifier\n assert username == patron.username\n\n def test_update_patron_metadata(self):\n patron = self._patron()\n patron.authorization_identifier = \"2345\"\n assert None == patron.last_external_sync\n assert None == patron.username\n\n patrondata = PatronData(username=\"user\", neighborhood=\"Little Homeworld\")\n provider = self.mock_basic(remote_patron_lookup_patrondata=patrondata)\n provider.external_type_regular_expression = re.compile(\"^(.)\")\n provider.update_patron_metadata(patron)\n\n # The patron's username has been changed.\n assert \"user\" == patron.username\n\n # last_external_sync has been updated.\n assert patron.last_external_sync != None\n\n # external_type was updated based on the regular expression\n assert \"2\" == patron.external_type\n\n # .neighborhood was not stored in .cached_neighborhood. In\n # this case, it must be cheap to get .neighborhood every time,\n # and it's better not to store information we can get cheaply.\n assert \"Little Homeworld\" == patron.neighborhood\n assert None == patron.cached_neighborhood\n\n def test_update_patron_metadata_noop_if_no_remote_metadata(self):\n\n patron = self._patron()\n provider = self.mock_basic(patrondata=None)\n provider.update_patron_metadata(patron)\n\n # We can tell that update_patron_metadata was a no-op because\n # patron.last_external_sync didn't change.\n assert None == patron.last_external_sync\n\n def test_remote_patron_lookup(self):\n \"\"\"The default implementation of remote_patron_lookup returns whatever was passed in.\"\"\"\n provider = BasicAuthenticationProvider(\n self._default_library, self._external_integration(self._str)\n )\n assert None == provider.remote_patron_lookup(None)\n patron = self._patron()\n assert patron == provider.remote_patron_lookup(patron)\n patrondata = PatronData()\n assert patrondata == provider.remote_patron_lookup(patrondata)\n\n def test_update_patron_external_type(self):\n patron = self._patron()\n patron.authorization_identifier = \"A123\"\n patron.external_type = \"old value\"\n library = patron.library\n integration = self._external_integration(self._str)\n\n class MockProvider(AuthenticationProvider):\n NAME = \"Just a mock\"\n\n setting = ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n MockProvider.EXTERNAL_TYPE_REGULAR_EXPRESSION,\n library,\n integration,\n )\n setting.value = None\n\n # If there is no EXTERNAL_TYPE_REGULAR_EXPRESSION, calling\n # update_patron_external_type does nothing.\n MockProvider(library, integration).update_patron_external_type(patron)\n assert \"old value\" == patron.external_type\n\n setting.value = \"([A-Z])\"\n MockProvider(library, 
integration).update_patron_external_type(patron)\n assert \"A\" == patron.external_type\n\n setting.value = \"([0-9]$)\"\n MockProvider(library, integration).update_patron_external_type(patron)\n assert \"3\" == patron.external_type\n\n # This regexp has no groups, so it has no power to change\n # external_type.\n setting.value = \"A\"\n MockProvider(library, integration).update_patron_external_type(patron)\n assert \"3\" == patron.external_type\n\n # This regexp is invalid, so it isn't used.\n setting.value = \"(not a valid regexp\"\n provider = MockProvider(library, integration)\n assert None == provider.external_type_regular_expression\n\n def test_restriction_matches(self):\n \"\"\"Test the behavior of the library identifier restriction algorithm.\"\"\"\n m = AuthenticationProvider._restriction_matches\n\n # If restriction is none, we always return True.\n assert True == m(\n \"123\",\n None,\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX,\n )\n assert True == m(\n \"123\",\n None,\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING,\n )\n assert True == m(\n \"123\",\n None,\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX,\n )\n assert True == m(\n \"123\", None, AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST\n )\n\n # If field is None we always return False.\n assert False == m(\n None,\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX,\n )\n assert False == m(\n None,\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING,\n )\n assert False == m(\n None,\n re.compile(\".*\"),\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX,\n )\n assert False == m(\n None,\n [\"1\", \"2\"],\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST,\n )\n\n # Test prefix\n assert True == m(\n \"12345a\",\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX,\n )\n assert False == m(\n \"a1234\",\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX,\n )\n\n # Test string\n assert False == m(\n \"12345a\",\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING,\n )\n assert False == m(\n \"a1234\",\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING,\n )\n assert True == m(\n \"1234\",\n \"1234\",\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING,\n )\n\n # Test list\n assert True == m(\n \"1234\",\n [\"1234\", \"4321\"],\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST,\n )\n assert True == m(\n \"4321\",\n [\"1234\", \"4321\"],\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST,\n )\n assert False == m(\n \"12345\",\n [\"1234\", \"4321\"],\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST,\n )\n assert False == m(\n \"54321\",\n [\"1234\", \"4321\"],\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST,\n )\n\n # Test Regex\n assert True == m(\n \"123\",\n re.compile(\"^(12|34)\"),\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX,\n )\n assert True == m(\n \"345\",\n re.compile(\"^(12|34)\"),\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX,\n )\n assert False == m(\n \"abc\",\n re.compile(\"^bc\"),\n AuthenticationProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX,\n )\n\n def test_enforce_library_identifier_restriction(self):\n \"\"\"Test the enforce_library_identifier_restriction method.\"\"\"\n provider = self.mock_basic()\n 
m = provider.enforce_library_identifier_restriction\n patron = self._patron()\n patrondata = PatronData()\n\n # Test with patron rather than patrondata as argument\n assert patron == m(object(), patron)\n patron.library_id = -1\n assert False == m(object(), patron)\n\n # Test no restriction\n provider.library_identifier_restriction_type = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_NONE\n )\n provider.library_identifier_restriction = \"2345\"\n provider.library_identifier_field = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_BARCODE\n )\n assert patrondata == m(\"12365\", patrondata)\n\n # Test regex against barcode\n provider.library_identifier_restriction_type = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX\n )\n provider.library_identifier_restriction = re.compile(\"23[46]5\")\n provider.library_identifier_field = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_BARCODE\n )\n assert patrondata == m(\"23456\", patrondata)\n assert patrondata == m(\"2365\", patrondata)\n assert False == m(\"2375\", provider.patrondata)\n\n # Test prefix against barcode\n provider.library_identifier_restriction_type = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX\n )\n provider.library_identifier_restriction = \"2345\"\n provider.library_identifier_field = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_BARCODE\n )\n assert patrondata == m(\"23456\", patrondata)\n assert False == m(\"123456\", patrondata)\n\n # Test string against barcode\n provider.library_identifier_restriction_type = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING\n )\n provider.library_identifier_restriction = \"2345\"\n provider.library_identifier_field = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_BARCODE\n )\n assert False == m(\"123456\", patrondata)\n assert patrondata == m(\"2345\", patrondata)\n\n # Test match applied to field on patrondata not barcode\n provider.library_identifier_restriction_type = (\n MockBasic.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING\n )\n provider.library_identifier_restriction = \"2345\"\n provider.library_identifier_field = \"agent\"\n patrondata.library_identifier = \"2345\"\n assert patrondata == m(\"123456\", patrondata)\n patrondata.library_identifier = \"12345\"\n assert False == m(\"2345\", patrondata)\n\n def test_patron_identifier_restriction(self):\n library = self._default_library\n integration = self._external_integration(self._str)\n\n class MockProvider(AuthenticationProvider):\n NAME = \"Just a mock\"\n\n string_setting = ConfigurationSetting.for_library_and_externalintegration(\n self._db, MockProvider.LIBRARY_IDENTIFIER_RESTRICTION, library, integration\n )\n\n type_setting = ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE,\n library,\n integration,\n )\n\n # If the type is regex, it's converted into a regular expression.\n type_setting.value = MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_REGEX\n string_setting.value = \"^abcd\"\n provider = MockProvider(library, integration)\n assert \"^abcd\" == provider.library_identifier_restriction.pattern\n\n # If its type is list, make sure it's converted into a list\n type_setting.value = MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_LIST\n string_setting.value = \"a,b,c\"\n provider = MockProvider(library, integration)\n assert [\"a\", \"b\", \"c\"] == provider.library_identifier_restriction\n\n # If its type is prefix, make sure it's a string\n type_setting.value = MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX\n string_setting.value 
= \"abc\"\n provider = MockProvider(library, integration)\n assert \"abc\" == provider.library_identifier_restriction\n\n # If its type is string make sure its a string\n type_setting.value = MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_STRING\n string_setting.value = \"abc\"\n provider = MockProvider(library, integration)\n assert \"abc\" == provider.library_identifier_restriction\n\n # If its type is none make sure its actually None\n type_setting.value = MockProvider.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_NONE\n string_setting.value = \"abc\"\n provider = MockProvider(library, integration)\n assert None == provider.library_identifier_restriction\n\n\nclass TestBasicAuthenticationProvider(AuthenticatorTest):\n def test_constructor(self):\n\n b = BasicAuthenticationProvider\n\n class ConfigAuthenticationProvider(b):\n NAME = \"Config loading test\"\n\n integration = self._external_integration(\n self._str, goal=ExternalIntegration.PATRON_AUTH_GOAL\n )\n self._default_library.integrations.append(integration)\n integration.setting(b.IDENTIFIER_REGULAR_EXPRESSION).value = \"idre\"\n integration.setting(b.PASSWORD_REGULAR_EXPRESSION).value = \"<PASSWORD>\"\n integration.setting(b.TEST_IDENTIFIER).value = \"username\"\n integration.setting(b.TEST_PASSWORD).value = \"pw\"\n\n provider = ConfigAuthenticationProvider(self._default_library, integration)\n assert \"idre\" == provider.identifier_re.pattern\n assert \"pwre\" == provider.password_re.pattern\n assert \"username\" == provider.test_username\n assert \"pw\" == provider.test_password\n\n # Test the defaults.\n integration = self._external_integration(\n self._str, goal=ExternalIntegration.PATRON_AUTH_GOAL\n )\n provider = ConfigAuthenticationProvider(self._default_library, integration)\n assert b.DEFAULT_IDENTIFIER_REGULAR_EXPRESSION == provider.identifier_re\n assert None == provider.password_re\n\n def test_testing_patron(self):\n class MockAuthenticatedPatron(MockBasicAuthenticationProvider):\n def __init__(self, *args, **kwargs):\n self._authenticated_patron_returns = kwargs.pop(\n \"_authenticated_patron_returns\", None\n )\n super(MockAuthenticatedPatron, self).__init__(*args, **kwargs)\n\n def authenticated_patron(self, *args, **kwargs):\n return self._authenticated_patron_returns\n\n # You don't have to have a testing patron.\n integration = self._external_integration(self._str)\n no_testing_patron = BasicAuthenticationProvider(\n self._default_library, integration\n )\n assert (None, None) == no_testing_patron.testing_patron(self._db)\n\n # But if you don't, testing_patron_or_bust() will raise an\n # exception.\n with pytest.raises(CannotLoadConfiguration) as excinfo:\n no_testing_patron.testing_patron_or_bust(self._db)\n assert \"No test patron identifier is configured\" in str(excinfo.value)\n\n # We configure a testing patron but their username and\n # password don't actually authenticate anyone. 
We don't crash,\n # but we can't look up the testing patron either.\n b = BasicAuthenticationProvider\n integration = self._external_integration(self._str)\n integration.setting(b.TEST_IDENTIFIER).value = \"1\"\n integration.setting(b.TEST_PASSWORD).value = \"2\"\n missing_patron = MockBasicAuthenticationProvider(\n self._default_library, integration, patron=None\n )\n value = missing_patron.testing_patron(self._db)\n assert (None, \"2\") == value\n\n # And testing_patron_or_bust() still doesn't work.\n with pytest.raises(IntegrationException) as excinfo:\n missing_patron.testing_patron_or_bust(self._db)\n assert \"Remote declined to authenticate the test patron.\" in str(excinfo.value)\n\n # We configure a testing patron but authenticating them\n # results in a problem detail document.\n b = BasicAuthenticationProvider\n patron = self._patron()\n integration = self._external_integration(self._str)\n integration.setting(b.TEST_IDENTIFIER).value = \"1\"\n integration.setting(b.TEST_PASSWORD).value = \"2\"\n problem_patron = MockAuthenticatedPatron(\n self._default_library,\n integration,\n patron=patron,\n _authenticated_patron_returns=PATRON_OF_ANOTHER_LIBRARY,\n )\n value = problem_patron.testing_patron(self._db)\n assert patron != PATRON_OF_ANOTHER_LIBRARY\n assert (PATRON_OF_ANOTHER_LIBRARY, \"2\") == value\n\n # And testing_patron_or_bust() still doesn't work.\n with pytest.raises(IntegrationException) as excinfo:\n problem_patron.testing_patron_or_bust(self._db)\n assert \"Test patron lookup returned a problem detail\" in str(excinfo.value)\n\n # We configure a testing patron but authenticating them\n # results in something (non None) that's not a Patron\n # or a problem detail document.\n not_a_patron = \"<not a patron>\"\n b = BasicAuthenticationProvider\n patron = self._patron()\n integration = self._external_integration(self._str)\n integration.setting(b.TEST_IDENTIFIER).value = \"1\"\n integration.setting(b.TEST_PASSWORD).value = \"2\"\n problem_patron = MockAuthenticatedPatron(\n self._default_library,\n integration,\n patron=patron,\n _authenticated_patron_returns=not_a_patron,\n )\n value = problem_patron.testing_patron(self._db)\n assert patron != not_a_patron\n assert (not_a_patron, \"2\") == value\n\n # And testing_patron_or_bust() still doesn't work.\n with pytest.raises(IntegrationException) as excinfo:\n problem_patron.testing_patron_or_bust(self._db)\n assert \"Test patron lookup returned invalid value for patron\" in str(\n excinfo.value\n )\n\n # Here, we configure a testing patron who is authenticated by\n # their username and password.\n patron = self._patron()\n present_patron = MockBasicAuthenticationProvider(\n self._default_library, integration, patron=patron\n )\n value = present_patron.testing_patron(self._db)\n assert (patron, \"2\") == value\n\n # Finally, testing_patron_or_bust works, returning the same\n # value as testing_patron()\n assert value == present_patron.testing_patron_or_bust(self._db)\n\n def test__run_self_tests(self):\n _db = object()\n\n class CantAuthenticateTestPatron(BasicAuthenticationProvider):\n def __init__(self):\n pass\n\n def testing_patron_or_bust(self, _db):\n self.called_with = _db\n raise Exception(\"Nope\")\n\n # If we can't authenticate a test patron, the rest of the tests\n # aren't even run.\n provider = CantAuthenticateTestPatron()\n [result] = list(provider._run_self_tests(_db))\n assert _db == provider.called_with\n assert False == result.success\n assert \"Nope\" == result.exception.args[0]\n\n # If we can authenticate 
a test patron, the patron and their\n # password are passed into the next test.\n\n class Mock(BasicAuthenticationProvider):\n def __init__(self, patron, password):\n self.patron = patron\n self.password = password\n\n def testing_patron_or_bust(self, _db):\n return self.patron, self.password\n\n def update_patron_metadata(self, patron):\n # The patron obtained from testing_patron_or_bust\n # is passed into update_patron_metadata.\n assert patron == self.patron\n return \"some metadata\"\n\n provider = Mock(\"patron\", \"password\")\n [get_patron, update_metadata] = provider._run_self_tests(object())\n assert \"Authenticating test patron\" == get_patron.name\n assert True == get_patron.success\n assert (provider.patron, provider.password) == get_patron.result\n\n assert \"Syncing patron metadata\" == update_metadata.name\n assert True == update_metadata.success\n assert \"some metadata\" == update_metadata.result\n\n def test_client_configuration(self):\n \"\"\"Test that client-side configuration settings are retrieved from\n ConfigurationSetting objects.\n \"\"\"\n b = BasicAuthenticationProvider\n integration = self._external_integration(self._str)\n integration.setting(b.IDENTIFIER_KEYBOARD).value = b.EMAIL_ADDRESS_KEYBOARD\n integration.setting(b.PASSWORD_KEYBOARD).value = b.NUMBER_PAD\n integration.setting(b.IDENTIFIER_LABEL).value = \"Your Library Card\"\n integration.setting(b.PASSWORD_LABEL).value = \"Password\"\n integration.setting(b.IDENTIFIER_BARCODE_FORMAT).value = \"some barcode\"\n\n provider = b(self._default_library, integration)\n\n assert b.EMAIL_ADDRESS_KEYBOARD == provider.identifier_keyboard\n assert b.NUMBER_PAD == provider.password_keyboard\n assert \"Your Library Card\" == provider.identifier_label\n assert \"Password\" == provider.password_label\n assert \"some barcode\" == provider.identifier_barcode_format\n\n def test_server_side_validation(self):\n b = BasicAuthenticationProvider\n integration = self._external_integration(self._str)\n integration.setting(b.IDENTIFIER_REGULAR_EXPRESSION).value = \"foo\"\n integration.setting(b.PASSWORD_REGULAR_EXPRESSION).value = \"bar\"\n\n provider = b(self._default_library, integration)\n\n assert True == provider.server_side_validation(\"food\", \"barbecue\")\n assert False == provider.server_side_validation(\"food\", \"arbecue\")\n assert False == provider.server_side_validation(\"ood\", \"barbecue\")\n assert False == provider.server_side_validation(None, None)\n\n # If this authenticator does not look at provided passwords,\n # then the only values that will pass validation are null\n # and the empty string.\n provider.password_keyboard = provider.NULL_KEYBOARD\n assert False == provider.server_side_validation(\"food\", \"barbecue\")\n assert False == provider.server_side_validation(\"food\", \"is good\")\n assert False == provider.server_side_validation(\"food\", \" \")\n assert True == provider.server_side_validation(\"food\", None)\n assert True == provider.server_side_validation(\"food\", \"\")\n provider.password_keyboard = provider.DEFAULT_KEYBOARD\n\n # It's okay not to provide anything for server side validation.\n # The default settings will be used.\n integration.setting(b.IDENTIFIER_REGULAR_EXPRESSION).value = None\n integration.setting(b.PASSWORD_REGULAR_EXPRESSION).value = None\n provider = b(self._default_library, integration)\n assert (\n b.DEFAULT_IDENTIFIER_REGULAR_EXPRESSION.pattern\n == provider.identifier_re.pattern\n )\n assert None == provider.password_re\n assert True == 
provider.server_side_validation(\"food\", \"barbecue\")\n assert True == provider.server_side_validation(\"a\", None)\n assert False == provider.server_side_validation(\"!@#$\", None)\n\n # Test maximum length of identifier and password.\n integration.setting(b.IDENTIFIER_MAXIMUM_LENGTH).value = \"5\"\n integration.setting(b.PASSWORD_MAXIMUM_LENGTH).value = \"10\"\n provider = b(self._default_library, integration)\n\n assert True == provider.server_side_validation(\"a\", \"1234\")\n assert False == provider.server_side_validation(\"a\", \"123456789012345\")\n assert False == provider.server_side_validation(\"abcdefghijklmnop\", \"1234\")\n\n # You can disable the password check altogether by setting maximum\n # length to zero.\n integration.setting(b.PASSWORD_MAXIMUM_LENGTH).value = \"0\"\n provider = b(self._default_library, integration)\n assert True == provider.server_side_validation(\"a\", None)\n\n def test_local_patron_lookup(self):\n # This patron of another library looks just like the patron\n # we're about to create, but will never be selected.\n other_library = self._library()\n other_library_patron = self._patron(\"patron1_ext_id\", library=other_library)\n other_library_patron.authorization_identifier = \"patron1_auth_id\"\n other_library_patron.username = \"patron1\"\n\n patron1 = self._patron(\"patron1_ext_id\")\n patron1.authorization_identifier = \"patron1_auth_id\"\n patron1.username = \"patron1\"\n\n patron2 = self._patron(\"patron2_ext_id\")\n patron2.authorization_identifier = \"patron2_auth_id\"\n patron2.username = \"patron2\"\n self._db.commit()\n\n provider = self.mock_basic()\n\n # If we provide PatronData associated with patron1, we look up\n # patron1, even though we provided the username associated\n # with patron2.\n for patrondata_args in [\n dict(permanent_id=patron1.external_identifier),\n dict(authorization_identifier=patron1.authorization_identifier),\n dict(username=patron1.username),\n dict(\n permanent_id=PatronData.NO_VALUE,\n username=PatronData.NO_VALUE,\n authorization_identifier=patron1.authorization_identifier,\n ),\n ]:\n patrondata = PatronData(**patrondata_args)\n assert patron1 == provider.local_patron_lookup(\n self._db, patron2.authorization_identifier, patrondata\n )\n\n # If no PatronData is provided, we can look up patron1 either\n # by authorization identifier or username, but not by\n # permanent identifier.\n assert patron1 == provider.local_patron_lookup(\n self._db, patron1.authorization_identifier, None\n )\n assert patron1 == provider.local_patron_lookup(self._db, patron1.username, None)\n assert None == provider.local_patron_lookup(\n self._db, patron1.external_identifier, None\n )\n\n def test_get_credential_from_header(self):\n provider = self.mock_basic()\n assert None == provider.get_credential_from_header(\"Bearer [some token]\")\n assert None == provider.get_credential_from_header(dict())\n assert \"foo\" == provider.get_credential_from_header(dict(password=\"<PASSWORD>\"))\n\n def test_authentication_flow_document(self):\n \"\"\"Test the default authentication provider document.\"\"\"\n provider = self.mock_basic()\n provider.identifier_maximum_length = 22\n provider.password_maximum_length = 7\n provider.identifier_barcode_format = provider.BARCODE_FORMAT_CODABAR\n\n # We're about to call url_for, so we must create an\n # application context.\n os.environ[\"AUTOINITIALIZE\"] = \"False\"\n from api.app import app\n\n self.app = app\n del os.environ[\"AUTOINITIALIZE\"]\n with self.app.test_request_context(\"/\"):\n doc = 
provider.authentication_flow_document(self._db)\n assert _(provider.DISPLAY_NAME) == doc[\"description\"]\n assert provider.FLOW_TYPE == doc[\"type\"]\n\n labels = doc[\"labels\"]\n assert provider.identifier_label == labels[\"login\"]\n assert provider.password_label == labels[\"password\"]\n\n inputs = doc[\"inputs\"]\n assert provider.identifier_keyboard == inputs[\"login\"][\"keyboard\"]\n assert provider.password_keyboard == inputs[\"password\"][\"keyboard\"]\n\n assert provider.BARCODE_FORMAT_CODABAR == inputs[\"login\"][\"barcode_format\"]\n\n assert (\n provider.identifier_maximum_length == inputs[\"login\"][\"maximum_length\"]\n )\n assert (\n provider.password_maximum_length == inputs[\"password\"][\"maximum_length\"]\n )\n\n [logo_link] = doc[\"links\"]\n assert \"logo\" == logo_link[\"rel\"]\n assert (\n \"http://localhost/images/\" + MockBasic.LOGIN_BUTTON_IMAGE\n == logo_link[\"href\"]\n )\n\n def test_remote_patron_lookup(self):\n # remote_patron_lookup does the lookup by calling _remote_patron_lookup,\n # then calls enforce_library_identifier_restriction to make sure that the patron\n # is associated with the correct library\n\n class Mock(BasicAuthenticationProvider):\n def _remote_patron_lookup(self, patron_or_patrondata):\n self._remote_patron_lookup_called_with = patron_or_patrondata\n return patron_or_patrondata\n\n def enforce_library_identifier_restriction(self, identifier, patrondata):\n self.enforce_library_identifier_restriction_called_with = (\n identifier,\n patrondata,\n )\n return \"Result\"\n\n integration = self._external_integration(\n self._str, ExternalIntegration.PATRON_AUTH_GOAL\n )\n provider = Mock(self._default_library, integration)\n patron = self._patron()\n assert \"Result\" == provider.remote_patron_lookup(patron)\n assert provider._remote_patron_lookup_called_with == patron\n assert provider.enforce_library_identifier_restriction_called_with == (\n patron.authorization_identifier,\n patron,\n )\n\n def test_scrub_credential(self):\n # Verify that the scrub_credential helper method strips extra whitespace\n # and nothing else.\n integration = self._external_integration(\n self._str, ExternalIntegration.PATRON_AUTH_GOAL\n )\n provider = BasicAuthenticationProvider(self._default_library, integration)\n m = provider.scrub_credential\n\n assert None == provider.scrub_credential(None)\n assert 1 == provider.scrub_credential(1)\n o = object()\n assert o == provider.scrub_credential(o)\n assert \"user\" == provider.scrub_credential(\"user\")\n assert \"user\" == provider.scrub_credential(\" user\")\n assert \"user\" == provider.scrub_credential(\" user \")\n assert \"user\" == provider.scrub_credential(\" \\ruser\\t \")\n assert b\"user\" == provider.scrub_credential(b\" user \")\n\n\nclass TestBasicAuthenticationProviderAuthenticate(AuthenticatorTest):\n \"\"\"Test the complex BasicAuthenticationProvider.authenticate method.\"\"\"\n\n # A dummy set of credentials, for use when the exact details of\n # the credentials passed in are not important.\n credentials = dict(username=\"user\", password=\"<PASSWORD>\")\n\n def test_success(self):\n patron = self._patron()\n patrondata = PatronData(permanent_id=patron.external_identifier)\n provider = self.mock_basic(patrondata=patrondata)\n\n # authenticate() calls remote_authenticate(), which returns the\n # queued up PatronData object. 
The corresponding Patron is then\n # looked up in the database.\n\n # BasicAuthenticationProvider scrubs leading and trailing spaces from\n # the credentials.\n credentials_with_spaces = dict(username=\" user \", password=\" <PASSWORD> \")\n for creds in (self.credentials, credentials_with_spaces):\n assert patron == provider.authenticate(self._db, self.credentials)\n\n # All the different ways the database lookup might go are covered in\n # test_local_patron_lookup. This test only covers the case where\n # the server sends back the permanent ID of the patron.\n\n def _inactive_patron(self):\n \"\"\"Simulate a patron who has not logged in for a really long time.\n\n :return: A 2-tuple (Patron, PatronData). The Patron contains\n 'out-of-date' data and the PatronData containing 'up-to-date'\n data.\n \"\"\"\n now = utc_now()\n long_ago = now - datetime.timedelta(hours=10000)\n patron = self._patron()\n patron.last_external_sync = long_ago\n\n # All of their authorization information has changed in the\n # meantime, but -- crucially -- their permanent ID has not.\n patron.authorization_identifier = \"old auth id\"\n patron.username = \"old username\"\n\n # Here is the up-to-date information about this patron,\n # as found in the 'ILS'.\n patrondata = PatronData(\n permanent_id=patron.external_identifier,\n username=\"new username\",\n authorization_identifier=\"new authorization identifier\",\n complete=True,\n )\n\n return patron, patrondata\n\n def test_success_but_local_patron_needs_sync(self):\n # This patron has not logged on in a really long time.\n patron, complete_patrondata = self._inactive_patron()\n\n # The 'ILS' will respond to an authentication request with a minimal\n # set of information.\n #\n # It will respond to a patron lookup request with more detailed\n # information.\n minimal_patrondata = PatronData(\n permanent_id=patron.external_identifier, complete=False\n )\n provider = self.mock_basic(\n patrondata=minimal_patrondata,\n remote_patron_lookup_patrondata=complete_patrondata,\n )\n\n # The patron can be authenticated.\n assert patron == provider.authenticate(self._db, self.credentials)\n\n # The Authenticator noticed that the patron's account was out\n # of sync, and since the authentication response did not\n # provide a complete set of patron information, the\n # Authenticator performed a more detailed lookup to make sure\n # that the patron's details were correct going forward.\n assert \"new username\" == patron.username\n assert \"new authorization identifier\" == patron.authorization_identifier\n assert (utc_now() - patron.last_external_sync).total_seconds() < 10\n\n def test_success_with_immediate_patron_sync(self):\n # This patron has not logged on in a really long time.\n patron, complete_patrondata = self._inactive_patron()\n\n # The 'ILS' will respond to an authentication request with a complete\n # set of information. 
If a remote patron lookup were to happen,\n # it would explode.\n provider = self.mock_basic(\n patrondata=complete_patrondata, remote_patron_lookup_patrondata=object()\n )\n\n # The patron can be authenticated.\n assert patron == provider.authenticate(self._db, self.credentials)\n\n # Since the authentication response provided a complete\n # overview of the patron, the Authenticator was able to sync\n # the account immediately, without doing a separate remote\n # patron lookup.\n assert \"new username\" == patron.username\n assert \"new authorization identifier\" == patron.authorization_identifier\n assert (utc_now() - patron.last_external_sync).total_seconds() < 10\n\n def test_failure_when_remote_authentication_returns_problemdetail(self):\n patron = self._patron()\n patrondata = PatronData(permanent_id=patron.external_identifier)\n provider = self.mock_basic(patrondata=UNSUPPORTED_AUTHENTICATION_MECHANISM)\n assert UNSUPPORTED_AUTHENTICATION_MECHANISM == provider.authenticate(\n self._db, self.credentials\n )\n\n def test_failure_when_remote_authentication_returns_none(self):\n patron = self._patron()\n patrondata = PatronData(permanent_id=patron.external_identifier)\n provider = self.mock_basic(patrondata=None)\n assert None == provider.authenticate(self._db, self.credentials)\n\n def test_server_side_validation_runs(self):\n patron = self._patron()\n patrondata = PatronData(permanent_id=patron.external_identifier)\n\n b = MockBasic\n integration = self._external_integration(self._str)\n integration.setting(b.IDENTIFIER_REGULAR_EXPRESSION).value = \"foo\"\n integration.setting(b.PASSWORD_REGULAR_EXPRESSION).value = \"bar\"\n provider = b(self._default_library, integration, patrondata=patrondata)\n\n # This would succeed, but we don't get to remote_authenticate()\n # because we fail the regex test.\n assert None == provider.authenticate(self._db, self.credentials)\n\n # This succeeds because we pass the regex test.\n assert patron == provider.authenticate(\n self._db, dict(username=\"food\", password=\"<PASSWORD>\")\n )\n\n def test_authentication_succeeds_but_patronlookup_fails(self):\n \"\"\"This case should never happen--it indicates a malfunctioning\n authentication provider. But we handle it.\n \"\"\"\n patrondata = PatronData(permanent_id=self._str)\n provider = self.mock_basic(patrondata=patrondata)\n\n # When we call remote_authenticate(), we get patrondata, but\n # there is no corresponding local patron, so we call\n # remote_patron_lookup() for details, and we get nothing. 
At\n # this point we give up -- there is no authenticated patron.\n assert None == provider.authenticate(self._db, self.credentials)\n\n def test_authentication_creates_missing_patron(self):\n # The authentication provider knows about this patron,\n # but this is the first we've heard about them.\n patrondata = PatronData(\n permanent_id=self._str,\n authorization_identifier=self._str,\n fines=Money(1, \"USD\"),\n )\n\n library = self._library()\n integration = self._external_integration(\n self._str, ExternalIntegration.PATRON_AUTH_GOAL\n )\n provider = MockBasic(\n library,\n integration,\n patrondata=patrondata,\n remote_patron_lookup_patrondata=patrondata,\n )\n patron = provider.authenticate(self._db, self.credentials)\n\n # A server side Patron was created from the PatronData.\n assert isinstance(patron, Patron)\n assert library == patron.library\n assert patrondata.permanent_id == patron.external_identifier\n assert patrondata.authorization_identifier == patron.authorization_identifier\n\n # Information not relevant to the patron's identity was stored\n # in the Patron object after it was created.\n assert 1 == patron.fines\n\n def test_authentication_updates_outdated_patron_on_permanent_id_match(self):\n # A patron's permanent ID won't change.\n permanent_id = self._str\n\n # But this patron has not used the circulation manager in a\n # long time, and their other identifiers are out of date.\n old_identifier = \"1234\"\n old_username = \"user1\"\n patron = self._patron(old_identifier)\n patron.external_identifier = permanent_id\n patron.username = old_username\n\n # The authorization provider has all the new information about\n # this patron.\n new_identifier = \"5678\"\n new_username = \"user2\"\n patrondata = PatronData(\n permanent_id=permanent_id,\n authorization_identifier=new_identifier,\n username=new_username,\n )\n\n provider = self.mock_basic(patrondata=patrondata)\n provider.external_type_regular_expression = re.compile(\"^(.)\")\n patron2 = provider.authenticate(self._db, self.credentials)\n\n # We were able to match our local patron to the patron held by the\n # authorization provider.\n assert patron2 == patron\n\n # And we updated our local copy of the patron to reflect their\n # new identifiers.\n assert new_identifier == patron.authorization_identifier\n assert new_username == patron.username\n assert patron.authorization_identifier[0] == patron.external_type\n\n def test_authentication_updates_outdated_patron_on_username_match(self):\n # This patron has no permanent ID. Their library card number has\n # changed but their username has not.\n old_identifier = \"1234\"\n new_identifier = \"5678\"\n username = \"user1\"\n patron = self._patron(old_identifier)\n patron.external_identifier = None\n patron.username = username\n\n # The authorization provider has all the new information about\n # this patron.\n patrondata = PatronData(\n authorization_identifier=new_identifier,\n username=username,\n )\n\n provider = self.mock_basic(patrondata=patrondata)\n patron2 = provider.authenticate(self._db, self.credentials)\n\n # We were able to match our local patron to the patron held by the\n # authorization provider, based on the username match.\n assert patron2 == patron\n\n # And we updated our local copy of the patron to reflect their\n # new identifiers.\n assert new_identifier == patron.authorization_identifier\n\n def test_authentication_updates_outdated_patron_on_authorization_identifier_match(\n self,\n ):\n # This patron has no permanent ID. 
Their username has\n # changed but their library card number has not.\n identifier = \"1234\"\n old_username = \"user1\"\n new_username = \"user2\"\n patron = self._patron()\n patron.external_identifier = None\n patron.authorization_identifier = identifier\n patron.username = old_username\n\n # The authorization provider has all the new information about\n # this patron.\n patrondata = PatronData(\n authorization_identifier=identifier,\n username=new_username,\n )\n\n provider = self.mock_basic(patrondata=patrondata)\n patron2 = provider.authenticate(self._db, self.credentials)\n\n # We were able to match our local patron to the patron held by the\n # authorization provider, based on the username match.\n assert patron2 == patron\n\n # And we updated our local copy of the patron to reflect their\n # new identifiers.\n assert new_username == patron.username\n\n # Notice what's missing: If a patron has no permanent identifier,\n # _and_ their username and authorization identifier both change,\n # then we have no way of locating them in our database. They will\n # appear no different to us than a patron who has never used the\n # circulation manager before.\n\n\nclass TestOAuthAuthenticationProvider(AuthenticatorTest):\n def test_from_config(self):\n class ConfigAuthenticationProvider(OAuthAuthenticationProvider):\n NAME = \"Config loading test\"\n\n integration = self._external_integration(\n self._str, goal=ExternalIntegration.PATRON_AUTH_GOAL\n )\n integration.username = \"client_id\"\n integration.password = \"<PASSWORD>\"\n integration.setting(\n ConfigAuthenticationProvider.OAUTH_TOKEN_EXPIRATION_DAYS\n ).value = 20\n provider = ConfigAuthenticationProvider(self._default_library, integration)\n assert \"client_id\" == provider.client_id\n assert \"client_secret\" == provider.client_secret\n assert 20 == provider.token_expiration_days\n\n def test_get_credential_from_header(self):\n \"\"\"There is no way to get a credential from a bearer token that can\n be passed on to a content provider like Overdrive.\n \"\"\"\n provider = MockOAuth(self._default_library)\n assert None == provider.get_credential_from_header(\"Bearer abcd\")\n\n def test_create_token(self):\n patron = self._patron()\n provider = MockOAuth(self._default_library)\n in_twenty_days = utc_now() + datetime.timedelta(\n days=provider.token_expiration_days\n )\n data_source = provider.token_data_source(self._db)\n token, is_new = provider.create_token(self._db, patron, \"some token\")\n assert True == is_new\n assert patron == token.patron\n assert \"some token\" == token.credential\n\n # The token expires in twenty days.\n almost_no_time = abs(token.expires - in_twenty_days)\n assert almost_no_time.seconds < 2\n\n def test_authenticated_patron_success(self):\n patron = self._patron()\n provider = MockOAuth(self._default_library)\n data_source = provider.token_data_source(self._db)\n\n # Until we call create_token, this won't work.\n assert None == provider.authenticated_patron(self._db, \"some token\")\n\n token, is_new = provider.create_token(self._db, patron, \"some token\")\n assert True == is_new\n assert patron == token.patron\n\n # Now it works.\n assert patron == provider.authenticated_patron(self._db, \"some token\")\n\n def test_oauth_callback(self):\n\n mock_patrondata = PatronData(\n authorization_identifier=\"1234\", username=\"user\", personal_name=\"The User\"\n )\n\n class CallbackImplementation(MockOAuth):\n def remote_exchange_code_for_access_token(self, _db, access_code):\n self.used_code = access_code\n 
return \"a token\"\n\n def remote_patron_lookup(self, bearer_token):\n return mock_patrondata\n\n integration = CallbackImplementation._mock_integration(self._db, \"Mock OAuth\")\n\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n CallbackImplementation.LIBRARY_IDENTIFIER_RESTRICTION,\n self._default_library,\n integration,\n ).value = \"123\"\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n CallbackImplementation.LIBRARY_IDENTIFIER_RESTRICTION_TYPE,\n self._default_library,\n integration,\n ).value = CallbackImplementation.LIBRARY_IDENTIFIER_RESTRICTION_TYPE_PREFIX\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n CallbackImplementation.LIBRARY_IDENTIFIER_FIELD,\n self._default_library,\n integration,\n ).value = CallbackImplementation.LIBRARY_IDENTIFIER_RESTRICTION_BARCODE\n\n oauth = CallbackImplementation(self._default_library, integration=integration)\n credential, patron, patrondata = oauth.oauth_callback(self._db, \"a code\")\n\n # remote_exchange_code_for_access_token was called with the\n # access code.\n assert \"a code\" == oauth.used_code\n\n # The bearer token became a Credential object.\n assert isinstance(credential, Credential)\n assert \"a token\" == credential.credential\n\n # Information that could go into the Patron record did.\n assert isinstance(patron, Patron)\n assert \"1234\" == patron.authorization_identifier\n assert \"user\" == patron.username\n\n # The PatronData returned from remote_patron_lookup\n # has been passed along.\n assert mock_patrondata == patrondata\n assert \"The User\" == patrondata.personal_name\n\n # A patron whose identifier doesn't match the patron\n # identifier restriction is treated as a patron of a different\n # library.\n mock_patrondata.set_authorization_identifier(\"abcd\")\n assert PATRON_OF_ANOTHER_LIBRARY == oauth.oauth_callback(self._db, \"a code\")\n\n def test_authentication_flow_document(self):\n # We're about to call url_for, so we must create an\n # application context.\n os.environ[\"AUTOINITIALIZE\"] = \"False\"\n from api.app import app\n\n self.app = app\n del os.environ[\"AUTOINITIALIZE\"]\n provider = MockOAuth(self._default_library)\n with self.app.test_request_context(\"/\"):\n doc = provider.authentication_flow_document(self._db)\n assert provider.FLOW_TYPE == doc[\"type\"]\n assert provider.NAME == doc[\"description\"]\n\n # To authenticate with this provider, you must follow the\n # 'authenticate' link.\n [auth_link] = [x for x in doc[\"links\"] if x[\"rel\"] == \"authenticate\"]\n assert auth_link[\"href\"] == provider._internal_authenticate_url(self._db)\n\n [logo_link] = [x for x in doc[\"links\"] if x[\"rel\"] == \"logo\"]\n assert (\n \"http://localhost/images/\" + MockOAuth.LOGIN_BUTTON_IMAGE\n == logo_link[\"href\"]\n )\n\n def test_token_data_source_can_create_new_data_source(self):\n class OAuthWithUnusualDataSource(MockOAuth):\n TOKEN_DATA_SOURCE_NAME = \"Unusual data source\"\n\n oauth = OAuthWithUnusualDataSource(self._default_library)\n source, is_new = oauth.token_data_source(self._db)\n assert True == is_new\n assert oauth.TOKEN_DATA_SOURCE_NAME == source.name\n\n source, is_new = oauth.token_data_source(self._db)\n assert False == is_new\n assert oauth.TOKEN_DATA_SOURCE_NAME == source.name\n\n def test_external_authenticate_url_parameters(self):\n \"\"\"Verify that external_authenticate_url_parameters generates\n realistic results when run in a real application.\n \"\"\"\n # We're about to call url_for, so we must create an\n # 
application context.\n my_api = MockOAuth(self._default_library)\n my_api.client_id = \"clientid\"\n os.environ[\"AUTOINITIALIZE\"] = \"False\"\n from api.app import app\n\n del os.environ[\"AUTOINITIALIZE\"]\n\n with app.test_request_context(\"/\"):\n params = my_api.external_authenticate_url_parameters(\"state\", self._db)\n assert \"state\" == params[\"state\"]\n assert \"clientid\" == params[\"client_id\"]\n expected_url = url_for(\n \"oauth_callback\",\n library_short_name=self._default_library.short_name,\n _external=True,\n )\n assert expected_url == params[\"oauth_callback_url\"]\n\n\nclass TestOAuthController(AuthenticatorTest):\n def setup_method(self):\n super(TestOAuthController, self).setup_method()\n\n class MockOAuthWithExternalAuthenticateURL(MockOAuth):\n def __init__(self, library, _db, external_authenticate_url, patron):\n super(MockOAuthWithExternalAuthenticateURL, self).__init__(\n library,\n )\n self.url = external_authenticate_url\n self.patron = patron\n self.token, ignore = self.create_token(_db, self.patron, \"a token\")\n self.patrondata = PatronData(personal_name=\"Abcd\")\n\n def external_authenticate_url(self, state, _db):\n return self.url + \"?state=\" + state\n\n def oauth_callback(self, _db, params):\n return self.token, self.patron, self.patrondata\n\n patron = self._patron()\n self.basic = self.mock_basic()\n self.oauth1 = MockOAuthWithExternalAuthenticateURL(\n self._default_library, self._db, \"http://oauth1.com/\", patron\n )\n self.oauth1.NAME = \"Mock OAuth 1\"\n self.oauth2 = MockOAuthWithExternalAuthenticateURL(\n self._default_library, self._db, \"http://oauth2.org/\", patron\n )\n self.oauth2.NAME = \"Mock OAuth 2\"\n\n self.library_auth = LibraryAuthenticator(\n _db=self._db,\n library=self._default_library,\n basic_auth_provider=self.basic,\n oauth_providers=[self.oauth1, self.oauth2],\n bearer_token_signing_secret=\"a secret\",\n )\n\n self.auth = MockAuthenticator(\n self._default_library, {self._default_library.short_name: self.library_auth}\n )\n self.controller = OAuthController(self.auth)\n\n def test_oauth_authentication_redirect(self):\n # Test the controller method that sends patrons off to the OAuth\n # provider, where they're supposed to log in.\n\n params = dict(provider=self.oauth1.NAME)\n response = self.controller.oauth_authentication_redirect(params, self._db)\n assert 302 == response.status_code\n expected_state = dict(\n provider=self.oauth1.NAME,\n redirect_uri=\"\",\n )\n expected_state = urllib.parse.quote(json.dumps(expected_state))\n assert \"http://oauth1.com/?state=\" + expected_state == response.location\n\n params = dict(provider=self.oauth2.NAME, redirect_uri=\"http://foo.com/\")\n response = self.controller.oauth_authentication_redirect(params, self._db)\n assert 302 == response.status_code\n expected_state = urllib.parse.quote(json.dumps(params))\n assert \"http://oauth2.org/?state=\" + expected_state == response.location\n\n # If we don't recognize the OAuth provider you get sent to\n # the redirect URI with a fragment containing an encoded\n # problem detail document.\n params = dict(redirect_uri=\"http://foo.com/\", provider=\"not an oauth provider\")\n response = self.controller.oauth_authentication_redirect(params, self._db)\n assert 302 == response.status_code\n assert response.location.startswith(\"http://foo.com/#\")\n fragments = urllib.parse.parse_qs(\n urllib.parse.urlparse(response.location).fragment\n )\n error = json.loads(fragments.get(\"error\")[0])\n assert UNKNOWN_OAUTH_PROVIDER.uri == 
error.get(\"type\")\n\n def test_oauth_authentication_callback(self):\n \"\"\"Test the controller method that the OAuth provider is supposed\n to send patrons to once they log in on the remote side.\n \"\"\"\n\n # Successful callback through OAuth provider 1.\n params = dict(code=\"foo\", state=json.dumps(dict(provider=self.oauth1.NAME)))\n response = self.controller.oauth_authentication_callback(self._db, params)\n assert 302 == response.status_code\n fragments = urllib.parse.parse_qs(\n urllib.parse.urlparse(response.location).fragment\n )\n token = fragments.get(\"access_token\")[0]\n provider_name, provider_token = self.auth.decode_bearer_token(token)\n assert self.oauth1.NAME == provider_name\n assert self.oauth1.token.credential == provider_token\n\n # Successful callback through OAuth provider 2.\n params = dict(code=\"foo\", state=json.dumps(dict(provider=self.oauth2.NAME)))\n response = self.controller.oauth_authentication_callback(self._db, params)\n assert 302 == response.status_code\n fragments = urllib.parse.parse_qs(\n urllib.parse.urlparse(response.location).fragment\n )\n token = fragments.get(\"access_token\")[0]\n provider_name, provider_token = self.auth.decode_bearer_token(token)\n assert self.oauth2.NAME == provider_name\n assert self.oauth2.token.credential == provider_token\n\n # State is missing so we never get to check the code.\n params = dict(code=\"foo\")\n response = self.controller.oauth_authentication_callback(self._db, params)\n assert INVALID_OAUTH_CALLBACK_PARAMETERS == response\n\n # Code is missing so we never check the state.\n params = dict(state=json.dumps(dict(provider=self.oauth1.NAME)))\n response = self.controller.oauth_authentication_callback(self._db, params)\n assert INVALID_OAUTH_CALLBACK_PARAMETERS == response\n\n # In this example we're pretending to be coming in after\n # authenticating with an OAuth provider that doesn't exist.\n params = dict(\n code=\"foo\", state=json.dumps(dict(provider=(\"not_an_oauth_provider\")))\n )\n response = self.controller.oauth_authentication_callback(self._db, params)\n assert 302 == response.status_code\n fragments = urllib.parse.parse_qs(\n urllib.parse.urlparse(response.location).fragment\n )\n assert None == fragments.get(\"access_token\")\n error = json.loads(fragments.get(\"error\")[0])\n assert UNKNOWN_OAUTH_PROVIDER.uri == error.get(\"type\")\n\n def test_oauth_authentication_invalid_token(self):\n \"\"\"If an invalid bearer token is provided, an appropriate problem\n detail is returned.\n \"\"\"\n problem = self.library_auth.authenticated_patron(\n self._db, \"Bearer - this is a bad token\"\n )\n assert INVALID_OAUTH_BEARER_TOKEN == problem\n", "id": "7079913", "language": "Python", "matching_score": 7.347027778625488, "max_stars_count": 0, "path": "tests/api/test_authenticator.py" }, { "content": "import argparse\nimport base64\nimport datetime\nimport json\nimport logging\nimport sys\nimport uuid\n\nimport flask\nimport jwt\nfrom flask import Response\nfrom flask_babel import lazy_gettext as _\nfrom jwt.algorithms import HMACAlgorithm\nfrom sqlalchemy.orm.session import Session\n\nfrom api.base_controller import BaseCirculationManagerController\nfrom api.registration.constants import RegistrationConstants\nfrom core.app_server import url_for\nfrom core.model import (\n ConfigurationSetting,\n Credential,\n DataSource,\n DelegatedPatronIdentifier,\n ExternalIntegration,\n Library,\n Patron,\n create,\n get_one,\n)\nfrom core.scripts import Script\nfrom core.util.datetime_helpers import 
datetime_utc, utc_now\nfrom core.util.problem_detail import ProblemDetail\nfrom core.util.xmlparser import XMLParser\n\nfrom .config import CannotLoadConfiguration, Configuration\nfrom .problem_details import *\n\n\nclass AdobeVendorIDController(object):\n\n \"\"\"Flask controllers that implement the Account Service and\n Authorization Service portions of the Adobe Vendor ID protocol.\n \"\"\"\n\n def __init__(self, _db, library, vendor_id, node_value, authenticator):\n self._db = _db\n self.library = library\n self.request_handler = AdobeVendorIDRequestHandler(vendor_id)\n self.model = AdobeVendorIDModel(_db, library, authenticator, node_value)\n\n def create_authdata_handler(self, patron):\n \"\"\"Create an authdata token for the given patron.\n\n This controller method exists only for backwards compatibility\n with older client applications. Newer applications are\n expected to understand the DRM Extensions for OPDS.\n \"\"\"\n __transaction = self._db.begin_nested()\n credential = self.model.create_authdata(patron)\n __transaction.commit()\n return Response(credential.credential, 200, {\"Content-Type\": \"text/plain\"})\n\n def signin_handler(self):\n \"\"\"Process an incoming signInRequest document.\"\"\"\n __transaction = self._db.begin_nested()\n output = self.request_handler.handle_signin_request(\n flask.request.data, self.model.standard_lookup, self.model.authdata_lookup\n )\n __transaction.commit()\n return Response(output, 200, {\"Content-Type\": \"application/xml\"})\n\n def userinfo_handler(self):\n \"\"\"Process an incoming userInfoRequest document.\"\"\"\n output = self.request_handler.handle_accountinfo_request(\n flask.request.data, self.model.urn_to_label\n )\n return Response(output, 200, {\"Content-Type\": \"application/xml\"})\n\n def status_handler(self):\n return Response(\"UP\", 200, {\"Content-Type\": \"text/plain\"})\n\n\nclass DeviceManagementProtocolController(BaseCirculationManagerController):\n \"\"\"Implementation of the DRM Device ID Management Protocol.\n\n The code that does the actual work is in DeviceManagementRequestHandler.\n \"\"\"\n\n DEVICE_ID_LIST_MEDIA_TYPE = \"vnd.librarysimplified/drm-device-id-list\"\n PLAIN_TEXT_HEADERS = {\"Content-Type\": \"text/plain\"}\n\n @property\n def link_template_header(self):\n \"\"\"Generate the Link Template that explains how to deregister\n a specific DRM device ID.\n \"\"\"\n library = flask.request.library\n url = url_for(\n \"adobe_drm_device\",\n library_short_name=library.short_name,\n device_id=\"{id}\",\n _external=True,\n )\n # The curly brackets in {id} were escaped. 
Un-escape them to\n # get a Link Template.\n url = url.replace(\"%7Bid%7D\", \"{id}\")\n return {\"Link-Template\": '<%s>; rel=\"item\"' % url}\n\n def _request_handler(self, patron):\n \"\"\"Create a DeviceManagementRequestHandler for the appropriate\n Credential of the given Patron.\n\n :return: A DeviceManagementRequestHandler\n \"\"\"\n if not patron:\n return INVALID_CREDENTIALS.detailed(_(\"No authenticated patron\"))\n\n credential = AdobeVendorIDModel.get_or_create_patron_identifier_credential(\n patron\n )\n return DeviceManagementRequestHandler(credential)\n\n def device_id_list_handler(self):\n \"\"\"Manage the list of device IDs associated with an Adobe ID.\"\"\"\n handler = self._request_handler(flask.request.patron)\n if isinstance(handler, ProblemDetail):\n return handler\n\n device_ids = self.DEVICE_ID_LIST_MEDIA_TYPE\n if flask.request.method == \"GET\":\n # Serve a list of device IDs.\n output = handler.device_list()\n if isinstance(output, ProblemDetail):\n return output\n headers = self.link_template_header\n headers[\"Content-Type\"] = device_ids\n return Response(output, 200, headers)\n elif flask.request.method == \"POST\":\n # Add a device ID to the list.\n incoming_media_type = flask.request.headers.get(\"Content-Type\")\n if incoming_media_type != device_ids:\n return UNSUPPORTED_MEDIA_TYPE.detailed(\n _(\"Expected %(media_type)s document.\", media_type=device_ids)\n )\n output = handler.register_device(flask.request.get_data(as_text=True))\n if isinstance(output, ProblemDetail):\n return output\n return Response(output, 200, self.PLAIN_TEXT_HEADERS)\n return METHOD_NOT_ALLOWED.detailed(_(\"Only GET and POST are supported.\"))\n\n def device_id_handler(self, device_id):\n \"\"\"Manage one of the device IDs associated with an Adobe ID.\"\"\"\n handler = self._request_handler(getattr(flask.request, \"patron\", None))\n if isinstance(handler, ProblemDetail):\n return handler\n\n if flask.request.method != \"DELETE\":\n return METHOD_NOT_ALLOWED.detailed(_(\"Only DELETE is supported.\"))\n\n # Delete the specified device ID.\n output = handler.deregister_device(device_id)\n if isinstance(output, ProblemDetail):\n return output\n return Response(output, 200, self.PLAIN_TEXT_HEADERS)\n\n\nclass AdobeVendorIDRequestHandler(object):\n\n \"\"\"Standalone class that can be tested without bringing in Flask or\n the database schema.\n \"\"\"\n\n SIGN_IN_RESPONSE_TEMPLATE = \"\"\"<signInResponse xmlns=\"http://ns.adobe.com/adept\">\n<user>%(user)s</user>\n<label>%(label)s</label>\n</signInResponse>\"\"\"\n\n ACCOUNT_INFO_RESPONSE_TEMPLATE = \"\"\"<accountInfoResponse xmlns=\"http://ns.adobe.com/adept\">\n<label>%(label)s</label>\n</accountInfoResponse>\"\"\"\n\n AUTH_ERROR_TYPE = \"AUTH\"\n ACCOUNT_INFO_ERROR_TYPE = \"ACCOUNT_INFO\"\n\n ERROR_RESPONSE_TEMPLATE = '<error xmlns=\"http://ns.adobe.com/adept\" data=\"E_%(vendor_id)s_%(type)s %(message)s\"/>'\n\n TOKEN_FAILURE = \"Incorrect token.\"\n AUTHENTICATION_FAILURE = \"Incorrect barcode or PIN.\"\n URN_LOOKUP_FAILURE = \"Could not identify patron from '%s'.\"\n\n def __init__(self, vendor_id):\n self.vendor_id = vendor_id\n\n def handle_signin_request(self, data, standard_lookup, authdata_lookup):\n parser = AdobeSignInRequestParser()\n try:\n data = parser.process(data)\n except Exception as e:\n logging.error(\"Error processing %s\", data, exc_info=e)\n return self.error_document(self.AUTH_ERROR_TYPE, str(e))\n user_id = label = None\n if not data:\n return self.error_document(\n self.AUTH_ERROR_TYPE, \"Request document 
in wrong format.\"\n )\n if not \"method\" in data:\n return self.error_document(self.AUTH_ERROR_TYPE, \"No method specified\")\n if data[\"method\"] == parser.STANDARD:\n user_id, label = standard_lookup(data)\n failure = self.AUTHENTICATION_FAILURE\n elif data[\"method\"] == parser.AUTH_DATA:\n authdata = data[parser.AUTH_DATA]\n user_id, label = authdata_lookup(authdata)\n failure = self.TOKEN_FAILURE\n if user_id is None:\n return self.error_document(self.AUTH_ERROR_TYPE, failure)\n else:\n return self.SIGN_IN_RESPONSE_TEMPLATE % dict(user=user_id, label=label)\n\n def handle_accountinfo_request(self, data, urn_to_label):\n parser = AdobeAccountInfoRequestParser()\n label = None\n try:\n data = parser.process(data)\n if not data:\n return self.error_document(\n self.ACCOUNT_INFO_ERROR_TYPE, \"Request document in wrong format.\"\n )\n if not \"user\" in data:\n return self.error_document(\n self.ACCOUNT_INFO_ERROR_TYPE,\n \"Could not find user identifer in request document.\",\n )\n label = urn_to_label(data[\"user\"])\n except Exception as e:\n return self.error_document(self.ACCOUNT_INFO_ERROR_TYPE, str(e))\n\n if label:\n return self.ACCOUNT_INFO_RESPONSE_TEMPLATE % dict(label=label)\n else:\n return self.error_document(\n self.ACCOUNT_INFO_ERROR_TYPE, self.URN_LOOKUP_FAILURE % data[\"user\"]\n )\n\n def error_document(self, type, message):\n return self.ERROR_RESPONSE_TEMPLATE % dict(\n vendor_id=self.vendor_id, type=type, message=message\n )\n\n\nclass DeviceManagementRequestHandler(object):\n \"\"\"Handle incoming requests for the DRM Device Management Protocol.\"\"\"\n\n def __init__(self, credential):\n self.credential = credential\n\n def device_list(self):\n return \"\\n\".join(\n sorted(x.device_identifier for x in self.credential.drm_device_identifiers)\n )\n\n def register_device(self, data):\n device_ids = data.split(\"\\n\")\n if len(device_ids) > 1:\n return PAYLOAD_TOO_LARGE.detailed(\n _(\"You may only register one device ID at a time.\")\n )\n for device_id in device_ids:\n if device_id:\n self.credential.register_drm_device_identifier(device_id)\n return \"Success\"\n\n def deregister_device(self, device_id):\n self.credential.deregister_drm_device_identifier(device_id)\n return \"Success\"\n\n\nclass AdobeRequestParser(XMLParser):\n\n NAMESPACES = {\"adept\": \"http://ns.adobe.com/adept\"}\n\n def process(self, data):\n requests = list(self.process_all(data, self.REQUEST_XPATH, self.NAMESPACES))\n if not requests:\n return None\n # There should only be one request tag, but if there's more than\n # one, only return the first one.\n return requests[0]\n\n def _add(self, d, tag, key, namespaces, transform=None):\n v = self._xpath1(tag, \"adept:\" + key, namespaces)\n if v is not None:\n v = v.text\n if v is not None:\n v = v.strip()\n if transform is not None:\n v = transform(v)\n if isinstance(v, bytes):\n v = v.decode(\"utf-8\")\n d[key] = v\n\n\nclass AdobeSignInRequestParser(AdobeRequestParser):\n\n REQUEST_XPATH = \"/adept:signInRequest\"\n\n STANDARD = \"standard\"\n AUTH_DATA = \"authData\"\n\n def process_one(self, tag, namespaces):\n method = tag.attrib.get(\"method\")\n\n if not method:\n raise ValueError(\"No signin method specified\")\n data = dict(method=method)\n if method == self.STANDARD:\n self._add(data, tag, \"username\", namespaces)\n self._add(data, tag, \"password\", namespaces)\n elif method == self.AUTH_DATA:\n self._add(data, tag, self.AUTH_DATA, namespaces, base64.b64decode)\n else:\n raise ValueError(\"Unknown signin method: %s\" % method)\n 
return data\n\n\nclass AdobeAccountInfoRequestParser(AdobeRequestParser):\n\n REQUEST_XPATH = \"/adept:accountInfoRequest\"\n\n def process_one(self, tag, namespaces):\n method = tag.attrib.get(\"method\")\n data = dict(method=method)\n self._add(data, tag, \"user\", namespaces)\n return data\n\n\nclass AdobeVendorIDModel(object):\n\n \"\"\"Implement Adobe Vendor ID within the Simplified database\n model.\n \"\"\"\n\n AUTHDATA_TOKEN_TYPE = \"Authdata for Adobe Vendor ID\"\n VENDOR_ID_UUID_TOKEN_TYPE = \"Vendor ID UUID\"\n\n def __init__(\n self, _db, library, authenticator, node_value, temporary_token_duration=None\n ):\n self.library = library\n self._db = _db\n self.authenticator = authenticator\n self.temporary_token_duration = temporary_token_duration or datetime.timedelta(\n minutes=10\n )\n if isinstance(node_value, (bytes, str)):\n node_value = int(node_value, 16)\n self.node_value = node_value\n\n @property\n def data_source(self):\n return DataSource.lookup(self._db, DataSource.ADOBE)\n\n def uuid_and_label(self, patron):\n \"\"\"Create or retrieve a Vendor ID UUID and human-readable Vendor ID\n label for the given patron.\n\n This code is semi-deprecated, which accounts for the varying\n paths and the code that tries to migrate patrons to the new\n system. In the future everyone will send JWTs as authdata and\n we will always go from the JWT to a DelegatedPatronIdentifier.\n This code always ends up at a DelegatedPatronIdentifier, but\n it might pick up the final value from somewhere else along the way.\n\n The _reason_ this code is semi-deprecated is that it only\n works for a library that has its own Adobe Vendor ID.\n \"\"\"\n if not patron:\n return None, None\n\n # First, find or create a Credential containing the patron's\n # anonymized key into the DelegatedPatronIdentifier database.\n adobe_account_id_patron_identifier_credential = (\n self.get_or_create_patron_identifier_credential(patron)\n )\n\n # Look up a Credential containing the patron's Adobe account\n # ID created under the old system. We don't use\n # Credential.lookup because we don't want to create a\n # Credential if it doesn't exist.\n old_style_adobe_account_id_credential = get_one(\n self._db,\n Credential,\n patron=patron,\n data_source=self.data_source,\n type=self.VENDOR_ID_UUID_TOKEN_TYPE,\n )\n\n if old_style_adobe_account_id_credential:\n # The value of the old-style credential will become the\n # default value of the DelegatedPatronIdentifier, assuming\n # we have to create one.\n def new_value():\n return old_style_adobe_account_id_credential.credential\n\n else:\n # There is no old-style credential. If we have to create a\n # new DelegatedPatronIdentifier we will give it a value\n # using the default mechanism.\n new_value = None\n\n # Look up or create a DelegatedPatronIdentifier using the\n # anonymized patron identifier we just looked up or created.\n utility = AuthdataUtility.from_config(patron.library, self._db)\n return self.to_delegated_patron_identifier_uuid(\n utility.library_uri,\n adobe_account_id_patron_identifier_credential.credential,\n value_generator=new_value,\n )\n\n def create_authdata(self, patron):\n credential, is_new = Credential.persistent_token_create(\n self._db, self.data_source, self.AUTHDATA_TOKEN_TYPE, patron\n )\n return credential\n\n def standard_lookup(self, authorization_data):\n \"\"\"Look up a patron by authorization header. 
Return their Vendor ID\n UUID and their human-readable label, creating a Credential\n object to hold the UUID if necessary.\n \"\"\"\n username = authorization_data.get(\"username\")\n password = authorization_data.get(\"password\")\n if username and not password:\n # The absence of a password indicates the username might\n # be a persistent authdata token smuggled to get around a\n # broken Adobe client-side API. Try treating the\n # 'username' as a token.\n possible_authdata_token = authorization_data[\"username\"]\n return self.authdata_lookup(possible_authdata_token)\n\n if username and password:\n # Try to look up the username and password as a short\n # client token. This is currently the best way to do\n # authentication.\n uuid, label = self.short_client_token_lookup(username, password)\n if uuid and label:\n return uuid, label\n\n # Last ditch effort: try a normal username/password lookup.\n # This should almost never be used.\n patron = self.authenticator.authenticated_patron(self._db, authorization_data)\n return self.uuid_and_label(patron)\n\n def authdata_lookup(self, authdata):\n \"\"\"Turn an authdata string into a Vendor ID UUID and a human-readable\n label.\n\n Generally we do this by decoding the authdata as a JWT and\n looking up or creating an appropriate\n DelegatedPatronIdentifier.\n\n However, for backwards compatibility purposes, if the authdata\n cannot be decoded as a JWT, we will try the old method of\n treating it as a Credential that identifies a Patron, and\n finding the DelegatedPatronIdentifier that way.\n \"\"\"\n if not authdata:\n return None, None\n\n library_uri = foreign_patron_identifier = None\n utility = AuthdataUtility.from_config(self.library, self._db)\n if utility:\n # Hopefully this is an authdata JWT generated by another\n # library's circulation manager.\n try:\n library_uri, foreign_patron_identifier = utility.decode(authdata)\n except Exception as e:\n # Not a problem -- we'll try the old system.\n pass\n\n if library_uri and foreign_patron_identifier:\n # We successfully decoded the authdata as a JWT. We know\n # which library the patron is from and which (hopefully\n # anonymized) ID identifies this patron within that\n # library. Keep their Adobe account ID in a\n # DelegatedPatronIdentifier.\n uuid_and_label = self.to_delegated_patron_identifier_uuid(\n library_uri, foreign_patron_identifier\n )\n else:\n # Maybe this is an old-style authdata, stored as a\n # Credential associated with a specific patron.\n patron = self.patron_from_authdata_lookup(authdata)\n if patron:\n # Yes, that's what's going on.\n uuid_and_label = self.uuid_and_label(patron)\n else:\n # This alleged authdata doesn't fit into either\n # category. 
Stop trying to turn it into an Adobe account ID.\n uuid_and_label = (None, None)\n return uuid_and_label\n\n def short_client_token_lookup(self, token, signature):\n \"\"\"Validate a short client token that came in as username/password.\"\"\"\n utility = AuthdataUtility.from_config(self.library, self._db)\n library_uri = foreign_patron_identifier = None\n if utility:\n # Hopefully this is a short client token generated by\n # another library's circulation manager.\n try:\n (\n library_uri,\n foreign_patron_identifier,\n ) = utility.decode_two_part_short_client_token(token, signature)\n except Exception as e:\n # This didn't work--either the incoming data was wrong\n # or this technique wasn't the right one to use.\n pass\n\n if library_uri and foreign_patron_identifier:\n # We successfully decoded the authdata as a short client\n # token. We know which library the patron is from and\n # which (hopefully anonymized) ID identifies this patron\n # within that library. Keep their Adobe account ID in a\n # DelegatedPatronIdentifier.\n uuid_and_label = self.to_delegated_patron_identifier_uuid(\n library_uri, foreign_patron_identifier\n )\n else:\n # We were not able to decode the authdata as a short client\n # token.\n uuid_and_label = (None, None)\n return uuid_and_label\n\n def to_delegated_patron_identifier_uuid(\n self, library_uri, foreign_patron_identifier, value_generator=None\n ):\n \"\"\"Create or lookup a DelegatedPatronIdentifier containing an Adobe\n account ID for the given library and foreign patron ID.\n\n :return: A 2-tuple (UUID, label)\n \"\"\"\n if not library_uri or not foreign_patron_identifier:\n return None, None\n value_generator = value_generator or self.uuid\n identifier, is_new = DelegatedPatronIdentifier.get_one_or_create(\n self._db,\n library_uri,\n foreign_patron_identifier,\n DelegatedPatronIdentifier.ADOBE_ACCOUNT_ID,\n value_generator,\n )\n\n if identifier is None:\n return None, None\n return (\n identifier.delegated_identifier,\n self.urn_to_label(identifier.delegated_identifier),\n )\n\n def patron_from_authdata_lookup(self, authdata):\n \"\"\"Look up a patron by their persistent authdata token.\"\"\"\n credential = Credential.lookup_by_token(\n self._db,\n self.data_source,\n self.AUTHDATA_TOKEN_TYPE,\n authdata,\n allow_persistent_token=True,\n )\n if not credential:\n return None\n return credential.patron\n\n def urn_to_label(self, urn):\n \"\"\"We have no information about patrons, so labels are sparse.\"\"\"\n return \"Delegated account ID %s\" % urn\n\n def uuid(self):\n \"\"\"Create a new UUID URN compatible with the Vendor ID system.\"\"\"\n u = str(uuid.uuid1(self.node_value))\n # This chop is required by the spec. 
I have no idea why, but\n # since the first part of the UUID is the least significant,\n # it doesn't do much damage.\n value = \"urn:uuid:0\" + u[1:]\n return value\n\n @classmethod\n def get_or_create_patron_identifier_credential(cls, patron):\n _db = Session.object_session(patron)\n\n def refresh(credential):\n credential.credential = str(uuid.uuid1())\n\n data_source = DataSource.lookup(_db, DataSource.INTERNAL_PROCESSING)\n patron_identifier_credential = Credential.lookup(\n _db,\n data_source,\n AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,\n patron,\n refresher_method=refresh,\n allow_persistent_token=True,\n )\n return patron_identifier_credential\n\n\nclass AuthdataUtility(object):\n\n \"\"\"Generate authdata JWTs as per the Vendor ID Service spec:\n https://docs.google.com/document/d/1j8nWPVmy95pJ_iU4UTC-QgHK2QhDUSdQ0OQTFR2NE_0\n\n Capable of encoding JWTs (for this library), and decoding them\n (from this library and potentially others).\n\n Also generates and decodes JWT-like strings used to get around\n Adobe's lack of support for authdata in deactivation.\n \"\"\"\n\n # The type of the Credential created to identify a patron to the\n # Vendor ID Service. Using this as an alias keeps the Vendor ID\n # Service from knowing anything about the patron's true\n # identity. This Credential is permanent (unlike a patron's\n # username or authorization identifier), but can be revoked (if\n # the patron needs to reset their Adobe account ID) with no\n # consequences other than losing their currently checked-in books.\n ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER = \"Identifier for Adobe account ID purposes\"\n\n ALGORITHM = \"HS256\"\n\n def __init__(\n self, vendor_id, library_uri, library_short_name, secret, other_libraries={}\n ):\n \"\"\"Basic constructor.\n\n :param vendor_id: The Adobe Vendor ID that should accompany authdata\n generated by this utility.\n\n If this library has its own Adobe Vendor ID, it should go\n here. If this library is delegating authdata control to some\n other library, that library's Vendor ID should go here.\n\n :param library_uri: A URI identifying this library. This is\n used when generating JWTs.\n\n :param short_name: A short string identifying this\n library. This is used when generating short client tokens,\n which must be as short as possible (thus the name).\n\n :param secret: A secret used to sign this library's authdata.\n\n :param other_libraries: A dictionary mapping other libraries'\n canonical URIs to their (short name, secret) 2-tuples. 
An\n instance of this class will be able to decode an authdata from\n any library in this dictionary (plus the library it was\n initialized for).\n \"\"\"\n self.vendor_id = vendor_id\n\n # This is used to _encode_ JWTs and send them to the\n # delegation authority.\n self.library_uri = library_uri\n\n # This is used to _encode_ short client tokens.\n self.short_name = library_short_name.upper()\n\n # This is used to encode both JWTs and short client tokens.\n self.secret = secret\n\n # This is used by the delegation authority to _decode_ JWTs.\n self.secrets_by_library_uri = {}\n self.secrets_by_library_uri[self.library_uri] = secret\n\n # This is used by the delegation authority to _decode_ short\n # client tokens.\n self.library_uris_by_short_name = {}\n self.library_uris_by_short_name[self.short_name] = self.library_uri\n\n # Fill in secrets_by_library_uri and library_uris_by_short_name\n # for other libraries.\n for uri, v in list(other_libraries.items()):\n short_name, secret = v\n short_name = short_name.upper()\n if short_name in self.library_uris_by_short_name:\n # This can happen if the same library is in the list\n # twice, capitalized differently.\n raise ValueError(\"Duplicate short name: %s\" % short_name)\n self.library_uris_by_short_name[short_name] = uri\n self.secrets_by_library_uri[uri] = secret\n\n self.log = logging.getLogger(\"Adobe authdata utility\")\n\n self.short_token_signer = HMACAlgorithm(HMACAlgorithm.SHA256)\n self.short_token_signing_key = self.short_token_signer.prepare_key(self.secret)\n\n VENDOR_ID_KEY = \"vendor_id\"\n OTHER_LIBRARIES_KEY = \"other_libraries\"\n\n @classmethod\n def from_config(cls, library: Library, _db=None):\n \"\"\"Initialize an AuthdataUtility from site configuration.\n\n The library must be successfully registered with a discovery\n integration in order for that integration to be a candidate\n to provide configuration for the AuthdataUtility.\n\n :return: An AuthdataUtility if one is configured; otherwise None.\n\n :raise CannotLoadConfiguration: If an AuthdataUtility is\n incompletely configured.\n \"\"\"\n _db = _db or Session.object_session(library)\n if not _db:\n raise ValueError(\n \"No database connection provided and could not derive one from Library object!\"\n )\n # Use a version of the library\n library = _db.merge(library, load=False)\n\n # Try to find an external integration with a configured Vendor ID.\n integrations = (\n _db.query(ExternalIntegration)\n .outerjoin(ExternalIntegration.libraries)\n .filter(\n ExternalIntegration.protocol == ExternalIntegration.OPDS_REGISTRATION,\n ExternalIntegration.goal == ExternalIntegration.DISCOVERY_GOAL,\n Library.id == library.id,\n )\n )\n\n for possible_integration in integrations:\n vendor_id = ConfigurationSetting.for_externalintegration(\n cls.VENDOR_ID_KEY, possible_integration\n ).value\n registration_status = (\n ConfigurationSetting.for_library_and_externalintegration(\n _db,\n RegistrationConstants.LIBRARY_REGISTRATION_STATUS,\n library,\n possible_integration,\n ).value\n )\n if (\n vendor_id\n and registration_status == RegistrationConstants.SUCCESS_STATUS\n ):\n integration = possible_integration\n break\n else:\n return None\n\n library_uri = ConfigurationSetting.for_library(\n Configuration.WEBSITE_URL, library\n ).value\n\n vendor_id = integration.setting(cls.VENDOR_ID_KEY).value\n library_short_name = ConfigurationSetting.for_library_and_externalintegration(\n _db, ExternalIntegration.USERNAME, library, integration\n ).value\n secret = 
ConfigurationSetting.for_library_and_externalintegration(\n _db, ExternalIntegration.PASSWORD, library, integration\n ).value\n\n other_libraries = None\n adobe_integration = ExternalIntegration.lookup(\n _db,\n ExternalIntegration.ADOBE_VENDOR_ID,\n ExternalIntegration.DRM_GOAL,\n library=library,\n )\n if adobe_integration:\n other_libraries = adobe_integration.setting(\n cls.OTHER_LIBRARIES_KEY\n ).json_value\n other_libraries = other_libraries or dict()\n\n if not vendor_id or not library_uri or not library_short_name or not secret:\n raise CannotLoadConfiguration(\n \"Short Client Token configuration is incomplete. \"\n \"vendor_id (%s), username (%s), password (%s) and \"\n \"Library website_url (%s) must all be defined.\"\n % (vendor_id, library_uri, library_short_name, secret)\n )\n if \"|\" in library_short_name:\n raise CannotLoadConfiguration(\n \"Library short name cannot contain the pipe character.\"\n )\n return cls(vendor_id, library_uri, library_short_name, secret, other_libraries)\n\n @classmethod\n def adobe_relevant_credentials(self, patron):\n \"\"\"Find all Adobe-relevant Credential objects for the given\n patron.\n\n This includes the patron's identifier for Adobe ID purposes,\n and (less likely) any Adobe IDs directly associated with the\n Patron.\n\n :return: A SQLAlchemy query\n \"\"\"\n _db = Session.object_session(patron)\n types = (\n AdobeVendorIDModel.VENDOR_ID_UUID_TOKEN_TYPE,\n AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,\n )\n return (\n _db.query(Credential)\n .filter(Credential.patron == patron)\n .filter(Credential.type.in_(types))\n )\n\n def encode(self, patron_identifier):\n \"\"\"Generate an authdata JWT suitable for putting in an OPDS feed, where\n it can be picked up by a client and sent to the delegation\n authority to look up an Adobe ID.\n\n :return: A 2-tuple (vendor ID, authdata)\n \"\"\"\n if not patron_identifier:\n raise ValueError(\"No patron identifier specified\")\n now = utc_now()\n expires = now + datetime.timedelta(minutes=60)\n authdata = self._encode(self.library_uri, patron_identifier, now, expires)\n return self.vendor_id, authdata\n\n def _encode(self, iss=None, sub=None, iat=None, exp=None):\n \"\"\"Helper method split out separately for use in tests.\"\"\"\n payload = dict(iss=iss) # Issuer\n if sub:\n payload[\"sub\"] = sub # Subject\n if iat:\n payload[\"iat\"] = self.numericdate(iat) # Issued At\n if exp:\n payload[\"exp\"] = self.numericdate(exp) # Expiration Time\n return base64.encodebytes(\n jwt.encode(payload, self.secret, algorithm=self.ALGORITHM)\n )\n\n @classmethod\n def adobe_base64_encode(cls, str_to_encode):\n \"\"\"A modified base64 encoding that avoids triggering an Adobe bug.\n\n The bug seems to happen when the 'password' portion of a\n username/password pair contains a + character. So we replace +\n with :. We also replace / (another \"suspicious\" character)\n with ;. 
and strip newlines.\n \"\"\"\n if isinstance(str_to_encode, str):\n str_to_encode = str_to_encode.encode(\"utf-8\")\n encoded = base64.encodebytes(str_to_encode).decode(\"utf-8\").strip()\n return encoded.replace(\"+\", \":\").replace(\"/\", \";\").replace(\"=\", \"@\")\n\n @classmethod\n def adobe_base64_decode(cls, str):\n \"\"\"Undoes adobe_base64_encode.\"\"\"\n encoded = str.replace(\":\", \"+\").replace(\";\", \"/\").replace(\"@\", \"=\")\n return base64.decodebytes(encoded.encode(\"utf-8\"))\n\n def decode(self, authdata):\n \"\"\"Decode and verify an authdata JWT from one of the libraries managed\n by `secrets_by_library`.\n\n :return: a 2-tuple (library_uri, patron_identifier)\n\n :raise jwt.exceptions.DecodeError: When the JWT is not valid\n for any reason.\n \"\"\"\n\n self.log.info(\"Authdata.decode() received authdata %s\", authdata)\n # We are going to try to verify the authdata as is (in case\n # Adobe secretly decoded it en route), but we're also going to\n # try to decode it ourselves and verify it that way.\n potential_tokens = [authdata]\n try:\n decoded = base64.decodebytes(authdata)\n potential_tokens.append(decoded)\n except Exception as e:\n # Do nothing -- the authdata was not encoded to begin with.\n pass\n\n exceptions = []\n library_uri = subject = None\n for authdata in potential_tokens:\n try:\n return self._decode(authdata)\n except Exception as e:\n self.log.error(\"Error decoding %s\", authdata, exc_info=e)\n exceptions.append(e)\n\n # If we got to this point there is at least one exception\n # in the list.\n raise exceptions[-1]\n\n def _decode(self, authdata):\n # First, decode the authdata without checking the signature.\n decoded = jwt.decode(\n authdata, algorithm=self.ALGORITHM, options=dict(verify_signature=False)\n )\n\n # This lets us get the library URI, which lets us get the secret.\n library_uri = decoded.get(\"iss\")\n if not library_uri in self.secrets_by_library_uri:\n # The request came in without a library specified\n # or with an unknown library specified.\n raise jwt.exceptions.DecodeError(\"Unknown library: %s\" % library_uri)\n\n # We know the secret for this library, so we can re-decode the\n # secret and require signature valudation this time.\n secret = self.secrets_by_library_uri[library_uri]\n decoded = jwt.decode(authdata, secret, algorithm=self.ALGORITHM)\n if not \"sub\" in decoded:\n raise jwt.exceptions.DecodeError(\"No subject specified.\")\n return library_uri, decoded[\"sub\"]\n\n @classmethod\n def _adobe_patron_identifier(cls, patron):\n \"\"\"Take patron object and return identifier for Adobe ID purposes\"\"\"\n _db = Session.object_session(patron)\n internal = DataSource.lookup(_db, DataSource.INTERNAL_PROCESSING)\n\n def refresh(credential):\n credential.credential = str(uuid.uuid1())\n\n patron_identifier = Credential.lookup(\n _db,\n internal,\n AuthdataUtility.ADOBE_ACCOUNT_ID_PATRON_IDENTIFIER,\n patron,\n refresher_method=refresh,\n allow_persistent_token=True,\n )\n return patron_identifier.credential\n\n def short_client_token_for_patron(self, patron_information):\n \"\"\"Generate short client token for patron, or for a patron's identifier\n for Adobe ID purposes\"\"\"\n\n if isinstance(patron_information, Patron):\n # Find the patron's identifier for Adobe ID purposes.\n patron_identifier = self._adobe_patron_identifier(patron_information)\n else:\n patron_identifier = patron_information\n\n vendor_id, token = self.encode_short_client_token(patron_identifier)\n return vendor_id, token\n\n def _now(self):\n 
\"\"\"Function to return current time. Used to override in testing.\"\"\"\n return utc_now()\n\n def encode_short_client_token(self, patron_identifier, expires=None):\n \"\"\"Generate a short client token suitable for putting in an OPDS feed,\n where it can be picked up by a client and sent to the\n delegation authority to look up an Adobe ID.\n\n :return: A 2-tuple (vendor ID, token)\n \"\"\"\n if expires is None:\n expires = {\"minutes\": 60}\n if not patron_identifier:\n raise ValueError(\"No patron identifier specified\")\n expires = int(self.numericdate(self._now() + datetime.timedelta(**expires)))\n authdata = self._encode_short_client_token(\n self.short_name, patron_identifier, expires\n )\n return self.vendor_id, authdata\n\n def _encode_short_client_token(\n self, library_short_name, patron_identifier, expires\n ):\n base = library_short_name + \"|\" + str(expires) + \"|\" + patron_identifier\n signature = self.short_token_signer.sign(\n base.encode(\"utf-8\"), self.short_token_signing_key\n )\n signature = self.adobe_base64_encode(signature)\n if len(base) > 80:\n self.log.error(\n \"Username portion of short client token exceeds 80 characters; Adobe will probably truncate it.\"\n )\n if len(signature) > 76:\n self.log.error(\n \"Password portion of short client token exceeds 76 characters; Adobe will probably truncate it.\"\n )\n return base + \"|\" + signature\n\n def decode_short_client_token(self, token):\n \"\"\"Attempt to interpret a 'username' and 'password' as a short\n client token identifying a patron of a specific library.\n\n :return: a 2-tuple (library_uri, patron_identifier)\n\n :raise ValueError: When the token is not valid for any reason.\n \"\"\"\n if not \"|\" in token:\n raise ValueError(\n 'Supposed client token \"%s\" does not contain a pipe.' % token\n )\n\n username, password = token.rsplit(\"|\", 1)\n return self.decode_two_part_short_client_token(username, password)\n\n def decode_two_part_short_client_token(self, username, password):\n \"\"\"Decode a short client token that has already been split into\n two parts.\n \"\"\"\n signature = self.adobe_base64_decode(password)\n return self._decode_short_client_token(username, signature)\n\n def _decode_short_client_token(self, token, supposed_signature):\n \"\"\"Make sure a client token is properly formatted, correctly signed,\n and not expired.\n \"\"\"\n if token.count(\"|\") < 2:\n raise ValueError(\"Invalid client token: %s\" % token)\n library_short_name, expiration, patron_identifier = token.split(\"|\", 2)\n\n library_short_name = library_short_name.upper()\n try:\n expiration = float(expiration)\n except ValueError:\n raise ValueError('Expiration time \"%s\" is not numeric.' 
% expiration)\n\n # We don't police the content of the patron identifier but there\n # has to be _something_ there.\n if not patron_identifier:\n raise ValueError(\"Token %s has empty patron identifier\" % token)\n\n if not library_short_name in self.library_uris_by_short_name:\n raise ValueError(\n 'I don\\'t know how to handle tokens from library \"%s\"'\n % library_short_name\n )\n library_uri = self.library_uris_by_short_name[library_short_name]\n if not library_uri in self.secrets_by_library_uri:\n raise ValueError(\"I don't know the secret for library %s\" % library_uri)\n secret = self.secrets_by_library_uri[library_uri]\n\n # Don't bother checking an expired token.\n now = utc_now()\n expiration = self.EPOCH + datetime.timedelta(seconds=expiration)\n if expiration < now:\n raise ValueError(\n \"Token %s expired at %s (now is %s).\" % (token, expiration, now)\n )\n\n # Sign the token and check against the provided signature.\n key = self.short_token_signer.prepare_key(secret)\n actual_signature = self.short_token_signer.sign(token.encode(\"utf-8\"), key)\n\n if actual_signature != supposed_signature:\n raise ValueError(\"Invalid signature for %s.\" % token)\n\n return library_uri, patron_identifier\n\n EPOCH = datetime_utc(1970, 1, 1)\n\n @classmethod\n def numericdate(cls, d):\n \"\"\"Turn a datetime object into a NumericDate as per RFC 7519.\"\"\"\n return (d - cls.EPOCH).total_seconds()\n\n def migrate_adobe_id(self, patron):\n \"\"\"If the given patron has an Adobe ID stored as a Credential, also\n store it as a DelegatedPatronIdentifier.\n\n This method and its test should be removed once all instances have\n run the migration script\n 20161102-adobe-id-is-delegated-patron-identifier.py.\n \"\"\"\n\n _db = Session.object_session(patron)\n credential = get_one(\n _db,\n Credential,\n patron=patron,\n type=AdobeVendorIDModel.VENDOR_ID_UUID_TOKEN_TYPE,\n )\n if not credential:\n # This patron has no Adobe ID. Do nothing.\n return None, None\n adobe_id = credential.credential\n\n # Create a new Credential containing an anonymized patron ID.\n patron_identifier_credential = (\n AdobeVendorIDModel.get_or_create_patron_identifier_credential(patron)\n )\n\n # Then create a DelegatedPatronIdentifier mapping that\n # anonymized patron ID to the patron's Adobe ID.\n def create_function():\n \"\"\"This will be called as the DelegatedPatronIdentifier\n is created. We already know the patron's Adobe ID and just\n want to store it in the DPI.\n \"\"\"\n return adobe_id\n\n delegated_identifier, is_new = DelegatedPatronIdentifier.get_one_or_create(\n _db,\n self.library_uri,\n patron_identifier_credential.credential,\n DelegatedPatronIdentifier.ADOBE_ACCOUNT_ID,\n create_function,\n )\n return patron_identifier_credential, delegated_identifier\n\n\nclass VendorIDLibraryConfigurationScript(Script):\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--website-url\",\n help='The URL to this library\\'s patron-facing website (not their circulation manager), e.g. \"https://nypl.org/\". This is used to uniquely identify a library.',\n )\n parser.add_argument(\n \"--short-name\",\n help='The short name the library will use in Short Client Tokens, e.g. 
\"NYNYPL\".',\n )\n parser.add_argument(\n \"--secret\",\n help=\"The secret the library will use to sign Short Client Tokens.\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(self._db, cmd_args=cmd_args)\n\n default_library = Library.default(_db)\n adobe_integration = ExternalIntegration.lookup(\n _db,\n ExternalIntegration.ADOBE_VENDOR_ID,\n ExternalIntegration.DRM_GOAL,\n library=default_library,\n )\n if not adobe_integration:\n output.write(\n \"Could not find an Adobe Vendor ID integration for default library %s.\\n\"\n % default_library.short_name\n )\n return\n\n setting = adobe_integration.setting(AuthdataUtility.OTHER_LIBRARIES_KEY)\n other_libraries = setting.json_value\n\n chosen_website = args.website_url\n if not chosen_website:\n for website in list(other_libraries.keys()):\n self.explain(output, other_libraries, website)\n return\n\n if not args.short_name and not args.secret:\n self.explain(output, other_libraries, chosen_website)\n return\n\n if not args.short_name or not args.secret:\n output.write(\n \"To configure a library you must provide both --short_name and --secret.\\n\"\n )\n return\n\n # All three arguments are specified. Set or modify the library's\n # SCT configuration.\n if chosen_website in other_libraries:\n what = \"change\"\n else:\n what = \"set\"\n output.write(\n \"About to %s the Short Client Token configuration for %s.\\n\"\n % (what, chosen_website)\n )\n if chosen_website in other_libraries:\n output.write(\"Old configuration:\\n\")\n short_name, secret = other_libraries[chosen_website]\n self.explain(output, other_libraries, chosen_website)\n other_libraries[chosen_website] = [args.short_name, args.secret]\n\n output.write(\"New configuration:\\n\")\n self.explain(output, other_libraries, chosen_website)\n setting.value = json.dumps(other_libraries)\n self._db.commit()\n\n def explain(self, output, libraries, website):\n if not website in libraries:\n raise ValueError(\"Library not configured: %s\" % website)\n short_name, secret = libraries[website]\n output.write(\"Website: %s\\n\" % website)\n output.write(\" Short name: %s\\n\" % short_name)\n output.write(\" Short Client Token secret: %s\\n\" % secret)\n\n\nclass ShortClientTokenLibraryConfigurationScript(Script):\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--website-url\",\n help='The URL to this library\\'s patron-facing website (not their circulation manager), e.g. \"https://nypl.org/\". This is used to uniquely identify a library.',\n required=True,\n )\n parser.add_argument(\n \"--vendor-id\",\n help=\"The name of the vendor ID the library will use. The default of 'NYPL' is probably what you want.\",\n default=\"NYPL\",\n )\n parser.add_argument(\n \"--short-name\",\n help='The short name the library will use in Short Client Tokens, e.g. 
\"NYBPL\".',\n )\n parser.add_argument(\n \"--secret\",\n help=\"The secret the library will use to sign Short Client Tokens.\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(self._db, cmd_args=cmd_args)\n\n self.set_secret(\n _db, args.website_url, args.vendor_id, args.short_name, args.secret, output\n )\n _db.commit()\n\n def set_secret(self, _db, website_url, vendor_id, short_name, secret, output):\n # Look up a library by its url setting.\n library_setting = get_one(\n _db,\n ConfigurationSetting,\n key=Configuration.WEBSITE_URL,\n value=website_url,\n )\n if not library_setting:\n available_urls = (\n _db.query(ConfigurationSetting)\n .filter(ConfigurationSetting.key == Configuration.WEBSITE_URL)\n .filter(ConfigurationSetting.library != None)\n )\n raise Exception(\n \"Could not locate library with URL %s. Available URLs: %s\"\n % (website_url, \",\".join(x.value for x in available_urls))\n )\n library = library_setting.library\n integration = ExternalIntegration.lookup(\n _db,\n ExternalIntegration.OPDS_REGISTRATION,\n ExternalIntegration.DISCOVERY_GOAL,\n library=library,\n )\n if not integration:\n integration, ignore = create(\n _db,\n ExternalIntegration,\n protocol=ExternalIntegration.OPDS_REGISTRATION,\n goal=ExternalIntegration.DISCOVERY_GOAL,\n )\n library.integrations.append(integration)\n\n vendor_id_s = integration.setting(AuthdataUtility.VENDOR_ID_KEY)\n username_s = ConfigurationSetting.for_library_and_externalintegration(\n _db, ExternalIntegration.USERNAME, library, integration\n )\n password_s = ConfigurationSetting.for_library_and_externalintegration(\n _db, ExternalIntegration.PASSWORD, library, integration\n )\n\n if vendor_id and short_name and secret:\n vendor_id_s.value = vendor_id\n username_s.value = short_name\n password_s.value = secret\n\n output.write(\"Current Short Client Token configuration for %s:\\n\" % website_url)\n output.write(\" Vendor ID: %s\\n\" % vendor_id_s.value)\n output.write(\" Library name: %s\\n\" % username_s.value)\n output.write(\" Shared secret: %s\\n\" % password_s.value)\n", "id": "9602245", "language": "Python", "matching_score": 5.395303249359131, "max_stars_count": 0, "path": "api/adobe_vendor_id.py" }, { "content": "import argparse\nimport logging\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\nimport traceback\nimport unicodedata\nimport uuid\nfrom collections import defaultdict\nfrom enum import Enum\n\nfrom sqlalchemy import and_, exists, text\nfrom sqlalchemy.exc import ProgrammingError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom .config import CannotLoadConfiguration, Configuration\nfrom .coverage import CollectionCoverageProviderJob, CoverageProviderProgress\nfrom .external_search import ExternalSearchIndex, Filter, SearchIndexCoverageProvider\nfrom .lane import Lane\nfrom .metadata_layer import (\n LinkData,\n MetaToModelUtility,\n ReplacementPolicy,\n TimestampData,\n)\nfrom .mirror import MirrorUploader\nfrom .model import (\n BaseCoverageRecord,\n CachedFeed,\n Collection,\n Complaint,\n ConfigurationSetting,\n Contributor,\n CustomList,\n DataSource,\n Edition,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n Library,\n LicensePool,\n LicensePoolDeliveryMechanism,\n Patron,\n PresentationCalculationPolicy,\n Representation,\n SessionManager,\n Subject,\n Timestamp,\n Work,\n WorkCoverageRecord,\n create,\n get_one,\n 
get_one_or_create,\n production_session,\n site_configuration_has_changed,\n)\nfrom .model.configuration import ExternalIntegrationLink\nfrom .monitor import CollectionMonitor, ReaperMonitor\nfrom .opds_import import OPDSImporter, OPDSImportMonitor\nfrom .util import fast_query_count\nfrom .util.datetime_helpers import strptime_utc, to_utc, utc_now\nfrom .util.personal_names import contributor_name_match_ratio, display_name_to_sort_name\nfrom .util.worker_pools import DatabasePool\n\n\nclass Script(object):\n @property\n def _db(self):\n if not hasattr(self, \"_session\"):\n self._session = production_session()\n return self._session\n\n @property\n def script_name(self):\n \"\"\"Find or guess the name of the script.\n\n This is either the .name of the Script object or the name of\n the class.\n \"\"\"\n return getattr(self, \"name\", self.__class__.__name__)\n\n @property\n def log(self):\n if not hasattr(self, \"_log\"):\n self._log = logging.getLogger(self.script_name)\n return self._log\n\n @property\n def data_directory(self):\n return Configuration.data_directory()\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None):\n parser = cls.arg_parser()\n return parser.parse_known_args(cmd_args)[0]\n\n @classmethod\n def arg_parser(cls):\n raise NotImplementedError()\n\n @classmethod\n def parse_time(cls, time_string):\n \"\"\"Try to pass the given string as a time.\"\"\"\n if not time_string:\n return None\n for format in (\"%Y-%m-%d\", \"%m/%d/%Y\", \"%Y%m%d\"):\n for hours in (\"\", \" %H:%M:%S\"):\n full_format = format + hours\n try:\n parsed = strptime_utc(time_string, full_format)\n return parsed\n except ValueError as e:\n continue\n raise ValueError(\"Could not parse time: %s\" % time_string)\n\n def __init__(self, _db=None, *args, **kwargs):\n \"\"\"Basic constructor.\n\n :_db: A database session to be used instead of\n creating a new one. 
Useful in tests.\n \"\"\"\n if _db:\n self._session = _db\n\n def run(self):\n self.load_configuration()\n DataSource.well_known_sources(self._db)\n start_time = utc_now()\n try:\n timestamp_data = self.do_run()\n if not isinstance(timestamp_data, TimestampData):\n # Ignore any nonstandard return value from do_run().\n timestamp_data = None\n self.update_timestamp(timestamp_data, start_time, None)\n except Exception as e:\n logging.error(\"Fatal exception while running script: %s\", e, exc_info=e)\n stack_trace = traceback.format_exc()\n self.update_timestamp(None, start_time, stack_trace)\n raise\n\n def load_configuration(self):\n if not Configuration.cdns_loaded_from_database():\n Configuration.load(self._db)\n\n def update_timestamp(self, timestamp_data, start_time, exception):\n \"\"\"By default scripts have no timestamp of their own.\n\n Most scripts either work through Monitors or CoverageProviders,\n which have their own logic for creating timestamps, or they\n are designed to be run interactively from the command-line, so\n facts about when they last ran are not relevant.\n\n :param start_time: The time the script started running.\n :param exception: A stack trace for the exception, if any,\n that stopped the script from running.\n \"\"\"\n\n\nclass TimestampScript(Script):\n \"\"\"A script that automatically records a timestamp whenever it runs.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TimestampScript, self).__init__(*args, **kwargs)\n self.timestamp_collection = None\n\n def update_timestamp(self, timestamp_data, start, exception):\n \"\"\"Update the appropriate Timestamp for this script.\n\n :param timestamp_data: A TimestampData representing what the script\n itself thinks its timestamp should look like. Data will be filled in\n where it is missing, but it will not be modified if present.\n\n :param start: The time at which this script believes the\n service started running. The script itself may change this\n value for its own purposes.\n\n :param exception: The exception with which this script\n believes the service stopped running. The script itself may\n change this value for its own purposes.\n \"\"\"\n if timestamp_data is None:\n timestamp_data = TimestampData()\n timestamp_data.finalize(\n self.script_name,\n Timestamp.SCRIPT_TYPE,\n self.timestamp_collection,\n start=start,\n exception=exception,\n )\n timestamp_data.apply(self._db)\n\n\nclass RunMonitorScript(Script):\n def __init__(self, monitor, _db=None, **kwargs):\n super(RunMonitorScript, self).__init__(_db)\n if issubclass(monitor, CollectionMonitor):\n self.collection_monitor = monitor\n self.collection_monitor_kwargs = kwargs\n self.monitor = None\n self.name = self.collection_monitor.SERVICE_NAME\n else:\n self.collection_monitor = None\n if callable(monitor):\n monitor = monitor(self._db, **kwargs)\n self.monitor = monitor\n self.name = self.monitor.service_name\n\n def do_run(self):\n if self.monitor:\n self.monitor.run()\n elif self.collection_monitor:\n logging.warning(\n \"Running a CollectionMonitor by delegating to RunCollectionMonitorScript. \"\n \"It would be better if you used RunCollectionMonitorScript directly.\"\n )\n RunCollectionMonitorScript(\n self.collection_monitor, self._db, **self.collection_monitor_kwargs\n ).run()\n\n\nclass RunMultipleMonitorsScript(Script):\n \"\"\"Run a number of monitors in sequence.\n\n Currently the Monitors are run one at a time. It should be\n possible to take a command-line argument that runs all the\n Monitors in batches, each in its own thread. 
Unfortunately, it's\n tough to know in a given situation that this won't overload the\n system.\n \"\"\"\n\n def __init__(self, _db=None, **kwargs):\n \"\"\"Constructor.\n\n :param kwargs: Keyword arguments to pass into the `monitors` method\n when building the Monitor objects.\n \"\"\"\n super(RunMultipleMonitorsScript, self).__init__(_db)\n self.kwargs = kwargs\n\n def monitors(self, **kwargs):\n \"\"\"Find all the Monitors that need to be run.\n\n :return: A list of Monitor objects.\n \"\"\"\n raise NotImplementedError()\n\n def do_run(self):\n for monitor in self.monitors(**self.kwargs):\n try:\n monitor.run()\n except Exception as e:\n # This is bad, but not so bad that we should give up trying\n # to run the other Monitors.\n if monitor.collection:\n collection_name = monitor.collection.name\n else:\n collection_name = None\n monitor.exception = e\n self.log.error(\n \"Error running monitor %s for collection %s: %s\",\n self.name,\n collection_name,\n e,\n exc_info=e,\n )\n\n\nclass RunReaperMonitorsScript(RunMultipleMonitorsScript):\n \"\"\"Run all the monitors found in ReaperMonitor.REGISTRY\"\"\"\n\n name = \"Run all reaper monitors\"\n\n def monitors(self, **kwargs):\n return [cls(self._db, **kwargs) for cls in ReaperMonitor.REGISTRY]\n\n\nclass RunCoverageProvidersScript(Script):\n \"\"\"Alternate between multiple coverage providers.\"\"\"\n\n def __init__(self, providers, _db=None):\n super(RunCoverageProvidersScript, self).__init__(_db=_db)\n self.providers = []\n for i in providers:\n if callable(i):\n i = i(self._db)\n self.providers.append(i)\n\n def do_run(self):\n providers = list(self.providers)\n if not providers:\n self.log.info(\"No CoverageProviders to run.\")\n\n progress = []\n while providers:\n random.shuffle(providers)\n for provider in providers:\n self.log.debug(\"Running %s\", provider.service_name)\n\n try:\n provider_progress = provider.run_once_and_update_timestamp()\n progress.append(provider_progress)\n except Exception as e:\n self.log.error(\n \"Error in %r, moving on to next CoverageProvider.\",\n provider,\n exc_info=e,\n )\n\n self.log.debug(\"Completed %s\", provider.service_name)\n providers.remove(provider)\n return progress\n\n\nclass RunCollectionCoverageProviderScript(RunCoverageProvidersScript):\n \"\"\"Run the same CoverageProvider code for all Collections that\n get their licenses from the appropriate place.\n \"\"\"\n\n def __init__(self, provider_class, _db=None, providers=None, **kwargs):\n _db = _db or self._db\n providers = providers or list()\n if provider_class:\n providers += self.get_providers(_db, provider_class, **kwargs)\n super(RunCollectionCoverageProviderScript, self).__init__(providers, _db=_db)\n\n def get_providers(self, _db, provider_class, **kwargs):\n return list(provider_class.all(_db, **kwargs))\n\n\nclass RunThreadedCollectionCoverageProviderScript(Script):\n \"\"\"Run coverage providers in multiple threads.\"\"\"\n\n DEFAULT_WORKER_SIZE = 5\n\n def __init__(self, provider_class, worker_size=None, _db=None, **provider_kwargs):\n super(RunThreadedCollectionCoverageProviderScript, self).__init__(_db)\n\n self.worker_size = worker_size or self.DEFAULT_WORKER_SIZE\n self.session_factory = SessionManager.sessionmaker(session=self._db)\n\n # Use a database from the factory.\n if not _db:\n # Close the new, autogenerated database session.\n self._session.close()\n self._session = self.session_factory()\n\n self.provider_class = provider_class\n self.provider_kwargs = provider_kwargs\n\n def run(self, pool=None):\n 
\"\"\"Runs a CollectionCoverageProvider with multiple threads and\n updates the timestamp accordingly.\n\n :param pool: A DatabasePool (or other) object for use in testing\n environments.\n \"\"\"\n collections = self.provider_class.collections(self._db)\n if not collections:\n return\n\n for collection in collections:\n provider = self.provider_class(collection, **self.provider_kwargs)\n with (\n pool or DatabasePool(self.worker_size, self.session_factory)\n ) as job_queue:\n query_size, batch_size = self.get_query_and_batch_sizes(provider)\n # Without a commit, the query to count which items need\n # coverage hangs in the database, blocking the threads.\n self._db.commit()\n\n offset = 0\n # TODO: We create a separate 'progress' object\n # for each job, and each will overwrite the timestamp\n # value as its complets. It woudl be better if all the\n # jobs could share a single 'progress' object.\n while offset < query_size:\n progress = CoverageProviderProgress(start=utc_now())\n progress.offset = offset\n job = CollectionCoverageProviderJob(\n collection,\n self.provider_class,\n progress,\n **self.provider_kwargs,\n )\n job_queue.put(job)\n offset += batch_size\n\n def get_query_and_batch_sizes(self, provider):\n qu = provider.items_that_need_coverage(\n count_as_covered=BaseCoverageRecord.DEFAULT_COUNT_AS_COVERED\n )\n return fast_query_count(qu), provider.batch_size\n\n\nclass RunWorkCoverageProviderScript(RunCollectionCoverageProviderScript):\n \"\"\"Run a WorkCoverageProvider on every relevant Work in the system.\"\"\"\n\n # This class overrides RunCollectionCoverageProviderScript just to\n # take advantage of the constructor; it doesn't actually use the\n # concept of 'collections' at all.\n\n def get_providers(self, _db, provider_class, **kwargs):\n return [provider_class(_db, **kwargs)]\n\n\nclass InputScript(Script):\n @classmethod\n def read_stdin_lines(self, stdin):\n \"\"\"Read lines from a (possibly mocked, possibly empty) standard input.\"\"\"\n if stdin is not sys.stdin or not os.isatty(0):\n # A file has been redirected into standard input. 
Grab its\n # lines.\n lines = [x.strip() for x in stdin.readlines()]\n else:\n lines = []\n return lines\n\n\nclass IdentifierInputScript(InputScript):\n \"\"\"A script that takes identifiers as command line inputs.\"\"\"\n\n DATABASE_ID = \"Database ID\"\n\n @classmethod\n def parse_command_line(\n cls, _db=None, cmd_args=None, stdin=sys.stdin, *args, **kwargs\n ):\n parser = cls.arg_parser()\n parsed = parser.parse_args(cmd_args)\n stdin = cls.read_stdin_lines(stdin)\n return cls.look_up_identifiers(_db, parsed, stdin, *args, **kwargs)\n\n @classmethod\n def look_up_identifiers(\n cls, _db, parsed, stdin_identifier_strings, *args, **kwargs\n ):\n \"\"\"Turn identifiers as specified on the command line into\n real database Identifier objects.\n \"\"\"\n data_source = None\n if parsed.identifier_data_source:\n data_source = DataSource.lookup(_db, parsed.identifier_data_source)\n if _db and parsed.identifier_type:\n # We can also call parse_identifier_list.\n identifier_strings = parsed.identifier_strings\n if stdin_identifier_strings:\n identifier_strings = identifier_strings + stdin_identifier_strings\n parsed.identifiers = cls.parse_identifier_list(\n _db,\n parsed.identifier_type,\n data_source,\n identifier_strings,\n *args,\n **kwargs,\n )\n else:\n # The script can call parse_identifier_list later if it\n # wants to.\n parsed.identifiers = None\n return parsed\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--identifier-type\",\n help='Process identifiers of this type. If IDENTIFIER is not specified, all identifiers of this type will be processed. To name identifiers by their database ID, use --identifier-type=\"Database ID\"',\n )\n parser.add_argument(\n \"--identifier-data-source\",\n help=\"Process only identifiers which have a LicensePool associated with this DataSource\",\n )\n parser.add_argument(\n \"identifier_strings\",\n help=\"A specific identifier to process.\",\n metavar=\"IDENTIFIER\",\n nargs=\"*\",\n )\n return parser\n\n @classmethod\n def parse_identifier_list(\n cls, _db, identifier_type, data_source, arguments, autocreate=False\n ):\n \"\"\"Turn a list of identifiers into a list of Identifier objects.\n\n The list of arguments is probably derived from a command-line\n parser such as the one defined in\n IdentifierInputScript.arg_parser().\n\n This makes it easy to identify specific identifiers on the\n command line. Examples:\n\n 1 2\n\n a b c\n \"\"\"\n identifiers = []\n\n if not identifier_type:\n raise ValueError(\n \"No identifier type specified! 
Use '--identifier-type=\\\"Database ID\\\"' to name identifiers by database ID.\"\n )\n\n if len(arguments) == 0:\n if data_source:\n identifiers = (\n _db.query(Identifier)\n .join(Identifier.licensed_through)\n .filter(\n Identifier.type == identifier_type,\n LicensePool.data_source == data_source,\n )\n .all()\n )\n return identifiers\n\n for arg in arguments:\n if identifier_type == cls.DATABASE_ID:\n try:\n arg = int(arg)\n except ValueError as e:\n # We'll print out a warning later.\n arg = None\n if arg:\n identifier = get_one(_db, Identifier, id=arg)\n else:\n identifier, ignore = Identifier.for_foreign_id(\n _db, identifier_type, arg, autocreate=autocreate\n )\n if not identifier:\n logging.warning(\"Could not load identifier %s/%s\", identifier_type, arg)\n if identifier:\n identifiers.append(identifier)\n return identifiers\n\n\nclass LibraryInputScript(InputScript):\n \"\"\"A script that operates on one or more Libraries.\"\"\"\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None, *args, **kwargs):\n parser = cls.arg_parser(_db)\n parsed = parser.parse_args(cmd_args)\n return cls.look_up_libraries(_db, parsed, *args, **kwargs)\n\n @classmethod\n def arg_parser(cls, _db, multiple_libraries=True):\n parser = argparse.ArgumentParser()\n library_names = sorted(l.short_name for l in _db.query(Library))\n library_names = '\"' + '\", \"'.join(library_names) + '\"'\n parser.add_argument(\n \"libraries\",\n help=\"Name of a specific library to process. Libraries on this system: %s\"\n % library_names,\n metavar=\"SHORT_NAME\",\n nargs=\"*\" if multiple_libraries else 1,\n )\n return parser\n\n @classmethod\n def look_up_libraries(cls, _db, parsed, *args, **kwargs):\n \"\"\"Turn library names as specified on the command line into real\n Library objects.\n \"\"\"\n if _db:\n library_strings = parsed.libraries\n if library_strings:\n parsed.libraries = cls.parse_library_list(\n _db, library_strings, *args, **kwargs\n )\n else:\n # No libraries are specified. We will be processing\n # every library.\n parsed.libraries = _db.query(Library).all()\n else:\n # Database is not active yet. 
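# --- Illustrative aside; not part of look_up_libraries. A sketch of driving the
# identifier parsing described above directly, given a database session `_db`;
# the identifier type and values are hypothetical:
example_identifiers = IdentifierInputScript.parse_identifier_list(
    _db, "Overdrive ID", None, ["abc-123", "def-456"]
)
# With --identifier-type="Database ID", the arguments are instead treated as
# Identifier primary keys and fetched with get_one().
# --- End of illustrative aside. ---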
The script can call\n # parse_library_list later if it wants to.\n parsed.libraries = None\n return parsed\n\n @classmethod\n def parse_library_list(cls, _db, arguments):\n \"\"\"Turn a list of library short names into a list of Library objects.\n\n The list of arguments is probably derived from a command-line\n parser such as the one defined in\n LibraryInputScript.arg_parser().\n \"\"\"\n if len(arguments) == 0:\n return []\n libraries = []\n for arg in arguments:\n if not arg:\n continue\n for field in (Library.short_name, Library.name):\n try:\n library = _db.query(Library).filter(field == arg).one()\n except NoResultFound:\n continue\n except MultipleResultsFound:\n continue\n if library:\n libraries.append(library)\n break\n else:\n logging.warning(\"Could not find library %s\", arg)\n return libraries\n\n def do_run(self, *args, **kwargs):\n parsed = self.parse_command_line(self._db, *args, **kwargs)\n self.process_libraries(parsed.libraries)\n\n def process_libraries(self, libraries):\n for library in libraries:\n self.process_library(library)\n\n def process_library(self, library):\n raise NotImplementedError()\n\n\nclass PatronInputScript(LibraryInputScript):\n \"\"\"A script that operates on one or more Patrons.\"\"\"\n\n @classmethod\n def parse_command_line(\n cls, _db=None, cmd_args=None, stdin=sys.stdin, *args, **kwargs\n ):\n parser = cls.arg_parser(_db)\n parsed = parser.parse_args(cmd_args)\n if stdin:\n stdin = cls.read_stdin_lines(stdin)\n parsed = super(PatronInputScript, cls).look_up_libraries(\n _db, parsed, *args, **kwargs\n )\n return cls.look_up_patrons(_db, parsed, stdin, *args, **kwargs)\n\n @classmethod\n def arg_parser(cls, _db):\n parser = super(PatronInputScript, cls).arg_parser(_db, multiple_libraries=False)\n parser.add_argument(\n \"identifiers\",\n help=\"A specific patron identifier to process.\",\n metavar=\"IDENTIFIER\",\n nargs=\"+\",\n )\n return parser\n\n @classmethod\n def look_up_patrons(cls, _db, parsed, stdin_patron_strings, *args, **kwargs):\n \"\"\"Turn patron identifiers as specified on the command line into real\n Patron objects.\n \"\"\"\n if _db:\n patron_strings = parsed.identifiers\n library = parsed.libraries[0]\n if stdin_patron_strings:\n patron_strings = patron_strings + stdin_patron_strings\n parsed.patrons = cls.parse_patron_list(\n _db, library, patron_strings, *args, **kwargs\n )\n else:\n # Database is not active yet. 
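# --- Illustrative aside; not part of look_up_patrons. parse_patron_list tries each
# value against authorization_identifier, username and external_identifier in turn,
# restricted to the given library. A sketch, given a session `_db` and a Library
# `library`; the barcode is hypothetical:
example_patrons = PatronInputScript.parse_patron_list(_db, library, ["1234567890"])
# --- End of illustrative aside. ---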
The script can call\n # parse_patron_list later if it wants to.\n parsed.patrons = None\n return parsed\n\n @classmethod\n def parse_patron_list(cls, _db, library, arguments):\n \"\"\"Turn a list of patron identifiers into a list of Patron objects.\n\n The list of arguments is probably derived from a command-line\n parser such as the one defined in\n PatronInputScript.arg_parser().\n \"\"\"\n if len(arguments) == 0:\n return []\n patrons = []\n for arg in arguments:\n if not arg:\n continue\n for field in (\n Patron.authorization_identifier,\n Patron.username,\n Patron.external_identifier,\n ):\n try:\n patron = (\n _db.query(Patron)\n .filter(field == arg)\n .filter(Patron.library_id == library.id)\n .one()\n )\n except NoResultFound:\n continue\n except MultipleResultsFound:\n continue\n if patron:\n patrons.append(patron)\n break\n else:\n logging.warning(\"Could not find patron %s\", arg)\n return patrons\n\n def do_run(self, *args, **kwargs):\n parsed = self.parse_command_line(self._db, *args, **kwargs)\n self.process_patrons(parsed.patrons)\n\n def process_patrons(self, patrons):\n for patron in patrons:\n self.process_patron(patron)\n\n def process_patron(self, patron):\n raise NotImplementedError()\n\n\nclass LaneSweeperScript(LibraryInputScript):\n \"\"\"Do something to each lane in a library.\"\"\"\n\n def process_library(self, library):\n from .lane import WorkList\n\n top_level = WorkList.top_level_for_library(self._db, library)\n queue = [top_level]\n while queue:\n new_queue = []\n for l in queue:\n if isinstance(l, Lane):\n l = self._db.merge(l)\n if self.should_process_lane(l):\n self.process_lane(l)\n self._db.commit()\n for sublane in l.children:\n new_queue.append(sublane)\n queue = new_queue\n\n def should_process_lane(self, lane):\n return True\n\n def process_lane(self, lane):\n pass\n\n\nclass CustomListSweeperScript(LibraryInputScript):\n \"\"\"Do something to each custom list in a library.\"\"\"\n\n def process_library(self, library):\n lists = self._db.query(CustomList).filter(CustomList.library_id == library.id)\n for l in lists:\n self.process_custom_list(l)\n self._db.commit()\n\n def process_custom_list(self, custom_list):\n pass\n\n\nclass SubjectInputScript(Script):\n \"\"\"A script whose command line filters the set of Subjects.\n\n :return: a 2-tuple (subject type, subject filter) that can be\n passed into the SubjectSweepMonitor constructor.\n\n \"\"\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--subject-type\", help=\"Process subjects of this type\")\n parser.add_argument(\n \"--subject-filter\",\n help=\"Process subjects whose names or identifiers match this substring\",\n )\n return parser\n\n\nclass RunCoverageProviderScript(IdentifierInputScript):\n \"\"\"Run a single coverage provider.\"\"\"\n\n @classmethod\n def arg_parser(cls):\n parser = IdentifierInputScript.arg_parser()\n parser.add_argument(\n \"--cutoff-time\",\n help=\"Update existing coverage records if they were originally created after this time.\",\n )\n return parser\n\n @classmethod\n def parse_command_line(cls, _db, cmd_args=None, stdin=sys.stdin, *args, **kwargs):\n parser = cls.arg_parser()\n parsed = parser.parse_args(cmd_args)\n stdin = cls.read_stdin_lines(stdin)\n parsed = cls.look_up_identifiers(_db, parsed, stdin, *args, **kwargs)\n if parsed.cutoff_time:\n parsed.cutoff_time = cls.parse_time(parsed.cutoff_time)\n return parsed\n\n def __init__(\n self, provider, _db=None, cmd_args=None, *provider_args, 
**provider_kwargs\n ):\n\n super(RunCoverageProviderScript, self).__init__(_db)\n parsed_args = self.parse_command_line(self._db, cmd_args)\n if parsed_args.identifier_type:\n self.identifier_type = parsed_args.identifier_type\n self.identifier_types = [self.identifier_type]\n else:\n self.identifier_type = None\n self.identifier_types = []\n\n if parsed_args.identifiers:\n self.identifiers = parsed_args.identifiers\n else:\n self.identifiers = []\n\n if callable(provider):\n kwargs = self.extract_additional_command_line_arguments()\n kwargs.update(provider_kwargs)\n\n provider = provider(\n self._db, *provider_args, cutoff_time=parsed_args.cutoff_time, **kwargs\n )\n self.provider = provider\n self.name = self.provider.service_name\n\n def extract_additional_command_line_arguments(self):\n \"\"\"A hook method for subclasses.\n\n Turns command-line arguments into additional keyword arguments\n to the CoverageProvider constructor.\n\n By default, pass in a value used only by CoverageProvider\n (as opposed to WorkCoverageProvider).\n \"\"\"\n return {\n \"input_identifiers\": self.identifiers,\n }\n\n def do_run(self):\n if self.identifiers:\n self.provider.run_on_specific_identifiers(self.identifiers)\n else:\n self.provider.run()\n\n\nclass ShowLibrariesScript(Script):\n \"\"\"Show information about the libraries on a server.\"\"\"\n\n name = \"List the libraries on this server.\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--short-name\",\n help=\"Only display information for the library with the given short name\",\n )\n parser.add_argument(\n \"--show-secrets\",\n help=\"Print out secrets associated with the library.\",\n action=\"store_true\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if args.short_name:\n library = get_one(_db, Library, short_name=args.short_name)\n libraries = [library]\n else:\n libraries = _db.query(Library).order_by(Library.name).all()\n if not libraries:\n output.write(\"No libraries found.\\n\")\n for library in libraries:\n output.write(\"\\n\".join(library.explain(include_secrets=args.show_secrets)))\n output.write(\"\\n\")\n\n\nclass ConfigurationSettingScript(Script):\n @classmethod\n def _parse_setting(self, setting):\n \"\"\"Parse a command-line setting option into a key-value pair.\"\"\"\n if not \"=\" in setting:\n raise ValueError(\n 'Incorrect format for setting: \"%s\". 
Should be \"key=value\"' % setting\n )\n return setting.split(\"=\", 1)\n\n @classmethod\n def add_setting_argument(self, parser, help):\n \"\"\"Modify an ArgumentParser to indicate that the script takes\n command-line settings.\n \"\"\"\n parser.add_argument(\"--setting\", help=help, action=\"append\")\n\n def apply_settings(self, settings, obj):\n \"\"\"Treat `settings` as a list of command-line argument settings,\n and apply each one to `obj`.\n \"\"\"\n if not settings:\n return None\n for setting in settings:\n key, value = self._parse_setting(setting)\n obj.setting(key).value = value\n\n\nclass ConfigureSiteScript(ConfigurationSettingScript):\n \"\"\"View or update site-wide configuration.\"\"\"\n\n def __init__(self, _db=None, config=Configuration):\n self.config = config\n super(ConfigureSiteScript, self).__init__(_db=_db)\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--show-secrets\",\n help=\"Include secrets when displaying site settings.\",\n action=\"store_true\",\n default=False,\n )\n\n cls.add_setting_argument(\n parser,\n 'Set a site-wide setting, such as default_nongrouped_feed_max_age. Format: --setting=\"default_nongrouped_feed_max_age=1200\"',\n )\n\n parser.add_argument(\n \"--force\",\n help=\"Set a site-wide setting even if the key isn't a known setting.\",\n dest=\"force\",\n action=\"store_true\",\n )\n\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if args.setting:\n for setting in args.setting:\n key, value = self._parse_setting(setting)\n if not args.force and not key in [\n s.get(\"key\") for s in self.config.SITEWIDE_SETTINGS\n ]:\n raise ValueError(\n \"'%s' is not a known site-wide setting. Use --force to set it anyway.\"\n % key\n )\n else:\n ConfigurationSetting.sitewide(_db, key).value = value\n output.write(\n \"\\n\".join(\n ConfigurationSetting.explain(_db, include_secrets=args.show_secrets)\n )\n )\n site_configuration_has_changed(_db)\n _db.commit()\n\n\nclass ConfigureLibraryScript(ConfigurationSettingScript):\n \"\"\"Create a library or change its settings.\"\"\"\n\n name = \"Change a library's settings\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n help=\"Official name of the library\",\n )\n parser.add_argument(\n \"--short-name\",\n help=\"Short name of the library\",\n )\n cls.add_setting_argument(\n parser,\n 'Set a per-library setting, such as terms-of-service. Format: --setting=\"terms-of-service=https://example.library/tos\"',\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if not args.short_name:\n raise ValueError(\"You must identify the library by its short name.\")\n\n # Are we talking about an existing library?\n libraries = _db.query(Library).all()\n\n if libraries:\n # Currently there can only be one library, and one already exists.\n [library] = libraries\n if args.short_name and library.short_name != args.short_name:\n raise ValueError(\"Could not locate library '%s'\" % args.short_name)\n else:\n # No existing library. 
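# --- Illustrative aside; not part of do_run. Each --setting value handed to
# apply_settings() is split on the first "=" by _parse_setting(), e.g.:
key, value = ConfigurationSettingScript._parse_setting(
    "terms-of-service=https://example.library/tos"
)  # key == "terms-of-service", value == "https://example.library/tos"
# A value with no "=" raises ValueError.
# --- End of illustrative aside. ---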
Make one.\n library, ignore = get_one_or_create(\n _db,\n Library,\n create_method_kwargs=dict(\n uuid=str(uuid.uuid4()),\n short_name=args.short_name,\n ),\n )\n\n if args.name:\n library.name = args.name\n if args.short_name:\n library.short_name = args.short_name\n self.apply_settings(args.setting, library)\n site_configuration_has_changed(_db)\n _db.commit()\n output.write(\"Configuration settings stored.\\n\")\n output.write(\"\\n\".join(library.explain()))\n output.write(\"\\n\")\n\n\nclass ShowCollectionsScript(Script):\n \"\"\"Show information about the collections on a server.\"\"\"\n\n name = \"List the collections on this server.\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n help=\"Only display information for the collection with the given name\",\n )\n parser.add_argument(\n \"--show-secrets\",\n help=\"Display secret values such as passwords.\",\n action=\"store_true\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if args.name:\n name = args.name\n collection = get_one(_db, Collection, name=name)\n if collection:\n collections = [collection]\n else:\n output.write(\"Could not locate collection by name: %s\" % name)\n collections = []\n else:\n collections = _db.query(Collection).order_by(Collection.name).all()\n if not collections:\n output.write(\"No collections found.\\n\")\n for collection in collections:\n output.write(\n \"\\n\".join(collection.explain(include_secrets=args.show_secrets))\n )\n output.write(\"\\n\")\n\n\nclass ShowIntegrationsScript(Script):\n \"\"\"Show information about the external integrations on a server.\"\"\"\n\n name = \"List the external integrations on this server.\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n help=\"Only display information for the integration with the given name or ID\",\n )\n parser.add_argument(\n \"--show-secrets\",\n help=\"Display secret values such as passwords.\",\n action=\"store_true\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if args.name:\n name = args.name\n integration = get_one(_db, ExternalIntegration, name=name)\n if not integration:\n integration = get_one(_db, ExternalIntegration, id=name)\n if integration:\n integrations = [integration]\n else:\n output.write(\"Could not locate integration by name or ID: %s\\n\" % args)\n integrations = []\n else:\n integrations = (\n _db.query(ExternalIntegration)\n .order_by(ExternalIntegration.name, ExternalIntegration.id)\n .all()\n )\n if not integrations:\n output.write(\"No integrations found.\\n\")\n for integration in integrations:\n output.write(\n \"\\n\".join(integration.explain(include_secrets=args.show_secrets))\n )\n output.write(\"\\n\")\n\n\nclass ConfigureCollectionScript(ConfigurationSettingScript):\n \"\"\"Create a collection or change its settings.\"\"\"\n\n name = \"Change a collection's settings\"\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None):\n parser = cls.arg_parser(_db)\n return parser.parse_known_args(cmd_args)[0]\n\n @classmethod\n def arg_parser(cls, _db):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--name\", help=\"Name of the collection\", required=True)\n parser.add_argument(\n \"--protocol\",\n help='Protocol to 
use to get the licenses. Possible values: \"%s\"'\n % ('\", \"'.join(ExternalIntegration.LICENSE_PROTOCOLS)),\n )\n parser.add_argument(\n \"--external-account-id\",\n help='The ID of this collection according to the license source. Sometimes called a \"library ID\".',\n )\n parser.add_argument(\n \"--url\",\n help=\"Run the acquisition protocol against this URL.\",\n )\n parser.add_argument(\n \"--username\",\n help='Use this username to authenticate with the license protocol. Sometimes called a \"key\".',\n )\n parser.add_argument(\n \"--password\",\n help='Use this password to authenticate with the license protocol. Sometimes called a \"secret\".',\n )\n cls.add_setting_argument(\n parser,\n 'Set a protocol-specific setting on the collection, such as Overdrive\\'s \"website_id\". Format: --setting=\"website_id=89\"',\n )\n library_names = cls._library_names(_db)\n if library_names:\n parser.add_argument(\n \"--library\",\n help=\"Associate this collection with the given library. Possible libraries: %s\"\n % library_names,\n action=\"append\",\n )\n\n return parser\n\n @classmethod\n def _library_names(self, _db):\n \"\"\"Return a string that lists known library names.\"\"\"\n library_names = [\n x.short_name for x in _db.query(Library).order_by(Library.short_name)\n ]\n if library_names:\n return '\"' + '\", \"'.join(library_names) + '\"'\n return \"\"\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n\n # Find or create the collection\n protocol = None\n name = args.name\n protocol = args.protocol\n collection = get_one(_db, Collection, Collection.name == name)\n if not collection:\n if protocol:\n collection, is_new = Collection.by_name_and_protocol(\n _db, name, protocol\n )\n else:\n # We didn't find a Collection, and we don't have a protocol,\n # so we can't create a new Collection.\n raise ValueError(\n 'No collection called \"%s\". You can create it, but you must specify a protocol.'\n % name\n )\n integration = collection.external_integration\n if protocol:\n integration.protocol = protocol\n if args.external_account_id:\n collection.external_account_id = args.external_account_id\n\n if args.url:\n integration.url = args.url\n if args.username:\n integration.username = args.username\n if args.password:\n integration.password = args.password\n self.apply_settings(args.setting, integration)\n\n if hasattr(args, \"library\"):\n for name in args.library:\n library = get_one(_db, Library, short_name=name)\n if not library:\n library_names = self._library_names(_db)\n message = 'No such library: \"%s\".' 
% name\n if library_names:\n message += \" I only know about: %s\" % library_names\n raise ValueError(message)\n if collection not in library.collections:\n library.collections.append(collection)\n site_configuration_has_changed(_db)\n _db.commit()\n output.write(\"Configuration settings stored.\\n\")\n output.write(\"\\n\".join(collection.explain()))\n output.write(\"\\n\")\n\n\nclass ConfigureIntegrationScript(ConfigurationSettingScript):\n \"\"\"Create a integration or change its settings.\"\"\"\n\n name = \"Create a site-wide integration or change an integration's settings\"\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None):\n parser = cls.arg_parser(_db)\n return parser.parse_known_args(cmd_args)[0]\n\n @classmethod\n def arg_parser(cls, _db):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n help=\"Name of the integration\",\n )\n parser.add_argument(\n \"--id\",\n help=\"ID of the integration, if it has no name\",\n )\n parser.add_argument(\n \"--protocol\",\n help=\"Protocol used by the integration.\",\n )\n parser.add_argument(\n \"--goal\",\n help=\"Goal of the integration\",\n )\n cls.add_setting_argument(\n parser,\n 'Set a configuration value on the integration. Format: --setting=\"key=value\"',\n )\n return parser\n\n @classmethod\n def _integration(self, _db, id, name, protocol, goal):\n \"\"\"Find or create the ExternalIntegration referred to.\"\"\"\n if not id and not name and not (protocol and goal):\n raise ValueError(\n \"An integration must by identified by either ID, name, or the combination of protocol and goal.\"\n )\n integration = None\n if id:\n integration = get_one(\n _db, ExternalIntegration, ExternalIntegration.id == id\n )\n if not integration:\n raise ValueError(\"No integration with ID %s.\" % id)\n if name:\n integration = get_one(_db, ExternalIntegration, name=name)\n if not integration and not (protocol and goal):\n raise ValueError(\n 'No integration with name \"%s\". 
To create it, you must also provide protocol and goal.'\n % name\n )\n if not integration and (protocol and goal):\n integration, is_new = get_one_or_create(\n _db, ExternalIntegration, protocol=protocol, goal=goal\n )\n if name:\n integration.name = name\n return integration\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n\n # Find or create the integration\n protocol = None\n id = args.id\n name = args.name\n protocol = args.protocol\n goal = args.goal\n integration = self._integration(_db, id, name, protocol, goal)\n self.apply_settings(args.setting, integration)\n site_configuration_has_changed(_db)\n _db.commit()\n output.write(\"Configuration settings stored.\\n\")\n output.write(\"\\n\".join(integration.explain()))\n output.write(\"\\n\")\n\n\nclass ShowLanesScript(Script):\n \"\"\"Show information about the lanes on a server.\"\"\"\n\n name = \"List the lanes on this server.\"\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--id\",\n help=\"Only display information for the lane with the given ID\",\n )\n return parser\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n if args.id:\n id = args.id\n lane = get_one(_db, Lane, id=id)\n if lane:\n lanes = [lane]\n else:\n output.write(\"Could not locate lane with id: %s\" % id)\n lanes = []\n else:\n lanes = _db.query(Lane).order_by(Lane.id).all()\n if not lanes:\n output.write(\"No lanes found.\\n\")\n for lane in lanes:\n output.write(\"\\n\".join(lane.explain()))\n output.write(\"\\n\\n\")\n\n\nclass ConfigureLaneScript(ConfigurationSettingScript):\n \"\"\"Create a lane or change its settings.\"\"\"\n\n name = \"Change a lane's settings\"\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None):\n parser = cls.arg_parser(_db)\n return parser.parse_known_args(cmd_args)[0]\n\n @classmethod\n def arg_parser(cls, _db):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--id\",\n help=\"ID of the lane, if editing an existing lane.\",\n )\n parser.add_argument(\n \"--library-short-name\",\n help=\"Short name of the library for this lane. Possible values: %s\"\n % cls._library_names(_db),\n )\n parser.add_argument(\n \"--parent-id\",\n help=\"The ID of this lane's parent lane\",\n )\n parser.add_argument(\n \"--priority\",\n help=\"The lane's priority\",\n )\n parser.add_argument(\n \"--display-name\",\n help=\"The lane name that will be displayed to patrons.\",\n )\n return parser\n\n @classmethod\n def _library_names(self, _db):\n \"\"\"Return a string that lists known library names.\"\"\"\n library_names = [\n x.short_name for x in _db.query(Library).order_by(Library.short_name)\n ]\n if library_names:\n return '\"' + '\", \"'.join(library_names) + '\"'\n return \"\"\n\n def do_run(self, _db=None, cmd_args=None, output=sys.stdout):\n _db = _db or self._db\n args = self.parse_command_line(_db, cmd_args=cmd_args)\n\n # Find or create the lane\n id = args.id\n lane = get_one(_db, Lane, id=id)\n if not lane:\n if args.library_short_name:\n library = get_one(_db, Library, short_name=args.library_short_name)\n if not library:\n raise ValueError('No such library: \"%s\".' 
% args.library_short_name)\n lane, is_new = create(_db, Lane, library=library)\n else:\n raise ValueError(\"Library short name is required to create a new lane.\")\n\n if args.parent_id:\n lane.parent_id = args.parent_id\n if args.priority:\n lane.priority = args.priority\n if args.display_name:\n lane.display_name = args.display_name\n site_configuration_has_changed(_db)\n _db.commit()\n output.write(\"Lane settings stored.\\n\")\n output.write(\"\\n\".join(lane.explain()))\n output.write(\"\\n\")\n\n\nclass AddClassificationScript(IdentifierInputScript):\n name = \"Add a classification to an identifier\"\n\n @classmethod\n def arg_parser(cls):\n parser = IdentifierInputScript.arg_parser()\n parser.add_argument(\n \"--subject-type\",\n help=\"The type of the subject to add to each identifier.\",\n required=True,\n )\n parser.add_argument(\n \"--subject-identifier\",\n help=\"The identifier of the subject to add to each identifier.\",\n )\n parser.add_argument(\n \"--subject-name\", help=\"The name of the subject to add to each identifier.\"\n )\n parser.add_argument(\n \"--data-source\",\n help=\"The data source to use when classifying.\",\n default=DataSource.MANUAL,\n )\n parser.add_argument(\n \"--weight\",\n help=\"The weight to use when classifying.\",\n type=int,\n default=1000,\n )\n parser.add_argument(\n \"--create-subject\",\n help=\"Add the subject to the database if it doesn't already exist\",\n action=\"store_const\",\n const=True,\n )\n return parser\n\n def __init__(self, _db=None, cmd_args=None, stdin=sys.stdin):\n super(AddClassificationScript, self).__init__(_db=_db)\n args = self.parse_command_line(self._db, cmd_args=cmd_args, stdin=stdin)\n self.identifier_type = args.identifier_type\n self.identifiers = args.identifiers\n subject_type = args.subject_type\n subject_identifier = args.subject_identifier\n subject_name = args.subject_name\n if not subject_name and not subject_identifier:\n raise ValueError(\n \"Either subject-name or subject-identifier must be provided.\"\n )\n self.data_source = DataSource.lookup(self._db, args.data_source)\n self.weight = args.weight\n self.subject, ignore = Subject.lookup(\n self._db,\n subject_type,\n subject_identifier,\n subject_name,\n autocreate=args.create_subject,\n )\n\n def do_run(self):\n policy = PresentationCalculationPolicy(\n choose_edition=False,\n set_edition_metadata=False,\n classify=True,\n choose_summary=False,\n calculate_quality=False,\n choose_cover=False,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True,\n verbose=True,\n )\n if self.subject:\n for identifier in self.identifiers:\n identifier.classify(\n self.data_source,\n self.subject.type,\n self.subject.identifier,\n self.subject.name,\n self.weight,\n )\n work = identifier.work\n if work:\n work.calculate_presentation(policy=policy)\n else:\n self.log.warning(\"Could not locate subject, doing nothing.\")\n\n\nclass WorkProcessingScript(IdentifierInputScript):\n\n name = \"Work processing script\"\n\n def __init__(\n self, force=False, batch_size=10, _db=None, cmd_args=None, stdin=sys.stdin\n ):\n super(WorkProcessingScript, self).__init__(_db=_db)\n\n args = self.parse_command_line(self._db, cmd_args=cmd_args, stdin=stdin)\n self.identifier_type = args.identifier_type\n self.data_source = args.identifier_data_source\n\n self.identifiers = self.parse_identifier_list(\n self._db, self.identifier_type, self.data_source, args.identifier_strings\n )\n\n self.batch_size = batch_size\n self.query = self.make_query(\n 
self._db,\n self.identifier_type,\n self.identifiers,\n self.data_source,\n log=self.log,\n )\n self.force = force\n\n @classmethod\n def make_query(cls, _db, identifier_type, identifiers, data_source, log=None):\n query = _db.query(Work)\n if identifiers or identifier_type:\n query = query.join(Work.license_pools).join(LicensePool.identifier)\n\n if identifiers:\n if log:\n log.info(\"Restricted to %d specific identifiers.\" % len(identifiers))\n query = query.filter(\n LicensePool.identifier_id.in_([x.id for x in identifiers])\n )\n elif data_source:\n if log:\n log.info('Restricted to identifiers from DataSource \"%s\".', data_source)\n source = DataSource.lookup(_db, data_source)\n query = query.filter(LicensePool.data_source == source)\n\n if identifier_type:\n if log:\n log.info('Restricted to identifier type \"%s\".' % identifier_type)\n query = query.filter(Identifier.type == identifier_type)\n\n if log:\n log.info(\"Processing %d works.\", query.count())\n return query.order_by(Work.id)\n\n def do_run(self):\n works = True\n offset = 0\n while works:\n works = self.query.offset(offset).limit(self.batch_size).all()\n for work in works:\n self.process_work(work)\n offset += self.batch_size\n self._db.commit()\n self._db.commit()\n\n def process_work(self, work):\n raise NotImplementedError()\n\n\nclass WorkConsolidationScript(WorkProcessingScript):\n \"\"\"Given an Identifier, make sure all the LicensePools for that\n Identifier are in Works that follow these rules:\n\n a) For a given permanent work ID, there may be at most one Work\n containing open-access LicensePools.\n\n b) Each non-open-access LicensePool has its own individual Work.\n \"\"\"\n\n name = \"Work consolidation script\"\n\n def make_query(self, _db, identifier_type, identifiers, data_source, log=None):\n # We actually process LicensePools, not Works.\n qu = _db.query(LicensePool).join(LicensePool.identifier)\n if identifier_type:\n qu = qu.filter(Identifier.type == identifier_type)\n if identifiers:\n qu = qu.filter(\n Identifier.identifier.in_([x.identifier for x in identifiers])\n )\n return qu\n\n def process_work(self, work):\n # We call it 'work' for signature compatibility with the superclass,\n # but it's actually a LicensePool.\n licensepool = work\n licensepool.calculate_work()\n\n def do_run(self):\n super(WorkConsolidationScript, self).do_run()\n qu = (\n self._db.query(Work)\n .outerjoin(Work.license_pools)\n .filter(LicensePool.id == None)\n )\n self.log.info(\"Deleting %d Works that have no LicensePools.\" % qu.count())\n for i in qu:\n self._db.delete(i)\n self._db.commit()\n\n\nclass WorkPresentationScript(TimestampScript, WorkProcessingScript):\n \"\"\"Calculate the presentation for Work objects.\"\"\"\n\n name = \"Recalculate the presentation for works that need it.\"\n\n # Do a complete recalculation of the presentation.\n policy = PresentationCalculationPolicy()\n\n def process_work(self, work):\n work.calculate_presentation(policy=self.policy)\n\n\nclass WorkClassificationScript(WorkPresentationScript):\n \"\"\"Recalculate the classification--and nothing else--for Work objects.\"\"\"\n\n name = \"Recalculate the classification for works that need it.\" \"\"\n\n policy = PresentationCalculationPolicy(\n choose_edition=False,\n set_edition_metadata=False,\n classify=True,\n choose_summary=False,\n calculate_quality=False,\n choose_cover=False,\n regenerate_opds_entries=False,\n regenerate_marc_record=False,\n update_search_index=False,\n )\n\n\nclass 
ReclassifyWorksForUncheckedSubjectsScript(WorkClassificationScript):\n \"\"\"Reclassify all Works whose current classifications appear to\n depend on Subjects in the 'unchecked' state.\n\n This generally means that some migration script reset those\n Subjects because the rules for processing them changed.\n \"\"\"\n\n name = \"Reclassify works that use unchecked subjects.\" \"\"\n\n policy = WorkClassificationScript.policy\n\n batch_size = 100\n\n def __init__(self, _db=None):\n if _db:\n self._session = _db\n self.query = Work.for_unchecked_subjects(self._db)\n\n\nclass WorkOPDSScript(WorkPresentationScript):\n \"\"\"Recalculate the OPDS entries, MARC record, and search index entries\n for Work objects.\n\n This is intended to verify that a problem has already been resolved and just\n needs to be propagated to these three 'caches'.\n \"\"\"\n\n name = \"Recalculate OPDS entries, MARC record, and search index entries for works that need it.\"\n\n policy = PresentationCalculationPolicy(\n choose_edition=False,\n set_edition_metadata=False,\n classify=True,\n choose_summary=False,\n calculate_quality=False,\n choose_cover=False,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True,\n )\n\n\nclass CustomListManagementScript(Script):\n \"\"\"Maintain a CustomList whose membership is determined by a\n MembershipManager.\n \"\"\"\n\n def __init__(\n self,\n manager_class,\n data_source_name,\n list_identifier,\n list_name,\n primary_language,\n description,\n **manager_kwargs,\n ):\n data_source = DataSource.lookup(self._db, data_source_name)\n self.custom_list, is_new = get_one_or_create(\n self._db,\n CustomList,\n data_source_id=data_source.id,\n foreign_identifier=list_identifier,\n )\n self.custom_list.primary_language = primary_language\n self.custom_list.description = description\n self.membership_manager = manager_class(self.custom_list, **manager_kwargs)\n\n def run(self):\n self.membership_manager.update()\n self._db.commit()\n\n\nclass CollectionType(Enum):\n OPEN_ACCESS = \"OPEN_ACCESS\"\n PROTECTED_ACCESS = \"PROTECTED_ACCESS\"\n LCP = \"LCP\"\n\n def __str__(self):\n return self.name\n\n\nclass CollectionInputScript(Script):\n \"\"\"A script that takes collection names as command line inputs.\"\"\"\n\n @classmethod\n def parse_command_line(cls, _db=None, cmd_args=None, *args, **kwargs):\n parser = cls.arg_parser()\n parsed = parser.parse_args(cmd_args)\n return cls.look_up_collections(_db, parsed, *args, **kwargs)\n\n @classmethod\n def look_up_collections(cls, _db, parsed, *args, **kwargs):\n \"\"\"Turn collection names as specified on the command line into\n real database Collection objects.\n \"\"\"\n parsed.collections = []\n for name in parsed.collection_names:\n collection = get_one(_db, Collection, name=name)\n if not collection:\n raise ValueError(\"Unknown collection: %s\" % name)\n parsed.collections.append(collection)\n return parsed\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--collection\",\n help=\"Collection to use\",\n dest=\"collection_names\",\n metavar=\"NAME\",\n action=\"append\",\n default=[],\n )\n return parser\n\n\nclass CollectionArgumentsScript(CollectionInputScript):\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"collection_names\",\n help=\"One or more collection names.\",\n metavar=\"COLLECTION\",\n nargs=\"*\",\n )\n return parser\n\n\nclass RunCollectionMonitorScript(RunMultipleMonitorsScript, 
CollectionArgumentsScript):\n \"\"\"Run a CollectionMonitor on every Collection that comes through a\n certain protocol.\n \"\"\"\n\n def __init__(self, monitor_class, _db=None, cmd_args=None, **kwargs):\n \"\"\"Constructor.\n\n :param monitor_class: A class object that derives from\n CollectionMonitor.\n :type monitor_class: CollectionMonitor\n\n :param cmd_args: Optional command line arguments. These will be\n passed on to the command line parser.\n :type cmd_args: Optional[List[str]]\n\n :param kwargs: Keyword arguments to pass into the `monitor_class`\n constructor each time it's called.\n\n \"\"\"\n super(RunCollectionMonitorScript, self).__init__(_db, **kwargs)\n self.monitor_class = monitor_class\n self.name = self.monitor_class.SERVICE_NAME\n parsed = vars(self.parse_command_line(self._db, cmd_args=cmd_args))\n parsed.pop(\"collection_names\", None)\n self.collections = parsed.pop(\"collections\", None)\n self.kwargs.update(parsed)\n\n def monitors(self, **kwargs):\n return self.monitor_class.all(self._db, collections=self.collections, **kwargs)\n\n\nclass OPDSImportScript(CollectionInputScript):\n \"\"\"Import all books from the OPDS feed associated with a collection.\"\"\"\n\n name = \"Import all books from the OPDS feed associated with a collection.\"\n\n IMPORTER_CLASS = OPDSImporter\n MONITOR_CLASS = OPDSImportMonitor\n PROTOCOL = ExternalIntegration.OPDS_IMPORT\n\n def __init__(\n self,\n _db=None,\n importer_class=None,\n monitor_class=None,\n protocol=None,\n *args,\n **kwargs,\n ):\n super(OPDSImportScript, self).__init__(_db, *args, **kwargs)\n self.importer_class = importer_class or self.IMPORTER_CLASS\n self.monitor_class = monitor_class or self.MONITOR_CLASS\n self.protocol = protocol or self.PROTOCOL\n self.importer_kwargs = kwargs\n\n @classmethod\n def arg_parser(cls):\n parser = CollectionInputScript.arg_parser()\n parser.add_argument(\n \"--force\",\n help=\"Import the feed from scratch, even if it seems like it was already imported.\",\n dest=\"force\",\n action=\"store_true\",\n )\n return parser\n\n def do_run(self, cmd_args=None):\n parsed = self.parse_command_line(self._db, cmd_args=cmd_args)\n collections = parsed.collections or Collection.by_protocol(\n self._db, self.protocol\n )\n for collection in collections:\n self.run_monitor(collection, force=parsed.force)\n\n def run_monitor(self, collection, force=None):\n monitor = self.monitor_class(\n self._db,\n collection,\n import_class=self.importer_class,\n force_reimport=force,\n **self.importer_kwargs,\n )\n monitor.run()\n\n\nclass MirrorResourcesScript(CollectionInputScript):\n \"\"\"Make sure that all mirrorable resources in a collection have\n in fact been mirrored.\n \"\"\"\n\n # This object contains the actual logic of mirroring.\n MIRROR_UTILITY = MetaToModelUtility()\n\n @classmethod\n def arg_parser(cls):\n parser = super().arg_parser()\n parser.add_argument(\n \"--collection-type\",\n help=\"Collection type. 
Valid values are: OPEN_ACCESS (default), PROTECTED_ACCESS.\",\n type=CollectionType,\n choices=list(CollectionType),\n default=CollectionType.OPEN_ACCESS,\n )\n return parser\n\n def do_run(self, cmd_args=None):\n parsed = self.parse_command_line(self._db, cmd_args=cmd_args)\n collections = parsed.collections\n collection_type = parsed.collection_type\n if not collections:\n # Assume they mean all collections.\n collections = self._db.query(Collection).all()\n\n # But only process collections that have an associated MirrorUploader.\n for collection, policy in self.collections_with_uploader(\n collections, collection_type\n ):\n self.process_collection(collection, policy)\n\n def collections_with_uploader(\n self, collections, collection_type=CollectionType.OPEN_ACCESS\n ):\n \"\"\"Filter out collections that have no MirrorUploader.\n\n :yield: 2-tuples (Collection, ReplacementPolicy). The\n ReplacementPolicy is the appropriate one for this script\n to use for that Collection.\n \"\"\"\n for collection in collections:\n covers = MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS\n )\n books_mirror_type = (\n ExternalIntegrationLink.OPEN_ACCESS_BOOKS\n if collection_type == CollectionType.OPEN_ACCESS\n else ExternalIntegrationLink.PROTECTED_ACCESS_BOOKS\n )\n books = MirrorUploader.for_collection(collection, books_mirror_type)\n if covers or books:\n mirrors = {\n ExternalIntegrationLink.COVERS: covers,\n books_mirror_type: books,\n }\n policy = self.replacement_policy(mirrors)\n yield collection, policy\n else:\n self.log.info(\"Skipping %r as it has no MirrorUploader.\", collection)\n\n @classmethod\n def replacement_policy(cls, mirrors):\n \"\"\"Create a ReplacementPolicy for this script that uses the\n given mirrors.\n \"\"\"\n return ReplacementPolicy(\n mirrors=mirrors,\n link_content=True,\n even_if_not_apparently_updated=True,\n http_get=Representation.cautious_http_get,\n )\n\n def process_collection(self, collection, policy, unmirrored=None):\n \"\"\"Make sure every mirrorable resource in this collection has\n been mirrored.\n\n :param unmirrored: A replacement for Hyperlink.unmirrored,\n for use in tests.\n\n \"\"\"\n unmirrored = unmirrored or Hyperlink.unmirrored\n for link in unmirrored(collection):\n self.process_item(collection, link, policy)\n self._db.commit()\n\n @classmethod\n def derive_rights_status(cls, license_pool, resource):\n \"\"\"Make a best guess about the rights status for the given\n resource.\n\n This relies on the information having been available at one point,\n but having been stored in the database at a slight remove.\n \"\"\"\n rights_status = None\n if not license_pool:\n return None\n if resource:\n lpdm = resource.as_delivery_mechanism_for(license_pool)\n # When this Resource was associated with this LicensePool,\n # the rights information was recorded in its\n # LicensePoolDeliveryMechanism.\n if lpdm:\n rights_status = lpdm.rights_status\n if not rights_status:\n # We could not find a LicensePoolDeliveryMechanism for\n # this particular resource, but if every\n # LicensePoolDeliveryMechanism has the same rights\n # status, we can assume it's that one.\n statuses = list(\n set([x.rights_status for x in license_pool.delivery_mechanisms])\n )\n if len(statuses) == 1:\n [rights_status] = statuses\n if rights_status:\n rights_status = rights_status.uri\n return rights_status\n\n def process_item(self, collection, link_obj, policy):\n \"\"\"Determine the URL that needs to be mirrored and (for books)\n the rationale that lets us 
mirror that URL. Then mirror it.\n \"\"\"\n identifier = link_obj.identifier\n license_pool, ignore = LicensePool.for_foreign_id(\n self._db,\n collection.data_source,\n identifier.type,\n identifier.identifier,\n collection=collection,\n autocreate=False,\n )\n if not license_pool:\n # This shouldn't happen.\n self.log.warning(\n \"Could not find LicensePool for %r, skipping it rather than mirroring something we shouldn't.\"\n )\n return\n resource = link_obj.resource\n\n if link_obj.rel == Hyperlink.OPEN_ACCESS_DOWNLOAD:\n rights_status = self.derive_rights_status(license_pool, resource)\n if not rights_status:\n self.log.warning(\n \"Could not unambiguously determine rights status for %r, skipping.\",\n link_obj,\n )\n return\n else:\n # For resources like book covers, the rights status is\n # irrelevant -- we rely on fair use.\n rights_status = None\n\n # Mock up a LinkData that MetaToModelUtility can use to\n # mirror this link (or decide not to mirror it).\n linkdata = LinkData(\n rel=link_obj.rel, href=resource.url, rights_uri=rights_status\n )\n\n # Mirror the link (or not).\n self.MIRROR_UTILITY.mirror_link(\n model_object=license_pool,\n data_source=collection.data_source,\n link=linkdata,\n link_obj=link_obj,\n policy=policy,\n )\n\n\nclass DatabaseMigrationScript(Script):\n \"\"\"Runs new migrations.\n\n This script needs to execute without ever loading an ORM object,\n because the database might be in a state that's not compatible\n with the current ORM version.\n\n This is not a TimestampScript because it keeps separate Timestamps\n for the Python and the SQL migrations, and because Timestamps\n are ORM objects, which this script can't touch.\n \"\"\"\n\n SERVICE_NAME = \"Database Migration\"\n PY_TIMESTAMP_SERVICE_NAME = SERVICE_NAME + \" - Python\"\n\n MIGRATION_WITH_COUNTER = re.compile(r\"\\d{8}-(\\d+)-(.)+\\.(py|sql)\")\n\n # There are some SQL commands that can't be run inside a transaction.\n TRANSACTIONLESS_COMMANDS = [\"alter type\"]\n\n TRANSACTION_PER_STATEMENT = \"SIMPLYE_MIGRATION_TRANSACTION_PER_STATEMENT\"\n DO_NOT_EXECUTE = \"SIMPLYE_MIGRATION_DO_NOT_EXECUTE\"\n\n class TimestampInfo(object):\n \"\"\"Act like a ORM Timestamp object, but with no database connection.\"\"\"\n\n @classmethod\n def find(cls, script, service):\n \"\"\"Find or create an existing timestamp representing the last\n migration script that was run.\n\n :return: A TimestampInfo object or None\n \"\"\"\n\n # We need to be aware of schema changes to the timestamps\n # table itself, since this is a necessary prerequisite to\n # running the migration scripts that will make those\n # schema changes.\n #\n # 2.3.0 - 'timestamp' field renamed to 'finish'\n exception = None\n for sql in (\n \"SELECT finish, counter FROM timestamps WHERE service=:service LIMIT 1;\",\n \"SELECT timestamp, counter FROM timestamps WHERE service=:service LIMIT 1;\",\n ):\n _db = script._db\n try:\n results = list(_db.execute(text(sql), dict(service=service)))\n if exception:\n logging.error(\n \"Yes, everything should be fine -- I was able to find a timestamp in the new schema.\"\n )\n exception = None\n _db.commit()\n break\n except ProgrammingError as e:\n # The database connection is now tainted; we must\n # create a new one.\n logging.error(\n \"Got a database error obtaining the timestamp for %s. 
Hopefully the timestamps table itself must be migrated and this is all according to plan.\",\n service,\n exc_info=e,\n )\n _db.close()\n script._session = production_session(initialize_data=False)\n exception = e\n\n # If _none_ of those worked, something is wrong on a\n # deeper level.\n if exception:\n raise exception\n\n if not results:\n # Make sure there's a row for this service in the timestamps\n # table so that we can update it later.\n sql = \"INSERT INTO timestamps (service) values (:service);\"\n _db.execute(text(sql), dict(service=service))\n return None\n\n [(date, counter)] = results\n if not date:\n # This is an empty Timestamp created during a previous\n # TimestampInfo.find attempt. It shouldn't be returned or\n # worked with in any way.\n return None\n return cls(service, date, counter)\n\n def __init__(self, service, finish, counter=None):\n self.service = service\n if isinstance(finish, str):\n finish = Script.parse_time(finish)\n else:\n finish = to_utc(finish)\n self.finish = finish\n if isinstance(counter, str):\n counter = int(counter)\n self.counter = counter\n\n def save(self, _db):\n self.update(_db, self.finish, self.counter)\n\n def update(self, _db, finish, counter, migration_name=None):\n \"\"\"Saves a TimestampInfo object to the database.\"\"\"\n # Reset values locally.\n self.finish = to_utc(finish)\n self.counter = counter\n\n sql = (\n \"UPDATE timestamps SET start=(:finish at time zone 'utc'), finish=(:finish at time zone 'utc'), counter=:counter\"\n \" where service=:service\"\n )\n values = dict(\n finish=self.finish,\n counter=self.counter,\n service=self.service,\n )\n _db.execute(text(sql), values)\n _db.flush()\n\n message = \"%s Timestamp stamped at %s\" % (\n self.service,\n self.finish.strftime(\"%Y-%m-%d\"),\n )\n if migration_name:\n message += \" for %s\" % migration_name\n print(message)\n\n @classmethod\n def arg_parser(cls):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--last-run-date\",\n help=(\n \"A date string representing the last migration file \"\n \"run against your database, formatted as YYYY-MM-DD\"\n ),\n )\n parser.add_argument(\n \"-c\",\n \"--last-run-counter\",\n type=int,\n help=(\n \"An optional digit representing the counter of the last \"\n \"migration run against your database. 
Only necessary if \"\n \"multiple migrations were created on the same date.\"\n ),\n )\n parser.add_argument(\n \"--python-only\",\n action=\"store_true\",\n help=(\n \"Only run python migrations since the given timestamp or the\"\n \"most recent python timestamp\"\n ),\n )\n return parser\n\n @classmethod\n def migratable_files(cls, filelist, extensions):\n \"\"\"Filter a list of files for migratable file extensions\"\"\"\n extensions = tuple(extensions)\n migratable = [f for f in filelist if f.endswith(extensions)]\n return cls.sort_migrations(migratable)\n\n @classmethod\n def sort_migrations(cls, migrations):\n \"\"\"All Python migrations sort after all SQL migrations, since a Python\n migration requires an up-to-date database schema.\n\n Migrations with a counter digit sort after migrations without\n one.\n \"\"\"\n\n def compare_migrations(first):\n \"\"\"Compares migrations according to ideal sorting order.\n\n - All Python migrations run after all SQL migrations.\n - Migrations are first ordered by timestamp (asc).\n - If two migrations have the same timestamp, any migrations\n without counters come before migrations with counters.\n - If two migrations with the same timestamp, have counters,\n migrations are sorted by counter (asc).\n \"\"\"\n key = []\n if first.endswith(\".py\"):\n key.append(1)\n else:\n key.append(-1)\n\n try:\n key.append(int(first[:8]))\n except ValueError:\n key.append(-1)\n\n # Both migrations have the same timestamp, so compare using\n # their counters (default to 0 if no counter is included)\n first_count = cls.MIGRATION_WITH_COUNTER.search(first)\n if first_count is not None:\n first_count = int(first_count.groups()[0])\n else:\n first_count = 0\n key.append(first_count)\n\n return key\n\n return sorted(migrations, key=compare_migrations)\n\n @property\n def directories_by_priority(self):\n \"\"\"Returns a list containing the migration directory path for core\n and its container server, organized in priority order (core first)\n \"\"\"\n current_dir = os.path.split(os.path.abspath(__file__))[0]\n core = os.path.join(current_dir, \"migration\")\n server = os.path.join(os.path.split(current_dir)[0], \"migration\")\n\n # Core is listed first, since core makes changes to the core database\n # schema. 
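# --- Illustrative aside; not part of directories_by_priority. Given these
# hypothetical filenames, sort_migrations() puts every SQL migration before any
# Python migration, then orders by date and then by counter:
example_order = DatabaseMigrationScript.sort_migrations(
    ["20171231-c.py", "20180102-d.sql", "20180101-2-b.sql", "20180101-a.sql"]
)
# -> ["20180101-a.sql", "20180101-2-b.sql", "20180102-d.sql", "20171231-c.py"]
# --- End of illustrative aside. ---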
Server migrations generally fix bugs or otherwise update\n # the data itself.\n return [core, server]\n\n @property\n def name(self):\n \"\"\"Returns the appropriate target Timestamp service name for the\n timestamp, depending on the script parameters.\n \"\"\"\n if self.python_only:\n return self.PY_TIMESTAMP_SERVICE_NAME\n return self.SERVICE_NAME\n\n @property\n def overall_timestamp(self):\n \"\"\"Returns a TimestampInfo object corresponding to the the overall or\n general \"Database Migration\" service.\n\n If there is no Timestamp or the Timestamp doesn't have a timestamp\n attribute, it returns None.\n \"\"\"\n return self.TimestampInfo.find(self, self.SERVICE_NAME)\n\n @property\n def python_timestamp(self):\n \"\"\"Returns a TimestampInfo object corresponding to the python migration-\n specific \"Database Migration - Python\" Timestamp.\n\n If there is no Timestamp or the Timestamp hasn't been initialized with\n a timestamp attribute, it returns None.\n \"\"\"\n return self.TimestampInfo.find(self, self.PY_TIMESTAMP_SERVICE_NAME)\n\n def __init__(self, *args, **kwargs):\n super(DatabaseMigrationScript, self).__init__(*args, **kwargs)\n self.python_only = False\n\n def load_configuration(self):\n \"\"\"Load configuration without accessing the database.\"\"\"\n Configuration.load(None)\n\n def run(self, test_db=None, test=False, cmd_args=None):\n # Use or create a database session.\n if test_db:\n self._session = test_db\n else:\n # Create a special database session that doesn't initialize\n # the ORM. As long as we only execute SQL and don't try to use\n # any ORM objects, we'll be fine.\n url = Configuration.database_url()\n self._session = SessionManager.session(url, initialize_data=False)\n\n parsed = self.parse_command_line(cmd_args=cmd_args)\n if parsed.python_only:\n self.python_only = parsed.python_only\n\n timestamp = None\n last_run_date = parsed.last_run_date\n last_run_counter = parsed.last_run_counter\n if last_run_date:\n timestamp = self.TimestampInfo(self.name, last_run_date, last_run_counter)\n # Save the timestamp at this point. This will set back the clock\n # in the case that the input last_run_date/counter is before the\n # existing Timestamp.finish / Timestamp.counter.\n #\n # DatabaseMigrationScript.update_timestamps will no longer rewind\n # a Timestamp, so saving here is important.\n timestamp.save(self._db)\n\n if not timestamp:\n # No timestamp was given. Get the timestamp from the database.\n timestamp = self.TimestampInfo.find(self, self.name)\n\n if not timestamp or not self.overall_timestamp:\n # There's no timestamp in the database! Raise an error.\n print(\"\")\n print(\n \"NO TIMESTAMP FOUND. Either initialize your untouched database \"\n \"with the script `core/bin/initialize_database` OR run this \"\n \"script with a timestamp that indicates the last migration run \"\n \"against your existing-but-uninitialized database.\"\n )\n self.arg_parser().print_help()\n sys.exit(1)\n\n migrations, migrations_by_dir = self.fetch_migration_files()\n\n new_migrations = self.get_new_migrations(timestamp, migrations)\n if new_migrations:\n # Log the new migrations.\n print(\"%d new migrations found.\" % len(new_migrations))\n for migration in new_migrations:\n print(\" - %s\" % migration)\n self.run_migrations(new_migrations, migrations_by_dir, timestamp)\n self._db.commit()\n else:\n print(\"No new migrations found. 
Your database is up-to-date.\")\n\n def fetch_migration_files(self):\n \"\"\"Pulls migration files from the expected locations\n\n :return: a tuple with a list of migration filenames and a dictionary of\n those files separated by their absolute directory location.\n \"\"\"\n migrations = list()\n migrations_by_dir = defaultdict(list)\n\n extensions = [\".py\"]\n if not self.python_only:\n extensions.insert(0, \".sql\")\n\n for directory in self.directories_by_priority:\n # In the case of tests, the container server migration directory\n # may not exist.\n if os.path.isdir(directory):\n dir_migrations = self.migratable_files(\n os.listdir(directory), extensions\n )\n migrations += dir_migrations\n migrations_by_dir[directory] = dir_migrations\n\n return migrations, migrations_by_dir\n\n def get_new_migrations(self, timestamp, migrations):\n \"\"\"Return a list of migration filenames, representing migrations\n created since the timestamp\n \"\"\"\n last_run = timestamp.finish.strftime(\"%Y%m%d\")\n migrations = self.sort_migrations(migrations)\n new_migrations = [\n migration for migration in migrations if int(migration[:8]) >= int(last_run)\n ]\n\n # Multiple migrations run on the same day have an additional digit\n # after the date and a dash, eg:\n #\n # 20150826-1-change_target_age_from_int_to_range.sql\n #\n # When that migration is run, the number will be saved to the\n # 'counter' column of Timestamp, so we have to account for that.\n start_found = False\n later_found = False\n index = 0\n while not start_found and not later_found and index < len(new_migrations):\n start_found, later_found = self._is_matching_migration(\n new_migrations[index], timestamp\n )\n index += 1\n\n if later_found:\n index -= 1\n new_migrations = new_migrations[index:]\n return new_migrations\n\n def _is_matching_migration(self, migration_file, timestamp):\n \"\"\"Determine whether a given migration filename matches a given\n timestamp or is after it.\n \"\"\"\n is_match = False\n is_after_timestamp = False\n\n timestamp_str = timestamp.finish.strftime(\"%Y%m%d\")\n counter = timestamp.counter\n\n if migration_file[:8] >= timestamp_str:\n if migration_file[:8] > timestamp_str:\n is_after_timestamp = True\n elif counter:\n count = self.MIGRATION_WITH_COUNTER.search(migration_file)\n if count:\n migration_num = int(count.groups()[0])\n if migration_num == counter:\n is_match = True\n if migration_num > counter:\n is_after_timestamp = True\n else:\n is_match = True\n return is_match, is_after_timestamp\n\n def run_migrations(self, migrations, migrations_by_dir, timestamp):\n \"\"\"Run each migration, first by timestamp and then by directory\n priority.\n \"\"\"\n previous = None\n\n def raise_error(migration_path, message, code=1):\n print()\n print(\"ERROR: %s\" % message)\n print(\"%s must be migrated manually.\" % migration_path)\n print(\"=\" * 50)\n print(traceback.print_exc(file=sys.stdout))\n sys.exit(code)\n\n migrations = self.sort_migrations(migrations)\n for migration_file in migrations:\n for d in self.directories_by_priority:\n if migration_file in migrations_by_dir[d]:\n full_migration_path = os.path.join(d, migration_file)\n try:\n self._run_migration(full_migration_path, timestamp)\n self._db.commit()\n previous = migration_file\n except SystemExit as se:\n if se.code:\n raise_error(\n full_migration_path,\n \"Migration raised error code '%d'\" % se.code,\n code=se.code,\n )\n\n # Sometimes a migration isn't relevant and it\n # runs sys.exit() to carry on with things.\n # This shouldn't end the 
migration script, though.\n self.update_timestamps(migration_file)\n continue\n except Exception:\n raise_error(full_migration_path, \"Migration has been halted.\")\n else:\n print(\"All new migrations have been run.\")\n\n def _run_migration(self, migration_path, timestamp):\n \"\"\"Runs a single SQL or Python migration file\"\"\"\n\n migration_filename = os.path.split(migration_path)[1]\n ok_to_execute = True\n\n if migration_path.endswith(\".sql\"):\n with open(migration_path) as clause:\n sql = clause.read()\n\n transactionless = any(\n [c for c in self.TRANSACTIONLESS_COMMANDS if c in sql.lower()]\n )\n one_tx_per_statement = bool(\n self.TRANSACTION_PER_STATEMENT.lower() in sql.lower()\n )\n ok_to_execute = not bool(self.DO_NOT_EXECUTE.lower() in sql.lower())\n\n if ok_to_execute:\n if transactionless:\n new_session = self._run_migration_without_transaction(sql)\n elif one_tx_per_statement:\n commands = self._extract_statements_from_sql_file(\n migration_path\n )\n for command in commands:\n self._db.execute(f\"BEGIN;{command}COMMIT;\")\n else:\n # By wrapping the action in a transaction, we can avoid\n # rolling over errors and losing data in files\n # with multiple interrelated SQL actions.\n sql = \"BEGIN;\\n%s\\nCOMMIT;\" % sql\n self._db.execute(sql)\n\n if migration_path.endswith(\".py\"):\n module_name = migration_filename[:-3]\n subprocess.call(migration_path)\n\n # Update timestamp for the migration.\n if ok_to_execute:\n self.update_timestamps(migration_filename)\n\n def _extract_statements_from_sql_file(self, filepath):\n \"\"\"\n From an SQL file, return a python list of the individual statements.\n\n Removes comment lines and extraneous whitespace at the start / end of\n statements, but that's about it. Use carefully.\n \"\"\"\n with open(filepath) as f:\n sql_file_lines = f.readlines()\n\n sql_commands = []\n current_command = \"\"\n\n for line in sql_file_lines:\n if line.strip().startswith(\"--\"):\n continue\n else:\n if current_command == \"\":\n current_command = line.strip()\n else:\n current_command = current_command + \" \" + line.strip()\n\n if current_command.endswith(\";\"):\n sql_commands.append(current_command)\n current_command = \"\"\n\n return sql_commands\n\n def _run_migration_without_transaction(self, sql_statement):\n \"\"\"Runs a single SQL statement outside of a transaction.\"\"\"\n # Go back up to engine-level.\n connection = self._db.get_bind()\n\n # Close the Session so it benefits from the changes.\n self._session.close()\n\n # Get each individual SQL command from the migration text.\n #\n # In the case of 'ALTER TYPE' (at least), running commands\n # simultaneously raises psycopg2.InternalError ending with 'cannot be\n # executed from a function or multi-command string'\n sql_commands = [\n command.strip() + \";\"\n for command in sql_statement.split(\";\")\n if command.strip()\n ]\n\n # Run each command in the sql statement right up against the\n # database: no transactions, no guardrails.\n for command in sql_commands:\n connection.execution_options(isolation_level=\"AUTOCOMMIT\").execute(\n text(command)\n )\n\n # Update the script's Session to a new one that has the changed schema\n # and other important info.\n self._session = Session(connection)\n self.load_configuration()\n DataSource.well_known_sources(self._db)\n\n def update_timestamps(self, migration_file):\n \"\"\"Updates this service's timestamp to match a given migration\"\"\"\n last_run_date = self.parse_time(migration_file[0:8])\n counter = None\n\n # When multiple migration 
files are created on the same date, an\n # additional number is added. This number is held in the 'counter'\n # column of Timestamp.\n # (It's not ideal, but it avoids creating a new database table.)\n match = self.MIGRATION_WITH_COUNTER.search(migration_file)\n if match:\n counter = int(match.groups()[0])\n\n if migration_file.endswith(\"py\") and self.python_timestamp:\n # This is a python migration. Update the python timestamp.\n self.python_timestamp.update(\n self._db,\n finish=last_run_date,\n counter=counter,\n migration_name=migration_file,\n )\n\n # Nothing to update\n if self.overall_timestamp is None:\n return\n\n if self.overall_timestamp.finish is not None:\n finish_timestamp = self.overall_timestamp.finish\n # The last script that ran had a later timestamp than the current script\n if finish_timestamp > last_run_date:\n return\n\n # The dates of the scripts are the same so compare the counters\n if finish_timestamp == last_run_date:\n # The current script has no counter, so it's the same script that ran\n # or an earlier script that ran\n if counter is None:\n return\n # The previous script has a higher counter\n if (\n self.overall_timestamp.counter is not None\n and self.overall_timestamp.counter > counter\n ):\n return\n\n self.overall_timestamp.update(\n self._db,\n finish=last_run_date,\n counter=counter,\n migration_name=migration_file,\n )\n\n\nclass DatabaseMigrationInitializationScript(DatabaseMigrationScript):\n\n \"\"\"Creates a timestamp to kick off the regular use of\n DatabaseMigrationScript to manage migrations.\n \"\"\"\n\n @classmethod\n def arg_parser(cls):\n parser = super(DatabaseMigrationInitializationScript, cls).arg_parser()\n parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n help=\"Force reset the initialization, ignoring any existing timestamps.\",\n )\n return parser\n\n def run(self, cmd_args=None):\n parsed = self.parse_command_line(cmd_args=cmd_args)\n last_run_date = parsed.last_run_date\n last_run_counter = parsed.last_run_counter\n\n if last_run_counter and not last_run_date:\n raise ValueError(\n \"Timestamp.counter must be reset alongside Timestamp.finish\"\n )\n\n existing_timestamp = get_one(self._db, Timestamp, service=self.name)\n if existing_timestamp and existing_timestamp.finish:\n # A Timestamp exists and it has a .finish, so it wasn't created\n # by TimestampInfo.find.\n if parsed.force:\n self.log.warning(\n \"Overwriting existing %s timestamp: %r\",\n self.name,\n existing_timestamp,\n )\n else:\n raise RuntimeError(\n \"%s timestamp already exists: %r. 
Use --force to update.\"\n % (self.name, existing_timestamp)\n )\n\n # Initialize the required timestamps with the Space Jam release date.\n init_timestamp = self.parse_time(\"1996-11-15\")\n overall_timestamp = existing_timestamp or Timestamp.stamp(\n _db=self._db,\n service=self.SERVICE_NAME,\n service_type=Timestamp.SCRIPT_TYPE,\n finish=init_timestamp,\n )\n python_timestamp = Timestamp.stamp(\n _db=self._db,\n service=self.PY_TIMESTAMP_SERVICE_NAME,\n service_type=Timestamp.SCRIPT_TYPE,\n finish=init_timestamp,\n )\n\n if last_run_date:\n submitted_time = self.parse_time(last_run_date)\n for timestamp in (overall_timestamp, python_timestamp):\n timestamp.finish = submitted_time\n timestamp.counter = last_run_counter\n self._db.commit()\n return\n\n migrations = self.sort_migrations(self.fetch_migration_files()[0])\n py_migrations = [m for m in migrations if m.endswith(\".py\")]\n sql_migrations = [m for m in migrations if m.endswith(\".sql\")]\n\n most_recent_sql_migration = sql_migrations[-1]\n most_recent_python_migration = py_migrations[-1]\n\n self.update_timestamps(most_recent_sql_migration)\n self.update_timestamps(most_recent_python_migration)\n self._db.commit()\n\n\nclass CheckContributorNamesInDB(IdentifierInputScript):\n \"\"\"Checks that contributor sort_names are display_names in\n \"last name, comma, other names\" format.\n\n Read contributors edition by edition, so that can, if necessary,\n restrict db query by passed-in identifiers, and so can find associated\n license pools to register author complaints to.\n\n NOTE: There's also CheckContributorNamesOnWeb in metadata,\n it's a child of this script. Use it to check our knowledge against\n viaf, with the newer better sort_name selection and formatting.\n\n TODO: make sure don't start at beginning again when interrupt while batch job is running.\n \"\"\"\n\n COMPLAINT_SOURCE = \"CheckContributorNamesInDB\"\n COMPLAINT_TYPE = \"http://librarysimplified.org/terms/problem/wrong-author\"\n\n def __init__(self, _db=None, cmd_args=None, stdin=sys.stdin):\n super(CheckContributorNamesInDB, self).__init__(_db=_db)\n\n self.parsed_args = self.parse_command_line(\n _db=self._db, cmd_args=cmd_args, stdin=stdin\n )\n\n @classmethod\n def make_query(self, _db, identifier_type, identifiers, log=None):\n query = _db.query(Edition)\n if identifiers or identifier_type:\n query = query.join(Edition.primary_identifier)\n\n # we only want to look at editions with license pools, in case we want to make a Complaint\n query = query.join(Edition.is_presentation_for)\n\n if identifiers:\n if log:\n log.info(\"Restricted to %d specific identifiers.\" % len(identifiers))\n query = query.filter(\n Edition.primary_identifier_id.in_([x.id for x in identifiers])\n )\n if identifier_type:\n if log:\n log.info('Restricted to identifier type \"%s\".' 
% identifier_type)\n query = query.filter(Identifier.type == identifier_type)\n\n if log:\n log.info(\"Processing %d editions.\", query.count())\n\n return query.order_by(Edition.id)\n\n def do_run(self, batch_size=10):\n\n self.query = self.make_query(\n self._db,\n self.parsed_args.identifier_type,\n self.parsed_args.identifiers,\n self.log,\n )\n\n editions = True\n offset = 0\n output = \"ContributorID|\\tSortName|\\tDisplayName|\\tComputedSortName|\\tResolution|\\tComplaintSource\"\n print(output.encode(\"utf8\"))\n\n while editions:\n my_query = self.query.offset(offset).limit(batch_size)\n editions = my_query.all()\n\n for edition in editions:\n if edition.contributions:\n for contribution in edition.contributions:\n self.process_contribution_local(\n self._db, contribution, self.log\n )\n offset += batch_size\n\n self._db.commit()\n self._db.commit()\n\n def process_contribution_local(self, _db, contribution, log=None):\n if not contribution or not contribution.edition:\n return\n\n contributor = contribution.contributor\n\n identifier = contribution.edition.primary_identifier\n\n if contributor.sort_name and contributor.display_name:\n computed_sort_name_local_new = unicodedata.normalize(\n \"NFKD\", str(display_name_to_sort_name(contributor.display_name))\n )\n # Did HumanName parser produce a different result from the plain comma replacement?\n if (\n contributor.sort_name.strip().lower()\n != computed_sort_name_local_new.strip().lower()\n ):\n error_message_detail = (\n \"Contributor[id=%s].sort_name is oddly different from computed_sort_name, human intervention required.\"\n % contributor.id\n )\n\n # computed names don't match. by how much? if it's a matter of a comma or a misplaced\n # suffix, we can fix without asking for human intervention. if the names are very different,\n # there's a chance the sort and display names are different on purpose, such as when foreign names\n # are passed as translated into only one of the fields, or when the author has a popular pseudonym.\n # best ask a human.\n\n # if the relative lengths are off by more than a stray space or comma, ask a human\n # it probably means that a human metadata professional had added an explanation/expansion to the\n # sort_name, such as \"<NAME>\" --> \"<NAME>. (<NAME>\", and we'd rather not replace this data\n # with the \"<NAME>.\" that the auto-algorithm would generate.\n length_difference = len(contributor.sort_name.strip()) - len(\n computed_sort_name_local_new.strip()\n )\n if abs(length_difference) > 3:\n return self.process_local_mismatch(\n _db=_db,\n contribution=contribution,\n computed_sort_name=computed_sort_name_local_new,\n error_message_detail=error_message_detail,\n log=log,\n )\n\n match_ratio = contributor_name_match_ratio(\n contributor.sort_name,\n computed_sort_name_local_new,\n normalize_names=False,\n )\n\n if match_ratio < 40:\n # ask a human. 
this kind of score can happen when the sort_name is a transliteration of the display_name,\n # and is non-trivial to fix.\n self.process_local_mismatch(\n _db=_db,\n contribution=contribution,\n computed_sort_name=computed_sort_name_local_new,\n error_message_detail=error_message_detail,\n log=log,\n )\n else:\n # we can fix it!\n output = \"%s|\\t%s|\\t%s|\\t%s|\\tlocal_fix\" % (\n contributor.id,\n contributor.sort_name,\n contributor.display_name,\n computed_sort_name_local_new,\n )\n print(output.encode(\"utf8\"))\n self.set_contributor_sort_name(\n computed_sort_name_local_new, contribution\n )\n\n @classmethod\n def set_contributor_sort_name(cls, sort_name, contribution):\n \"\"\"Sets the contributor.sort_name and associated edition.author_name to the passed-in value.\"\"\"\n contribution.contributor.sort_name = sort_name\n\n # also change edition.sort_author, if the author was primary\n # Note: I considered using contribution.edition.author_contributors, but\n # found that it's not impossible to have a messy dataset that doesn't work on.\n # For our purpose here, the following logic is cleaner-acting:\n # If this author appears as Primary Author anywhere on the edition, then change edition.sort_author.\n edition_contributions = contribution.edition.contributions\n for edition_contribution in edition_contributions:\n if (edition_contribution.role == Contributor.PRIMARY_AUTHOR_ROLE) and (\n edition_contribution.contributor.display_name\n == contribution.contributor.display_name\n ):\n contribution.edition.sort_author = sort_name\n\n def process_local_mismatch(\n self, _db, contribution, computed_sort_name, error_message_detail, log=None\n ):\n \"\"\"\n Determines if a problem is to be investigated further or recorded as a Complaint,\n to be solved by a human. In this class, it's always a complaint. 
In the overridden\n method in the child class in metadata_wrangler code, we sometimes go do a web query.\n \"\"\"\n self.register_problem(\n source=self.COMPLAINT_SOURCE,\n contribution=contribution,\n computed_sort_name=computed_sort_name,\n error_message_detail=error_message_detail,\n log=log,\n )\n\n @classmethod\n def register_problem(\n cls, source, contribution, computed_sort_name, error_message_detail, log=None\n ):\n \"\"\"\n Make a Complaint in the database, so a human can take a look at this Contributor's name\n and resolve whatever the complex issue that got us here.\n \"\"\"\n success = True\n contributor = contribution.contributor\n\n pools = contribution.edition.is_presentation_for\n try:\n complaint, is_new = Complaint.register(\n pools[0], cls.COMPLAINT_TYPE, source, error_message_detail\n )\n output = \"%s|\\t%s|\\t%s|\\t%s|\\tcomplain|\\t%s\" % (\n contributor.id,\n contributor.sort_name,\n contributor.display_name,\n computed_sort_name,\n source,\n )\n print(output.encode(\"utf8\"))\n except ValueError as e:\n # log and move on, don't stop run\n log.error(\"Error registering complaint: %r\", contributor, exc_info=e)\n success = False\n\n return success\n\n\nclass Explain(IdentifierInputScript):\n \"\"\"Explain everything known about a given work.\"\"\"\n\n name = \"Explain everything known about a given work\"\n\n # Where to go to get best available metadata about a work.\n METADATA_URL_TEMPLATE = \"http://metadata.librarysimplified.org/lookup?urn=%s\"\n TIME_FORMAT = \"%Y-%m-%d %H:%M\"\n\n def do_run(self, cmd_args=None, stdin=sys.stdin, stdout=sys.stdout):\n param_args = self.parse_command_line(self._db, cmd_args=cmd_args, stdin=stdin)\n identifier_ids = [x.id for x in param_args.identifiers]\n editions = self._db.query(Edition).filter(\n Edition.primary_identifier_id.in_(identifier_ids)\n )\n self.stdout = stdout\n\n policy = None\n for edition in editions:\n self.explain(self._db, edition, policy)\n self.write(\"-\" * 80)\n\n def write(self, s):\n \"\"\"Write a string to self.stdout.\"\"\"\n if not s.endswith(\"\\n\"):\n s += \"\\n\"\n self.stdout.write(s)\n\n def explain(self, _db, edition, presentation_calculation_policy=None):\n if edition.medium not in (\"Book\", \"Audio\"):\n # we haven't yet decided what to display for you\n return\n\n # Tell about the Edition record.\n output = \"%s (%s, %s) according to %s\" % (\n edition.title,\n edition.author,\n edition.medium,\n edition.data_source.name,\n )\n self.write(output)\n self.write(\" Permanent work ID: %s\" % edition.permanent_work_id)\n self.write(\n \" Metadata URL: %s \"\n % (self.METADATA_URL_TEMPLATE % edition.primary_identifier.urn)\n )\n\n seen = set()\n self.explain_identifier(edition.primary_identifier, True, seen, 1, 0)\n\n # Find all contributions, and tell about the contributors.\n if edition.contributions:\n for contribution in edition.contributions:\n self.explain_contribution(contribution)\n\n # Tell about the LicensePool.\n lps = edition.license_pools\n if lps:\n for lp in lps:\n self.explain_license_pool(lp)\n else:\n self.write(\" No associated license pools.\")\n\n # Tell about the Work.\n work = edition.work\n if work:\n self.explain_work(work)\n else:\n self.write(\" No associated work.\")\n\n # Note: Can change DB state.\n if work and presentation_calculation_policy is not None:\n print(\"!!! About to calculate presentation!\")\n work.calculate_presentation(policy=presentation_calculation_policy)\n print(\"!!! 
All done!\")\n print()\n print(\"After recalculating presentation:\")\n self.explain_work(work)\n\n def explain_contribution(self, contribution):\n contributor_id = contribution.contributor.id\n contributor_sort_name = contribution.contributor.sort_name\n contributor_display_name = contribution.contributor.display_name\n self.write(\n \" Contributor[%s]: contributor_sort_name=%s, contributor_display_name=%s, \"\n % (contributor_id, contributor_sort_name, contributor_display_name)\n )\n\n def explain_identifier(self, identifier, primary, seen, strength, level):\n indent = \" \" * level\n if primary:\n ident = \"Primary identifier\"\n else:\n ident = \"Identifier\"\n if primary:\n strength = 1\n self.write(\n \"%s %s: %s/%s (q=%s)\"\n % (indent, ident, identifier.type, identifier.identifier, strength)\n )\n\n _db = Session.object_session(identifier)\n classifications = Identifier.classifications_for_identifier_ids(\n _db, [identifier.id]\n )\n for classification in classifications:\n subject = classification.subject\n genre = subject.genre\n if genre:\n genre = genre.name\n else:\n genre = \"(!genre)\"\n # print(\"%s %s says: %s/%s %s w=%s\" % (\n # indent, classification.data_source.name,\n # subject.identifier, subject.name, genre, classification.weight\n # ))\n seen.add(identifier)\n for equivalency in identifier.equivalencies:\n if equivalency.id in seen:\n continue\n seen.add(equivalency.id)\n output = equivalency.output\n self.explain_identifier(\n output, False, seen, equivalency.strength, level + 1\n )\n if primary:\n crs = identifier.coverage_records\n if crs:\n self.write(\" %d coverage records:\" % len(crs))\n for cr in sorted(crs, key=lambda x: x.timestamp):\n self.explain_coverage_record(cr)\n\n def explain_license_pool(self, pool):\n self.write(\"Licensepool info:\")\n if pool.collection:\n self.write(\" Collection: %r\" % pool.collection)\n libraries = [library.name for library in pool.collection.libraries]\n if libraries:\n self.write(\" Available to libraries: %s\" % \", \".join(libraries))\n else:\n self.write(\"Not available to any libraries!\")\n else:\n self.write(\" Not in any collection!\")\n self.write(\" Delivery mechanisms:\")\n if pool.delivery_mechanisms:\n for lpdm in pool.delivery_mechanisms:\n dm = lpdm.delivery_mechanism\n if dm.default_client_can_fulfill:\n fulfillable = \"Fulfillable\"\n else:\n fulfillable = \"Unfulfillable\"\n self.write(\" %s %s/%s\" % (fulfillable, dm.content_type, dm.drm_scheme))\n else:\n self.write(\" No delivery mechanisms.\")\n self.write(\n \" %s owned, %d available, %d holds, %d reserves\"\n % (\n pool.licenses_owned,\n pool.licenses_available,\n pool.patrons_in_hold_queue,\n pool.licenses_reserved,\n )\n )\n\n def explain_work(self, work):\n self.write(\"Work info:\")\n if work.presentation_edition:\n self.write(\n \" Identifier of presentation edition: %r\"\n % work.presentation_edition.primary_identifier\n )\n else:\n self.write(\" No presentation edition.\")\n self.write(\" Fiction: %s\" % work.fiction)\n self.write(\" Audience: %s\" % work.audience)\n self.write(\" Target age: %r\" % work.target_age)\n self.write(\" %s genres.\" % (len(work.genres)))\n for genre in work.genres:\n self.write(\" %s\" % genre)\n self.write(\" License pools:\")\n for pool in work.license_pools:\n active = \"SUPERCEDED\"\n if not pool.superceded:\n active = \"ACTIVE\"\n if pool.collection:\n collection = pool.collection.name\n else:\n collection = \"!collection\"\n self.write(\" %s: %r %s\" % (active, pool.identifier, collection))\n wcrs = 
sorted(work.coverage_records, key=lambda x: x.timestamp)\n if wcrs:\n self.write(\" %s work coverage records\" % len(wcrs))\n for wcr in wcrs:\n self.explain_work_coverage_record(wcr)\n\n def explain_coverage_record(self, cr):\n self._explain_coverage_record(\n cr.timestamp, cr.data_source, cr.operation, cr.status, cr.exception\n )\n\n def explain_work_coverage_record(self, cr):\n self._explain_coverage_record(\n cr.timestamp, None, cr.operation, cr.status, cr.exception\n )\n\n def _explain_coverage_record(\n self, timestamp, data_source, operation, status, exception\n ):\n timestamp = timestamp.strftime(self.TIME_FORMAT)\n if data_source:\n data_source = data_source.name + \" | \"\n else:\n data_source = \"\"\n if operation:\n operation = operation + \" | \"\n else:\n operation = \"\"\n if exception:\n exception = \" | \" + exception\n else:\n exception = \"\"\n self.write(\n \" %s | %s%s%s%s\" % (timestamp, data_source, operation, status, exception)\n )\n\n\nclass WhereAreMyBooksScript(CollectionInputScript):\n \"\"\"Try to figure out why Works aren't showing up.\n\n This is a common problem on a new installation or when a new collection\n is being configured.\n \"\"\"\n\n def __init__(self, _db=None, output=None, search=None):\n _db = _db or self._db\n super(WhereAreMyBooksScript, self).__init__(_db)\n self.output = output or sys.stdout\n try:\n self.search = search or ExternalSearchIndex(_db)\n except CannotLoadConfiguration:\n self.out(\n \"Here's your problem: the search integration is missing or misconfigured.\"\n )\n raise\n\n def out(self, s, *args):\n if not s.endswith(\"\\n\"):\n s += \"\\n\"\n self.output.write(s % args)\n\n def run(self, cmd_args=None):\n parsed = self.parse_command_line(self._db, cmd_args=cmd_args or [])\n\n # Check each library.\n libraries = self._db.query(Library).all()\n if libraries:\n for library in libraries:\n self.check_library(library)\n self.out(\"\\n\")\n else:\n self.out(\"There are no libraries in the system -- that's a problem.\")\n self.delete_cached_feeds()\n self.out(\"\\n\")\n collections = parsed.collections or self._db.query(Collection)\n for collection in collections:\n self.explain_collection(collection)\n self.out(\"\\n\")\n\n def check_library(self, library):\n \"\"\"Make sure a library is properly set up to show works.\"\"\"\n self.out(\"Checking library %s\", library.name)\n\n # Make sure it has collections.\n if not library.collections:\n self.out(\" This library has no collections -- that's a problem.\")\n else:\n for collection in library.collections:\n self.out(\" Associated with collection %s.\", collection.name)\n\n # Make sure it has lanes.\n if not library.lanes:\n self.out(\" This library has no lanes -- that's a problem.\")\n else:\n self.out(\" Associated with %s lanes.\", len(library.lanes))\n\n def delete_cached_feeds(self):\n page_feeds = self._db.query(CachedFeed).filter(\n CachedFeed.type != CachedFeed.GROUPS_TYPE\n )\n page_feeds_count = page_feeds.count()\n self.out(\n \"%d feeds in cachedfeeds table, not counting grouped feeds.\",\n page_feeds_count,\n )\n if page_feeds_count:\n self.out(\" Deleting them all.\")\n page_feeds.delete()\n self._db.commit()\n\n def explain_collection(self, collection):\n self.out('Examining collection \"%s\"', collection.name)\n\n base = (\n self._db.query(Work)\n .join(LicensePool)\n .filter(LicensePool.collection == collection)\n )\n\n ready = base.filter(Work.presentation_ready == True)\n unready = base.filter(Work.presentation_ready == False)\n\n ready_count = ready.count()\n 
unready_count = unready.count()\n self.out(\" %d presentation-ready works.\", ready_count)\n self.out(\" %d works not presentation-ready.\", unready_count)\n\n # Check if the works have delivery mechanisms.\n LPDM = LicensePoolDeliveryMechanism\n no_delivery_mechanisms = base.filter(\n ~exists().where(\n and_(\n LicensePool.data_source_id == LPDM.data_source_id,\n LicensePool.identifier_id == LPDM.identifier_id,\n )\n )\n ).count()\n if no_delivery_mechanisms > 0:\n self.out(\n \" %d works are missing delivery mechanisms and won't show up.\",\n no_delivery_mechanisms,\n )\n\n # Check if the license pools are suppressed.\n suppressed = base.filter(LicensePool.suppressed == True).count()\n if suppressed > 0:\n self.out(\n \" %d works have suppressed LicensePools and won't show up.\", suppressed\n )\n\n # Check if the pools have available licenses.\n not_owned = base.filter(\n and_(LicensePool.licenses_owned == 0, ~LicensePool.open_access)\n ).count()\n if not_owned > 0:\n self.out(\n \" %d non-open-access works have no owned licenses and won't show up.\",\n not_owned,\n )\n\n filter = Filter(collections=[collection])\n count = self.search.count_works(filter)\n self.out(\n \" %d works in the search index, expected around %d.\", count, ready_count\n )\n\n\nclass ListCollectionMetadataIdentifiersScript(CollectionInputScript):\n \"\"\"List the metadata identifiers for Collections in the database.\n\n This script is helpful for accounting for and tracking collections on\n the metadata wrangler.\n \"\"\"\n\n def __init__(self, _db=None, output=None):\n _db = _db or self._db\n super(ListCollectionMetadataIdentifiersScript, self).__init__(_db)\n self.output = output or sys.stdout\n\n def run(self, cmd_args=None):\n parsed = self.parse_command_line(self._db, cmd_args=cmd_args)\n self.do_run(parsed.collections)\n\n def do_run(self, collections=None):\n collection_ids = list()\n if collections:\n collection_ids = [c.id for c in collections]\n\n collections = self._db.query(Collection).order_by(Collection.id)\n if collection_ids:\n collections = collections.filter(Collection.id.in_(collection_ids))\n\n self.output.write(\"COLLECTIONS\\n\")\n self.output.write(\"=\" * 50 + \"\\n\")\n\n def add_line(id, name, protocol, metadata_identifier):\n line = \"(%s) %s/%s => %s\\n\" % (id, name, protocol, metadata_identifier)\n self.output.write(line)\n\n count = 0\n for collection in collections:\n if not count:\n # Add a format line.\n add_line(\"id\", \"name\", \"protocol\", \"metadata_identifier\")\n\n count += 1\n add_line(\n str(collection.id),\n collection.name,\n collection.protocol,\n collection.metadata_identifier,\n )\n\n self.output.write(\"\\n%d collections found.\\n\" % count)\n\n\nclass UpdateLaneSizeScript(LaneSweeperScript):\n def should_process_lane(self, lane):\n \"\"\"We don't want to process generic WorkLists -- there's nowhere\n to store the data.\n \"\"\"\n return isinstance(lane, Lane)\n\n def process_lane(self, lane):\n \"\"\"Update the estimated size of a Lane.\"\"\"\n lane.update_size(self._db)\n self.log.info(\"%s: %d\", lane.full_identifier, lane.size)\n\n\nclass UpdateCustomListSizeScript(CustomListSweeperScript):\n def process_custom_list(self, custom_list):\n custom_list.update_size()\n\n\nclass RemovesSearchCoverage(object):\n \"\"\"Mix-in class for a script that might remove all coverage records\n for the search engine.\n \"\"\"\n\n def remove_search_coverage_records(self):\n \"\"\"Delete all search coverage records from the database.\n\n :return: The number of records deleted.\n 
\"\"\"\n wcr = WorkCoverageRecord\n clause = wcr.operation == wcr.UPDATE_SEARCH_INDEX_OPERATION\n count = self._db.query(wcr).filter(clause).count()\n\n # We want records to be updated in ascending order in order to avoid deadlocks.\n # To guarantee lock order, we explicitly acquire locks by using a subquery with FOR UPDATE (with_for_update).\n # Please refer for my details to this SO article:\n # https://stackoverflow.com/questions/44660368/postgres-update-with-order-by-how-to-do-it\n self._db.execute(\n wcr.__table__.delete().where(\n wcr.id.in_(\n self._db.query(wcr.id)\n .with_for_update()\n .filter(clause)\n .order_by(WorkCoverageRecord.id)\n )\n )\n )\n\n return count\n\n\nclass RebuildSearchIndexScript(RunWorkCoverageProviderScript, RemovesSearchCoverage):\n \"\"\"Completely delete the search index and recreate it.\"\"\"\n\n def __init__(self, *args, **kwargs):\n search = kwargs.get(\"search_index_client\", None)\n self.search = search or ExternalSearchIndex(self._db)\n super(RebuildSearchIndexScript, self).__init__(\n SearchIndexCoverageProvider, *args, **kwargs\n )\n\n def do_run(self):\n # Calling setup_index will destroy the index and recreate it\n # empty.\n self.search.setup_index()\n\n # Remove all search coverage records so the\n # SearchIndexCoverageProvider will start from scratch.\n count = self.remove_search_coverage_records()\n self.log.info(\"Deleted %d search coverage records.\", count)\n\n # Now let the SearchIndexCoverageProvider do its thing.\n return super(RebuildSearchIndexScript, self).do_run()\n\n\nclass SearchIndexCoverageRemover(TimestampScript, RemovesSearchCoverage):\n \"\"\"Script that removes search index coverage for all works.\n\n This guarantees the SearchIndexCoverageProvider will add\n fresh coverage for every Work the next time it runs.\n \"\"\"\n\n def do_run(self):\n count = self.remove_search_coverage_records()\n return TimestampData(\n achievements=\"Coverage records deleted: %(deleted)d\" % dict(deleted=count)\n )\n\n\nclass MockStdin(object):\n \"\"\"Mock a list of identifiers passed in on standard input.\"\"\"\n\n def __init__(self, *lines):\n self.lines = lines\n\n def readlines(self):\n lines = self.lines\n self.lines = []\n return lines\n", "id": "5670303", "language": "Python", "matching_score": 7.077329158782959, "max_stars_count": 0, "path": "core/scripts.py" }, { "content": "# encoding: utf-8\nimport json\nimport os\nimport random\nfrom datetime import datetime, timedelta\nfrom io import BytesIO, StringIO\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom pymarc import parse_xml_to_array\nfrom pymarc.record import Record\n\nfrom api.authenticator import BasicAuthenticationProvider\nfrom api.bibliotheca import (\n BibliothecaAPI,\n BibliothecaBibliographicCoverageProvider,\n BibliothecaCirculationSweep,\n BibliothecaEventMonitor,\n BibliothecaParser,\n BibliothecaPurchaseMonitor,\n CheckoutResponseParser,\n ErrorParser,\n EventParser,\n ItemListParser,\n MockBibliothecaAPI,\n PatronCirculationParser,\n)\nfrom api.circulation import CirculationAPI, FulfillmentInfo, HoldInfo, LoanInfo\nfrom api.circulation_exceptions import *\nfrom api.web_publication_manifest import FindawayManifest\nfrom core.metadata_layer import ReplacementPolicy, TimestampData\nfrom core.mock_analytics_provider import MockAnalyticsProvider\nfrom core.model import (\n CirculationEvent,\n Contributor,\n DataSource,\n DeliveryMechanism,\n Edition,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n LicensePool,\n Measurement,\n 
Representation,\n Subject,\n Timestamp,\n Work,\n WorkCoverageRecord,\n create,\n)\nfrom core.scripts import RunCollectionCoverageProviderScript\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import datetime_utc, utc_now\nfrom core.util.http import BadResponseException\nfrom core.util.web_publication_manifest import AudiobookManifest\n\nfrom . import sample_data\n\n\nclass BibliothecaAPITest(DatabaseTest):\n def setup_method(self):\n super(BibliothecaAPITest, self).setup_method()\n self.collection = MockBibliothecaAPI.mock_collection(self._db)\n self.api = MockBibliothecaAPI(self._db, self.collection)\n\n base_path = os.path.split(__file__)[0]\n resource_path = os.path.join(base_path, \"files\", \"bibliotheca\")\n\n @classmethod\n def sample_data(self, filename):\n return sample_data(filename, \"bibliotheca\")\n\n\nclass TestBibliothecaAPI(BibliothecaAPITest):\n def setup_method(self):\n super(TestBibliothecaAPI, self).setup_method()\n self.collection = MockBibliothecaAPI.mock_collection(self._db)\n self.api = MockBibliothecaAPI(self._db, self.collection)\n\n def test_external_integration(self):\n assert self.collection.external_integration == self.api.external_integration(\n object()\n )\n\n def test__run_self_tests(self):\n # Verify that BibliothecaAPI._run_self_tests() calls the right\n # methods.\n\n class Mock(MockBibliothecaAPI):\n \"Mock every method used by BibliothecaAPI._run_self_tests.\"\n\n # First we will count the circulation events that happened in the\n # last five minutes.\n def get_events_between(self, start, finish):\n self.get_events_between_called_with = (start, finish)\n return [1, 2, 3]\n\n # Then we will count the loans and holds for the default\n # patron.\n def patron_activity(self, patron, pin):\n self.patron_activity_called_with = (patron, pin)\n return [\"loan\", \"hold\"]\n\n # Now let's make sure two Libraries have access to this\n # Collection -- one library with a default patron and one\n # without.\n no_default_patron = self._library()\n self.collection.libraries.append(no_default_patron)\n\n with_default_patron = self._default_library\n integration = self._external_integration(\n \"api.simple_authentication\",\n ExternalIntegration.PATRON_AUTH_GOAL,\n libraries=[with_default_patron],\n )\n p = BasicAuthenticationProvider\n integration.setting(p.TEST_IDENTIFIER).value = \"username1\"\n integration.setting(p.TEST_PASSWORD).value = \"<PASSWORD>\"\n\n # Now that everything is set up, run the self-test.\n api = Mock(self._db, self.collection)\n now = utc_now()\n [no_patron_credential, recent_circulation_events, patron_activity] = sorted(\n api._run_self_tests(self._db), key=lambda x: x.name\n )\n\n assert (\n \"Acquiring test patron credentials for library %s\" % no_default_patron.name\n == no_patron_credential.name\n )\n assert False == no_patron_credential.success\n assert \"Library has no test patron configured.\" == str(\n no_patron_credential.exception\n )\n\n assert (\n \"Asking for circulation events for the last five minutes\"\n == recent_circulation_events.name\n )\n assert True == recent_circulation_events.success\n assert \"Found 3 event(s)\" == recent_circulation_events.result\n start, end = api.get_events_between_called_with\n assert 5 * 60 == (end - start).total_seconds()\n assert (end - now).total_seconds() < 2\n\n assert (\n \"Checking activity for test patron for library %s\"\n % with_default_patron.name\n == patron_activity.name\n )\n assert \"Found 2 loans/holds\" == patron_activity.result\n patron, pin = 
api.patron_activity_called_with\n assert \"username1\" == patron.authorization_identifier\n assert \"password1\" == pin\n\n def test_full_path(self):\n id = self.api.library_id\n assert \"/cirrus/library/%s/foo\" % id == self.api.full_path(\"foo\")\n assert \"/cirrus/library/%s/foo\" % id == self.api.full_path(\"/foo\")\n assert \"/cirrus/library/%s/foo\" % id == self.api.full_path(\n \"/cirrus/library/%s/foo\" % id\n )\n\n def test_full_url(self):\n id = self.api.library_id\n assert (\n \"http://bibliotheca.test/cirrus/library/%s/foo\" % id\n == self.api.full_url(\"foo\")\n )\n assert (\n \"http://bibliotheca.test/cirrus/library/%s/foo\" % id\n == self.api.full_url(\"/foo\")\n )\n\n def test_request_signing(self):\n # Confirm a known correct result for the Bibliotheca request signing\n # algorithm.\n\n self.api.queue_response(200)\n response = self.api.request(\"some_url\")\n [request] = self.api.requests\n headers = request[-1][\"headers\"]\n assert \"Fri, 01 Jan 2016 00:00:00 GMT\" == headers[\"3mcl-Datetime\"]\n assert \"2.0\" == headers[\"3mcl-Version\"]\n expect = \"3MCLAUTH a:HZHNGfn6WVceakGrwXaJQ9zIY0Ai5opGct38j9/bHrE=\"\n assert expect == headers[\"3mcl-Authorization\"]\n\n # Tweak one of the variables that go into the signature, and\n # the signature changes.\n self.api.library_id = self.api.library_id + \"1\"\n self.api.queue_response(200)\n response = self.api.request(\"some_url\")\n request = self.api.requests[-1]\n headers = request[-1][\"headers\"]\n assert headers[\"3mcl-Authorization\"] != expect\n\n def test_replacement_policy(self):\n mock_analytics = object()\n policy = self.api.replacement_policy(self._db, analytics=mock_analytics)\n assert isinstance(policy, ReplacementPolicy)\n assert mock_analytics == policy.analytics\n\n def test_bibliographic_lookup_request(self):\n self.api.queue_response(200, content=\"some data\")\n response = self.api.bibliographic_lookup_request([\"id1\", \"id2\"])\n [request] = self.api.requests\n url = request[1]\n\n # The request URL is the /items endpoint with the IDs concatenated.\n assert url == self.api.full_url(\"items\") + \"/id1,id2\"\n\n # The response string is returned directly.\n assert b\"some data\" == response\n\n def test_bibliographic_lookup(self):\n class MockItemListParser(object):\n def parse(self, data):\n self.parse_called_with = data\n yield \"item1\"\n yield \"item2\"\n\n class Mock(MockBibliothecaAPI):\n \"\"\"Mock the functionality used by bibliographic_lookup_request.\"\"\"\n\n def __init__(self):\n self.item_list_parser = MockItemListParser()\n\n def bibliographic_lookup_request(self, identifier_strings):\n self.bibliographic_lookup_request_called_with = identifier_strings\n return \"parse me\"\n\n api = Mock()\n\n identifier = self._identifier()\n # We can pass in a list of identifier strings, a list of\n # Identifier objects, or a single example of each:\n for identifier, identifier_string in (\n (\"id1\", \"id1\"),\n (identifier, identifier.identifier),\n ):\n for identifier_list in ([identifier], identifier):\n api.item_list_parser.parse_called_with = None\n\n results = list(api.bibliographic_lookup(identifier_list))\n\n # A list of identifier strings is passed into\n # bibliographic_lookup_request().\n assert [\n identifier_string\n ] == api.bibliographic_lookup_request_called_with\n\n # The response content is passed into parse()\n assert \"parse me\" == api.item_list_parser.parse_called_with\n\n # The results of parse() are yielded.\n assert [\"item1\", \"item2\"] == results\n\n def 
test_bad_response_raises_exception(self):\n self.api.queue_response(500, content=\"oops\")\n identifier = self._identifier()\n with pytest.raises(BadResponseException) as excinfo:\n self.api.bibliographic_lookup(identifier)\n assert \"Got status code 500\" in str(excinfo.value)\n\n def test_put_request(self):\n # This is a basic test to make sure the method calls line up\n # right--there are more thorough tests in the circulation\n # manager, which actually uses this functionality.\n\n self.api.queue_response(200, content=\"ok, you put something\")\n response = self.api.request(\"checkout\", \"put this!\", method=\"PUT\")\n\n # The PUT request went through to the correct URL and the right\n # payload was sent.\n [[method, url, args, kwargs]] = self.api.requests\n assert \"PUT\" == method\n assert self.api.full_url(\"checkout\") == url\n assert \"put this!\" == kwargs[\"data\"]\n\n # The response is what we'd expect.\n assert 200 == response.status_code\n assert b\"ok, you put something\" == response.content\n\n def test_get_events_between_success(self):\n data = self.sample_data(\"empty_end_date_event.xml\")\n self.api.queue_response(200, content=data)\n now = utc_now()\n an_hour_ago = now - timedelta(minutes=3600)\n response = self.api.get_events_between(an_hour_ago, now)\n [event] = list(response)\n assert \"d5rf89\" == event[0]\n\n def test_get_events_between_failure(self):\n self.api.queue_response(500)\n now = utc_now()\n an_hour_ago = now - timedelta(minutes=3600)\n pytest.raises(\n BadResponseException, self.api.get_events_between, an_hour_ago, now\n )\n\n def test_update_availability(self):\n # Test the Bibliotheca implementation of the update_availability\n # method defined by the CirculationAPI interface.\n\n # Create an analytics integration so we can make sure\n # events are tracked.\n integration, ignore = create(\n self._db,\n ExternalIntegration,\n goal=ExternalIntegration.ANALYTICS_GOAL,\n protocol=\"core.local_analytics_provider\",\n )\n\n # Create a LicensePool that needs updating.\n edition, pool = self._edition(\n identifier_type=Identifier.THREEM_ID,\n data_source_name=DataSource.THREEM,\n with_license_pool=True,\n collection=self.collection,\n )\n\n # We have never checked the circulation information for this\n # LicensePool. 
Put some random junk in the pool to verify\n # that it gets changed.\n pool.licenses_owned = 10\n pool.licenses_available = 5\n pool.patrons_in_hold_queue = 3\n assert None == pool.last_checked\n\n # We do have a Work hanging around, but things are about to\n # change for it.\n work, is_new = pool.calculate_work()\n assert any(\n x\n for x in work.coverage_records\n if x.operation == WorkCoverageRecord.CLASSIFY_OPERATION\n )\n\n # Prepare availability information.\n data = self.sample_data(\"item_metadata_single.xml\")\n # Change the ID in the test data so it looks like it's talking\n # about the LicensePool we just created.\n data = data.replace(b\"ddf4gr9\", pool.identifier.identifier.encode(\"utf8\"))\n\n # Update availability using that data.\n self.api.queue_response(200, content=data)\n self.api.update_availability(pool)\n\n # The availability information has been updated, as has the\n # date the availability information was last checked.\n assert 1 == pool.licenses_owned\n assert 1 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n\n circulation_events = (\n self._db.query(CirculationEvent)\n .join(LicensePool)\n .filter(LicensePool.id == pool.id)\n )\n assert 3 == circulation_events.count()\n types = [e.type for e in circulation_events]\n assert (\n sorted(\n [\n CirculationEvent.DISTRIBUTOR_LICENSE_REMOVE,\n CirculationEvent.DISTRIBUTOR_CHECKOUT,\n CirculationEvent.DISTRIBUTOR_HOLD_RELEASE,\n ]\n )\n == sorted(types)\n )\n\n old_last_checked = pool.last_checked\n assert old_last_checked is not None\n\n # The work's CLASSIFY_OPERATION coverage record has been\n # removed. In the near future its coverage will be\n # recalculated to accommodate the new metadata.\n assert any(\n x\n for x in work.coverage_records\n if x.operation == WorkCoverageRecord.CLASSIFY_OPERATION\n )\n\n # Now let's try update_availability again, with a file that\n # makes it look like the book has been removed from the\n # collection.\n data = self.sample_data(\"empty_item_bibliographic.xml\")\n self.api.queue_response(200, content=data)\n\n self.api.update_availability(pool)\n\n assert 0 == pool.licenses_owned\n assert 0 == pool.licenses_available\n assert 0 == pool.patrons_in_hold_queue\n\n assert pool.last_checked is not old_last_checked\n\n circulation_events = (\n self._db.query(CirculationEvent)\n .join(LicensePool)\n .filter(LicensePool.id == pool.id)\n )\n assert 5 == circulation_events.count()\n\n def test_marc_request(self):\n # A request for MARC records between two dates makes an API\n # call and yields a sequence of pymarc Record objects.\n start = datetime_utc(2012, 1, 2, 3, 4, 5)\n end = datetime_utc(2014, 5, 6, 7, 8, 9)\n self.api.queue_response(200, content=self.sample_data(\"marc_records_two.xml\"))\n records = [x for x in self.api.marc_request(start, end, 10, 20)]\n [(method, url, body, headers)] = self.api.requests\n\n # A GET request was sent to the expected endpoint\n assert method == \"GET\"\n for expect in (\n \"/data/marc?\" \"startdate=2012-01-02T03:04:05\",\n \"enddate=2014-05-06T07:08:09\",\n \"offset=10\",\n \"limit=20\",\n ):\n assert expect in url\n\n # The queued response was converted into pymarc Record objects.\n assert all(isinstance(x, Record) for x in records)\n assert [\"Siege and Storm\", \"Red Island House A Novel/\"] == [\n x.title() for x in records\n ]\n\n # If the API returns an error, an appropriate exception is raised.\n self.api.queue_response(404, content=self.sample_data(\"error_unknown.xml\"))\n with pytest.raises(RemoteInitiatedServerError) 
as excinfo:\n [x for x in self.api.marc_request(start, end, 10, 20)]\n\n def test_sync_bookshelf(self):\n patron = self._patron()\n circulation = CirculationAPI(\n self._db,\n self._default_library,\n api_map={self.collection.protocol: MockBibliothecaAPI},\n )\n\n api = circulation.api_for_collection[self.collection.id]\n api.queue_response(200, content=self.sample_data(\"checkouts.xml\"))\n circulation.sync_bookshelf(patron, \"dummy pin\")\n\n # The patron should have two loans and two holds.\n l1, l2 = patron.loans\n h1, h2 = patron.holds\n\n assert datetime_utc(2015, 3, 20, 18, 50, 22) == l1.start\n assert datetime_utc(2015, 4, 10, 18, 50, 22) == l1.end\n\n assert datetime_utc(2015, 3, 13, 13, 38, 19) == l2.start\n assert datetime_utc(2015, 4, 3, 13, 38, 19) == l2.end\n\n # The patron is fourth in line. The end date is an estimate\n # of when the hold will be available to check out.\n assert datetime_utc(2015, 3, 24, 15, 6, 56) == h1.start\n assert datetime_utc(2015, 3, 24, 15, 7, 51) == h1.end\n assert 4 == h1.position\n\n # The hold has an end date. It's time for the patron to decide\n # whether or not to check out this book.\n assert datetime_utc(2015, 5, 25, 17, 5, 34) == h2.start\n assert datetime_utc(2015, 5, 27, 17, 5, 34) == h2.end\n assert 0 == h2.position\n\n def test_place_hold(self):\n patron = self._patron()\n edition, pool = self._edition(with_license_pool=True)\n self.api.queue_response(200, content=self.sample_data(\"successful_hold.xml\"))\n response = self.api.place_hold(patron, \"pin\", pool)\n assert pool.identifier.type == response.identifier_type\n assert pool.identifier.identifier == response.identifier\n\n def test_place_hold_fails_if_exceeded_hold_limit(self):\n patron = self._patron()\n edition, pool = self._edition(with_license_pool=True)\n self.api.queue_response(\n 400, content=self.sample_data(\"error_exceeded_hold_limit.xml\")\n )\n pytest.raises(PatronHoldLimitReached, self.api.place_hold, patron, \"pin\", pool)\n\n def test_get_audio_fulfillment_file(self):\n \"\"\"Verify that get_audio_fulfillment_file sends the\n request we expect.\n \"\"\"\n self.api.queue_response(200, content=\"A license\")\n response = self.api.get_audio_fulfillment_file(\"patron id\", \"bib id\")\n\n [[method, url, args, kwargs]] = self.api.requests\n assert \"POST\" == method\n assert url.endswith(\"GetItemAudioFulfillment\")\n assert (\n \"<AudioFulfillmentRequest><ItemId>bib id</ItemId><PatronId>patron id</PatronId></AudioFulfillmentRequest>\"\n == kwargs[\"data\"]\n )\n\n assert 200 == response.status_code\n assert b\"A license\" == response.content\n\n def test_fulfill(self):\n patron = self._patron()\n\n # This miracle book is available either as an audiobook or as\n # an EPUB.\n work = self._work(\n data_source_name=DataSource.BIBLIOTHECA, with_license_pool=True\n )\n [pool] = work.license_pools\n\n # Let's fulfill the EPUB first.\n self.api.queue_response(\n 200,\n headers={\"Content-Type\": \"presumably/an-acsm\"},\n content=\"this is an ACSM\",\n )\n fulfillment = self.api.fulfill(patron, \"password\", pool, internal_format=\"ePub\")\n assert isinstance(fulfillment, FulfillmentInfo)\n assert b\"this is an ACSM\" == fulfillment.content\n assert pool.identifier.identifier == fulfillment.identifier\n assert pool.identifier.type == fulfillment.identifier_type\n assert pool.data_source.name == fulfillment.data_source_name\n\n # The media type reported by the server is passed through.\n assert \"presumably/an-acsm\" == fulfillment.content_type\n\n # Now let's try the audio 
version.\n license = self.sample_data(\"sample_findaway_audiobook_license.json\")\n self.api.queue_response(\n 200, headers={\"Content-Type\": \"application/json\"}, content=license\n )\n fulfillment = self.api.fulfill(patron, \"password\", pool, internal_format=\"MP3\")\n assert isinstance(fulfillment, FulfillmentInfo)\n\n # Here, the media type reported by the server is not passed\n # through; it's replaced by a more specific media type\n assert DeliveryMechanism.FINDAWAY_DRM == fulfillment.content_type\n\n # The document sent by the 'Findaway' server has been\n # converted into a web publication manifest.\n manifest = json.loads(fulfillment.content)\n\n # The conversion process is tested more fully in\n # test_findaway_license_to_webpub_manifest. This just verifies\n # that the manifest contains information from the 'Findaway'\n # document as well as information from the Work.\n metadata = manifest[\"metadata\"]\n assert (\n \"abcdef01234789abcdef0123\" == metadata[\"encrypted\"][\"findaway:checkoutId\"]\n )\n assert work.title == metadata[\"title\"]\n\n # Now let's see what happens to fulfillment when 'Findaway' or\n # 'Bibliotheca' sends bad information.\n bad_media_type = \"application/error+json\"\n bad_content = b\"This is not my beautiful license document!\"\n self.api.queue_response(\n 200, headers={\"Content-Type\": bad_media_type}, content=bad_content\n )\n fulfillment = self.api.fulfill(patron, \"password\", pool, internal_format=\"MP3\")\n assert isinstance(fulfillment, FulfillmentInfo)\n\n # The (apparently) bad document is just passed on to the\n # client as part of the FulfillmentInfo, in the hopes that the\n # client will know what to do with it.\n assert bad_media_type == fulfillment.content_type\n assert bad_content == fulfillment.content\n\n def test_findaway_license_to_webpub_manifest(self):\n work = self._work(with_license_pool=True)\n [pool] = work.license_pools\n document = self.sample_data(\"sample_findaway_audiobook_license.json\")\n\n # Randomly scramble the Findaway manifest to make sure it gets\n # properly sorted when converted to a Webpub-like manifest.\n document = json.loads(document)\n document[\"items\"].sort(key=lambda x: random.random())\n document = json.dumps(document)\n\n m = BibliothecaAPI.findaway_license_to_webpub_manifest\n media_type, manifest = m(pool, document)\n assert DeliveryMechanism.FINDAWAY_DRM == media_type\n manifest = json.loads(manifest)\n\n # We use the default context for Web Publication Manifest\n # files, but we also define an extension context called\n # 'findaway', which lets us include terms coined by Findaway\n # in a normal Web Publication Manifest document.\n context = manifest[\"@context\"]\n default, findaway = context\n assert AudiobookManifest.DEFAULT_CONTEXT == default\n assert {\"findaway\": FindawayManifest.FINDAWAY_EXTENSION_CONTEXT} == findaway\n\n metadata = manifest[\"metadata\"]\n\n # Information about the book has been added to metadata.\n # (This is tested more fully in\n # core/tests/util/test_util_web_publication_manifest.py.)\n assert work.title == metadata[\"title\"]\n assert pool.identifier.urn == metadata[\"identifier\"]\n assert \"en\" == metadata[\"language\"]\n\n # Information about the license has been added to an 'encrypted'\n # object within metadata.\n encrypted = metadata[\"encrypted\"]\n assert (\n \"http://librarysimplified.org/terms/drm/scheme/FAE\" == encrypted[\"scheme\"]\n )\n assert \"abcdef01234789abcdef0123\" == encrypted[\"findaway:checkoutId\"]\n assert \"1234567890987654321ababa\" == 
encrypted[\"findaway:licenseId\"]\n assert \"3M\" == encrypted[\"findaway:accountId\"]\n assert \"123456\" == encrypted[\"findaway:fulfillmentId\"]\n assert (\n \"aaaaaaaa-<KEY>\" == encrypted[\"findaway:sessionKey\"]\n )\n\n # Every entry in the license document's 'items' list has\n # become a readingOrder item in the manifest.\n reading_order = manifest[\"readingOrder\"]\n assert 79 == len(reading_order)\n\n # The duration of each readingOrder item has been converted to\n # seconds.\n first = reading_order[0]\n assert 16.201 == first[\"duration\"]\n assert \"Track 1\" == first[\"title\"]\n\n # There is no 'href' value for the readingOrder items because the\n # files must be obtained through the Findaway SDK rather than\n # through regular HTTP requests.\n #\n # Since this is a relatively small book, it only has one part,\n # part #0. Within that part, the items have been sorted by\n # their sequence.\n for i, item in enumerate(reading_order):\n assert None == item.get(\"href\", None)\n assert Representation.MP3_MEDIA_TYPE == item[\"type\"]\n assert 0 == item[\"findaway:part\"]\n assert i + 1 == item[\"findaway:sequence\"]\n\n # The total duration, in seconds, has been added to metadata.\n assert 28371 == int(metadata[\"duration\"])\n\n\nclass TestBibliothecaCirculationSweep(BibliothecaAPITest):\n def test_circulation_sweep_discovers_work(self):\n # Test what happens when BibliothecaCirculationSweep discovers a new\n # work.\n\n # Create an analytics integration so we can make sure\n # events are tracked.\n integration, ignore = create(\n self._db,\n ExternalIntegration,\n goal=ExternalIntegration.ANALYTICS_GOAL,\n protocol=\"core.local_analytics_provider\",\n )\n\n # We know about an identifier, but nothing else.\n identifier = self._identifier(\n identifier_type=Identifier.BIBLIOTHECA_ID, foreign_id=\"ddf4gr9\"\n )\n\n # We're about to get information about that identifier from\n # the API.\n data = self.sample_data(\"item_metadata_single.xml\")\n\n # Update availability using that data.\n self.api.queue_response(200, content=data)\n monitor = BibliothecaCirculationSweep(\n self._db, self.collection, api_class=self.api\n )\n monitor.process_items([identifier])\n\n # Validate that the HTTP request went to the /items endpoint.\n request = self.api.requests.pop()\n url = request[1]\n assert url == self.api.full_url(\"items\") + \"/\" + identifier.identifier\n\n # A LicensePool has been created for the previously mysterious\n # identifier.\n [pool] = identifier.licensed_through\n assert self.collection == pool.collection\n assert False == pool.open_access\n\n # Three circulation events were created for this license pool,\n # marking the creation of the license pool, the addition of\n # licenses owned, and the making of those licenses available.\n circulation_events = (\n self._db.query(CirculationEvent)\n .join(LicensePool)\n .filter(LicensePool.id == pool.id)\n )\n assert 3 == circulation_events.count()\n types = [e.type for e in circulation_events]\n assert (\n sorted(\n [\n CirculationEvent.DISTRIBUTOR_LICENSE_ADD,\n CirculationEvent.DISTRIBUTOR_TITLE_ADD,\n CirculationEvent.DISTRIBUTOR_CHECKIN,\n ]\n )\n == sorted(types)\n )\n\n\n# Tests of the various parser classes.\n#\n\n\nclass TestBibliothecaParser(BibliothecaAPITest):\n def test_parse_date(self):\n parser = BibliothecaParser()\n v = parser.parse_date(\"2016-01-02T12:34:56\")\n assert datetime_utc(2016, 1, 2, 12, 34, 56) == v\n\n assert None == parser.parse_date(None)\n assert None == parser.parse_date(\"Some weird 
value\")\n\n\nclass TestEventParser(BibliothecaAPITest):\n def test_parse_empty_list(self):\n data = self.sample_data(\"empty_event_batch.xml\")\n\n # By default, we consider an empty batch of events not\n # as an error.\n events = list(EventParser().process_all(data))\n assert [] == events\n\n # But if we consider not having events for a certain time\n # period, then an exception should be raised.\n no_events_error = True\n with pytest.raises(RemoteInitiatedServerError) as excinfo:\n list(EventParser().process_all(data, no_events_error))\n assert (\n \"No events returned from server. This may not be an error, but treating it as one to be safe.\"\n in str(excinfo.value)\n )\n\n def test_parse_empty_end_date_event(self):\n data = self.sample_data(\"empty_end_date_event.xml\")\n [event] = list(EventParser().process_all(data))\n (threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event\n assert \"d5rf89\" == threem_id\n assert \"9781101190623\" == isbn\n assert None == patron_id\n assert datetime_utc(2016, 4, 28, 11, 4, 6) == start_time\n assert None == end_time\n assert \"distributor_license_add\" == internal_event_type\n\n\nclass TestPatronCirculationParser(BibliothecaAPITest):\n def test_parse(self):\n data = self.sample_data(\"checkouts.xml\")\n collection = self.collection\n loans_and_holds = PatronCirculationParser(collection).process_all(data)\n loans = [x for x in loans_and_holds if isinstance(x, LoanInfo)]\n holds = [x for x in loans_and_holds if isinstance(x, HoldInfo)]\n assert 2 == len(loans)\n assert 2 == len(holds)\n [l1, l2] = sorted(loans, key=lambda x: x.identifier)\n assert \"1ad589\" == l1.identifier\n assert \"cgaxr9\" == l2.identifier\n expect_loan_start = datetime_utc(2015, 3, 20, 18, 50, 22)\n expect_loan_end = datetime_utc(2015, 4, 10, 18, 50, 22)\n assert expect_loan_start == l1.start_date\n assert expect_loan_end == l1.end_date\n\n [h1, h2] = sorted(holds, key=lambda x: x.identifier)\n\n # This is the book on reserve.\n assert collection.id == h1.collection_id\n assert DataSource.BIBLIOTHECA == h1.data_source_name\n assert \"9wd8\" == h1.identifier\n expect_hold_start = datetime_utc(2015, 5, 25, 17, 5, 34)\n expect_hold_end = datetime_utc(2015, 5, 27, 17, 5, 34)\n assert expect_hold_start == h1.start_date\n assert expect_hold_end == h1.end_date\n assert 0 == h1.hold_position\n\n # This is the book on hold.\n assert \"d4o8r9\" == h2.identifier\n assert collection.id == h2.collection_id\n assert DataSource.BIBLIOTHECA == h2.data_source_name\n expect_hold_start = datetime_utc(2015, 3, 24, 15, 6, 56)\n expect_hold_end = datetime_utc(2015, 3, 24, 15, 7, 51)\n assert expect_hold_start == h2.start_date\n assert expect_hold_end == h2.end_date\n assert 4 == h2.hold_position\n\n\nclass TestCheckoutResponseParser(BibliothecaAPITest):\n def test_parse(self):\n data = self.sample_data(\"successful_checkout.xml\")\n due_date = CheckoutResponseParser().process_all(data)\n assert datetime_utc(2015, 4, 16, 0, 32, 36) == due_date\n\n\nclass TestErrorParser(BibliothecaAPITest):\n def test_exceeded_limit(self):\n \"\"\"The normal case--we get a helpful error message which we turn into\n an appropriate circulation exception.\n \"\"\"\n msg = self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n assert \"Patron cannot loan more than 12 documents\" == error.message\n\n def test_exceeded_hold_limit(self):\n msg = self.sample_data(\"error_exceeded_hold_limit.xml\")\n error = 
ErrorParser().process_all(msg)\n assert isinstance(error, PatronHoldLimitReached)\n assert \"Patron cannot have more than 15 holds\" == error.message\n\n def test_wrong_status(self):\n msg = self.sample_data(\"error_no_licenses.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, NoLicenses)\n assert (\n \"the patron document status was CAN_WISH and not one of CAN_LOAN,RESERVATION\"\n == error.message\n )\n\n problem = error.as_problem_detail_document()\n assert \"The library currently has no licenses for this book.\" == problem.detail\n assert 404 == problem.status_code\n\n def test_internal_server_error_beomces_remote_initiated_server_error(self):\n \"\"\"Simulate the message we get when the server goes down.\"\"\"\n msg = \"The server has encountered an error\"\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n assert BibliothecaAPI.SERVICE_NAME == error.service_name\n assert 502 == error.status_code\n assert msg == error.message\n doc = error.as_problem_detail_document()\n assert 502 == doc.status_code\n assert \"Integration error communicating with Bibliotheca\" == doc.detail\n\n def test_unknown_error_becomes_remote_initiated_server_error(self):\n \"\"\"Simulate the message we get when the server gives a vague error.\"\"\"\n msg = self.sample_data(\"error_unknown.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n assert BibliothecaAPI.SERVICE_NAME == error.service_name\n assert \"Unknown error\" == error.message\n\n def test_remote_authentication_failed_becomes_remote_initiated_server_error(self):\n \"\"\"Simulate the message we get when the error message is\n 'Authentication failed' but our authentication information is\n set up correctly.\n \"\"\"\n msg = self.sample_data(\"error_authentication_failed.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n assert BibliothecaAPI.SERVICE_NAME == error.service_name\n assert \"Authentication failed\" == error.message\n\n def test_malformed_error_message_becomes_remote_initiated_server_error(self):\n msg = \"\"\"<weird>This error does not follow the standard set out by Bibliotheca.</weird>\"\"\"\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n assert BibliothecaAPI.SERVICE_NAME == error.service_name\n assert \"Unknown error\" == error.message\n\n def test_blank_error_message_becomes_remote_initiated_server_error(self):\n msg = \"\"\"<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Message/></Error>\"\"\"\n error = ErrorParser().process_all(msg)\n assert isinstance(error, RemoteInitiatedServerError)\n assert BibliothecaAPI.SERVICE_NAME == error.service_name\n assert \"Unknown error\" == error.message\n\n\nclass TestBibliothecaEventParser(object):\n\n # Sample event feed to test out the parser.\n TWO_EVENTS = \"\"\"<LibraryEventBatch xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <PublishId>1b0d6667-a10e-424a-9f73-fb6f6d41308e</PublishId>\n <PublishDateTimeInUTC>2014-04-14T13:59:05.6920303Z</PublishDateTimeInUTC>\n <LastEventDateTimeInUTC>2014-04-03T00:00:34</LastEventDateTimeInUTC>\n <Events>\n <CloudLibraryEvent>\n <LibraryId>test-library</LibraryId>\n <EventId>event-1</EventId>\n <EventType>CHECKIN</EventType>\n <EventStartDateTimeInUTC>2014-04-03T00:00:23</EventStartDateTimeInUTC>\n 
<EventEndDateTimeInUTC>2014-04-03T00:00:23</EventEndDateTimeInUTC>\n <ItemId>theitem1</ItemId>\n <ISBN>900isbn1</ISBN>\n <PatronId>patronid1</PatronId>\n <EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>\n </CloudLibraryEvent>\n <CloudLibraryEvent>\n <LibraryId>test-library</LibraryId>\n <EventId>event-2</EventId>\n <EventType>CHECKOUT</EventType>\n <EventStartDateTimeInUTC>2014-04-03T00:00:34</EventStartDateTimeInUTC>\n <EventEndDateTimeInUTC>2014-04-02T23:57:37</EventEndDateTimeInUTC>\n <ItemId>theitem2</ItemId>\n <ISBN>900isbn2</ISBN>\n <PatronId>patronid2</PatronId>\n <EventPublishDateTimeInUTC>2014-04-14T13:59:05</EventPublishDateTimeInUTC>\n </CloudLibraryEvent>\n </Events>\n</LibraryEventBatch>\n\"\"\"\n\n def test_parse_event_batch(self):\n # Parsing the XML gives us two events.\n event1, event2 = EventParser().process_all(self.TWO_EVENTS)\n\n (threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event1\n\n assert \"theitem1\" == threem_id\n assert \"900isbn1\" == isbn\n assert \"patronid1\" == patron_id\n assert CirculationEvent.DISTRIBUTOR_CHECKIN == internal_event_type\n assert start_time == end_time\n\n (threem_id, isbn, patron_id, start_time, end_time, internal_event_type) = event2\n assert \"theitem2\" == threem_id\n assert \"900isbn2\" == isbn\n assert \"patronid2\" == patron_id\n assert CirculationEvent.DISTRIBUTOR_CHECKOUT == internal_event_type\n\n # Verify that start and end time were parsed correctly.\n correct_start = datetime_utc(2014, 4, 3, 0, 0, 34)\n correct_end = datetime_utc(2014, 4, 2, 23, 57, 37)\n assert correct_start == start_time\n assert correct_end == end_time\n\n\nclass TestErrorParser(object):\n\n # Some sample error documents.\n\n NOT_LOANABLE = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>the patron document status was CAN_HOLD and not one of CAN_LOAN,RESERVATION</Message></Error>'\n\n ALREADY_ON_LOAN = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>the patron document status was LOAN and not one of CAN_LOAN,RESERVATION</Message></Error>'\n\n TRIED_TO_RETURN_UNLOANED_BOOK = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>The patron has no eBooks checked out</Message></Error>'\n\n TRIED_TO_HOLD_LOANABLE_BOOK = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>the patron document status was CAN_LOAN and not one of CAN_HOLD</Message></Error>'\n\n TRIED_TO_HOLD_BOOK_ON_LOAN = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>the patron document status was LOAN and not one of CAN_HOLD</Message></Error>'\n\n ALREADY_ON_HOLD = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>the patron document status was HOLD and not one of CAN_HOLD</Message></Error>'\n\n TRIED_TO_CANCEL_NONEXISTENT_HOLD = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>The patron does not have the book on hold</Message></Error>'\n\n TOO_MANY_LOANS = '<Error xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" 
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"><Code>Gen-001</Code><Message>Patron cannot loan more than 12 documents</Message></Error>'\n\n def test_exception(self):\n parser = ErrorParser()\n\n error = parser.process_all(self.NOT_LOANABLE)\n assert isinstance(error, NoAvailableCopies)\n\n error = parser.process_all(self.ALREADY_ON_LOAN)\n assert isinstance(error, AlreadyCheckedOut)\n\n error = parser.process_all(self.ALREADY_ON_HOLD)\n assert isinstance(error, AlreadyOnHold)\n\n error = parser.process_all(self.TOO_MANY_LOANS)\n assert isinstance(error, PatronLoanLimitReached)\n\n error = parser.process_all(self.TRIED_TO_CANCEL_NONEXISTENT_HOLD)\n assert isinstance(error, NotOnHold)\n\n error = parser.process_all(self.TRIED_TO_RETURN_UNLOANED_BOOK)\n assert isinstance(error, NotCheckedOut)\n\n error = parser.process_all(self.TRIED_TO_HOLD_LOANABLE_BOOK)\n assert isinstance(error, CurrentlyAvailable)\n\n # This is such a weird case we don't have a special\n # exception for it.\n error = parser.process_all(self.TRIED_TO_HOLD_BOOK_ON_LOAN)\n assert isinstance(error, CannotHold)\n\n\nclass TestBibliothecaPurchaseMonitor(BibliothecaAPITest):\n @pytest.fixture()\n def default_monitor(self):\n return BibliothecaPurchaseMonitor(\n self._db,\n self.collection,\n api_class=MockBibliothecaAPI,\n analytics=MockAnalyticsProvider(),\n )\n\n @pytest.fixture()\n def initialized_monitor(self):\n collection = MockBibliothecaAPI.mock_collection(\n self._db, name=\"Initialized Purchase Monitor Collection\"\n )\n monitor = BibliothecaPurchaseMonitor(\n self._db, collection, api_class=MockBibliothecaAPI\n )\n Timestamp.stamp(\n self._db,\n service=monitor.service_name,\n service_type=Timestamp.MONITOR_TYPE,\n collection=collection,\n )\n return monitor\n\n @pytest.mark.parametrize(\n \"specified_default_start, expected_default_start\",\n [\n (\"2011\", datetime_utc(year=2011, month=1, day=1)),\n (\"2011-10\", datetime_utc(year=2011, month=10, day=1)),\n (\"2011-10-05\", datetime_utc(year=2011, month=10, day=5)),\n (\"2011-10-05T15\", datetime_utc(year=2011, month=10, day=5, hour=15)),\n (\n \"2011-10-05T15:27\",\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (\n \"2011-10-05T15:27:33\",\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n (\n \"2011-10-05 15:27:33\",\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n (\n \"2011-10-05T15:27:33.123456\",\n datetime_utc(\n year=2011,\n month=10,\n day=5,\n hour=15,\n minute=27,\n second=33,\n microsecond=123456,\n ),\n ),\n (\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (None, None),\n ],\n )\n def test_optional_iso_date_valid_dates(\n self, specified_default_start, expected_default_start, default_monitor\n ):\n # ISO 8601 strings, `datetime`s, or None are valid.\n actual_default_start = default_monitor._optional_iso_date(\n specified_default_start\n )\n if expected_default_start is not None:\n assert isinstance(actual_default_start, datetime)\n assert actual_default_start == expected_default_start\n\n def test_monitor_intrinsic_start_time(self, default_monitor, initialized_monitor):\n # No `default_start` time is specified for either `default_monitor` or\n # `initialized_monitor`, so each monitor's `default_start_time` should\n # match the monitor class's intrinsic start time.\n for monitor in [default_monitor, initialized_monitor]:\n expected_intrinsic_start = 
BibliothecaPurchaseMonitor.DEFAULT_START_TIME\n intrinsic_start = monitor._intrinsic_start_time(self._db)\n assert isinstance(intrinsic_start, datetime)\n assert intrinsic_start == expected_intrinsic_start\n assert intrinsic_start == monitor.default_start_time\n\n @pytest.mark.parametrize(\n \"specified_default_start, override_timestamp, expected_start\",\n [\n (\n \"2011-10-05T15:27\",\n False,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (\n \"2011-10-05T15:27:33\",\n False,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n (None, False, None),\n (None, True, None),\n (\n \"2011-10-05T15:27\",\n True,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (\n \"2011-10-05T15:27:33\",\n True,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n ],\n )\n def test_specified_start_trumps_intrinsic_default_start(\n self, specified_default_start, override_timestamp, expected_start\n ):\n # When a valid `default_start` parameter is specified, it -- not the monitor's\n # intrinsic default -- will always become the monitor's `default_start_time`.\n monitor = BibliothecaPurchaseMonitor(\n self._db,\n self.collection,\n api_class=MockBibliothecaAPI,\n default_start=specified_default_start,\n override_timestamp=override_timestamp,\n )\n monitor_intrinsic_default = monitor._intrinsic_start_time(self._db)\n assert isinstance(monitor.default_start_time, datetime)\n assert isinstance(monitor_intrinsic_default, datetime)\n if specified_default_start:\n assert monitor.default_start_time == expected_start\n else:\n assert (\n abs(\n (\n monitor_intrinsic_default - monitor.default_start_time\n ).total_seconds()\n )\n <= 1\n )\n\n # If no `default_date` specified, then `override_timestamp` must be false.\n if not specified_default_start:\n assert monitor.override_timestamp is False\n\n # For an uninitialized monitor (no timestamp), the monitor's `default_start_time`,\n # whether from a specified `default_start` or the monitor's intrinsic start time,\n # will be the actual start time. 
The cut-off will be roughly the current time, in\n # either case.\n expected_cutoff = utc_now()\n with mock.patch.object(\n monitor, \"catch_up_from\", return_value=None\n ) as catch_up_from:\n monitor.run()\n actual_start, actual_cutoff, progress = catch_up_from.call_args[0]\n assert abs((expected_cutoff - actual_cutoff).total_seconds()) <= 1\n assert actual_cutoff == progress.finish\n assert actual_start == monitor.default_start_time\n assert progress.start == monitor.default_start_time\n\n @pytest.mark.parametrize(\n \"specified_default_start, override_timestamp, expected_start\",\n [\n (\n \"2011-10-05T15:27\",\n False,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (\n \"2011-10-05T15:27:33\",\n False,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n (None, False, None),\n (None, True, None),\n (\n \"2011-10-05T15:27\",\n True,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27),\n ),\n (\n \"2011-10-05T15:27:33\",\n True,\n datetime_utc(year=2011, month=10, day=5, hour=15, minute=27, second=33),\n ),\n ],\n )\n def test_specified_start_can_override_timestamp(\n self, specified_default_start, override_timestamp, expected_start\n ):\n monitor = BibliothecaPurchaseMonitor(\n self._db,\n self.collection,\n api_class=MockBibliothecaAPI,\n default_start=specified_default_start,\n override_timestamp=override_timestamp,\n )\n # For an initialized monitor, the `default_start_time` will be derived from\n # `timestamp.finish`, unless overridden by a specified `default_start` when\n # `override_timestamp` is specified as True.\n ts = Timestamp.stamp(\n self._db,\n service=monitor.service_name,\n service_type=Timestamp.MONITOR_TYPE,\n collection=monitor.collection,\n )\n start_time_from_ts = ts.finish - BibliothecaPurchaseMonitor.OVERLAP\n expected_actual_start_time = (\n expected_start if monitor.override_timestamp else start_time_from_ts\n )\n expected_cutoff = utc_now()\n with mock.patch.object(\n monitor, \"catch_up_from\", return_value=None\n ) as catch_up_from:\n monitor.run()\n actual_start, actual_cutoff, progress = catch_up_from.call_args[0]\n assert abs((expected_cutoff - actual_cutoff).total_seconds()) <= 1\n assert actual_cutoff == progress.finish\n assert actual_start == expected_actual_start_time\n assert progress.start == expected_actual_start_time\n\n @pytest.mark.parametrize(\"input\", [(\"invalid\"), (\"2020/10\"), ([\"2020-10-05\"])])\n def test_optional_iso_date_invalid_dates(self, input, default_monitor):\n with pytest.raises(ValueError) as excinfo:\n default_monitor._optional_iso_date(input)\n\n def test_catch_up_from(self, default_monitor):\n # catch_up_from() slices up its given timespan, calls\n # purchases() to find purchases for each slice, processes each\n # purchase using process_record(), and sets a checkpoint for each\n # slice that is unambiguously in the past.\n today = utc_now().date()\n\n # _checkpoint() will be called after processing this slice\n # because it's a full slice that ends before today.\n full_slice = [datetime_utc(2014, 1, 1), datetime_utc(2014, 1, 2), True]\n\n # _checkpoint() is not called after processing this slice\n # because it's not a full slice.\n incomplete_slice = [datetime_utc(2015, 1, 1), datetime_utc(2015, 1, 2), False]\n\n # _checkpoint() is not called after processing this slice,\n # even though it's supposedly complete, because today isn't\n # over yet.\n today_slice = [today - timedelta(days=1), today, True]\n\n # _checkpoint() is not called after processing this 
slice\n # because it doesn't end in the past.\n future_slice = [today + timedelta(days=1), today + timedelta(days=2), True]\n\n default_monitor.slice_timespan = MagicMock(\n return_value=[full_slice, incomplete_slice, today_slice, future_slice]\n )\n default_monitor.purchases = MagicMock(return_value=[\"A record\"])\n default_monitor.process_record = MagicMock()\n default_monitor._checkpoint = MagicMock()\n\n # Execute.\n progress = TimestampData()\n start = datetime_utc(2019, 1, 1)\n cutoff = datetime_utc(2020, 1, 1)\n default_monitor.catch_up_from(start, cutoff, progress)\n\n # slice_timespan was called once.\n default_monitor.slice_timespan.assert_called_once_with(\n start, cutoff, timedelta(days=1)\n )\n\n # purchases() was called on each slice it returned.\n default_monitor.purchases.assert_has_calls(\n [\n mock.call(*x[:2])\n for x in (full_slice, incomplete_slice, today_slice, future_slice)\n ]\n )\n\n # Each purchases() call returned a single record, which was\n # passed into process_record along with the start date of the\n # current slice.\n default_monitor.process_record.assert_has_calls(\n [\n mock.call(\"A record\", x[0])\n for x in [full_slice, incomplete_slice, today_slice, future_slice]\n ]\n )\n\n # TimestampData.achievements was set to the total number of\n # records processed.\n assert progress.achievements == \"MARC records processed: 4\"\n\n # Only one of our contrived time slices -- the first one --\n # was a full slice that ended before the current\n # date. _checkpoint was called on that slice, and only that\n # slice.\n default_monitor._checkpoint.assert_called_once_with(\n progress, start, full_slice[0], \"MARC records processed: 1\"\n )\n\n def test__checkpoint(self, default_monitor):\n # The _checkpoint method allows the BibliothecaPurchaseMonitor\n # to preserve its progress in case of a crash.\n\n # The Timestamp for the default monitor shows that it has\n # a start date but it's never successfully completed.\n timestamp_obj = default_monitor.timestamp()\n assert timestamp_obj.achievements is None\n assert timestamp_obj.start == BibliothecaPurchaseMonitor.DEFAULT_START_TIME\n assert timestamp_obj.finish is None\n\n timestamp_data = TimestampData()\n finish = datetime_utc(2020, 1, 1)\n achievements = \"Some achievements\"\n\n default_monitor._checkpoint(\n timestamp_data, timestamp_obj.start, finish, achievements\n )\n\n # Calling _checkpoint creates the impression that the monitor\n # completed at the checkpoint, even though in point of fact\n # it's still running.\n timestamp_obj = default_monitor.timestamp()\n assert timestamp_obj.achievements == achievements\n assert timestamp_obj.start == BibliothecaPurchaseMonitor.DEFAULT_START_TIME\n assert timestamp_obj.finish == finish\n\n def test_purchases(self, default_monitor):\n # The purchases() method calls marc_request repeatedly, handling\n # pagination.\n\n # Mock three pages that contain 50, 50, and 49 items.\n default_monitor.api.marc_request = MagicMock(\n side_effect=[[1] * 50, [2] * 50, [3] * 49]\n )\n start = datetime_utc(2020, 1, 1)\n end = datetime_utc(2020, 1, 2)\n records = [x for x in default_monitor.purchases(start, end)]\n\n # marc_request was called repeatedly with increasing offsets\n # until it returned fewer than 50 results.\n default_monitor.api.marc_request.assert_has_calls(\n [mock.call(start, end, offset, 50) for offset in (1, 51, 101)]\n )\n\n # Every \"record\" it returned was yielded as part of a single\n # stream.\n assert ([1] * 50) + ([2] * 50) + ([3] * 49) == records\n\n def 
test_process_record(self, default_monitor, caplog):\n # process_record may create a LicensePool, trigger the\n # bibliographic coverage provider, and/or issue a \"license\n # added\" analytics event, based on the identifier found in a\n # MARC record.\n purchase_time = utc_now()\n analytics = MockAnalyticsProvider()\n default_monitor.analytics = analytics\n ensure_coverage = MagicMock()\n default_monitor.bibliographic_coverage_provider.ensure_coverage = (\n ensure_coverage\n )\n\n # Try some cases that won't happen in real life.\n multiple_control_numbers = b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\" ?><marc:collection xmlns:marc=\"http://www.loc.gov/MARC21/slim\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd\"><marc:record><marc:leader>01034nam a22002413a 4500</marc:leader><marc:controlfield tag=\"001\">ehasb89</marc:controlfield><marc:controlfield tag=\"001\">abcde</marc:controlfield></marc:record></marc:collection>\"\"\"\n no_control_number = b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\" ?><marc:collection xmlns:marc=\"http://www.loc.gov/MARC21/slim\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd\"><marc:record><marc:leader>01034nam a22002413a 4500</marc:leader></marc:record></marc:collection>\"\"\"\n for bad_record, expect_error in (\n (\n multiple_control_numbers,\n \"Ignoring MARC record with multiple Bibliotheca control numbers.\",\n ),\n (\n no_control_number,\n \"Ignoring MARC record with no Bibliotheca control number.\",\n ),\n ):\n [marc] = parse_xml_to_array(BytesIO(bad_record))\n assert default_monitor.process_record(marc, purchase_time) is None\n assert expect_error in caplog.messages[-1]\n\n # Now, try the two real cases.\n [ehasb89, oock89] = parse_xml_to_array(\n StringIO(self.sample_data(\"marc_records_two.xml\").decode(\"utf8\"))\n )\n\n # If the book is new to this collection, it's run through\n # BibliothecaBibliographicCoverageProvider.ensure_coverage to\n # give it initial bibliographic and circulation data.\n pool = default_monitor.process_record(ehasb89, purchase_time)\n assert pool.identifier.identifier == \"ehasb89\"\n assert pool.identifier.type == Identifier.BIBLIOTHECA_ID\n assert pool.data_source.name == DataSource.BIBLIOTHECA\n assert self.collection == pool.collection\n ensure_coverage.assert_called_once_with(pool.identifier, force=True)\n\n # An analytics event is issued to mark the time at which the\n # book was first purchased.\n assert analytics.count == 1\n assert analytics.event_type == \"distributor_title_add\"\n assert analytics.time == purchase_time\n\n # If the book is already in this collection, ensure_coverage\n # is not called.\n pool, ignore = LicensePool.for_foreign_id(\n self._db,\n DataSource.BIBLIOTHECA,\n Identifier.BIBLIOTHECA_ID,\n \"3oock89\",\n collection=self.collection,\n )\n pool2 = default_monitor.process_record(oock89, purchase_time)\n assert pool == pool2\n assert ensure_coverage.call_count == 1 # i.e. 
was not called again.\n\n        # But an analytics event is still issued to mark the purchase.\n        assert analytics.count == 2\n        assert analytics.event_type == \"distributor_title_add\"\n        assert analytics.time == purchase_time\n\n    def test_end_to_end(self, default_monitor):\n        # Limited end-to-end test of the BibliothecaPurchaseMonitor.\n\n        # Set the default start time to one minute in the past, so the\n        # monitor doesn't feel the need to make more than one call to\n        # the MARC endpoint.\n        default_monitor.override_timestamp = True\n        start_time = utc_now() - timedelta(minutes=1)\n        default_monitor.default_start_time = start_time\n\n        # There will be two calls to the mock API: one to the MARC\n        # endpoint, which will tell us about the purchase of a single\n        # book, and one to the metadata endpoint for information about\n        # that book.\n        api = default_monitor.api\n        api.queue_response(200, content=self.sample_data(\"marc_records_one.xml\"))\n        api.queue_response(200, content=self.sample_data(\"item_metadata_single.xml\"))\n        default_monitor.run()\n\n        # One book was created.\n        work = self._db.query(Work).one()\n\n        # Bibliographic information came from the coverage provider,\n        # not from our fake MARC record (which is actually for a\n        # different book).\n        assert work.title == \"The Incense Game\"\n\n        # Licensing information was also taken from the coverage\n        # provider.\n        [lp] = work.license_pools\n        assert lp.identifier.identifier == \"ddf4gr9\"\n        assert default_monitor.collection == lp.collection\n        assert lp.licenses_owned == 1\n        assert lp.licenses_available == 1\n\n        # An analytics event was issued to commemorate the addition of\n        # the book to the collection.\n        assert default_monitor.analytics.event_type == \"distributor_title_add\"\n\n        # The timestamp has been updated; the next time the monitor\n        # runs it will ask for purchases that haven't happened yet.\n        default_monitor.override_timestamp = False\n        timestamp = default_monitor.timestamp()\n        assert timestamp.achievements == \"MARC records processed: 1\"\n        assert timestamp.finish > start_time\n\n\nclass TestBibliothecaEventMonitor(BibliothecaAPITest):\n    @pytest.fixture()\n    def default_monitor(self):\n        return BibliothecaEventMonitor(\n            self._db, self.collection, api_class=MockBibliothecaAPI\n        )\n\n    @pytest.fixture()\n    def initialized_monitor(self):\n        collection = MockBibliothecaAPI.mock_collection(\n            self._db, name=\"Initialized Monitor Collection\"\n        )\n        monitor = BibliothecaEventMonitor(\n            self._db, collection, api_class=MockBibliothecaAPI\n        )\n        Timestamp.stamp(\n            self._db,\n            service=monitor.service_name,\n            service_type=Timestamp.MONITOR_TYPE,\n            collection=collection,\n        )\n        return monitor\n\n    def test_run_once(self):\n        # run_once() slices the time between its start date\n        # and the current time into five-minute intervals, and asks for\n        # data about one interval at a time.\n\n        now = utc_now()\n        one_hour_ago = now - timedelta(hours=1)\n        two_hours_ago = now - timedelta(hours=2)\n\n        # Simulate that this script last ran an hour ago, covering the hour before that.\n        before_timestamp = TimestampData(start=two_hours_ago, finish=one_hour_ago)\n\n        api = MockBibliothecaAPI(self._db, self.collection)\n        api.queue_response(200, content=self.sample_data(\"item_metadata_single.xml\"))\n        # Queue a response for each of the 5-minute intervals in the hour-long slice.\n        for i in range(1, 15):\n            api.queue_response(\n                200, content=self.sample_data(\"empty_end_date_event.xml\")\n            )\n\n        monitor = BibliothecaEventMonitor(self._db, self.collection, api_class=api)\n\n        after_timestamp = 
monitor.run_once(before_timestamp)\n        # Fifteen requests were made to the API:\n        #\n        # 1. Looking up detailed information about the single book\n        # whose event we found.\n        #\n        # 2. Retrieving the 'slices' of events between roughly an hour ago\n        # and now in 5-minute intervals.\n        assert 15 == len(api.requests)\n\n        # There is no second 'detailed information' lookup because both events\n        # relate to the same book.\n\n        # A LicensePool was created for the identifier referred to\n        # in empty_end_date_event.xml.\n        [pool] = self.collection.licensepools\n        assert \"d5rf89\" == pool.identifier.identifier\n\n        # But since the metadata retrieved in the follow-up request\n        # was for a different book, no Work and no Edition have been\n        # created. (See test_handle_event for what happens when the\n        # API cooperates.)\n        assert None == pool.work\n        assert None == pool.presentation_edition\n\n        # The timeframe covered by that run starts a little before the\n        # 'finish' date associated with the old timestamp, and ends\n        # around the time run_once() was called.\n        #\n        # The events we found were both from 2016, but that's not\n        # considered when setting the timestamp.\n        assert one_hour_ago - monitor.OVERLAP == after_timestamp.start\n        self.time_eq(after_timestamp.finish, now)\n        # The timestamp's achievements have been updated.\n        assert \"Events handled: 13.\" == after_timestamp.achievements\n\n        # In earlier versions, the progress timestamp's `counter`\n        # property was manipulated to put the monitor in different\n        # states that would improve its reliability in different\n        # failure scenarios. With the addition of the\n        # BibliothecaPurchaseMonitor, the reliability of\n        # BibliothecaEventMonitor became much less important, so the\n        # complex code has been removed.\n        assert None == after_timestamp.counter\n\n        # To prove this, run the monitor again, catching up between\n        # after_timestamp.start (the current time, minus 5 minutes and\n        # a little bit), and the current time.\n        #\n        # This is going to result in two more API calls, one for the\n        # \"5 minutes\" and one for the \"little bit\".\n        api.queue_response(200, content=self.sample_data(\"empty_event_batch.xml\"))\n        api.queue_response(200, content=self.sample_data(\"empty_event_batch.xml\"))\n        monitor.run_once(after_timestamp)\n\n        # Two more requests were made, but no events were found for the\n        # corresponding time slices, so nothing happened.\n        #\n        # Previously the lack of any events would have been treated as\n        # an error.\n        assert 17 == len(api.requests)\n        assert \"Events handled: 0.\" == after_timestamp.achievements\n\n    def test_handle_event(self):\n        api = MockBibliothecaAPI(self._db, self.collection)\n        api.queue_response(200, content=self.sample_data(\"item_metadata_single.xml\"))\n        analytics = MockAnalyticsProvider()\n        monitor = BibliothecaEventMonitor(\n            self._db, self.collection, api_class=api, analytics=analytics\n        )\n\n        now = utc_now()\n        monitor.handle_event(\n            \"ddf4gr9\",\n            \"9781250015280\",\n            None,\n            now,\n            None,\n            CirculationEvent.DISTRIBUTOR_LICENSE_ADD,\n        )\n\n        # The collection now has a LicensePool corresponding to the book\n        # we just loaded.\n        [pool] = self.collection.licensepools\n        assert \"ddf4gr9\" == pool.identifier.identifier\n\n        # The book has a presentation-ready work and we know its\n        # bibliographic metadata.\n        assert True == pool.work.presentation_ready\n        assert \"The Incense Game\" == pool.work.title\n\n        # The LicensePool's circulation information has been changed\n        # to reflect what we know about the book -- that we have one\n        # license 
which (as of the instant the event happened) is\n        # available.\n        assert 1 == pool.licenses_owned\n        assert 1 == pool.licenses_available\n\n        # Three analytics events were collected: one for the license\n        # add event itself, one for the 'checkin' that made the new\n        # license available, and a redundant 'license add' event which\n        # was registered with analytics but which did not affect the\n        # counts.\n        #\n        # In earlier versions a fourth analytics event would have been\n        # issued, for the creation of a new LicensePool, but that is now\n        # solely the job of the BibliothecaPurchaseMonitor.\n        assert 3 == analytics.count\n\n\nclass TestBibliothecaPurchaseMonitorWhenMultipleCollections(BibliothecaAPITest):\n    def test_multiple_service_type_timestamps_with_start_date(self):\n        # Start with multiple collections that have timestamps\n        # because they've run before.\n        collections = [\n            MockBibliothecaAPI.mock_collection(self._db, name=\"Collection 1\"),\n            MockBibliothecaAPI.mock_collection(self._db, name=\"Collection 2\"),\n        ]\n        for c in collections:\n            Timestamp.stamp(\n                self._db,\n                service=BibliothecaPurchaseMonitor.SERVICE_NAME,\n                service_type=Timestamp.MONITOR_TYPE,\n                collection=c,\n            )\n        # Instantiate the associated monitors with a start date.\n        monitors = [\n            BibliothecaPurchaseMonitor(\n                self._db, c, api_class=BibliothecaAPI, default_start=\"2011-02-03\"\n            )\n            for c in collections\n        ]\n        assert len(monitors) == len(collections)\n        # Ensure that we get monitors and not an exception.\n        for m in monitors:\n            assert isinstance(m, BibliothecaPurchaseMonitor)\n\n\nclass TestItemListParser(BibliothecaAPITest):\n    def test_contributors_for_string(cls):\n        authors = list(\n            ItemListParser.contributors_from_string(\n                \"<NAME>; <NAME>.\"\n            )\n        )\n        assert [x.sort_name for x in authors] == [\n            \"<NAME>\",\n            \"<NAME>.\",\n        ]\n        assert [x.roles for x in authors] == [\n            [Contributor.AUTHOR_ROLE],\n            [Contributor.AUTHOR_ROLE],\n        ]\n\n        # Parentheticals are stripped.\n        [author] = ItemListParser.contributors_from_string(\n            \"<NAME>. 
(<NAME>)\"\n )\n assert \"<NAME>.\" == author.sort_name\n\n # Contributors may have two levels of entity reference escaping,\n # one of which will have already been handled by the initial parse.\n # So, we'll test zero and one escapings here.\n authors = list(\n ItemListParser.contributors_from_string(\n u\"<NAME>, Esmé; <NAME>, Esm&#233;\"\n )\n )\n author_names = [a.sort_name for a in authors]\n assert len(authors) == 2\n assert len(set(author_names)) == 1\n assert all(u\"<NAME>, Esmé\" == name for name in author_names)\n\n # It's possible to specify some role other than AUTHOR_ROLE.\n narrators = list(\n ItemListParser.contributors_from_string(\n \"<NAME>; <NAME>; <NAME>\", Contributor.NARRATOR_ROLE\n )\n )\n for narrator in narrators:\n assert [Contributor.NARRATOR_ROLE] == narrator.roles\n assert [\"<NAME>\", \"<NAME>\", \"<NAME>\"] == [\n narrator.sort_name for narrator in narrators\n ]\n\n def test_parse_genre_string(self):\n def f(genre_string):\n genres = ItemListParser.parse_genre_string(genre_string)\n assert all([x.type == Subject.BISAC for x in genres])\n return [x.name for x in genres]\n\n assert [\"Children's Health\", \"Health\"] == f(\"Children&amp;#39;s Health,Health,\")\n\n assert [\n \"Action & Adventure\",\n \"Science Fiction\",\n \"Fantasy\",\n \"Magic\",\n \"Renaissance\",\n ] == f(\n \"Action &amp;amp; Adventure,Science Fiction, Fantasy, Magic,Renaissance,\"\n )\n\n def test_item_list(cls):\n data = cls.sample_data(\"item_metadata_list_mini.xml\")\n data = list(ItemListParser().parse(data))\n\n # There should be 2 items in the list.\n assert 2 == len(data)\n\n cooked = data[0]\n\n assert \"The Incense Game\" == cooked.title\n assert \"A Novel of Feudal Japan\" == cooked.subtitle\n assert Edition.BOOK_MEDIUM == cooked.medium\n assert \"eng\" == cooked.language\n assert \"St. Martin's Press\" == cooked.publisher\n assert datetime_utc(year=2012, month=9, day=17) == cooked.published\n\n primary = cooked.primary_identifier\n assert \"ddf4gr9\" == primary.identifier\n assert Identifier.THREEM_ID == primary.type\n\n identifiers = sorted(cooked.identifiers, key=lambda x: x.identifier)\n assert [\"9781250015280\", \"9781250031112\", \"ddf4gr9\"] == [\n x.identifier for x in identifiers\n ]\n\n [author] = cooked.contributors\n assert \"<NAME>\" == author.sort_name\n assert [Contributor.AUTHOR_ROLE] == author.roles\n\n subjects = [x.name for x in cooked.subjects]\n assert [\"Children's Health\", \"Mystery & Detective\"] == sorted(subjects)\n\n [pages] = cooked.measurements\n assert Measurement.PAGE_COUNT == pages.quantity_measured\n assert 304 == pages.value\n\n [alternate, image, description] = sorted(cooked.links, key=lambda x: x.rel)\n assert \"alternate\" == alternate.rel\n assert alternate.href.startswith(\"http://ebook.3m.com/library\")\n\n # We have a full-size image...\n assert Hyperlink.IMAGE == image.rel\n assert Representation.JPEG_MEDIA_TYPE == image.media_type\n assert image.href.startswith(\"http://ebook.3m.com/delivery\")\n assert \"documentID=ddf4gr9\" in image.href\n assert \"&size=NORMAL\" not in image.href\n\n # ... 
and a thumbnail, which we obtained by adding an argument\n # to the main image URL.\n thumbnail = image.thumbnail\n assert Hyperlink.THUMBNAIL_IMAGE == thumbnail.rel\n assert Representation.JPEG_MEDIA_TYPE == thumbnail.media_type\n assert thumbnail.href == image.href + \"&size=NORMAL\"\n\n # We have a description.\n assert Hyperlink.DESCRIPTION == description.rel\n assert description.content.startswith(\"<b>Winner\")\n\n def test_multiple_contributor_roles(self):\n data = self.sample_data(\"item_metadata_audio.xml\")\n [data] = list(ItemListParser().parse(data))\n names_and_roles = []\n for c in data.contributors:\n [role] = c.roles\n names_and_roles.append((c.sort_name, role))\n\n # We found one author and three narrators.\n assert (\n sorted(\n [\n (\"<NAME>\", \"Author\"),\n (\"<NAME>\", \"Narrator\"),\n (\"<NAME>\", \"Narrator\"),\n (\"<NAME>\", \"Narrator\"),\n ]\n )\n == sorted(names_and_roles)\n )\n\n\nclass TestBibliographicCoverageProvider(TestBibliothecaAPI):\n\n \"\"\"Test the code that looks up bibliographic information from Bibliotheca.\"\"\"\n\n def test_script_instantiation(self):\n \"\"\"Test that RunCollectionCoverageProviderScript can instantiate\n this coverage provider.\n \"\"\"\n script = RunCollectionCoverageProviderScript(\n BibliothecaBibliographicCoverageProvider,\n self._db,\n api_class=MockBibliothecaAPI,\n )\n [provider] = script.providers\n assert isinstance(provider, BibliothecaBibliographicCoverageProvider)\n assert isinstance(provider.api, MockBibliothecaAPI)\n\n def test_process_item_creates_presentation_ready_work(self):\n # Test the normal workflow where we ask Bibliotheca for data,\n # Bibliotheca provides it, and we create a presentation-ready work.\n identifier = self._identifier(identifier_type=Identifier.BIBLIOTHECA_ID)\n identifier.identifier = \"ddf4gr9\"\n\n # This book has no LicensePools.\n assert [] == identifier.licensed_through\n\n # Run it through the BibliothecaBibliographicCoverageProvider\n provider = BibliothecaBibliographicCoverageProvider(\n self.collection, api_class=MockBibliothecaAPI\n )\n data = self.sample_data(\"item_metadata_single.xml\")\n\n # We can't use self.api because that's not the same object\n # as the one created by the coverage provider.\n provider.api.queue_response(200, content=data)\n\n [result] = provider.process_batch([identifier])\n assert identifier == result\n\n # A LicensePool was created and populated with format and availability\n # information.\n [pool] = identifier.licensed_through\n assert 1 == pool.licenses_owned\n assert 1 == pool.licenses_available\n [lpdm] = pool.delivery_mechanisms\n assert (\n \"application/epub+zip (application/vnd.adobe.adept+xml)\"\n == lpdm.delivery_mechanism.name\n )\n\n # A Work was created and made presentation ready.\n assert \"The Incense Game\" == pool.work.title\n assert True == pool.work.presentation_ready\n\n def test_internal_formats(self):\n\n m = ItemListParser.internal_formats\n\n def _check_format(input, expect_medium, expect_format, expect_drm):\n medium, formats = m(input)\n assert medium == expect_medium\n [format] = formats\n assert expect_format == format.content_type\n assert expect_drm == format.drm_scheme\n\n rep = Representation\n adobe = DeliveryMechanism.ADOBE_DRM\n findaway = DeliveryMechanism.FINDAWAY_DRM\n book = Edition.BOOK_MEDIUM\n\n # Verify that we handle the known strings from Bibliotheca\n # appropriately.\n _check_format(\"EPUB\", book, rep.EPUB_MEDIA_TYPE, adobe)\n _check_format(\"EPUB3\", book, rep.EPUB_MEDIA_TYPE, adobe)\n 
_check_format(\"PDF\", book, rep.PDF_MEDIA_TYPE, adobe)\n _check_format(\"MP3\", Edition.AUDIO_MEDIUM, None, findaway)\n\n # Now Try a string we don't recognize from Bibliotheca.\n medium, formats = m(\"Unknown\")\n\n # We assume it's a book.\n assert Edition.BOOK_MEDIUM == medium\n\n # But we don't know which format.\n assert [] == formats\n", "id": "7530243", "language": "Python", "matching_score": 6.500836372375488, "max_stars_count": 0, "path": "tests/api/test_bibliotheca.py" }, { "content": "\"\"\"Vendor-specific variants of the standard Web Publication Manifest classes.\n\"\"\"\n\nfrom core.model import DeliveryMechanism, Representation\nfrom core.util.web_publication_manifest import AudiobookManifest\n\n\nclass SpineItem(object):\n \"\"\"Metadata about a piece of playable audio from an audiobook.\"\"\"\n\n def __init__(\n self, title, duration, part, sequence, media_type=Representation.MP3_MEDIA_TYPE\n ):\n \"\"\"Constructor.\n\n :param title: The title of this spine item.\n :param duration: The duration of this spine item, in milliseconds.\n :param part: The part number of this spine item, roughly equivalent\n to 'Part X' in a book.\n :param sequence: The sequence number of this spine item within its\n part, roughly equivalent to a chapter number.\n :param media_type: The media type of this spine item.\n \"\"\"\n self.title = title\n self.duration = duration\n self.part = part\n self.sequence = sequence\n self.media_type = media_type\n\n @classmethod\n def sort_key(self, o):\n \"\"\"Used to sort a list of SpineItem objects in reading\n order.\n \"\"\"\n return (o.part, o.sequence)\n\n\nclass FindawayManifest(AudiobookManifest):\n\n # This URI prefix makes it clear when we are using a term coined\n # by Findaway in a JSON-LD document.\n FINDAWAY_EXTENSION_CONTEXT = (\n \"http://librarysimplified.org/terms/third-parties/findaway.com/\"\n )\n\n MEDIA_TYPE = DeliveryMechanism.FINDAWAY_DRM\n\n def __init__(\n self,\n license_pool,\n accountId=None,\n checkoutId=None,\n fulfillmentId=None,\n licenseId=None,\n sessionKey=None,\n spine_items=[],\n ):\n \"\"\"Create a FindawayManifest object from raw data.\n\n :param license_pool: A LicensePool for the title being fulfilled.\n This will be used to fill in basic bibliographic information.\n\n :param accountId: An opaque string that Findaway calls the\n 'account ID'. Apparently this is no longer used.\n\n :param checkoutId: An opaque string that Findaway calls the\n 'checkout transaction ID'. 
Apparently this is no longer used.\n\n :param fulfillmentId: An opaque string that Findaway calls the\n 'title identifier' or 'content ID'.\n\n :param licenseId: An opaque string that Findaway calls the\n 'license ID'\n\n :param sessionId: An opaque string that Findaway calls the\n 'session key'.\n\n :param spine_items: A list of SpineItem objects representing\n the chapters or other sections of the audiobook.\n\n The PEP8-incompatible variable names are for compatibility\n with the names of these variables in the JSON-LD documents.\n \"\"\"\n\n context_with_extension = [\n \"http://readium.org/webpub/default.jsonld\",\n {\"findaway\": self.FINDAWAY_EXTENSION_CONTEXT},\n ]\n super(FindawayManifest, self).__init__(context=context_with_extension)\n\n # Add basic bibliographic information (identifier, title,\n # cover link) to the manifest based on our existing knowledge\n # of the LicensePool and its Work.\n self.update_bibliographic_metadata(license_pool)\n\n # Add Findaway-specific DRM information as an 'encrypted' object\n # within the metadata object.\n encrypted = dict(scheme=\"http://librarysimplified.org/terms/drm/scheme/FAE\")\n self.metadata[\"encrypted\"] = encrypted\n for findaway_extension, value in [\n (\"accountId\", accountId),\n (\"checkoutId\", checkoutId),\n (\"fulfillmentId\", fulfillmentId),\n (\"licenseId\", licenseId),\n (\"sessionKey\", sessionKey),\n ]:\n if not value:\n continue\n output_key = \"findaway:\" + findaway_extension\n encrypted[output_key] = value\n\n # Add the SpineItems as reading order items. None of them will\n # have working 'href' fields -- it's just to give the client a\n # picture of the structure of the timeline.\n part_key = \"findaway:part\"\n sequence_key = \"findaway:sequence\"\n total_duration = 0\n spine_items.sort(key=SpineItem.sort_key)\n for item in spine_items:\n kwargs = {part_key: item.part, sequence_key: item.sequence}\n self.add_reading_order(\n href=None,\n title=item.title,\n duration=item.duration,\n type=item.media_type,\n **kwargs\n )\n total_duration += item.duration\n\n if spine_items:\n self.metadata[\"duration\"] = total_duration\n", "id": "1810088", "language": "Python", "matching_score": 1.6002141237258911, "max_stars_count": 0, "path": "api/web_publication_manifest.py" }, { "content": "import logging\n\nfrom contextlib2 import contextmanager\nfrom flask_babel import lazy_gettext as _\nfrom webpub_manifest_parser.odl import ODLFeedParserFactory\nfrom webpub_manifest_parser.opds2.registry import OPDS2LinkRelationsRegistry\n\nfrom api.odl import ODLAPI, ODLImporter\nfrom core.metadata_layer import FormatData\nfrom core.model import Edition, RightsStatus\nfrom core.model.configuration import (\n ConfigurationAttributeType,\n ConfigurationFactory,\n ConfigurationMetadata,\n ConfigurationStorage,\n ExternalIntegration,\n HasExternalIntegration,\n)\nfrom core.opds2_import import OPDS2Importer, OPDS2ImportMonitor, RWPMManifestParser\nfrom core.opds_import import ConnectionConfiguration\nfrom core.util import first_or_default\nfrom core.util.datetime_helpers import to_utc\n\n\nclass ODL2APIConfiguration(ConnectionConfiguration):\n skipped_license_formats = ConfigurationMetadata(\n key=\"odl2_skipped_license_formats\",\n label=_(\"Skipped license formats\"),\n description=_(\n \"List of license formats that will NOT be imported into Circulation Manager.\"\n ),\n type=ConfigurationAttributeType.LIST,\n required=False,\n default=[\"text/html\"],\n )\n\n\nclass ODL2API(ODLAPI):\n NAME = ExternalIntegration.ODL2\n SETTINGS = 
ODLAPI.SETTINGS + ODL2APIConfiguration.to_settings()\n\n\nclass ODL2Importer(OPDS2Importer, HasExternalIntegration):\n \"\"\"Import information and formats from an ODL feed.\n\n The only change from OPDS2Importer is that this importer extracts\n FormatData and LicenseData from ODL 2.x's \"licenses\" arrays.\n \"\"\"\n\n NAME = ODL2API.NAME\n\n def __init__(\n self,\n db,\n collection,\n parser=None,\n data_source_name=None,\n identifier_mapping=None,\n http_get=None,\n metadata_client=None,\n content_modifier=None,\n map_from_collection=None,\n mirrors=None,\n ):\n \"\"\"Initialize a new instance of ODL2Importer class.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param collection: Circulation Manager's collection.\n LicensePools created by this OPDS2Import class will be associated with the given Collection.\n If this is None, no LicensePools will be created -- only Editions.\n :type collection: Collection\n\n :param parser: Feed parser\n :type parser: RWPMManifestParser\n\n :param data_source_name: Name of the source of this OPDS feed.\n All Editions created by this import will be associated with this DataSource.\n If there is no DataSource with this name, one will be created.\n NOTE: If `collection` is provided, its .data_source will take precedence over any value provided here.\n This is only for use when you are importing OPDS metadata without any particular Collection in mind.\n :type data_source_name: str\n\n :param identifier_mapping: Dictionary used for mapping external identifiers into a set of internal ones\n :type identifier_mapping: Dict\n\n :param metadata_client: A SimplifiedOPDSLookup object that is used to fill in missing metadata\n :type metadata_client: SimplifiedOPDSLookup\n\n :param content_modifier: A function that may modify-in-place representations (such as images and EPUB documents)\n as they come in from the network.\n :type content_modifier: Callable\n\n :param map_from_collection: Identifier mapping\n :type map_from_collection: Dict\n\n :param mirrors: A dictionary of different MirrorUploader objects for different purposes\n :type mirrors: Dict[MirrorUploader]\n \"\"\"\n super(ODL2Importer, self).__init__(\n db,\n collection,\n parser if parser else RWPMManifestParser(ODLFeedParserFactory()),\n data_source_name,\n identifier_mapping,\n http_get,\n metadata_client,\n content_modifier,\n map_from_collection,\n mirrors,\n )\n\n self._logger = logging.getLogger(__name__)\n\n self._configuration_storage = ConfigurationStorage(self)\n self._configuration_factory = ConfigurationFactory()\n\n @contextmanager\n def _get_configuration(self, db):\n \"\"\"Return the configuration object.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :return: Configuration object\n :rtype: ODL2APIConfiguration\n \"\"\"\n with self._configuration_factory.create(\n self._configuration_storage, db, ODL2APIConfiguration\n ) as configuration:\n yield configuration\n\n def _extract_publication_metadata(self, feed, publication, data_source_name):\n \"\"\"Extract a Metadata object from webpub-manifest-parser's publication.\n\n :param publication: Feed object\n :type publication: opds2_ast.OPDS2Feed\n\n :param publication: Publication object\n :type publication: opds2_ast.OPDS2Publication\n\n :param data_source_name: Data source's name\n :type data_source_name: str\n\n :return: Publication's metadata\n :rtype: Metadata\n \"\"\"\n metadata = super(ODL2Importer, self)._extract_publication_metadata(\n feed, publication, data_source_name\n 
)\n formats = []\n licenses = []\n medium = None\n\n with self._get_configuration(self._db) as configuration:\n skipped_license_formats = configuration.skipped_license_formats\n\n if skipped_license_formats:\n skipped_license_formats = set(skipped_license_formats)\n\n if publication.licenses:\n for odl_license in publication.licenses:\n identifier = odl_license.metadata.identifier\n checkout_link = first_or_default(\n odl_license.links.get_by_rel(OPDS2LinkRelationsRegistry.BORROW.key)\n )\n if checkout_link:\n checkout_link = checkout_link.href\n\n license_info_document_link = first_or_default(\n odl_license.links.get_by_rel(OPDS2LinkRelationsRegistry.SELF.key)\n )\n if license_info_document_link:\n license_info_document_link = license_info_document_link.href\n\n expires = (\n to_utc(odl_license.metadata.terms.expires)\n if odl_license.metadata.terms\n else None\n )\n concurrency = (\n int(odl_license.metadata.terms.concurrency)\n if odl_license.metadata.terms\n else None\n )\n\n if not license_info_document_link:\n parsed_license = None\n else:\n parsed_license = ODLImporter.get_license_data(\n license_info_document_link,\n checkout_link,\n identifier,\n expires,\n concurrency,\n self.http_get,\n )\n\n if parsed_license is not None:\n licenses.append(parsed_license)\n\n # DPLA feed doesn't have information about a DRM protection used for audiobooks.\n # We want to try to extract that information from the License Info Document it's present there.\n license_formats = set(odl_license.metadata.formats)\n if parsed_license and parsed_license.content_types:\n license_formats |= set(parsed_license.content_types)\n\n for license_format in license_formats:\n if (\n skipped_license_formats\n and license_format in skipped_license_formats\n ):\n continue\n\n if not medium:\n medium = Edition.medium_from_media_type(license_format)\n\n if license_format in ODLImporter.LICENSE_FORMATS:\n # Special case to handle DeMarque audiobooks which\n # include the protection in the content type\n drm_schemes = [\n ODLImporter.LICENSE_FORMATS[license_format][\n ODLImporter.DRM_SCHEME\n ]\n ]\n license_format = ODLImporter.LICENSE_FORMATS[license_format][\n ODLImporter.CONTENT_TYPE\n ]\n else:\n drm_schemes = (\n odl_license.metadata.protection.formats\n if odl_license.metadata.protection\n else []\n )\n\n for drm_scheme in drm_schemes or [None]:\n formats.append(\n FormatData(\n content_type=license_format,\n drm_scheme=drm_scheme,\n rights_uri=RightsStatus.IN_COPYRIGHT,\n )\n )\n\n metadata.circulation.licenses = licenses\n metadata.circulation.licenses_owned = None\n metadata.circulation.licenses_available = None\n metadata.circulation.licenses_reserved = None\n metadata.circulation.patrons_in_hold_queue = None\n metadata.circulation.formats.extend(formats)\n metadata.medium = medium\n\n return metadata\n\n def external_integration(self, db):\n return self.collection.external_integration\n\n\nclass ODL2ImportMonitor(OPDS2ImportMonitor):\n \"\"\"Import information from an ODL feed.\"\"\"\n\n PROTOCOL = ODL2Importer.NAME\n SERVICE_NAME = \"ODL 2.x Import Monitor\"\n\n def __init__(self, _db, collection, import_class, **import_class_kwargs):\n # Always force reimport ODL collections to get up to date license information\n super().__init__(\n _db, collection, import_class, force_reimport=True, **import_class_kwargs\n )\n", "id": "10453332", "language": "Python", "matching_score": 2.0329434871673584, "max_stars_count": 0, "path": "api/odl2.py" }, { "content": "import logging\n\nfrom api.proquest.client import 
ProQuestAPIClientFactory\nfrom api.proquest.importer import ProQuestOPDS2ImportMonitor\nfrom core.scripts import OPDSImportScript\n\n\nclass ProQuestOPDS2ImportScript(OPDSImportScript):\n \"\"\"Runs a ProQuestOPDS2ImportMonitor.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ProQuestOPDS2ImportScript, self).__init__(*args, **kwargs)\n\n self._logger = logging.getLogger(__name__)\n\n @classmethod\n def arg_parser(cls):\n parser = OPDSImportScript.arg_parser()\n parser.add_argument(\n \"--process-removals\",\n help=\"Remove from the Circulation Manager's catalog items that are no longer present in the ProQuest feed\",\n dest=\"process_removals\",\n action=\"store_true\",\n )\n\n return parser\n\n def run_monitor(self, collection, force=None):\n \"\"\"Run the monitor for the specified collection.\n\n :param collection: Collection object\n :type collection: core.model.collection.Collection\n\n :param force: Boolean value indicating whether the import process should be run from scratch\n :type force: bool\n \"\"\"\n if not issubclass(self.monitor_class, ProQuestOPDS2ImportMonitor):\n raise ValueError()\n\n self._logger.info(\n \"Started running ProQuestOPDS2ImportScript for collection {0}\".format(\n collection\n )\n )\n\n parsed = self.parse_command_line(self._db)\n client_factory = ProQuestAPIClientFactory()\n monitor = self.monitor_class(\n client_factory,\n self._db,\n collection,\n parser=self.importer_kwargs[\"parser\"],\n import_class=self.importer_class,\n force_reimport=force,\n process_removals=parsed.process_removals,\n )\n\n monitor.run()\n\n self._logger.info(\n \"Finished running ProQuestOPDS2ImportScript for collection {0}\".format(\n collection\n )\n )\n", "id": "10543621", "language": "Python", "matching_score": 1.6998496055603027, "max_stars_count": 0, "path": "api/proquest/scripts.py" }, { "content": "import datetime\nimport json\nimport os\nimport shutil\nimport tempfile\nfrom unittest.mock import ANY, MagicMock, call, create_autospec, patch\n\nimport pytest\nfrom flask import Response\nfrom freezegun import freeze_time\nfrom parameterized import parameterized\nfrom requests import HTTPError\nfrom webpub_manifest_parser.opds2 import OPDS2FeedParserFactory\n\nfrom api.authenticator import BaseSAMLAuthenticationProvider\nfrom api.circulation import BaseCirculationAPI\nfrom api.circulation_exceptions import CannotFulfill, CannotLoan\nfrom api.proquest.client import (\n ProQuestAPIClient,\n ProQuestAPIClientFactory,\n ProQuestBook,\n)\nfrom api.proquest.credential import ProQuestCredentialManager\nfrom api.proquest.identifier import ProQuestIdentifierParser\nfrom api.proquest.importer import (\n ProQuestOPDS2Importer,\n ProQuestOPDS2ImporterConfiguration,\n ProQuestOPDS2ImportMonitor,\n)\nfrom api.saml.metadata.model import (\n SAMLAttribute,\n SAMLAttributeStatement,\n SAMLAttributeType,\n SAMLSubject,\n SAMLSubjectJSONEncoder,\n)\nfrom core.metadata_layer import LinkData\nfrom core.model import (\n Collection,\n CoverageRecord,\n Credential,\n DataSource,\n DeliveryMechanism,\n ExternalIntegration,\n Hyperlink,\n Identifier,\n)\nfrom core.model.configuration import (\n ConfigurationFactory,\n ConfigurationSetting,\n ConfigurationStorage,\n HasExternalIntegration,\n)\nfrom core.opds2_import import RWPMManifestParser\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import datetime_utc, utc_now\nfrom tests.api.proquest import fixtures\n\n\nclass TestProQuestOPDS2Importer(DatabaseTest):\n def setup_method(self, mock_search=True):\n 
super(TestProQuestOPDS2Importer, self).setup_method()\n\n self._proquest_data_source = DataSource.lookup(\n self._db, DataSource.PROQUEST, autocreate=True\n )\n self._proquest_collection = self._collection(\n protocol=ExternalIntegration.PROQUEST\n )\n self._proquest_collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, DataSource.PROQUEST\n )\n\n self._proquest_patron = self._patron()\n self._loan_start_date = datetime_utc(2020, 1, 1)\n self._loan_end_date = self._loan_start_date + datetime.timedelta(\n days=Collection.STANDARD_DEFAULT_LOAN_PERIOD\n )\n self._proquest_document_id = \"12345\"\n self._proquest_edition = self._edition(\n data_source_name=self._proquest_data_source.name,\n identifier_type=Identifier.PROQUEST_ID,\n identifier_id=self._proquest_document_id,\n )\n self._proquest_license_pool = self._licensepool(\n edition=self._proquest_edition,\n data_source_name=self._proquest_data_source.name,\n collection=self._proquest_collection,\n )\n self._proquest_delivery_mechanism = self._add_generic_delivery_mechanism(\n self._proquest_license_pool\n )\n\n self._integration = self._proquest_collection.external_integration\n integration_owner = create_autospec(spec=HasExternalIntegration)\n integration_owner.external_integration = MagicMock(\n return_value=self._integration\n )\n self._configuration_storage = ConfigurationStorage(integration_owner)\n self._configuration_factory = ConfigurationFactory()\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_patron_activity(self):\n # We want to test that ProQuestOPDS2Importer.patron_activity returns actual loans made by patrons.\n\n # Arrange\n proquest_token = \"1234567890\"\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(\n return_value=proquest_token\n )\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n loan, _ = self._proquest_license_pool.loan_to(self._proquest_patron)\n\n # Act\n remote_loan_infos = importer.patron_activity(self._proquest_patron, None)\n [remote_loan_info] = remote_loan_infos\n\n assert loan.license_pool.collection_id == remote_loan_info.collection_id\n assert loan.license_pool.data_source.name == remote_loan_info.data_source_name\n assert loan.license_pool.identifier.type == remote_loan_info.identifier_type\n assert loan.license_pool.identifier.identifier == remote_loan_info.identifier\n assert loan.start == remote_loan_info.start_date\n assert loan.end == remote_loan_info.end_date\n assert None == remote_loan_info.fulfillment_info\n assert None == remote_loan_info.external_identifier\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_checkout_lookups_for_existing_token(self):\n # We want to test that checkout operation always is always preceded by\n # checking for a ProQuest JWT bearer token.\n # Without a valid JWT token, checkout operation will fail.\n\n # Arrange\n proquest_token = \"1234567890\"\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(\n return_value=proquest_token\n )\n\n with patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n loan = 
importer.checkout(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == loan.collection_id\n assert self._proquest_collection == loan.collection(self._db)\n assert self._proquest_license_pool == loan.license_pool(self._db)\n assert self._proquest_data_source.name == loan.data_source_name\n assert self._proquest_license_pool.identifier.type == loan.identifier_type\n assert loan.external_identifier is None\n assert self._loan_start_date == loan.start_date\n assert self._loan_end_date == loan.end_date\n\n # Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch an existing token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_checkout_creates_new_token_if_there_is_none(self):\n # We want to test that checkout operation without an existing ProQuest JWT bearer token leads to the following:\n # 1. Circulation Manager (CM) lookups for an existing token and doesn't find any.\n # 2. CM looks for an existing SAML affiliation ID.\n # 3. CM creates a new ProQuest JWT bearer token using the SAML affiliation ID from the previous step.\n # 4. CM saves the new token.\n\n # Arrange\n affiliation_id = \"12345\"\n proquest_token = \"<PASSWORD>\"\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(return_value=proquest_token)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n credential_manager_mock.lookup_proquest_token = MagicMock(\n side_effect=[None, proquest_token]\n )\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n loan = importer.checkout(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == loan.collection_id\n assert self._proquest_collection == loan.collection(self._db)\n assert self._proquest_license_pool == loan.license_pool(self._db)\n assert self._proquest_data_source.name == loan.data_source_name\n assert self._proquest_license_pool.identifier.type == loan.identifier_type\n assert loan.external_identifier is None\n assert self._loan_start_date == loan.start_date\n assert self._loan_end_date == loan.end_date\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch a non-existent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. 
Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n # 3. Assert that ProQuest.create_token was called when CM tried to create a new ProQuest JWT bearer token\n # using the SAML affiliation ID from step 2.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n # 4. Assert that ProQuestCredentialManager.save_proquest_token\n # was called when CM tried to save the token created in step 3.\n credential_manager_mock.save_proquest_token.assert_called_once_with(\n self._db,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n proquest_token,\n )\n\n @parameterized.expand(\n [\n (\n \"list_string\",\n json.dumps(\n [\n SAMLAttributeType.mail.name,\n SAMLAttributeType.uid.name,\n ]\n ),\n ),\n ]\n )\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_checkout_creates_new_token_using_affiliation_id_from_custom_saml_attribute(\n self, _, custom_affiliation_attributes\n ):\n # We want to test that checkout operation without an existing ProQuest JWT bearer token leads to the following:\n # 1. Circulation Manager (CM) lookups for an existing token and doesn't find any.\n # 2. CM looks for an existing SAML affiliation ID in the list of SAML attributes specified in the settings.\n # 3. CM creates a new ProQuest JWT bearer token using the SAML affiliation ID from the previous step.\n # 4. CM saves the new token.\n\n # Arrange\n affiliation_id = \"12345\"\n proquest_token = \"<PASSWORD>\"\n\n expected_affiliation_attributes = [\n SAMLAttributeType.mail.name,\n SAMLAttributeType.uid.name,\n ]\n\n saml_subject = SAMLSubject(\n None,\n SAMLAttributeStatement(\n [SAMLAttribute(SAMLAttributeType.uid.name, [affiliation_id])]\n ),\n )\n saml_token = json.dumps(saml_subject, cls=SAMLSubjectJSONEncoder)\n saml_datasource = DataSource.lookup(\n self._db,\n BaseSAMLAuthenticationProvider.TOKEN_DATA_SOURCE_NAME,\n autocreate=True,\n )\n Credential.temporary_token_create(\n self._db,\n saml_datasource,\n BaseSAMLAuthenticationProvider.TOKEN_TYPE,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n saml_token,\n )\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(return_value=proquest_token)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n credential_manager_mock.lookup_proquest_token = MagicMock(\n side_effect=[None, proquest_token]\n )\n\n with self._configuration_factory.create(\n self._configuration_storage, self._db, ProQuestOPDS2ImporterConfiguration\n ) as configuration:\n configuration.affiliation_attributes = custom_affiliation_attributes\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = (\n api_client_factory_mock\n )\n credential_manager_constructor_mock.return_value = (\n credential_manager_mock\n )\n\n # Act\n importer = 
ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n loan = importer.checkout(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == loan.collection_id\n assert self._proquest_collection == loan.collection(self._db)\n assert self._proquest_license_pool == loan.license_pool(self._db)\n assert self._proquest_data_source.name == loan.data_source_name\n assert (\n self._proquest_license_pool.identifier.type == loan.identifier_type\n )\n assert loan.external_identifier is None\n assert self._loan_start_date == loan.start_date\n assert self._loan_end_date == loan.end_date\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch a non-existent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n expected_affiliation_attributes,\n )\n\n # 3. Assert that ProQuest.create_token was called when CM tried to create\n # a new ProQuest JWT bearer token using the SAML affiliation ID from step 2.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n # 4. Assert that ProQuestCredentialManager.save_proquest_token\n # was called when CM tried to save the token created in step 3.\n credential_manager_mock.save_proquest_token.assert_called_once_with(\n self._db,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n proquest_token,\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_checkout_raises_cannot_loan_error_if_it_cannot_get_affiliation_id(self):\n # We want to test that checkout operation returns api.proquest.importer.MISSING_AFFILIATION_ID\n # when it cannot get the patron's affiliation ID.\n # Arrange\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(return_value=None)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=None\n )\n\n with patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n\n with pytest.raises(CannotLoan):\n importer.checkout(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch a non-existent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. 
Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n            # was called when CM tried to fetch an affiliation ID.\n            # This operation failed, leading to CannotLoan being raised.\n            credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n                self._db,\n                self._proquest_patron,\n                ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n            )\n\n    @freeze_time(\"2020-01-01 00:00:00+00:00\")\n    def test_checkout_raises_cannot_loan_error_if_it_cannot_create_proquest_token(self):\n        # We want to test that the checkout operation returns api.proquest.importer.CANNOT_CREATE_PROQUEST_TOKEN\n        # when it cannot create a ProQuest JWT bearer token using the ProQuest API.\n\n        # Arrange\n        affiliation_id = \"1\"\n\n        api_client_mock = create_autospec(spec=ProQuestAPIClient)\n        api_client_mock.create_token = MagicMock(side_effect=HTTPError)\n\n        api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n        api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n        credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n        credential_manager_mock.lookup_proquest_token = MagicMock(return_value=None)\n        credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n            return_value=affiliation_id\n        )\n\n        with patch(\n            \"api.proquest.importer.ProQuestAPIClientFactory\"\n        ) as api_client_factory_constructor_mock, patch(\n            \"api.proquest.importer.ProQuestCredentialManager\"\n        ) as credential_manager_constructor_mock:\n            api_client_factory_constructor_mock.return_value = api_client_factory_mock\n            credential_manager_constructor_mock.return_value = credential_manager_mock\n\n            # Act\n            importer = ProQuestOPDS2Importer(\n                self._db,\n                self._proquest_collection,\n                RWPMManifestParser(OPDS2FeedParserFactory()),\n            )\n            with pytest.raises(CannotLoan):\n                importer.checkout(\n                    self._proquest_patron,\n                    \"pin\",\n                    self._proquest_license_pool,\n                    self._proquest_delivery_mechanism,\n                )\n\n            # Assert\n            # Assert that ProQuestOPDS2Importer correctly created an instance of ProQuestAPIClient.\n            api_client_factory_mock.create.assert_called_once_with(importer)\n\n            # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n            # was called when CM tried to fetch a non-existent token.\n            credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n                self._db, self._proquest_patron\n            )\n\n            # 2. Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n            # was called when CM tried to fetch an existing affiliation ID.\n            credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n                self._db,\n                self._proquest_patron,\n                ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n            )\n\n            # 3. 
Assert that ProQuestAPIClient.create_token was called when CM tried to create a new JWT bearer token.\n # This operation failed resulting in raising CannotFulfill error.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n @parameterized.expand([(\"default_value\", None), (\"custom_value_set_by_admin\", 10)])\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_checkout_uses_loan_length_configuration_setting(self, _, loan_length=None):\n # We want to test that checkout operation always uses\n # loan length configuration setting BaseCirculationAPI.DEFAULT_LOAN_DURATION_SETTING.\n # We try different scenarios:\n # - when the configuration setting is not initialized - in this case the ProQuest configuration\n # must use the default value.\n # - when the configuration setting is set to a custom value by the admin - in this case the ProQuest integration\n # must use the custom value.\n\n # Arrange\n affiliation_id = \"12345\"\n proquest_token = \"<PASSWORD>\"\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(return_value=proquest_token)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n credential_manager_mock.lookup_proquest_token = MagicMock(\n side_effect=[None, proquest_token]\n )\n\n if loan_length is None:\n loan_length = Collection.STANDARD_DEFAULT_LOAN_PERIOD\n\n ConfigurationSetting.for_library_and_externalintegration(\n self._db,\n BaseCirculationAPI.DEFAULT_LOAN_DURATION_SETTING[\"key\"],\n self._proquest_patron.library,\n self._integration,\n ).value = loan_length\n loan_end_date = self._loan_start_date + datetime.timedelta(days=loan_length)\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n loan = importer.checkout(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == loan.collection_id\n assert self._proquest_collection == loan.collection(self._db)\n assert self._proquest_license_pool == loan.license_pool(self._db)\n assert self._proquest_data_source.name == loan.data_source_name\n assert self._proquest_license_pool.identifier.type == loan.identifier_type\n assert loan.external_identifier is None\n assert self._loan_start_date == loan.start_date\n assert loan_end_date == loan.end_date\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch a non-existent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. 
Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n # 3. Assert that ProQuest.create_token was called when CM tried to create a new ProQuest JWT bearer token\n # using the SAML affiliation ID from step 2.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n # 4. Assert that ProQuestCredentialManager.save_proquest_token\n # was called when CM tried to save the token created in step 3.\n credential_manager_mock.save_proquest_token.assert_called_once_with(\n self._db,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n proquest_token,\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_fulfil_lookups_for_existing_token(self):\n # We want to test that fulfil operation always is always preceded by\n # checking for a ProQuest JWT bearer token.\n # Without a valid JWT token, fulfil operation will fail.\n # Additionally, we want to test that Circulation Manager handles downloading of DRM-free books.\n\n # Arrange\n proquest_token = \"<PASSWORD>\"\n proquest_token_expires_in = utc_now() + datetime.timedelta(hours=1)\n proquest_credential = Credential(\n credential=proquest_token, expires=proquest_token_expires_in\n )\n drm_free_book = ProQuestBook(link=\"https://proquest.com/books/books.epub\")\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.get_book = MagicMock(return_value=drm_free_book)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(\n return_value=proquest_credential\n )\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n fulfilment_info = importer.fulfill(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == fulfilment_info.collection_id\n assert self._proquest_collection == fulfilment_info.collection(self._db)\n assert self._proquest_license_pool == fulfilment_info.license_pool(self._db)\n assert self._proquest_data_source.name == fulfilment_info.data_source_name\n assert (\n self._proquest_license_pool.identifier.type\n == fulfilment_info.identifier_type\n )\n\n # Make sure that the fulfilment info doesn't contain a link but instead contains a JSON document\n # which is used to pass the book's link and the ProQuest token to the client app.\n assert fulfilment_info.content_link is None\n assert DeliveryMechanism.BEARER_TOKEN == fulfilment_info.content_type\n assert fulfilment_info.content is not None\n\n token_document = json.loads(fulfilment_info.content)\n assert \"Bearer\" == token_document[\"token_type\"]\n assert proquest_token == 
token_document[\"access_token\"]\n assert (\n proquest_token_expires_in - utc_now()\n ).total_seconds() == token_document[\"expires_in\"]\n assert drm_free_book.link == token_document[\"location\"]\n assert DeliveryMechanism.BEARER_TOKEN == fulfilment_info.content_type\n assert proquest_token_expires_in == fulfilment_info.content_expires\n\n # Assert than ProQuestOPDS2Importer correctly created an instance of ProQuestAPIClient.\n api_client_factory_mock.create.assert_called_once_with(importer)\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch an existing token.\n credential_manager_mock.lookup_proquest_token.assert_called_with(\n self._db, self._proquest_patron\n )\n\n # 2. Assert that ProQuestAPIClient.get_book\n # was called when CM tried to get the book.\n api_client_mock.get_book.assert_called_once_with(\n self._db,\n proquest_token,\n self._proquest_license_pool.identifier.identifier,\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_fulfil_creates_new_token_if_there_is_none(self):\n # We want to test that fulfil operation without an existing ProQuest JWT bearer token leads to the following:\n # 1. Circulation Manager (CM) lookups for an existing token and doesn't find any.\n # 2. CM looks for an existing SAML affiliation ID.\n # 3. CM creates a new ProQuest JWT bearer token using the SAML affiliation ID from the previous step.\n # 4. CM saves the new token.\n\n # Arrange\n affiliation_id = \"12345\"\n proquest_token = \"<PASSWORD>\"\n proquest_credential = Credential(credential=proquest_token)\n book = ProQuestBook(content=b\"Book\")\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(return_value=proquest_token)\n api_client_mock.get_book = MagicMock(return_value=book)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n credential_manager_mock.lookup_proquest_token = MagicMock(\n side_effect=[None, proquest_credential]\n )\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n fulfilment_info = importer.fulfill(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == fulfilment_info.collection_id\n assert self._proquest_collection == fulfilment_info.collection(self._db)\n assert self._proquest_license_pool == fulfilment_info.license_pool(self._db)\n assert self._proquest_data_source.name == fulfilment_info.data_source_name\n assert (\n self._proquest_license_pool.identifier.type\n == fulfilment_info.identifier_type\n )\n assert fulfilment_info.content_link is None\n assert (\n self._proquest_delivery_mechanism.delivery_mechanism.content_type\n == fulfilment_info.content_type\n )\n assert book.content == fulfilment_info.content\n 
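# The mocked API returned the book bytes inline (no acquisition link), so the fulfilment carries the raw content and no expiration is set.\n            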
assert fulfilment_info.content_expires is None\n\n # Assert than ProQuestOPDS2Importer correctly created an instance of ProQuestAPIClient.\n api_client_factory_mock.create.assert_called_once_with(importer)\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch an nonexistent token.\n credential_manager_mock.lookup_proquest_token.assert_called_with(\n self._db, self._proquest_patron\n )\n\n # 2. Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n # 3. Assert that ProQuest.create_token was called when CM tried to create a new ProQuest JWT bearer token\n # using the SAML affiliation ID from step 2.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n # 4. Assert that ProQuestCredentialManager.save_proquest_token\n # was called when CM tried to save the token created in step 3.\n credential_manager_mock.save_proquest_token.assert_called_once_with(\n self._db,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n proquest_token,\n )\n\n # 5. Assert that ProQuestAPIClient.get_book\n # was called when CM tried to get the book.\n api_client_mock.get_book.assert_called_once_with(\n self._db,\n proquest_token,\n self._proquest_license_pool.identifier.identifier,\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_fulfil_raises_cannot_fulfil_error_if_it_cannot_get_affiliation_id(self):\n # We want to test that fulfil operation returns api.proquest.importer.MISSING_AFFILIATION_ID\n # when it cannot get the patron's affiliation ID.\n\n # Arrange\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(return_value=None)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=None\n )\n\n with patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n\n with pytest.raises(CannotFulfill):\n importer.fulfill(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch an nonexistent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. 
Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_fulfil_raises_cannot_fulfil_error_if_it_cannot_create_proquest_token(self):\n # We want to test that fulfil operation returns api.proquest.importer.CANNOT_CREATE_PROQUEST_TOKEN\n # when it cannot create a ProQuest JWT bearer token using ProQuest API.\n\n # Arrange\n affiliation_id = \"1\"\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(side_effect=HTTPError)\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_proquest_token = MagicMock(return_value=None)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n\n with pytest.raises(CannotFulfill):\n importer.fulfill(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch an nonexistent token.\n credential_manager_mock.lookup_proquest_token.assert_called_once_with(\n self._db, self._proquest_patron\n )\n\n # 2. Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n # 3. Assert that ProQuestAPIClient.create_token was called when CM tried to create a new JWT bearer token.\n # This operation failed resulting in raising CannotFulfill error.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n @freeze_time(\"2020-01-01 00:00:00+00:00\")\n def test_fulfil_refreshes_expired_token(self):\n # By default ProQuest JWT bearer tokens should be valid for 1 hour but\n # since they are controlled by ProQuest we cannot be sure that they will not change this setting.\n # We want to test that fulfil operation automatically refreshes an expired token:\n # 1. CM fetches a token from the storage.\n # 2. CM tries to download the book using the token but ProQuest API returns 401 status code.\n # 3. CM generates a new token.\n # 4. 
CM tries to generate a book using the new token.\n # Additionally, we want to test that Circulation Manager handles downloading of ACSM files.\n\n # Arrange\n affiliation_id = \"12345\"\n expired_proquest_token = \"<PASSWORD>\"\n expired_proquest_token_expired_in = utc_now() - datetime.timedelta(minutes=1)\n expired_proquest_token_credential = Credential(\n credential=expired_proquest_token, expires=expired_proquest_token_expired_in\n )\n new_proquest_token = \"<PASSWORD>\"\n new_proquest_token_expires_in = utc_now() + datetime.timedelta(hours=1)\n new_proquest_token_credential = Credential(\n credential=new_proquest_token, expires=new_proquest_token_expires_in\n )\n adobe_drm_protected_book = ProQuestBook(\n content=b\"ACSM file\", content_type=DeliveryMechanism.ADOBE_DRM\n )\n\n api_client_mock = create_autospec(spec=ProQuestAPIClient)\n api_client_mock.create_token = MagicMock(return_value=new_proquest_token)\n api_client_mock.get_book = MagicMock(\n side_effect=[\n HTTPError(response=Response(status=401)),\n adobe_drm_protected_book,\n ]\n )\n\n api_client_factory_mock = create_autospec(spec=ProQuestAPIClientFactory)\n api_client_factory_mock.create = MagicMock(return_value=api_client_mock)\n\n credential_manager_mock = create_autospec(spec=ProQuestCredentialManager)\n credential_manager_mock.lookup_patron_affiliation_id = MagicMock(\n return_value=affiliation_id\n )\n credential_manager_mock.lookup_proquest_token = MagicMock(\n return_value=expired_proquest_token_credential\n )\n credential_manager_mock.save_proquest_token = MagicMock(\n return_value=new_proquest_token_credential\n )\n\n with patch(\n \"api.proquest.importer.ProQuestAPIClientFactory\"\n ) as api_client_factory_constructor_mock, patch(\n \"api.proquest.importer.ProQuestCredentialManager\"\n ) as credential_manager_constructor_mock:\n api_client_factory_constructor_mock.return_value = api_client_factory_mock\n credential_manager_constructor_mock.return_value = credential_manager_mock\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n fulfilment_info = importer.fulfill(\n self._proquest_patron,\n \"pin\",\n self._proquest_license_pool,\n self._proquest_delivery_mechanism,\n )\n\n # Assert\n assert self._proquest_collection.id == fulfilment_info.collection_id\n assert self._proquest_collection == fulfilment_info.collection(self._db)\n assert self._proquest_license_pool == fulfilment_info.license_pool(self._db)\n assert self._proquest_data_source.name == fulfilment_info.data_source_name\n assert (\n self._proquest_license_pool.identifier.type\n == fulfilment_info.identifier_type\n )\n\n # Make sure that fulfilment info contains content of the ACSM file not a link.\n assert fulfilment_info.content_link is None\n assert adobe_drm_protected_book.content_type == fulfilment_info.content_type\n assert adobe_drm_protected_book.content == fulfilment_info.content\n assert fulfilment_info.content_expires is None\n\n # Assert than ProQuestOPDS2Importer correctly created an instance of ProQuestAPIClient.\n api_client_factory_mock.create.assert_called_once_with(importer)\n\n # 1. Assert that ProQuestCredentialManager.lookup_proquest_token\n # was called when CM tried to fetch a existing token.\n credential_manager_mock.lookup_proquest_token.assert_called_with(\n self._db, self._proquest_patron\n )\n\n # 2. 
Assert that ProQuestAPIClient.get_book\n # was called when CM tried to get the book.\n api_client_mock.get_book.assert_any_call(\n self._db,\n expired_proquest_token,\n self._proquest_license_pool.identifier.identifier,\n )\n\n # 3. Assert that ProQuestCredentialManager.lookup_patron_affiliation_id\n # was called when CM tried to fetch an existing SAML affiliation ID.\n credential_manager_mock.lookup_patron_affiliation_id.assert_called_once_with(\n self._db,\n self._proquest_patron,\n ProQuestOPDS2ImporterConfiguration.DEFAULT_AFFILIATION_ATTRIBUTES,\n )\n\n # 4. Assert that ProQuest.create_token was called when CM tried to create a new ProQuest JWT bearer token\n # using the SAML affiliation ID from step 2.\n api_client_mock.create_token.assert_called_once_with(\n self._db, affiliation_id\n )\n\n # 5. Assert that ProQuestCredentialManager.save_proquest_token\n # was called when CM tried to save the token created in step 3.\n credential_manager_mock.save_proquest_token.assert_called_once_with(\n self._db,\n self._proquest_patron,\n datetime.timedelta(hours=1),\n new_proquest_token,\n )\n\n # 6. Assert that ProQuestAPIClient.get_book\n # was called when CM tried to get the book.\n api_client_mock.get_book.assert_any_call(\n self._db,\n new_proquest_token,\n self._proquest_license_pool.identifier.identifier,\n )\n assert 2 == api_client_mock.get_book.call_count\n\n def test_correctly_imports_covers(self):\n # We want to make sure that ProQuestOPDS2Importer\n # correctly processes cover links in the ProQuest feed\n # and generates LinkData for both, the full cover and thumbnail.\n\n # Act\n importer = ProQuestOPDS2Importer(\n self._db,\n self._proquest_collection,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n\n result = importer.extract_feed_data(fixtures.PROQUEST_RAW_FEED)\n\n # Assert\n assert 2 == len(result)\n publication_metadata_dictionary = result[0]\n\n assert True == (\n fixtures.PROQUEST_RAW_PUBLICATION_1_ID in publication_metadata_dictionary\n )\n publication_metadata = publication_metadata_dictionary[\n fixtures.PROQUEST_RAW_PUBLICATION_1_ID\n ]\n\n assert 1 == len(publication_metadata.links)\n\n [full_cover_link] = publication_metadata.links\n assert isinstance(full_cover_link, LinkData)\n assert fixtures.PROQUEST_RAW_PUBLICATION_1_COVER_HREF == full_cover_link.href\n assert Hyperlink.IMAGE == full_cover_link.rel\n\n thumbnail_cover_link = full_cover_link.thumbnail\n assert isinstance(thumbnail_cover_link, LinkData)\n assert (\n fixtures.PROQUEST_RAW_PUBLICATION_1_COVER_HREF == thumbnail_cover_link.href\n )\n assert Hyperlink.THUMBNAIL_IMAGE == thumbnail_cover_link.rel\n\n\nclass TestProQuestOPDS2ImportMonitor(DatabaseTest):\n def setup_method(self, mock_search=True):\n super(TestProQuestOPDS2ImportMonitor, self).setup_method()\n\n self._proquest_data_source = DataSource.lookup(\n self._db, DataSource.PROQUEST, autocreate=True\n )\n self._proquest_collection = self._collection(\n protocol=ExternalIntegration.PROQUEST\n )\n self._proquest_collection.external_integration.set_setting(\n Collection.DATA_SOURCE_NAME_SETTING, DataSource.PROQUEST\n )\n\n @parameterized.expand(\n [\n (\"no_pages\", [], []),\n (\n \"one_page\",\n [fixtures.PROQUEST_FEED_PAGE_1],\n [call(fixtures.PROQUEST_FEED_PAGE_1)],\n ),\n (\n \"two_pages\",\n [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2],\n [\n call(fixtures.PROQUEST_FEED_PAGE_1),\n call(fixtures.PROQUEST_FEED_PAGE_2),\n ],\n ),\n ]\n )\n def test_monitor_correctly_processes_pages(self, _, feeds, expected_calls):\n \"\"\"This 
test makes sure that ProQuestOPDS2ImportMonitor correctly processes\n any response returned by ProQuestAPIClient.download_all_feed_pages without having any prior CoverageRecords.\n\n :param feeds: List of ProQuest OPDS 2.0 paged feeds\n :type feeds: List[webpub_manifest_parser.opds2.ast.OPDS2Feed]\n\n :param expected_calls: List of expected ProQuestOPDS2ImportMonitor.import_one_feed calls\n :type expected_calls: List[call]\n \"\"\"\n # Arrange\n client = create_autospec(spec=ProQuestAPIClient)\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory,\n self._db,\n self._proquest_collection,\n ProQuestOPDS2Importer,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n monitor._get_feeds = MagicMock(\n return_value=list(zip([None] * len(feeds), feeds))\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n # Act\n monitor.run_once(False)\n\n # Assert\n # Make sure that ProQuestOPDS2ImportMonitor.import_one_feed was called for each paged feed (if any)\n monitor.import_one_feed.assert_has_calls(expected_calls)\n\n @parameterized.expand(\n [\n (\"no_pages\", []),\n (\n \"one_page\",\n [fixtures.PROQUEST_FEED_PAGE_1],\n ),\n (\n \"two_pages\",\n [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2],\n ),\n ]\n )\n def test_monitor_correctly_uses_temporary_files(self, _, feed_pages):\n \"\"\"This test makes sure that ProQuestOPDS2ImportMonitor correctly uses temporary files\n to process the ProQuest feed:\n - it creates a temporary directory\n - it downloads all the pages one by one saving them in the temporary directory\n - when all the pages are dumped to the local drive and stored in the temporary directory,\n it starts importing those pages\n - after the import process finished, it deletes the temporary directory\n\n :param feed_pages: List of ProQuest OPDS 2.0 paged feeds\n :type feed_pages: List[webpub_manifest_parser.opds2.ast.OPDS2Feed]\n \"\"\"\n # Arrange\n client = create_autospec(spec=ProQuestAPIClient)\n client.download_all_feed_pages = MagicMock(\n return_value=list(map(fixtures.serialize, feed_pages))\n )\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n parser = RWPMManifestParser(OPDS2FeedParserFactory())\n parser.parse_manifest = MagicMock(side_effect=parser.parse_manifest)\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory,\n self._db,\n self._proquest_collection,\n ProQuestOPDS2Importer,\n parser,\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n results = {\"temp_directory\": None, \"temp_files\": []}\n original_mkdtemp = tempfile.mkdtemp\n original_named_temporary_file_constructor = tempfile.NamedTemporaryFile\n original_rmtree = shutil.rmtree\n\n def create_temp_directory():\n results[\"temp_directory\"] = original_mkdtemp()\n\n return results[\"temp_directory\"]\n\n def create_temp_file(**kwargs):\n temp_file = original_named_temporary_file_constructor(**kwargs)\n results[\"temp_files\"].append(temp_file.name)\n\n return temp_file\n\n # Act\n with patch(\"tempfile.mkdtemp\") as mkdtemp_mock, patch(\n \"tempfile.NamedTemporaryFile\"\n ) as named_temporary_file_constructor_mock, patch(\n \"shutil.rmtree\"\n ) as rmtree_mock:\n mkdtemp_mock.side_effect = create_temp_directory\n named_temporary_file_constructor_mock.side_effect = create_temp_file\n rmtree_mock.side_effect = original_rmtree\n\n monitor.run_once(False)\n\n # 
Assert\n # Ensure that the temp directory was successfully created.\n tempfile.mkdtemp.assert_called_once()\n\n # Ensure that the number of created temp files is equal to the number of feed pages.\n tempfile.NamedTemporaryFile.assert_has_calls(\n [call(mode=\"r+\", dir=results[\"temp_directory\"], delete=False)]\n * len(feed_pages)\n )\n\n # Ensure that parse_manifest method was called for each feed page.\n parser.parse_manifest.assert_has_calls([call(ANY)] * len(feed_pages))\n\n # Ensure that the temp directory was successfully removed.\n shutil.rmtree.assert_called_once_with(results[\"temp_directory\"])\n assert not os.path.exists(results[\"temp_directory\"])\n\n def test_monitor_correctly_deletes_temporary_directory_in_the_case_of_any_error(\n self,\n ):\n \"\"\"This test makes sure that ProQuestOPDS2ImportMonitor correctly deletes the temporary directory\n even when an error happens.\n \"\"\"\n # Arrange\n feed_pages = [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2]\n\n client = create_autospec(spec=ProQuestAPIClient)\n client.download_all_feed_pages = MagicMock(\n return_value=list(map(fixtures.serialize, feed_pages))\n )\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n parser = RWPMManifestParser(OPDS2FeedParserFactory())\n\n # An exception will be raised while trying to parse the feed page.\n parser.parse_manifest = MagicMock(side_effect=Exception(\"\"))\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory,\n self._db,\n self._proquest_collection,\n ProQuestOPDS2Importer,\n parser,\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n results = {\"temp_directory\": None, \"temp_files\": []}\n original_mkdtemp = tempfile.mkdtemp\n original_temp_file_constructor = tempfile.NamedTemporaryFile\n original_rmtree = shutil.rmtree\n\n def create_temp_directory():\n results[\"temp_directory\"] = original_mkdtemp()\n\n return results[\"temp_directory\"]\n\n def create_temp_file(**kwargs):\n temp_file = original_temp_file_constructor(**kwargs)\n results[\"temp_files\"].append(temp_file.name)\n\n return temp_file\n\n # Act\n with patch(\"tempfile.mkdtemp\") as mkdtemp_mock, patch(\n \"tempfile.NamedTemporaryFile\"\n ) as named_temporary_file_constructor_mock, patch(\n \"shutil.rmtree\"\n ) as rmtree_mock:\n mkdtemp_mock.side_effect = create_temp_directory\n named_temporary_file_constructor_mock.side_effect = create_temp_file\n rmtree_mock.side_effect = original_rmtree\n\n monitor.run_once(False)\n\n # Assert\n # Ensure that the temp directory was successfully created.\n tempfile.mkdtemp.assert_called_once()\n\n # Ensure that only one temp file was created, after this an exception was raised and the process stopped.\n tempfile.NamedTemporaryFile.assert_has_calls(\n [call(mode=\"r+\", dir=results[\"temp_directory\"], delete=False)]\n )\n\n # Ensure that parse_manifest method was called only once.\n parser.parse_manifest.assert_has_calls([call(ANY)])\n\n # Ensure that the temp directory was successfully removed.\n shutil.rmtree.assert_called_once_with(results[\"temp_directory\"])\n assert not os.path.exists(results[\"temp_directory\"])\n\n def test_monitor_correctly_does_not_process_already_processed_pages(self):\n \"\"\"This test makes sure that the monitor has a short circuit breaker\n which allows to not process already processed feeds.\n\n The feed contains two pages:\n - page # 1: publication # 1 and publication # 2\n - page # 2: publication # 3 and publication # 4\n\n 
Publication # 2, 3, and 4 were already processed and have coverage records.\n Publication # 1 is a new one and doesn't have a coverage record.\n It means the monitor must process the whole page # 1.\n \"\"\"\n # Arrange\n # There are two pages: page # 1 and page # 2\n feeds = [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2]\n # But only the page # 1 will be processed\n expected_calls = [call(fixtures.PROQUEST_FEED_PAGE_1)]\n\n identifier_parser = ProQuestIdentifierParser()\n\n # Create Identifiers for publications # 2, 3, and 4\n publication_2_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_2.metadata.identifier,\n identifier_parser,\n )\n publication_3_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_3.metadata.identifier,\n identifier_parser,\n )\n publication_4_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_4.metadata.identifier,\n identifier_parser,\n )\n\n # Make sure that all the publications # 2, 3, and 4 were already processed\n max_modified_date = max(\n fixtures.PROQUEST_PUBLICATION_2.metadata.modified,\n fixtures.PROQUEST_PUBLICATION_3.metadata.modified,\n fixtures.PROQUEST_PUBLICATION_4.metadata.modified,\n )\n coverage_date = max_modified_date + datetime.timedelta(days=1)\n\n # Create coverage records for publications # 2, 3, and 4\n CoverageRecord.add_for(\n publication_2_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n CoverageRecord.add_for(\n publication_3_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n CoverageRecord.add_for(\n publication_4_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n\n client = create_autospec(spec=ProQuestAPIClient)\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory,\n self._db,\n self._proquest_collection,\n ProQuestOPDS2Importer,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n )\n monitor._get_feeds = MagicMock(\n return_value=list(zip([None] * len(feeds), feeds))\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n # Act\n monitor.run_once(False)\n\n # Assert\n # Make sure that ProQuestOPDS2ImportMonitor.import_one_feed was called only for the page # 1\n monitor.import_one_feed.assert_has_calls(expected_calls)\n\n def test_monitor_does_not_clean_removed_items_if_downloading_has_not_finished(self):\n # Arrange\n client = create_autospec(spec=ProQuestAPIClient)\n\n # We want to emulate an exception happening during the download process.\n client.download_all_feed_pages = MagicMock(side_effect=HTTPError)\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory,\n self._db,\n self._proquest_collection,\n ProQuestOPDS2Importer,\n RWPMManifestParser(OPDS2FeedParserFactory()),\n process_removals=True,\n )\n monitor._clean_removed_items = MagicMock()\n\n # Act\n monitor.run_once(False)\n\n # Assert\n monitor._clean_removed_items.assert_not_called()\n", "id": "4081999", "language": "Python", "matching_score": 6.794785976409912, "max_stars_count": 0, "path": "tests/api/proquest/test_importer.py" }, { "content": "import 
datetime\nimport json\n\nfrom parameterized import parameterized\n\nfrom api.authenticator import BaseSAMLAuthenticationProvider\nfrom api.proquest.credential import ProQuestCredentialManager, ProQuestCredentialType\nfrom api.saml.metadata.model import (\n SAMLAttribute,\n SAMLAttributeStatement,\n SAMLAttributeType,\n SAMLSubject,\n SAMLSubjectJSONEncoder,\n)\nfrom core.model import Credential, DataSource\nfrom core.testing import DatabaseTest\nfrom tests.api.saml import fixtures\n\n\nclass TestProQuestCredentialManager(DatabaseTest):\n def setup_method(self):\n super(TestProQuestCredentialManager, self).setup_method()\n\n self._data_source = DataSource.lookup(\n self._db, DataSource.PROQUEST, autocreate=True\n )\n\n def test_lookup_proquest_token_returns_none_if_token_missing(self):\n # Arrange\n credential_manager = ProQuestCredentialManager()\n patron = self._patron()\n\n # Act\n token = credential_manager.lookup_proquest_token(self._db, patron)\n\n # Assert\n assert None == token\n\n def test_lookup_proquest_token_returns_token(self):\n # Arrange\n credential_manager = ProQuestCredentialManager()\n patron = self._patron()\n expected_token = \"<PASSWORD>\"\n\n Credential.temporary_token_create(\n self._db,\n self._data_source,\n ProQuestCredentialType.PROQUEST_JWT_TOKEN.value,\n patron,\n datetime.timedelta(hours=1),\n expected_token,\n )\n\n # Act\n token = credential_manager.lookup_proquest_token(self._db, patron)\n\n # Assert\n assert True == isinstance(token, Credential)\n assert expected_token == token.credential\n\n def test_save_proquest_token_saves_token(self):\n # Arrange\n credential_manager = ProQuestCredentialManager()\n patron = self._patron()\n expected_token = \"<PASSWORD>\"\n\n # Act\n credential_manager.save_proquest_token(\n self._db, patron, datetime.timedelta(hours=1), expected_token\n )\n token = Credential.lookup_by_patron(\n self._db,\n self._data_source.name,\n ProQuestCredentialType.PROQUEST_JWT_TOKEN.value,\n patron,\n )\n\n # Assert\n assert expected_token == token.credential\n\n @parameterized.expand(\n [\n (\"when_there_is_no_token\", None, None),\n (\n \"when_subject_does_not_contain_affiliation_id_attributes\",\n SAMLSubject(\n None,\n SAMLAttributeStatement(\n [SAMLAttribute(SAMLAttributeType.mail.name, [fixtures.MAIL])]\n ),\n ),\n None,\n ),\n (\n \"when_subject_contains_affiliation_id_attribute\",\n SAMLSubject(\n None,\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n SAMLAttributeType.eduPersonPrincipalName.name,\n [fixtures.EDU_PERSON_PRINCIPAL_NAME],\n ),\n ]\n ),\n ),\n fixtures.EDU_PERSON_PRINCIPAL_NAME,\n ),\n (\n \"when_subject_contains_multiple_affiliation_id_attributes\",\n SAMLSubject(\n None,\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n SAMLAttributeType.eduPersonPrincipalName.name,\n [\"12345\", fixtures.EDU_PERSON_PRINCIPAL_NAME],\n ),\n ]\n ),\n ),\n \"12345\",\n ),\n (\n \"with_custom_affiliation_attributes\",\n SAMLSubject(\n None,\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n SAMLAttributeType.mail.name,\n [\"<EMAIL>\"],\n ),\n ]\n ),\n ),\n \"<EMAIL>\",\n (SAMLAttributeType.mail.name,),\n ),\n ]\n )\n def test_lookup_patron_affiliation_id(\n self, _, subject, expected_affiliation_id, affiliation_attributes=None\n ):\n # Arrange\n credential_manager = ProQuestCredentialManager()\n patron = self._patron()\n\n if subject:\n expected_token = json.dumps(subject, cls=SAMLSubjectJSONEncoder)\n\n data_source = DataSource.lookup(\n self._db,\n BaseSAMLAuthenticationProvider.TOKEN_DATA_SOURCE_NAME,\n autocreate=True,\n )\n 
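# Persist the serialized SAML subject as a temporary token credential so the credential manager can look it up for this patron.\n            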
Credential.temporary_token_create(\n self._db,\n data_source,\n BaseSAMLAuthenticationProvider.TOKEN_TYPE,\n patron,\n datetime.timedelta(hours=1),\n expected_token,\n )\n\n # Act\n if affiliation_attributes:\n token = credential_manager.lookup_patron_affiliation_id(\n self._db, patron, affiliation_attributes\n )\n else:\n token = credential_manager.lookup_patron_affiliation_id(self._db, patron)\n\n # Assert\n assert expected_affiliation_id == token\n", "id": "4403806", "language": "Python", "matching_score": 3.9397947788238525, "max_stars_count": 0, "path": "tests/api/proquest/test_credential.py" }, { "content": "import datetime\nimport json\nimport logging\nfrom enum import Enum\n\nfrom api.saml.metadata.model import SAMLAttributeType, SAMLSubjectJSONDecoder\nfrom core.model import Credential, DataSource, DataSourceConstants, Patron\nfrom core.util import first_or_default, is_session\n\n\nclass ProQuestCredentialType(Enum):\n \"\"\"Contains an enumeration of different ProQuest credential types\"\"\"\n\n PROQUEST_JWT_TOKEN = \"ProQuest JWT Token\"\n\n\nclass ProQuestCredentialManager(object):\n \"\"\"Manages ProQuest credentials.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance of ProQuestCredentialManager class.\"\"\"\n self._logger = logging.getLogger(__name__)\n\n def _extract_saml_subject(self, credential):\n \"\"\"Extract a SAML subject from SAML token.\n\n :param credential: Credential object containing a SAML token\n :type credential: core.model.credential.Credential\n\n :return: SAML subject\n :rtype: api.saml.metadata.Subject\n \"\"\"\n self._logger.debug(\"Started deserializing SAML token {0}\".format(credential))\n\n subject = json.loads(credential.credential, cls=SAMLSubjectJSONDecoder)\n\n self._logger.debug(\n \"Finished deserializing SAML token {0}: {1}\".format(credential, subject)\n )\n\n return subject\n\n def _lookup_saml_token(self, db, patron):\n \"\"\"Look up for a SAML token.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param patron: Patron object\n :type patron: core.model.patron.Patron\n\n :return: SAML subject (if any)\n :rtype: Optional[api.saml.metadata.Subject]\n \"\"\"\n self._logger.debug(\"Started looking up for a SAML token\")\n\n from api.authenticator import BaseSAMLAuthenticationProvider\n\n credential = Credential.lookup_by_patron(\n db,\n BaseSAMLAuthenticationProvider.TOKEN_DATA_SOURCE_NAME,\n BaseSAMLAuthenticationProvider.TOKEN_TYPE,\n patron,\n allow_persistent_token=False,\n auto_create_datasource=True,\n )\n\n self._logger.debug(\n \"Finished looking up for a SAML token: {0}\".format(credential)\n )\n\n return credential\n\n def lookup_proquest_token(self, db, patron):\n \"\"\"Look up for a JWT bearer token used required to use ProQuest API.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param patron: Patron object\n :type patron: core.model.patron.Patron\n\n :return: Credential object containing the existing ProQuest JWT bearer token (if any)\n :rtype: Optional[core.model.credential.Credential]\n \"\"\"\n if not is_session(db):\n raise ValueError('\"db\" argument must be a valid SQLAlchemy session')\n if not isinstance(patron, Patron):\n raise ValueError('\"patron\" argument must be an instance of Patron class')\n\n self._logger.debug(\"Started looking up for a ProQuest JWT token\")\n\n credential = Credential.lookup_by_patron(\n db,\n DataSourceConstants.PROQUEST,\n ProQuestCredentialType.PROQUEST_JWT_TOKEN.value,\n patron,\n allow_persistent_token=False,\n 
auto_create_datasource=True,\n )\n\n self._logger.debug(\n \"Finished looking up for a ProQuest JWT token: {0}\".format(credential)\n )\n\n if credential:\n return credential\n\n return None\n\n def save_proquest_token(self, db, patron, duration, token):\n \"\"\"Save a ProQuest JWT bearer token for later use.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param patron: Patron object\n :type patron: core.model.patron.Patron\n\n :param duration: How long this token can be valid\n :type duration: datetime.timedelta\n\n :param token: ProQuest JWT bearer token\n :type token: str\n\n :return: Credential object containing a new ProQuest JWT bearer token\n :rtype: Optional[core.model.credential.Credential]\n \"\"\"\n if not is_session(db):\n raise ValueError('\"db\" argument must be a valid SQLAlchemy session')\n if not isinstance(patron, Patron):\n raise ValueError('\"patron\" argument must be an instance of Patron class')\n if not isinstance(duration, datetime.timedelta):\n raise ValueError(\n '\"duration\" argument must be an instance of datetime.timedelta class'\n )\n if not isinstance(token, str) or not token:\n raise ValueError('\"token\" argument must be a non-empty string')\n\n self._logger.debug(\n \"Started saving a ProQuest JWT bearer token {0}\".format(token)\n )\n\n data_source = DataSource.lookup(\n db, DataSourceConstants.PROQUEST, autocreate=True\n )\n credential, is_new = Credential.temporary_token_create(\n db,\n data_source,\n ProQuestCredentialType.PROQUEST_JWT_TOKEN.value,\n patron,\n duration,\n token,\n )\n\n self._logger.debug(\n \"Finished saving a ProQuest JWT bearer token {0}: {1} (new = {2})\".format(\n token, credential, is_new\n )\n )\n\n return credential\n\n def lookup_patron_affiliation_id(\n self,\n db,\n patron,\n affiliation_attributes=(\n SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.eduPersonScopedAffiliation.name,\n ),\n ):\n \"\"\"Look up for patron's SAML affiliation ID.\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param patron: Patron object\n :type patron: core.model.patron.Patron\n\n :param affiliation_attributes: SAML attributes containing an affiliation ID\n :type affiliation_attributes: Tuple\n\n :return: Patron's SAML affiliation ID (if any)\n :rtype: Optional[str]\n \"\"\"\n if not is_session(db):\n raise ValueError('\"db\" argument must be a valid SQLAlchemy session')\n if not isinstance(patron, Patron):\n raise ValueError('\"patron\" argument must be an instance of Patron class')\n if affiliation_attributes and not isinstance(affiliation_attributes, tuple):\n raise ValueError('\"affiliation_attributes\" argument must be a tuple')\n\n self._logger.debug(\n \"Started looking for SAML affiliation ID in for patron {0} in {1}\".format(\n patron, affiliation_attributes\n )\n )\n\n saml_credential = self._lookup_saml_token(db, patron)\n\n if not saml_credential:\n self._logger.debug(\"Patron {0} does not have a SAML token\".format(patron))\n return None\n\n saml_subject = self._extract_saml_subject(saml_credential)\n\n self._logger.debug(\n \"Patron {0} has the following SAML subject: {1}\".format(\n patron, saml_subject\n )\n )\n\n affiliation_id = None\n\n for attribute_name in affiliation_attributes:\n self._logger.debug(\"Trying to find attribute {0}\".format(attribute_name))\n\n if attribute_name in saml_subject.attribute_statement.attributes:\n attribute = saml_subject.attribute_statement.attributes[attribute_name]\n\n self._logger.debug(\n \"Found {0} with the 
following values: {1}\".format(\n attribute, attribute.values\n )\n )\n\n affiliation_id = first_or_default(attribute.values)\n break\n\n self._logger.debug(\n \"Finished looking for SAML affiliation ID in for patron {0} in {1}: {2}\".format(\n patron, affiliation_attributes, affiliation_id\n )\n )\n\n return affiliation_id\n", "id": "1173073", "language": "Python", "matching_score": 3.4248130321502686, "max_stars_count": 16, "path": "api/proquest/credential.py" }, { "content": "# coding=utf-8\nfrom parameterized import parameterized\n\nfrom api.saml.metadata.model import (\n SAMLAttribute,\n SAMLAttributeStatement,\n SAMLAttributeType,\n SAMLNameID,\n SAMLNameIDFormat,\n SAMLSubject,\n SAMLSubjectPatronIDExtractor,\n)\nfrom tests.api.saml import fixtures\n\n\nclass TestAttributeStatement(object):\n def test_init_accepts_list_of_attributes(self):\n # Arrange\n attributes = [\n SAMLAttribute(SAMLAttributeType.uid.name, [12345]),\n SAMLAttribute(SAMLAttributeType.eduPersonTargetedID.name, [12345]),\n ]\n\n # Act\n attribute_statement = SAMLAttributeStatement(attributes)\n\n # Assert\n assert True == (SAMLAttributeType.uid.name in attribute_statement.attributes)\n assert (\n attributes[0].values\n == attribute_statement.attributes[SAMLAttributeType.uid.name].values\n )\n\n assert True == (\n SAMLAttributeType.eduPersonTargetedID.name in attribute_statement.attributes\n )\n assert (\n attributes[1].values\n == attribute_statement.attributes[\n SAMLAttributeType.eduPersonTargetedID.name\n ].values\n )\n\n\nclass TestSAMLSubjectPatronIDExtractor(object):\n @parameterized.expand(\n [\n (\"subject_without_patron_id\", SAMLSubject(None, None), None),\n (\n \"subject_with_eduPersonTargetedID_attribute\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonTargetedID.name,\n values=[\"2\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"3\",\n ),\n (\n \"subject_with_eduPersonUniqueId_attribute\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"2\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"3\"]\n ),\n ]\n ),\n ),\n \"2\",\n ),\n (\n \"subject_with_uid_attribute\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [SAMLAttribute(name=SAMLAttributeType.uid.name, values=[\"2\"])]\n ),\n ),\n \"2\",\n ),\n (\n \"subject_with_name_id\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"2\"],\n )\n ]\n ),\n ),\n \"1\",\n ),\n (\n \"subject_with_switched_off_use_of_name_id\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"2\"],\n )\n ]\n ),\n ),\n None,\n False,\n ),\n (\n \"patron_id_attributes_matching_attributes_in_subject\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonTargetedID.name,\n values=[\"2\"],\n ),\n SAMLAttribute(\n 
name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"4\",\n False,\n [SAMLAttributeType.uid.name],\n ),\n (\n \"patron_id_attributes_matching_second_saml_attribute\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonTargetedID.name,\n values=[None],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"4\",\n True,\n [\n SAMLAttributeType.eduPersonTargetedID.name,\n SAMLAttributeType.uid.name,\n ],\n ),\n (\n \"patron_id_attributes_not_matching_attributes_in_subject\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonTargetedID.name,\n values=[\"2\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n None,\n False,\n [SAMLAttributeType.givenName.name],\n ),\n (\n \"patron_id_attributes_not_matching_attributes_in_subject_and_using_name_id_instead\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonTargetedID.name,\n values=[\"2\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"1\",\n True,\n [SAMLAttributeType.givenName.name],\n ),\n (\n \"patron_id_regular_expression_matching_saml_subject\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"<EMAIL>\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"patron\",\n False,\n [\n SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.mail.name,\n ],\n fixtures.PATRON_ID_REGULAR_EXPRESSION_ORG,\n ),\n (\n \"patron_id_regular_expression_matching_second_saml_attribute\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"<EMAIL>\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"patron\",\n False,\n [\n SAMLAttributeType.eduPersonUniqueId.name,\n SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.mail.name,\n ],\n fixtures.PATRON_ID_REGULAR_EXPRESSION_ORG,\n ),\n (\n \"unicode_patron_id_regular_expression_matching_saml_subject\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"<EMAIL>\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"pątron\",\n False,\n [\n 
SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.mail.name,\n ],\n fixtures.PATRON_ID_REGULAR_EXPRESSION_ORG,\n ),\n (\n \"patron_id_regular_expression_not_matching_saml_subject\",\n SAMLSubject(\n SAMLNameID(SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"1\"),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"<EMAIL>\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n None,\n False,\n [\n SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.mail.name,\n ],\n fixtures.PATRON_ID_REGULAR_EXPRESSION_COM,\n ),\n (\n \"patron_id_regular_expression_not_matching_saml_attributes_but_matching_name_id\",\n SAMLSubject(\n SAMLNameID(\n SAMLNameIDFormat.UNSPECIFIED, \"\", \"\", \"<EMAIL>\"\n ),\n SAMLAttributeStatement(\n [\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonPrincipalName.name,\n values=[\"<EMAIL>\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.eduPersonUniqueId.name,\n values=[\"3\"],\n ),\n SAMLAttribute(\n name=SAMLAttributeType.uid.name, values=[\"4\"]\n ),\n ]\n ),\n ),\n \"patron\",\n True,\n [\n SAMLAttributeType.eduPersonPrincipalName.name,\n SAMLAttributeType.mail.name,\n ],\n fixtures.PATRON_ID_REGULAR_EXPRESSION_COM,\n ),\n ]\n )\n def test(\n self,\n _,\n subject,\n expected_patron_id,\n use_name_id=True,\n patron_id_attributes=None,\n patron_id_regular_expression=None,\n ):\n \"\"\"Make sure that SAMLSubjectUIDExtractor correctly extracts a unique patron ID from the SAML subject.\n\n :param _: Name of the test case\n :type _: str\n\n :param expected_patron_id: Expected patron ID\n :type expected_patron_id: str\n\n :param use_name_id: Boolean value indicating whether SAMLSubjectUIDExtractor\n is allowed to search for patron IDs in NameID\n :type use_name_id: bool\n\n :param patron_id_attributes: List of SAML attributes used by SAMLSubjectUIDExtractor to search for a patron ID\n :type patron_id_attributes: List[SAMLAttributeType]\n\n :param patron_id_regular_expression: Regular expression used to extract a patron ID from SAML attributes\n :type patron_id_regular_expression: str\n \"\"\"\n # Arrange\n extractor = SAMLSubjectPatronIDExtractor(\n use_name_id, patron_id_attributes, patron_id_regular_expression\n )\n\n # Act\n patron_id = extractor.extract(subject)\n\n # Assert\n assert expected_patron_id == patron_id\n", "id": "6327847", "language": "Python", "matching_score": 2.1576781272888184, "max_stars_count": 0, "path": "tests/api/saml/metadata/test_model.py" }, { "content": "from parameterized import parameterized\nfrom werkzeug.datastructures import MultiDict\n\nfrom api.admin.problem_details import INCOMPLETE_CONFIGURATION\nfrom api.admin.validator import PatronAuthenticationValidatorFactory\nfrom api.app import initialize_database\nfrom api.saml.configuration.model import SAMLConfiguration\nfrom api.saml.configuration.validator import (\n SAML_INCORRECT_METADATA,\n SAML_INCORRECT_PATRON_ID_REGULAR_EXPRESSION,\n SAMLSettingsValidator,\n)\nfrom api.saml.metadata.filter import SAMLSubjectFilter\nfrom api.saml.metadata.parser import SAMLMetadataParser\nfrom api.saml.provider import SAMLWebSSOAuthenticationProvider\nfrom core.python_expression_dsl.evaluator import DSLEvaluationVisitor, DSLEvaluator\nfrom core.python_expression_dsl.parser import DSLParser\nfrom core.util.problem_detail import ProblemDetail\nfrom tests.api.saml import fixtures\nfrom 
tests.api.saml.controller_test import ControllerTest\n\n\nclass TestSAMLSettingsValidator(ControllerTest):\n @classmethod\n def setup_class(cls):\n super(TestSAMLSettingsValidator, cls).setup_class()\n\n initialize_database(autoinitialize=False)\n\n @parameterized.expand(\n [\n (\n \"missing_sp_metadata_and_missing_idp_metadata\",\n None,\n None,\n None,\n INCOMPLETE_CONFIGURATION.detailed(\n \"Required field 'Service Provider's XML Metadata' is missing\"\n ),\n ),\n (\n \"empty_sp_metadata_and_empty_idp_metadata\",\n fixtures.INCORRECT_XML,\n fixtures.INCORRECT_XML,\n None,\n INCOMPLETE_CONFIGURATION.detailed(\n \"Required field 'Service Provider's XML Metadata' is missing\"\n ),\n ),\n (\n \"incorrect_sp_metadata_and_incorrect_idp_metadata\",\n fixtures.INCORRECT_XML_WITH_ONE_SP_METADATA_WITHOUT_ACS_SERVICE,\n fixtures.INCORRECT_XML_WITH_ONE_IDP_METADATA_WITHOUT_SSO_SERVICE,\n None,\n SAML_INCORRECT_METADATA.detailed(\n \"Service Provider's metadata has incorrect format: \"\n \"Missing urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST AssertionConsumerService\"\n ),\n ),\n (\n \"correct_sp_metadata_and_incorrect_idp_metadata\",\n fixtures.CORRECT_XML_WITH_ONE_SP,\n fixtures.INCORRECT_XML_WITH_ONE_IDP_METADATA_WITHOUT_SSO_SERVICE,\n None,\n SAML_INCORRECT_METADATA.detailed(\n \"Identity Provider's metadata has incorrect format: \"\n \"Missing urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect SingleSignOnService \"\n \"service declaration\"\n ),\n ),\n (\n \"correct_sp_and_idp_metadata\",\n fixtures.CORRECT_XML_WITH_ONE_SP,\n fixtures.CORRECT_XML_WITH_IDP_1,\n None,\n None,\n ),\n (\n \"correct_patron_id_regular_expression\",\n fixtures.CORRECT_XML_WITH_ONE_SP,\n fixtures.CORRECT_XML_WITH_IDP_1,\n r\"(?P<patron_id>.+)@university\\.org\",\n None,\n ),\n (\n \"correct_patron_id_regular_expression_without_patron_id_named_group\",\n fixtures.CORRECT_XML_WITH_ONE_SP,\n fixtures.CORRECT_XML_WITH_IDP_1,\n r\"(?P<patron>.+)@university\\.org\",\n SAML_INCORRECT_PATRON_ID_REGULAR_EXPRESSION.detailed(\n \"SAML patron ID regular expression '(?P<patron>.+)@university\\\\.org' \"\n \"does not have mandatory named group 'patron_id'\"\n ),\n ),\n (\n \"incorrect_patron_id_regular_expression\",\n fixtures.CORRECT_XML_WITH_ONE_SP,\n fixtures.CORRECT_XML_WITH_IDP_1,\n r\"[\",\n SAML_INCORRECT_PATRON_ID_REGULAR_EXPRESSION.detailed(\n \"SAML patron ID regular expression '[' has an incorrect format: \"\n \"unterminated character set at position 0\"\n ),\n ),\n ]\n )\n def test_validate(\n self,\n _,\n sp_xml_metadata,\n idp_xml_metadata,\n patron_id_regular_expression,\n expected_validation_result,\n ):\n \"\"\"Ensure that SAMLSettingsValidator correctly validates the input data.\n\n :param sp_xml_metadata: SP SAML metadata\n :type sp_xml_metadata: str\n\n :param idp_xml_metadata: IdP SAML metadata\n :type idp_xml_metadata: str\n\n :param patron_id_regular_expression: Regular expression used to extract a unique patron ID from SAML attributes\n :type patron_id_regular_expression: str\n\n :param expected_validation_result: Expected result: ProblemDetail object if validation must fail, None otherwise\n :type expected_validation_result: Optional[ProblemDetail]\n \"\"\"\n # Arrange\n submitted_form_data = MultiDict()\n\n if sp_xml_metadata is not None:\n submitted_form_data.add(\n SAMLConfiguration.service_provider_xml_metadata.key, sp_xml_metadata\n )\n if idp_xml_metadata is not None:\n submitted_form_data.add(\n SAMLConfiguration.non_federated_identity_provider_xml_metadata.key,\n idp_xml_metadata,\n )\n if 
patron_id_regular_expression is not None:\n submitted_form_data.add(\n SAMLConfiguration.patron_id_regular_expression.key,\n patron_id_regular_expression,\n )\n\n submitted_form = {\"form\": submitted_form_data}\n metadata_parser = SAMLMetadataParser()\n parser = DSLParser()\n visitor = DSLEvaluationVisitor()\n evaluator = DSLEvaluator(parser, visitor)\n subject_filter = SAMLSubjectFilter(evaluator)\n validator = SAMLSettingsValidator(metadata_parser, subject_filter)\n\n # Act\n settings = list(SAMLWebSSOAuthenticationProvider.SETTINGS)\n result = validator.validate(settings, submitted_form)\n\n # Assert\n if isinstance(result, ProblemDetail):\n assert expected_validation_result.response == result.response\n else:\n assert expected_validation_result == result\n\n\nclass TestSAMLSettingsValidatorFactory(object):\n @parameterized.expand([(\"validator_using_factory_method\", \"api.saml.provider\")])\n def test_create_can_create(self, _, protocol):\n # Arrange\n factory = PatronAuthenticationValidatorFactory()\n\n # Act\n result = factory.create(protocol)\n\n # Assert\n assert True == isinstance(result, SAMLSettingsValidator)\n", "id": "2267415", "language": "Python", "matching_score": 3.1146206855773926, "max_stars_count": 0, "path": "tests/api/saml/configuration/test_validator.py" }, { "content": "import logging\n\nfrom api.saml.metadata.model import (\n SAMLAttribute,\n SAMLAttributeStatement,\n SAMLNameID,\n SAMLSubject,\n)\nfrom core.exceptions import BaseError\nfrom core.python_expression_dsl.evaluator import DSLEvaluator\n\n\nclass SAMLSubjectFilterError(BaseError):\n \"\"\"Raised in the case of any errors during execution of a filter expression.\"\"\"\n\n def __init__(self, inner_exception):\n \"\"\"Initialize a new instance of SAMLSubjectFilterError class.\n\n :param inner_exception: Inner exception\n :type inner_exception: Exception\n \"\"\"\n message = \"Incorrect filter expression: {0}\".format(str(inner_exception))\n\n super(SAMLSubjectFilterError, self).__init__(message, inner_exception)\n\n\nclass SAMLSubjectFilter(object):\n \"\"\"Executes filter expressions.\"\"\"\n\n def __init__(self, dsl_evaluator):\n \"\"\"Initialize a new instance of SAMLSubjectFilter class.\n\n :param dsl_evaluator: DSL evaluator\n :type dsl_evaluator: core.python_expression_dsl.evaluator.DSLEvaluator\n \"\"\"\n if not isinstance(dsl_evaluator, DSLEvaluator):\n raise ValueError(\n \"Argument 'dsl_evaluator' must be an instance of {0} class\".format(\n DSLEvaluator\n )\n )\n\n self._dsl_evaluator = dsl_evaluator\n self._logger = logging.getLogger(__name__)\n\n def execute(self, expression, subject):\n \"\"\"Apply the expression to the subject and return a boolean value indicating whether it's a valid subject.\n\n :param expression: String containing the filter expression\n :type expression: str\n\n :param subject: SAML subject\n :type subject: api.saml.metadata.model.SAMLSubject\n\n :return: Boolean value indicating whether it's a valid subject\n :rtype: bool\n\n :raise SAMLSubjectFilterError: in the case of any errors occurred during expression evaluation\n \"\"\"\n if not expression or not isinstance(expression, str):\n raise ValueError(\"Argument 'expression' must be a non-empty string\")\n if not isinstance(subject, SAMLSubject):\n raise ValueError(\"Argument 'subject' must an instance of Subject class\")\n\n self._logger.info(\n \"Started applying expression '{0}' to {1}\".format(expression, subject)\n )\n\n try:\n result = self._dsl_evaluator.evaluate(\n expression,\n context={\"subject\": 
subject},\n safe_classes=[\n SAMLSubject,\n SAMLNameID,\n SAMLAttributeStatement,\n SAMLAttribute,\n ],\n )\n except Exception as exception:\n raise SAMLSubjectFilterError(exception)\n\n self._logger.info(\n \"Finished applying expression '{0}' to {1}: {2}\".format(\n expression, subject, result\n )\n )\n\n result = bool(result)\n\n return result\n\n def validate(self, expression):\n \"\"\"Validate the filter expression.\n\n Try to apply the expression to a dummy Subject object containing all the known SAML attributes.\n\n :param expression: String containing the filter expression\n :type expression: str\n\n :raise: SAMLSubjectFilterError\n \"\"\"\n if not expression or not isinstance(expression, str):\n raise ValueError(\"Argument 'expression' must be a non-empty string\")\n\n try:\n self._dsl_evaluator.parser.parse(expression)\n except Exception as exception:\n raise SAMLSubjectFilterError(exception)\n", "id": "12041959", "language": "Python", "matching_score": 1.7425223588943481, "max_stars_count": 0, "path": "api/saml/metadata/filter.py" }, { "content": "import logging\n\nfrom onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser\nfrom onelogin.saml2.xmlparser import tostring\n\nfrom api.saml.metadata.federations.model import (\n SAMLFederatedIdentityProvider,\n SAMLFederation,\n)\nfrom api.saml.metadata.federations.validator import SAMLFederatedMetadataValidator\nfrom api.saml.metadata.parser import SAMLMetadataParser\nfrom core.exceptions import BaseError\nfrom core.util import first_or_default\n\n\nclass SAMLMetadataLoadingError(BaseError):\n \"\"\"Raised in the case of any errors occurred during loading of SAML metadata from a remote source\"\"\"\n\n\nclass SAMLMetadataLoader(object):\n \"\"\"Loads SAML metadata from a remote source (e.g. 
InCommon Metadata Service)\"\"\"\n\n def __init__(self):\n \"\"\"Initializes a new instance of SAMLMetadataLoader\"\"\"\n\n self._logger = logging.getLogger(__name__)\n\n def load_idp_metadata(self, url=None):\n \"\"\"Load IdP metadata in an XML format from the specified url.\n\n :param url: URL of a metadata service\n :type url: Optional[string]\n\n :return: XML string containing InCommon Metadata\n :rtype: string\n\n :raise: MetadataLoadError\n \"\"\"\n self._logger.info(\"Started loading IdP XML metadata from {0}\".format(url))\n\n try:\n xml_metadata = OneLogin_Saml2_IdPMetadataParser.get_metadata(url)\n except Exception as exception:\n raise SAMLMetadataLoadingError(inner_exception=exception)\n\n self._logger.info(\"Finished loading IdP XML metadata from {0}\".format(url))\n\n return xml_metadata\n\n\nclass SAMLFederatedIdentityProviderLoader(object):\n \"\"\"Loads metadata of federated IdPs from the specified metadata service.\"\"\"\n\n ENGLISH_LANGUAGE_CODES = (\"en\", \"eng\")\n\n def __init__(self, loader, validator, parser):\n \"\"\"Initialize a new instance of SAMLFederatedIdentityProviderLoader class.\n\n :param loader: SAML metadata loader\n :type loader: api.saml.metadata.federations.loader.SAMLMetadataLoader\n\n :param validator: SAML metadata validator\n :type validator: api.saml.metadata.federations.validator.SAMLFederatedMetadataValidator\n\n :param parser: SAML metadata parser\n :type parser: api.saml.metadata.parser.SAMLMetadataParser\n \"\"\"\n if not isinstance(loader, SAMLMetadataLoader):\n raise ValueError(\n \"Argument 'loader' must be an instance of {0} class\".format(\n SAMLMetadataLoader\n )\n )\n if not isinstance(validator, SAMLFederatedMetadataValidator):\n raise ValueError(\n \"Argument 'validator' must be an instance of {0} class\".format(\n SAMLFederatedMetadataValidator\n )\n )\n if not isinstance(parser, SAMLMetadataParser):\n raise ValueError(\n \"Argument 'parser' must be an instance of {0} class\".format(\n SAMLMetadataParser\n )\n )\n\n self._loader = loader\n self._validator = validator\n self._parser = parser\n\n self._logger = logging.getLogger(__name__)\n\n def _try_to_get_an_english_value(self, localized_values):\n \"\"\"Try to fetch an English value from the list of localized values.\n\n :param localized_values: List of localized values\n :type localized_values: List[api.saml.metadata.LocalizedMetadataItem]\n\n :return: Localized value in English (if any, otherwise first value from the list)\n :rtype: Optional[str]\n \"\"\"\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value\n\n def load(self, federation):\n \"\"\"Loads metadata of federated IdPs from the specified metadata service.\n\n :param federation: SAML federation where loaded IdPs belong to\n :type federation: api.saml.metadata.federations.model.SAMLFederation\n\n :return: List of SAMLFederatedIdP objects\n :rtype: Iterable[api.saml.configuration.SAMLFederatedIdentityProvider]\n \"\"\"\n if not isinstance(federation, SAMLFederation):\n raise ValueError(\n \"Argument 'federation' must be an instance of {0} class\".format(\n SAMLFederation\n )\n )\n\n self._logger.info(\"Started loading federated IdP's for {0}\".format(federation))\n\n federated_idps = []\n metadata = self._loader.load_idp_metadata(federation.idp_metadata_service_url)\n\n self._validator.validate(federation, metadata)\n\n parsing_results = 
self._parser.parse(metadata)\n\n for parsing_result in parsing_results:\n idp = parsing_result.provider\n\n if idp.ui_info.display_names:\n display_name = self._try_to_get_an_english_value(\n idp.ui_info.display_names\n )\n elif idp.organization.organization_display_names:\n display_name = self._try_to_get_an_english_value(\n idp.organization.organization_display_names\n )\n elif idp.organization.organization_names:\n display_name = self._try_to_get_an_english_value(\n idp.organization.organization_names\n )\n else:\n display_name = idp.entity_id\n\n xml_metadata = tostring(parsing_result.xml_node, encoding=\"unicode\")\n federated_idp = SAMLFederatedIdentityProvider(\n federation, idp.entity_id.strip(), display_name.strip(), xml_metadata\n )\n\n federated_idps.append(federated_idp)\n\n self._logger.info(\n \"Finished loading {0} federated IdP's for {1}\".format(\n len(federated_idps), federation\n )\n )\n\n return federated_idps\n", "id": "5599396", "language": "Python", "matching_score": 1.2275546789169312, "max_stars_count": 0, "path": "api/saml/metadata/federations/loader.py" }, { "content": "from tests.api.admin.fixtures.dummy_validator import (\n DummyAuthenticationProviderValidator,\n)\n\n\ndef validator_factory():\n return DummyAuthenticationProviderValidator()\n", "id": "11961918", "language": "Python", "matching_score": 0.16249561309814453, "max_stars_count": 0, "path": "tests/api/admin/fixtures/dummy_validator_factory.py" }, { "content": "from parameterized import parameterized\n\nfrom api.lcp.hash import HasherFactory, HashingAlgorithm\n\n\nclass TestHasherFactory(object):\n @parameterized.expand(\n [\n (\n \"sha256\",\n HashingAlgorithm.SHA256,\n \"12345\",\n \"5994471abb01112afcc18159f6cc74b4f511b99806da59b3caf5a9c173cacfc5\",\n ),\n (\n \"sha256_value\",\n HashingAlgorithm.SHA256.value,\n \"12345\",\n \"5994471abb01112afcc18159f6cc74b4f511b99806da59b3caf5a9c173cacfc5\",\n ),\n (\n \"sha512\",\n HashingAlgorithm.SHA512,\n \"12345\",\n \"3627909a29c31381a071ec27f7c9ca97726182aed29a7ddd2e54353322cfb30abb9e3a6df2ac2c20fe23436311d678564d0c8d305930575f60e2d3d048184d79\",\n ),\n (\n \"sha512_value\",\n HashingAlgorithm.SHA512.value,\n \"12345\",\n \"3627909a29c31381a071ec27f7c9ca97726182aed29a7ddd2e54353322cfb30abb9e3a6df2ac2c20fe23436311d678564d0c8d305930575f60e2d3d048184d79\",\n ),\n ]\n )\n def test_create(self, _, hashing_algorithm, value, expected_value):\n #\n hasher_factory = HasherFactory()\n hasher = hasher_factory.create(hashing_algorithm)\n\n result = hasher.hash(value)\n\n assert result == expected_value\n", "id": "8010334", "language": "Python", "matching_score": 1.1252354383468628, "max_stars_count": 0, "path": "tests/api/lcp/test_hash.py" }, { "content": "import json\nfrom unittest.mock import MagicMock, call, create_autospec, patch\n\nfrom flask import request\n\nfrom api.controller import CirculationManager\nfrom api.lcp.collection import LCPAPI\nfrom api.lcp.controller import LCPController\nfrom api.lcp.factory import LCPServerFactory\nfrom api.lcp.server import LCPServer\nfrom core.lcp.credential import LCPCredentialFactory\nfrom core.model import ExternalIntegration\nfrom tests.api.lcp import fixtures\nfrom tests.api.test_controller import ControllerTest\n\n\nclass TestLCPController(ControllerTest):\n def test_get_lcp_passphrase_returns_the_same_passphrase_for_authenticated_patron(\n self,\n ):\n # Arrange\n expected_passphrase = \"<PASSWORD>\"\n\n with patch(\n \"api.lcp.controller.LCPCredentialFactory\"\n ) as credential_factory_constructor_mock:\n 
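# Replace the real LCPCredentialFactory with an autospec mock so the controller\n            # returns a fixed, predictable passphrase for the authenticated patron.\n            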
credential_factory = create_autospec(spec=LCPCredentialFactory)\n credential_factory.get_patron_passphrase = MagicMock(\n return_value=expected_passphrase\n )\n credential_factory_constructor_mock.return_value = credential_factory\n\n patron = self.default_patron\n manager = CirculationManager(self._db, testing=True)\n controller = LCPController(manager)\n controller.authenticated_patron_from_request = MagicMock(\n return_value=patron\n )\n\n url = \"http://circulationmanager.org/lcp/hint\"\n\n with self.app.test_request_context(url):\n request.library = self._default_library\n\n # Act\n result1 = controller.get_lcp_passphrase()\n result2 = controller.get_lcp_passphrase()\n\n # Assert\n for result in [result1, result2]:\n assert result.status_code == 200\n assert (\"passphrase\" in result.json) == True\n assert result.json[\"passphrase\"] == expected_passphrase\n\n credential_factory.get_patron_passphrase.assert_has_calls(\n [call(self._db, patron), call(self._db, patron)]\n )\n\n def test_get_lcp_license_returns_problem_detail_when_collection_is_missing(self):\n # Arrange\n missing_collection_name = \"missing-collection\"\n license_id = \"e99be177-4902-426a-9b96-0872ae877e2f\"\n expected_license = json.loads(fixtures.LCPSERVER_LICENSE)\n lcp_server = create_autospec(spec=LCPServer)\n lcp_server.get_license = MagicMock(return_value=expected_license)\n library = self.make_default_library(self._db)\n lcp_collection = self._collection(LCPAPI.NAME, ExternalIntegration.LCP)\n library.collections.append(lcp_collection)\n\n with patch(\n \"api.lcp.controller.LCPServerFactory\"\n ) as lcp_server_factory_constructor_mock:\n lcp_server_factory = create_autospec(spec=LCPServerFactory)\n lcp_server_factory.create = MagicMock(return_value=lcp_server)\n lcp_server_factory_constructor_mock.return_value = lcp_server_factory\n\n patron = self.default_patron\n manager = CirculationManager(self._db, testing=True)\n controller = LCPController(manager)\n controller.authenticated_patron_from_request = MagicMock(\n return_value=patron\n )\n\n url = \"http://circulationmanager.org/{0}/licenses{1}\".format(\n missing_collection_name, license_id\n )\n\n with self.app.test_request_context(url):\n request.library = self._default_library\n\n # Act\n result = controller.get_lcp_license(missing_collection_name, license_id)\n\n # Assert\n assert result.status_code == 404\n\n def test_get_lcp_license_returns_the_same_license_for_authenticated_patron(self):\n # Arrange\n license_id = \"e99be177-4902-426a-9b96-0872ae877e2f\"\n expected_license = json.loads(fixtures.LCPSERVER_LICENSE)\n lcp_server = create_autospec(spec=LCPServer)\n lcp_server.get_license = MagicMock(return_value=expected_license)\n library = self.make_default_library(self._db)\n lcp_collection = self._collection(LCPAPI.NAME, ExternalIntegration.LCP)\n library.collections.append(lcp_collection)\n\n with patch(\n \"api.lcp.controller.LCPServerFactory\"\n ) as lcp_server_factory_constructor_mock:\n lcp_server_factory = create_autospec(spec=LCPServerFactory)\n lcp_server_factory.create = MagicMock(return_value=lcp_server)\n lcp_server_factory_constructor_mock.return_value = lcp_server_factory\n\n patron = self.default_patron\n manager = CirculationManager(self._db, testing=True)\n controller = LCPController(manager)\n controller.authenticated_patron_from_request = MagicMock(\n return_value=patron\n )\n\n url = \"http://circulationmanager.org/{0}/licenses{1}\".format(\n LCPAPI.NAME, license_id\n )\n\n with self.app.test_request_context(url):\n request.library = 
self._default_library\n\n # Act\n result1 = controller.get_lcp_license(LCPAPI.NAME, license_id)\n result2 = controller.get_lcp_license(LCPAPI.NAME, license_id)\n\n # Assert\n for result in [result1, result2]:\n assert result.status_code == 200\n assert result.json == expected_license\n", "id": "1159152", "language": "Python", "matching_score": 3.1176834106445312, "max_stars_count": 0, "path": "tests/api/lcp/test_controller.py" }, { "content": "import datetime\nimport json\nimport os\nimport urllib.parse\nfrom unittest.mock import MagicMock, create_autospec\n\nimport requests_mock\nfrom parameterized import parameterized\n\nfrom api.lcp import utils\nfrom api.lcp.encrypt import LCPEncryptionResult\nfrom api.lcp.hash import HasherFactory\nfrom api.lcp.server import LCPServer, LCPServerConfiguration\nfrom core.lcp.credential import LCPCredentialFactory\nfrom core.model.configuration import (\n ConfigurationFactory,\n ConfigurationStorage,\n ExternalIntegration,\n HasExternalIntegration,\n)\nfrom tests.api.lcp import fixtures\nfrom tests.api.lcp.database_test import DatabaseTest\n\n\nclass TestLCPServer(DatabaseTest):\n def setup_method(self):\n super(TestLCPServer, self).setup_method()\n\n self._lcp_collection = self._collection(protocol=ExternalIntegration.LCP)\n self._integration = self._lcp_collection.external_integration\n integration_owner = create_autospec(spec=HasExternalIntegration)\n integration_owner.external_integration = MagicMock(\n return_value=self._integration\n )\n self._configuration_storage = ConfigurationStorage(integration_owner)\n self._configuration_factory = ConfigurationFactory()\n self._hasher_factory = HasherFactory()\n self._credential_factory = LCPCredentialFactory()\n self._lcp_server = LCPServer(\n self._configuration_storage,\n self._configuration_factory,\n self._hasher_factory,\n self._credential_factory,\n )\n\n @parameterized.expand(\n [\n (\"empty_input_directory\", \"\"),\n (\"non_empty_input_directory\", \"/tmp/encrypted_books\"),\n ]\n )\n def test_add_content(self, _, input_directory):\n # Arrange\n lcp_server = LCPServer(\n self._configuration_storage,\n self._configuration_factory,\n self._hasher_factory,\n self._credential_factory,\n )\n encrypted_content = LCPEncryptionResult(\n content_id=fixtures.CONTENT_ID,\n content_encryption_key=\"12345\",\n protected_content_location=\"/opt/readium/files/encrypted\",\n protected_content_disposition=\"encrypted_book\",\n protected_content_type=\"application/epub+zip\",\n protected_content_length=12345,\n protected_content_sha256=\"12345\",\n )\n expected_protected_content_disposition = os.path.join(\n input_directory, encrypted_content.protected_content_disposition\n )\n\n with self._configuration_factory.create(\n self._configuration_storage, self._db, LCPServerConfiguration\n ) as configuration:\n configuration.lcpserver_url = fixtures.LCPSERVER_URL\n configuration.lcpserver_user = fixtures.LCPSERVER_USER\n configuration.lcpserver_password = fixtures.LCPSERVER_PASSWORD\n configuration.lcpserver_input_directory = input_directory\n configuration.provider_name = fixtures.PROVIDER_NAME\n configuration.passphrase_hint = fixtures.TEXT_HINT\n configuration.encryption_algorithm = (\n LCPServerConfiguration.DEFAULT_ENCRYPTION_ALGORITHM\n )\n\n with requests_mock.Mocker() as request_mock:\n url = urllib.parse.urljoin(\n fixtures.LCPSERVER_URL, \"/contents/{0}\".format(fixtures.CONTENT_ID)\n )\n request_mock.put(url)\n\n # Act\n lcp_server.add_content(self._db, encrypted_content)\n\n # Assert\n assert 
request_mock.called == True\n\n json_request = json.loads(request_mock.last_request.text)\n assert json_request[\"content-id\"] == encrypted_content.content_id\n assert (\n json_request[\"content-encryption-key\"]\n == encrypted_content.content_encryption_key\n )\n assert (\n json_request[\"protected-content-location\"]\n == expected_protected_content_disposition\n )\n assert (\n json_request[\"protected-content-disposition\"]\n == encrypted_content.protected_content_disposition\n )\n assert (\n json_request[\"protected-content-type\"]\n == encrypted_content.protected_content_type\n )\n assert (\n json_request[\"protected-content-length\"]\n == encrypted_content.protected_content_length\n )\n assert (\n json_request[\"protected-content-sha256\"]\n == encrypted_content.protected_content_sha256\n )\n\n @parameterized.expand(\n [\n (\"none_rights\", None, None, None, None),\n (\n \"license_start\",\n datetime.datetime(2020, 1, 1, 00, 00, 00),\n None,\n None,\n None,\n ),\n (\n \"license_end\",\n None,\n datetime.datetime(2020, 12, 31, 23, 59, 59),\n None,\n None,\n ),\n (\"max_printable_pages\", None, None, 10, None),\n (\"max_printable_pages_empty_max_copiable_pages\", None, None, 10, \"\"),\n (\"empty_max_printable_pages\", None, None, \"\", None),\n (\"max_copiable_pages\", None, None, None, 1024),\n (\"empty_max_printable_pages_max_copiable_pages\", None, None, \"\", 1024),\n (\"empty_max_copiable_pages\", None, None, None, \"\"),\n (\n \"dates\",\n datetime.datetime(2020, 1, 1, 00, 00, 00),\n datetime.datetime(2020, 12, 31, 23, 59, 59),\n None,\n None,\n ),\n (\n \"full_rights\",\n datetime.datetime(2020, 1, 1, 00, 00, 00),\n datetime.datetime(2020, 12, 31, 23, 59, 59),\n 10,\n 1024,\n ),\n ]\n )\n def test_generate_license(\n self, _, license_start, license_end, max_printable_pages, max_copiable_pages\n ):\n # Arrange\n patron = self._patron()\n expected_patron_id = \"52a190d1-cd69-4794-9d7a-1ec50392697f\"\n expected_patron_passphrase = \"52a190d1-cd69-4794-9d7a-1ec50392697a\"\n expected_patron_key = self._hasher_factory.create(\n LCPServerConfiguration.DEFAULT_ENCRYPTION_ALGORITHM\n ).hash(expected_patron_passphrase)\n\n with self._configuration_factory.create(\n self._configuration_storage, self._db, LCPServerConfiguration\n ) as configuration:\n configuration.lcpserver_url = fixtures.LCPSERVER_URL\n configuration.lcpserver_user = fixtures.LCPSERVER_USER\n configuration.lcpserver_password = fixtures.LCPSERVER_PASSWORD\n configuration.provider_name = fixtures.PROVIDER_NAME\n configuration.passphrase_hint = fixtures.TEXT_HINT\n configuration.encryption_algorithm = (\n LCPServerConfiguration.DEFAULT_ENCRYPTION_ALGORITHM\n )\n configuration.max_printable_pages = max_printable_pages\n configuration.max_copiable_pages = max_copiable_pages\n\n self._credential_factory.get_patron_id = MagicMock(\n return_value=expected_patron_id\n )\n self._credential_factory.get_patron_passphrase = MagicMock(\n return_value=expected_patron_passphrase\n )\n\n with requests_mock.Mocker() as request_mock:\n url = urllib.parse.urljoin(\n fixtures.LCPSERVER_URL,\n \"/contents/{0}/license\".format(fixtures.CONTENT_ID),\n )\n request_mock.post(url, json=fixtures.LCPSERVER_LICENSE)\n\n # Act\n license = self._lcp_server.generate_license(\n self._db, fixtures.CONTENT_ID, patron, license_start, license_end\n )\n\n # Assert\n assert request_mock.called == True\n assert license == fixtures.LCPSERVER_LICENSE\n\n json_request = json.loads(request_mock.last_request.text)\n assert json_request[\"provider\"] == 
fixtures.PROVIDER_NAME\n assert json_request[\"user\"][\"id\"] == expected_patron_id\n assert (\n json_request[\"encryption\"][\"user_key\"][\"text_hint\"]\n == fixtures.TEXT_HINT\n )\n assert (\n json_request[\"encryption\"][\"user_key\"][\"hex_value\"]\n == expected_patron_key\n )\n\n if license_start is not None:\n assert json_request[\"rights\"][\"start\"] == utils.format_datetime(\n license_start\n )\n if license_end is not None:\n assert json_request[\"rights\"][\"end\"] == utils.format_datetime(\n license_end\n )\n if max_printable_pages is not None and max_printable_pages != \"\":\n assert json_request[\"rights\"][\"print\"] == max_printable_pages\n if max_copiable_pages is not None and max_copiable_pages != \"\":\n assert json_request[\"rights\"][\"copy\"] == max_copiable_pages\n\n all_rights_fields_are_empty = all(\n [\n rights_field is None or rights_field == \"\"\n for rights_field in [\n license_start,\n license_end,\n max_printable_pages,\n max_copiable_pages,\n ]\n ]\n )\n if all_rights_fields_are_empty:\n assert (\"rights\" in json_request) == False\n\n self._credential_factory.get_patron_id.assert_called_once_with(\n self._db, patron\n )\n self._credential_factory.get_patron_passphrase.assert_called_once_with(\n self._db, patron\n )\n", "id": "12313900", "language": "Python", "matching_score": 3.7579782009124756, "max_stars_count": 0, "path": "tests/api/lcp/test_server.py" }, { "content": "import json\nimport logging\nimport os\nimport re\nimport subprocess\nfrom json import JSONEncoder\n\nfrom flask_babel import lazy_gettext as _\n\nfrom api.lcp import utils\nfrom core.exceptions import BaseError\nfrom core.model.configuration import (\n ConfigurationAttributeType,\n ConfigurationGrouping,\n ConfigurationMetadata,\n)\n\n\nclass LCPEncryptionException(BaseError):\n \"\"\"Raised in the case of any errors occurring during LCP encryption process\"\"\"\n\n\nclass LCPEncryptionConfiguration(ConfigurationGrouping):\n \"\"\"Contains different settings required by LCPEncryptor\"\"\"\n\n DEFAULT_LCPENCRYPT_LOCATION = \"/go/bin/lcpencrypt\"\n DEFAULT_LCPENCRYPT_DOCKER_IMAGE = \"readium/lcpencrypt\"\n\n lcpencrypt_location = ConfigurationMetadata(\n key=\"lcpencrypt_location\",\n label=_(\"lcpencrypt's location\"),\n description=_(\n \"Full path to the local lcpencrypt binary. \"\n \"The default value is {0}\".format(DEFAULT_LCPENCRYPT_LOCATION)\n ),\n type=ConfigurationAttributeType.TEXT,\n required=False,\n default=DEFAULT_LCPENCRYPT_LOCATION,\n )\n\n lcpencrypt_output_directory = ConfigurationMetadata(\n key=\"lcpencrypt_output_directory\",\n label=_(\"lcpencrypt's output directory\"),\n description=_(\n \"Full path to the directory where lcpencrypt stores encrypted content. 
\"\n \"If not set encrypted books will be stored in lcpencrypt's working directory\"\n ),\n type=ConfigurationAttributeType.TEXT,\n required=False,\n )\n\n\nclass LCPEncryptionResult(object):\n \"\"\"Represents an output sent by lcpencrypt\"\"\"\n\n CONTENT_ID = \"content-id\"\n CONTENT_ENCRYPTION_KEY = \"content-encryption-key\"\n PROTECTED_CONTENT_LOCATION = \"protected-content-location\"\n PROTECTED_CONTENT_LENGTH = \"protected-content-length\"\n PROTECTED_CONTENT_SHA256 = \"protected-content-sha256\"\n PROTECTED_CONTENT_DISPOSITION = \"protected-content-disposition\"\n PROTECTED_CONTENT_TYPE = \"protected-content-type\"\n\n def __init__(\n self,\n content_id,\n content_encryption_key,\n protected_content_location,\n protected_content_disposition,\n protected_content_type,\n protected_content_length,\n protected_content_sha256,\n ):\n \"\"\"Initializes a new instance of LCPEncryptorResult class\n\n :param: content_id: Content identifier\n :type content_id: Optional[string]\n\n :param: content_encryption_key: Content encryption key\n :type content_encryption_key: Optional[string]\n\n :param: protected_content_location: Complete file path of the encrypted content\n :type protected_content_location: Optional[string]\n\n :param: protected_content_disposition: File name of the encrypted content\n :type protected_content_disposition: Optional[string]\n\n :param: protected_content_type: Media type of the encrypted content\n :type protected_content_type: Optional[string]\n\n :param: protected_content_length: Size of the encrypted content\n :type protected_content_length: Optional[string]\n\n :param: protected_content_sha256: Hash of the encrypted content\n :type protected_content_sha256: Optional[string]\n \"\"\"\n self._content_id = content_id\n self._content_encryption_key = content_encryption_key\n self._protected_content_location = protected_content_location\n self._protected_content_disposition = protected_content_disposition\n self._protected_content_type = protected_content_type\n self._protected_content_length = protected_content_length\n self._protected_content_sha256 = protected_content_sha256\n\n @property\n def content_id(self):\n \"\"\"Returns a content encryption key\n\n :return: Content encryption key\n :rtype: Optional[string]\n \"\"\"\n return self._content_id\n\n @property\n def content_encryption_key(self):\n \"\"\"Returns a content identifier\n\n :return: Content identifier\n :rtype: Optional[string]\n \"\"\"\n return self._content_encryption_key\n\n @property\n def protected_content_location(self):\n \"\"\"Returns a complete file path of the encrypted content\n\n :return: Complete file path of the encrypted content\n :rtype: Optional[string]\n \"\"\"\n return self._protected_content_location\n\n @property\n def protected_content_disposition(self):\n \"\"\"Returns a file name of the encrypted content\n\n :return: File name of the encrypted content\n :rtype: Optional[string]\n \"\"\"\n return self._protected_content_disposition\n\n @property\n def protected_content_type(self):\n \"\"\"Returns a media type of the encrypted content\n\n :return: Media type of the encrypted content\n :rtype: Optional[string]\n \"\"\"\n return self._protected_content_type\n\n @property\n def protected_content_length(self):\n \"\"\"Returns a size of the encrypted content\n\n :return: Size of the encrypted content\n :rtype: Optional[string]\n \"\"\"\n return self._protected_content_length\n\n @property\n def protected_content_sha256(self):\n \"\"\"Returns a hash of the encrypted content\n\n :return: 
Hash of the encrypted content\n :rtype: Optional[string]\n \"\"\"\n return self._protected_content_sha256\n\n @classmethod\n def from_dict(cls, result_dict):\n \"\"\"Creates an LCPEncryptorResult object from a Python dictionary\n\n :param result_dict: Python dictionary containing an lcpencrypt output\n :type result_dict: Dict\n\n :return: LCPEncryptorResult object\n :rtype: LCPEncryptionResult\n \"\"\"\n content_id = result_dict.get(cls.CONTENT_ID)\n content_encryption_key = result_dict.get(cls.CONTENT_ENCRYPTION_KEY)\n protected_content_location = result_dict.get(cls.PROTECTED_CONTENT_LOCATION)\n protected_content_length = result_dict.get(cls.PROTECTED_CONTENT_LENGTH)\n protected_content_sha256 = result_dict.get(cls.PROTECTED_CONTENT_SHA256)\n protected_content_disposition = result_dict.get(\n cls.PROTECTED_CONTENT_DISPOSITION\n )\n protected_content_type = result_dict.get(cls.PROTECTED_CONTENT_TYPE)\n\n return cls(\n content_id=content_id,\n content_encryption_key=content_encryption_key,\n protected_content_location=protected_content_location,\n protected_content_disposition=protected_content_disposition,\n protected_content_type=protected_content_type,\n protected_content_length=protected_content_length,\n protected_content_sha256=protected_content_sha256,\n )\n\n def __eq__(self, other):\n \"\"\"Compares two LCPEncryptorResult objects\n\n :param other: LCPEncryptorResult object\n :type other: LCPEncryptionResult\n\n :return: Boolean value indicating whether two items are equal\n :rtype: bool\n \"\"\"\n if not isinstance(other, LCPEncryptionResult):\n return False\n\n return (\n self.content_id == other.content_id\n and self.content_encryption_key == other.content_encryption_key\n and self.protected_content_location == other.protected_content_location\n and self.protected_content_length == other.protected_content_length\n and self.protected_content_sha256 == other.protected_content_sha256\n and self.protected_content_disposition\n == other.protected_content_disposition\n and self.protected_content_type == other.protected_content_type\n )\n\n def __repr__(self):\n \"\"\"Returns a string representation of a LCPEncryptorResult object\n\n :return: string representation of a LCPEncryptorResult object\n :rtype: string\n \"\"\"\n return (\n \"<LCPEncryptor.Result(\"\n \"content_id={0}, \"\n \"content_encryption_key={1}, \"\n \"protected_content_location={2}, \"\n \"protected_content_length={3}, \"\n \"protected_content_sha256={4}, \"\n \"protected_content_disposition={5}, \"\n \"protected_content_type={6})>\".format(\n self.content_id,\n self.content_encryption_key,\n self.protected_content_location,\n self.protected_content_length,\n self.protected_content_sha256,\n self.protected_content_disposition,\n self.protected_content_type,\n )\n )\n\n\nclass LCPEncryptorResultJSONEncoder(JSONEncoder):\n \"\"\"Serializes LCPEncryptorResult as a JSON object\"\"\"\n\n def default(self, result):\n \"\"\"Serializers a Subject object to JSON\n\n :param result: LCPEncryptorResult object\n :type result: LCPEncryptionResult\n\n :return: String containing JSON representation of the LCPEncryptorResult object\n :rtype: string\n \"\"\"\n if not isinstance(result, LCPEncryptionResult):\n raise ValueError(\"result must have type LCPEncryptorResult\")\n\n result = {\n \"content-id\": result.content_id,\n \"content-encryption-key\": result.content_encryption_key,\n \"protected-content-location\": result.protected_content_location,\n \"protected-content-length\": result.protected_content_length,\n 
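# These keys match the lcpencrypt output field names (the CONTENT_ID /\n            # PROTECTED_CONTENT_* constants defined on LCPEncryptionResult above).\n            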
\"protected-content-sha256\": result.protected_content_sha256,\n \"protected-content-disposition\": result.protected_content_disposition,\n \"protected-content-type\": result.protected_content_type,\n }\n\n return result\n\n\nclass LCPEncryptor(object):\n \"\"\"Wrapper around lcpencrypt tool containing logic to run it locally and in a Docker container\"\"\"\n\n class Parameters(object):\n \"\"\"Parses input parameters for lcpencrypt\"\"\"\n\n def __init__(self, file_path, identifier, configuration):\n \"\"\"Initializes a new instance of Parameters class\n\n :param file_path: File path to the book to be encrypted\n :type file_path: string\n\n :param identifier: Book's identifier\n :type identifier: string\n\n :param configuration: LCPEncryptionConfiguration instance\n :type configuration: instance\n \"\"\"\n self._lcpencrypt_location = configuration.lcpencrypt_location\n self._input_file_path = str(file_path)\n self._content_id = str(identifier)\n\n output_directory = configuration.lcpencrypt_output_directory\n\n self._output_file_path = None\n\n if output_directory:\n _, input_extension = os.path.splitext(file_path)\n target_extension = utils.get_target_extension(input_extension)\n output_file_path = os.path.join(\n output_directory,\n identifier + target_extension\n if target_extension not in identifier\n else identifier,\n )\n\n self._output_file_path = output_file_path\n\n @property\n def lcpencrypt_location(self):\n \"\"\"Returns location of lcpencrypt binary\n\n :return: Location of lcpencrypt binary\n :rtype: string\n \"\"\"\n return self._lcpencrypt_location\n\n @property\n def input_file_path(self):\n \"\"\"Returns path of the input file\n\n :return: Path of the input file\n :rtype: string\n \"\"\"\n return self._input_file_path\n\n @property\n def content_id(self):\n \"\"\"Returns content ID\n\n :return: Content ID\n :rtype: string\n \"\"\"\n return self._content_id\n\n @property\n def output_file_path(self):\n \"\"\"Returns path of the output file\n\n :return: Path of the output file\n :rtype: string\n \"\"\"\n return self._output_file_path\n\n def to_array(self):\n \"\"\"Returns parameters in an array\n\n :return: Parameters in an array\n :rtype: List\n \"\"\"\n parameters = [\n self._lcpencrypt_location,\n \"-input\",\n self._input_file_path,\n \"-contentid\",\n self._content_id,\n ]\n\n if self._output_file_path:\n parameters.extend([\"-output\", self._output_file_path])\n\n return parameters\n\n OUTPUT_REGEX = re.compile(r\"(\\{.+\\})?(.+)\", re.DOTALL)\n\n def __init__(self, configuration_storage, configuration_factory):\n \"\"\"Initializes a new instance of LCPEncryptor class\n\n :param configuration_storage: ConfigurationStorage object\n :type configuration_storage: ConfigurationStorage\n\n :param configuration_factory: Factory creating LCPEncryptionConfiguration instance\n :type configuration_factory: api.config.ConfigurationFactory\n \"\"\"\n self._logger = logging.getLogger(__name__)\n self._configuration_storage = configuration_storage\n self._configuration_factory = configuration_factory\n\n def _lcpencrypt_exists_locally(self, configuration):\n \"\"\"Returns a Boolean value indicating whether lcpencrypt exists locally\n\n :param configuration: LCPEncryptionConfiguration instance\n :type configuration: instance\n\n :return: Boolean value indicating whether lcpencrypt exists locally\n :rtype: bool\n \"\"\"\n return os.path.isfile(configuration.lcpencrypt_location)\n\n def _parse_output(self, output):\n \"\"\"Parses lcpencrypt's output\n\n :param output: lcpencrypt's 
output\n :type output: string\n\n :return: Encryption result\n :rtype: LCPEncryptionResult\n \"\"\"\n bracket_index = output.find(\"{\")\n\n if bracket_index > 0:\n output = output[bracket_index:]\n\n match = self.OUTPUT_REGEX.match(output)\n\n if not match:\n raise LCPEncryptionException(\"Output has a wrong format\")\n\n match_groups = match.groups()\n\n if not match_groups:\n raise LCPEncryptionException(\"Output has a wrong format\")\n\n if not match_groups[0]:\n raise LCPEncryptionException(match_groups[1].strip())\n\n json_output = match_groups[0]\n json_result = json.loads(json_output)\n result = LCPEncryptionResult.from_dict(json_result)\n\n if (\n not result.protected_content_length\n or not result.protected_content_sha256\n or not result.content_encryption_key\n ):\n raise LCPEncryptionException(\"Encryption failed\")\n\n return result\n\n def _run_lcpencrypt_locally(self, file_path, identifier, configuration):\n \"\"\"Runs lcpencrypt using a local binary\n\n :param file_path: File path to the book to be encrypted\n :type file_path: string\n\n :param identifier: Book's identifier\n :type identifier: string\n\n :param configuration: LCPEncryptionConfiguration instance\n :type configuration: instance\n\n :return: Encryption result\n :rtype: LCPEncryptionResult\n \"\"\"\n self._logger.info(\n \"Started running a local lcpencrypt binary. File path: {0}. Identifier: {1}\".format(\n file_path, identifier\n )\n )\n\n parameters = LCPEncryptor.Parameters(file_path, identifier, configuration)\n\n try:\n if parameters.output_file_path:\n self._logger.info(\n \"Creating a directory tree for {0}\".format(\n parameters.output_file_path\n )\n )\n\n output_directory = os.path.dirname(parameters.output_file_path)\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n self._logger.info(\n \"Directory tree {0} has been successfully created\".format(\n output_directory\n )\n )\n\n self._logger.info(\n \"Running lcpencrypt using the following parameters: {0}\".format(\n parameters.to_array()\n )\n )\n\n output = subprocess.check_output(parameters.to_array())\n result = self._parse_output(output)\n except Exception as exception:\n self._logger.exception(\n \"An unhandled exception occurred during running a local lcpencrypt binary\"\n )\n\n raise LCPEncryptionException(str(exception), inner_exception=exception)\n\n self._logger.info(\n \"Finished running a local lcpencrypt binary. File path: {0}. Identifier: {1}. 
Result: {2}\".format(\n file_path, identifier, result\n )\n )\n\n return result\n\n def encrypt(self, db, file_path, identifier):\n \"\"\"Encrypts a book\n\n :param db: Database session\n :type db: sqlalchemy.orm.session.Session\n\n :param file_path: File path to the book to be encrypted\n :type file_path: string\n\n :param identifier: Book's identifier\n :type identifier: string\n\n :return: Encryption result\n :rtype: LCPEncryptionResult\n \"\"\"\n with self._configuration_factory.create(\n self._configuration_storage, db, LCPEncryptionConfiguration\n ) as configuration:\n if self._lcpencrypt_exists_locally(configuration):\n result = self._run_lcpencrypt_locally(\n file_path, identifier, configuration\n )\n\n return result\n else:\n raise NotImplementedError()\n", "id": "5596441", "language": "Python", "matching_score": 2.2481637001037598, "max_stars_count": 0, "path": "api/lcp/encrypt.py" }, { "content": "from core.lcp.exceptions import LCPError\n\n\ndef format_datetime(datetime_value):\n \"\"\"Converts a datetime value into a string using the format which Go understands\n\n :param datetime_value: Datetime value\n :type datetime_value: datetime.datetime\n\n :return: String representation of the datetime value\n :rtype: string\n \"\"\"\n datetime_string_value = datetime_value.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n # NOTE: Go can parse only strings where the timezone contains a colon (e.g., -07:00)\n # Unfortunately, Python doesn't support such format and we have to do it manually\n # We assume that all the dates are in UTC\n datetime_string_value += \"+00:00\"\n\n return datetime_string_value\n\n\ndef get_target_extension(input_extension):\n if input_extension == \".epub\":\n target_extension = \".epub\"\n elif input_extension == \".pdf\":\n target_extension = \".lcpdf\"\n elif input_extension == \".lpf\":\n target_extension = \".audiobook\"\n elif input_extension == \".audiobook\":\n target_extension = \".audiobook\"\n else:\n raise LCPError('Unknown extension \"{0}\"'.format(input_extension))\n\n return target_extension\n\n\ndef bind_method(instance, func, as_name=None):\n \"\"\"Bind the function *func* to *instance*, with either provided name *as_name*\n or the existing name of *func*. The provided *func* should accept the\n instance as the first argument, i.e. 
\"self\".\n \"\"\"\n if as_name is None:\n as_name = func.__name__\n\n bound_method = func.__get__(instance, instance.__class__)\n setattr(instance, as_name, bound_method)\n\n return bound_method\n", "id": "2452655", "language": "Python", "matching_score": 1.6533697843551636, "max_stars_count": 0, "path": "api/lcp/utils.py" }, { "content": "import datetime\n\nimport pytest\nimport pytz\nfrom parameterized import parameterized\n\nfrom core.util.datetime_helpers import (\n datetime_utc,\n from_timestamp,\n strptime_utc,\n to_utc,\n utc_now,\n)\n\n\nclass TestDatetimeUTC(object):\n @parameterized.expand(\n [\n ([2021, 1, 1], \"2021-01-01T00:00:00\", \"2021-01-01T00:00:00+00:00\"),\n ([1955, 11, 5, 12], \"1955-11-05T12:00:00\", \"1955-11-05T12:00:00+00:00\"),\n ([2015, 10, 21, 4, 29], \"2015-10-21T04:29:00\", \"2015-10-21T04:29:00+00:00\"),\n (\n [2015, 5, 9, 9, 30, 15],\n \"2015-05-09T09:30:15\",\n \"2015-05-09T09:30:15+00:00\",\n ),\n ]\n )\n def test_datetime_utc(self, time, formatted, isoformat):\n \"\"\"`datetime_utc` is a wrapper around `datetime.datetime` but it also\n includes UTC information when it is created.\n \"\"\"\n time_format = \"%Y-%m-%dT%H:%M:%S\"\n dt = datetime.datetime(*time, tzinfo=pytz.UTC)\n util_dt = datetime_utc(*time)\n\n # The util function is the same as the datetime function with\n # pytz UTC information.\n assert dt == util_dt\n # A datetime object is returned and works like any datetime object.\n assert util_dt.tzinfo == pytz.UTC\n assert util_dt.strftime(time_format) == formatted\n assert util_dt.isoformat() == isoformat\n assert util_dt.year == time[0]\n assert util_dt.month == time[1]\n assert util_dt.day == time[2]\n\n\nclass TestFromTimestamp(object):\n def test_from_timestamp(self):\n \"\"\"`from_timestamp` is a wrapper around `datetime.fromtimestamp`\n that also includes UTC information.\n \"\"\"\n ts = 0\n datetime_from_ts = datetime.datetime.fromtimestamp(ts, tz=pytz.UTC)\n util_from_ts = from_timestamp(ts)\n\n # The util function returns the right datetime object from a timestamp.\n assert datetime_from_ts == util_from_ts\n assert datetime_from_ts.strftime(\"%Y-%m-%d\") == \"1970-01-01\"\n assert util_from_ts.strftime(\"%Y-%m-%d\") == \"1970-01-01\"\n\n # The UTC information for this datetime object is the pytz UTC value.\n assert util_from_ts.tzinfo is not None\n assert util_from_ts.tzinfo == pytz.UTC\n\n\nclass TestUTCNow(object):\n def test_utc_now(self):\n \"\"\"`utc_now` is a wrapper around `datetime.now` but it also includes\n UTC information.\n \"\"\"\n datetime_now = datetime.datetime.now(tz=pytz.UTC)\n util_now = utc_now()\n\n # Same time but it's going to be off by a few milliseconds.\n assert (datetime_now - util_now).total_seconds() < 2\n\n # The UTC information for this datetime object is the pytz UTC value.\n assert util_now.tzinfo == pytz.UTC\n\n\nclass TestToUTC(object):\n def test_to_utc(self):\n # `utc` marks a naive datetime object as being UTC, or\n # converts a timezone-aware datetime object to UTC.\n d1 = datetime.datetime(2021, 1, 1)\n d2 = datetime.datetime.strptime(\"2020\", \"%Y\")\n\n assert d1.tzinfo is None\n assert d2.tzinfo is None\n\n d1_utc = to_utc(d1)\n d2_utc = to_utc(d2)\n\n # The wrapper function is the same as the `replace` function,\n # just less verbose.\n assert d1_utc == d1.replace(tzinfo=pytz.UTC)\n assert d2_utc == d2.replace(tzinfo=pytz.UTC)\n # The timezone information is from pytz UTC.\n assert d1_utc.tzinfo == pytz.UTC\n assert d2_utc.tzinfo == pytz.UTC\n\n # Passing in None gets you None.\n assert 
to_utc(None) == None\n\n # Passing in a datetime that's already UTC is a no-op.\n assert d1_utc == to_utc(d1_utc)\n\n # Passing in a datetime from some other timezone converts to the\n # same time in UTC.\n d1 = datetime.datetime(2021, 1, 1)\n d1_eastern = d1_utc.astimezone(pytz.timezone(\"US/Eastern\"))\n assert d1_utc == to_utc(d1_eastern)\n\n @parameterized.expand(\n [\n ([2021, 1, 1], \"2021-01-01\", \"%Y-%m-%d\"),\n ([1955, 11, 5, 12], \"1955-11-05T12:00:00\", \"%Y-%m-%dT%H:%M:%S\"),\n ]\n )\n def test_strptime_utc(self, expect, date_string, format):\n assert strptime_utc(date_string, format) == datetime_utc(*expect)\n\n def test_strptime_utc_error(self):\n # You can only use strptime_utc for time formats that don't\n # mention a timezone.\n with pytest.raises(ValueError) as excinfo:\n strptime_utc(\"2020-01-01T12:00:00+0300\", \"%Y-%m-%dT%H:%M:%S%z\")\n assert (\n \"Cannot use strptime_utc with timezone-aware format %Y-%m-%dT%H:%M:%S%z\"\n in str(excinfo.value)\n )\n", "id": "5756089", "language": "Python", "matching_score": 0.8489443063735962, "max_stars_count": 0, "path": "tests/core/util/test_datetime_helpers.py" }, { "content": "from core.model import Work\nfrom core.testing import DatabaseTest\n\n\nclass TestAppealAssignment(DatabaseTest):\n def test_assign_appeals(self):\n work = self._work()\n work.assign_appeals(0.50, 0.25, 0.20, 0.05)\n assert 0.50 == work.appeal_character\n assert 0.25 == work.appeal_language\n assert 0.20 == work.appeal_setting\n assert 0.05 == work.appeal_story\n assert Work.CHARACTER_APPEAL == work.primary_appeal\n assert Work.LANGUAGE_APPEAL == work.secondary_appeal\n\n # Increase the cutoff point so that there is no secondary appeal.\n work.assign_appeals(0.50, 0.25, 0.20, 0.05, cutoff=0.30)\n assert 0.50 == work.appeal_character\n assert 0.25 == work.appeal_language\n assert 0.20 == work.appeal_setting\n assert 0.05 == work.appeal_story\n assert Work.CHARACTER_APPEAL == work.primary_appeal\n assert Work.NO_APPEAL == work.secondary_appeal\n", "id": "9530902", "language": "Python", "matching_score": 0.10404902696609497, "max_stars_count": 0, "path": "tests/core/models/test_appeal.py" }, { "content": "# encoding: utf-8\n\nimport pytest\nfrom psycopg2.extras import NumericRange\nfrom sqlalchemy import not_\nfrom sqlalchemy.orm.exc import MultipleResultsFound\n\nfrom core.config import Configuration\nfrom core.model import (\n Edition,\n SessionManager,\n Timestamp,\n get_one,\n numericrange_to_tuple,\n tuple_to_numericrange,\n)\nfrom core.testing import DatabaseTest\n\n\nclass TestDatabaseInterface(DatabaseTest):\n def test_get_one(self):\n\n # When a matching object isn't found, None is returned.\n result = get_one(self._db, Edition)\n assert None == result\n\n # When a single item is found, it is returned.\n edition = self._edition()\n result = get_one(self._db, Edition)\n assert edition == result\n\n # When multiple items are found, an error is raised.\n other_edition = self._edition()\n pytest.raises(MultipleResultsFound, get_one, self._db, Edition)\n\n # Unless they're interchangeable.\n result = get_one(self._db, Edition, on_multiple=\"interchangeable\")\n assert result in self._db.query(Edition)\n\n # Or specific attributes are passed that limit the results to one.\n result = get_one(\n self._db, Edition, title=other_edition.title, author=other_edition.author\n )\n assert other_edition == result\n\n # A particular constraint clause can also be passed in.\n titles = [ed.title for ed in (edition, other_edition)]\n constraint = 
not_(Edition.title.in_(titles))\n result = get_one(self._db, Edition, constraint=constraint)\n assert None == result\n\n def test_initialize_data_does_not_reset_timestamp(self):\n # initialize_data() has already been called, so the database is\n # initialized and the 'site configuration changed' Timestamp has\n # been set. Calling initialize_data() again won't change the\n # date on the timestamp.\n timestamp = get_one(\n self._db,\n Timestamp,\n collection=None,\n service=Configuration.SITE_CONFIGURATION_CHANGED,\n )\n old_timestamp = timestamp.finish\n SessionManager.initialize_data(self._db)\n assert old_timestamp == timestamp.finish\n\n\nclass TestNumericRangeConversion(object):\n \"\"\"Test the helper functions that convert between tuples and NumericRange\n objects.\n \"\"\"\n\n def test_tuple_to_numericrange(self):\n f = tuple_to_numericrange\n assert None == f(None)\n\n one_to_ten = f((1, 10))\n assert isinstance(one_to_ten, NumericRange)\n assert 1 == one_to_ten.lower\n assert 10 == one_to_ten.upper\n assert True == one_to_ten.upper_inc\n\n up_to_ten = f((None, 10))\n assert isinstance(up_to_ten, NumericRange)\n assert None == up_to_ten.lower\n assert 10 == up_to_ten.upper\n assert True == up_to_ten.upper_inc\n\n ten_and_up = f((10, None))\n assert isinstance(ten_and_up, NumericRange)\n assert 10 == ten_and_up.lower\n assert None == ten_and_up.upper\n assert False == ten_and_up.upper_inc\n\n def test_numericrange_to_tuple(self):\n m = numericrange_to_tuple\n two_to_six_inclusive = NumericRange(2, 6, \"[]\")\n assert (2, 6) == m(two_to_six_inclusive)\n two_to_six_exclusive = NumericRange(2, 6, \"()\")\n assert (3, 5) == m(two_to_six_exclusive)\n", "id": "2865239", "language": "Python", "matching_score": 0.7847467660903931, "max_stars_count": 0, "path": "tests/core/models/test_model.py" }, { "content": "# Pull in the session_fixture defined in core/testing.py\n# which does the database setup and initialization\npytest_plugins = [\"core.testing\"]\n", "id": "6566635", "language": "Python", "matching_score": 0.005693537648767233, "max_stars_count": 6, "path": "tests/core/conftest.py" }, { "content": "# encoding: utf-8\nfrom core.model import get_one_or_create\nfrom core.model.contributor import Contributor\nfrom core.model.datasource import DataSource\nfrom core.model.edition import Edition\nfrom core.model.identifier import Identifier\nfrom core.testing import DatabaseTest\n\n\nclass TestContributor(DatabaseTest):\n def test_marc_code_for_every_role_constant(self):\n \"\"\"We have determined the MARC Role Code for every role\n that's important enough we gave it a constant in the Contributor\n class.\n \"\"\"\n for constant, value in list(Contributor.__dict__.items()):\n if not constant.endswith(\"_ROLE\"):\n # Not a constant.\n continue\n assert value in Contributor.MARC_ROLE_CODES\n\n def test_lookup_by_viaf(self):\n\n # Two contributors named Bob.\n bob1, new = Contributor.lookup(self._db, sort_name=\"Bob\", viaf=\"foo\")\n bob2, new = Contributor.lookup(self._db, sort_name=\"Bob\", viaf=\"bar\")\n\n assert bob1 != bob2\n\n assert (bob1, False) == Contributor.lookup(self._db, viaf=\"foo\")\n\n def test_lookup_by_lc(self):\n\n # Two contributors named Bob.\n bob1, new = Contributor.lookup(self._db, sort_name=\"Bob\", lc=\"foo\")\n bob2, new = Contributor.lookup(self._db, sort_name=\"Bob\", lc=\"bar\")\n\n assert bob1 != bob2\n\n assert (bob1, False) == Contributor.lookup(self._db, lc=\"foo\")\n\n def test_lookup_by_viaf_interchangeable(self):\n # Two contributors with the same lc. 
This shouldn't happen, but\n # the reason it shouldn't happen is these two people are the same\n # person, so lookup() should just pick one and go with it.\n bob1, new = self._contributor(sort_name=\"Bob\", lc=\"foo\")\n bob2, new = self._contributor()\n bob2.sort_name = \"Bob\"\n bob2.lc = \"foo\"\n self._db.commit()\n assert bob1 != bob2\n [some_bob], new = Contributor.lookup(self._db, sort_name=\"Bob\", lc=\"foo\")\n assert False == new\n assert some_bob in (bob1, bob2)\n\n def test_lookup_by_name(self):\n\n # Two contributors named Bob.\n bob1, new = Contributor.lookup(self._db, sort_name=\"Bob\", lc=\"foo\")\n bob2, new = Contributor.lookup(self._db, sort_name=\"Bob\", lc=\"bar\")\n\n # Lookup by name finds both of them.\n bobs, new = Contributor.lookup(self._db, sort_name=\"Bob\")\n assert False == new\n assert [\"Bob\", \"Bob\"] == [x.sort_name for x in bobs]\n\n def test_create_by_lookup(self):\n [bob1], new = Contributor.lookup(self._db, sort_name=\"Bob\")\n assert \"Bob\" == bob1.sort_name\n assert True == new\n\n [bob2], new = Contributor.lookup(self._db, sort_name=\"Bob\")\n assert bob1 == bob2\n assert False == new\n\n def test_merge(self):\n\n # Here's Robert.\n [robert], ignore = Contributor.lookup(self._db, sort_name=\"Robert\")\n\n # Here's Bob.\n [bob], ignore = Contributor.lookup(self._db, sort_name=\"<NAME>\")\n bob.extra[\"foo\"] = \"bar\"\n bob.aliases = [\"Bobby\"]\n bob.viaf = \"viaf\"\n bob.lc = \"lc\"\n bob.display_name = \"<NAME>\"\n bob.family_name = \"Bobb\"\n bob.wikipedia_name = \"Bob_(Person)\"\n\n # Each is a contributor to a Edition.\n data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)\n\n roberts_book, ignore = Edition.for_foreign_id(\n self._db, data_source, Identifier.GUTENBERG_ID, \"1\"\n )\n roberts_book.add_contributor(robert, Contributor.AUTHOR_ROLE)\n\n bobs_book, ignore = Edition.for_foreign_id(\n self._db, data_source, Identifier.GUTENBERG_ID, \"10\"\n )\n bobs_book.add_contributor(bob, Contributor.AUTHOR_ROLE)\n\n # In a shocking turn of events, it transpires that \"Bob\" and\n # \"Robert\" are the same person. 
We merge \"Bob\" into Robert\n # thusly:\n bob.merge_into(robert)\n\n # 'Bob' is now listed as an alias for Robert, as is Bob's\n # alias.\n assert [\"<NAME>\", \"Bobby\"] == robert.aliases\n\n # The extra information associated with Bob is now associated\n # with Robert.\n assert \"bar\" == robert.extra[\"foo\"]\n\n assert \"viaf\" == robert.viaf\n assert \"lc\" == robert.lc\n assert \"Bobb\" == robert.family_name\n assert \"<NAME>\" == robert.display_name\n assert \"Robert\" == robert.sort_name\n assert \"Bob_(Person)\" == robert.wikipedia_name\n\n # The standalone 'Bob' record has been removed from the database.\n assert (\n []\n == self._db.query(Contributor).filter(Contributor.sort_name == \"Bob\").all()\n )\n\n # Bob's book is now associated with 'Robert', not the standalone\n # 'Bob' record.\n assert [robert] == bobs_book.author_contributors\n\n # confirm the sort_name is propagated, if not already set in the destination contributor\n robert.sort_name = None\n [bob], ignore = Contributor.lookup(self._db, sort_name=\"<NAME>\")\n bob.merge_into(robert)\n assert \"<NAME>\" == robert.sort_name\n\n def _names(self, in_name, out_family, out_display, default_display_name=None):\n f, d = Contributor._default_names(in_name, default_display_name)\n assert f == out_family\n assert d == out_display\n\n def test_default_names(self):\n\n # Pass in a default display name and it will always be used.\n self._names(\n \"<NAME>\", \"Jones\", \"<NAME>\", default_display_name=\"<NAME>\"\n )\n\n # Corporate names are untouched and get no family name.\n self._names(\"Bob's Books.\", None, \"Bob's Books.\")\n self._names(\"Bob's Books, Inc.\", None, \"<NAME>, Inc.\")\n self._names(\"<NAME> &amp; Co.\", None, \"Little, Brown & Co.\")\n self._names(\n \"Philadelphia Broad Street Church (Philadelphia, Pa.)\",\n None,\n \"Philadelphia Broad Street Church\",\n )\n\n # Dates and other gibberish after a name is removed.\n self._names(\"<NAME>, 1855-1910\", \"Twain\", \"<NAME>\")\n self._names(\"<NAME>, ???-1910\", \"Twain\", \"<NAME>\")\n self._names(\"<NAME>, circ. 1900\", \"Twain\", \"<NAME>\")\n self._names(\"<NAME>, !@#!@\", \"Twain\", \"<NAME>\")\n self._names(\"<NAME>. 1842?-1928\", \"Coolbrith\", \"<NAME>\")\n self._names(\"<NAME>, 1st cent.\", \"Caesar\", \"<NAME>\")\n self._names(\"Arrian, 2nd cent.\", \"Arrian\", \"Arrian\")\n self._names(\"Hafiz, 14th cent.\", \"Hafiz\", \"Hafiz\")\n self._names(\"Hormel, Bob 1950?-\", \"Hormel\", \"<NAME>\")\n self._names(\n \"<NAME> 1583-1650? Mon<NAME>ul<NAME>\",\n \"Holland\",\n \"<NAME>\",\n )\n\n # Suffixes stay on the end, except for \"Mrs.\", which goes\n # to the front.\n self._names(\"<NAME>r.\", \"Twain\", \"<NAME>, Jr.\")\n self._names(\"House, Gregory, M.D.\", \"House\", \"Gregory House, M.D.\")\n self._names(\"<NAME>, Mrs.\", \"Twain\", \"Mrs. 
<NAME>\")\n self._names(\"<NAME>rs\", \"Twain\", \"Mrs <NAME>\")\n\n # The easy case.\n self._names(\"<NAME>\", \"Twain\", \"<NAME>\")\n self._names(\"<NAME>.\", \"Geering\", \"<NAME>\")\n\n def test_sort_name(self):\n bob, new = get_one_or_create(self._db, Contributor, sort_name=None)\n assert None == bob.sort_name\n\n bob, ignore = self._contributor(sort_name=\"<NAME>\")\n bob.sort_name = None\n assert None == bob.sort_name\n\n bob, ignore = self._contributor(sort_name=\"<NAME>\")\n assert \"Bitshifter, Bob\" == bob.sort_name\n\n bob, ignore = self._contributor(sort_name=\"Bitshifter, Bob\")\n assert \"Bitshifter, Bob\" == bob.sort_name\n\n # test that human name parser doesn't die badly on foreign names\n bob, ignore = self._contributor(sort_name=\"Боб Битшифтер\")\n assert \"Битшифтер, Боб\" == bob.sort_name\n", "id": "4120356", "language": "Python", "matching_score": 1.5514270067214966, "max_stars_count": 0, "path": "tests/core/models/test_contributor.py" }, { "content": "from io import BytesIO\n\nimport unicodecsv as csv\nfrom sqlalchemy.sql import func, select\nfrom sqlalchemy.sql.expression import and_, case, join, literal_column, or_\n\nfrom core.model import (\n CirculationEvent,\n Edition,\n Genre,\n Identifier,\n LicensePool,\n Work,\n WorkGenre,\n)\n\n\nclass LocalAnalyticsExporter(object):\n \"\"\"Export large numbers of analytics events in CSV format.\"\"\"\n\n def export(self, _db, start, end, locations=None, library=None):\n\n # Get the results from the database.\n query = self.analytics_query(start, end, locations, library)\n results = _db.execute(query)\n\n # Write the CSV file to a BytesIO.\n header = [\n \"time\",\n \"event\",\n \"identifier\",\n \"identifier_type\",\n \"title\",\n \"author\",\n \"fiction\",\n \"audience\",\n \"publisher\",\n \"imprint\",\n \"language\",\n \"target_age\",\n \"genres\",\n \"location\",\n ]\n output = BytesIO()\n writer = csv.writer(output, encoding=\"utf-8\")\n writer.writerow(header)\n writer.writerows(results)\n return output.getvalue().decode(\"utf-8\")\n\n def analytics_query(self, start, end, locations=None, library=None):\n \"\"\"Build a database query that fetches rows of analytics data.\n\n This method uses low-level SQLAlchemy code to do all\n calculations and data conversations in the database. It's\n modeled after Work.to_search_documents, which generates a\n large JSON document entirely in the database.\n\n :return: An iterator of results, each of which can be written\n directly to a CSV file.\n \"\"\"\n\n clauses = [\n CirculationEvent.start >= start,\n CirculationEvent.start < end,\n ]\n\n if locations:\n event_types = [\n CirculationEvent.CM_CHECKOUT,\n CirculationEvent.CM_FULFILL,\n CirculationEvent.OPEN_BOOK,\n ]\n locations = locations.strip().split(\",\")\n\n clauses += [\n CirculationEvent.type.in_(event_types),\n CirculationEvent.location.in_(locations),\n ]\n\n if library:\n clauses += [CirculationEvent.library == library]\n\n # Build the primary query. This is a query against the\n # CirculationEvent table and a few other tables joined against\n # it. 
This makes up the bulk of the data.\n events_alias = (\n select(\n [\n func.to_char(CirculationEvent.start, \"YYYY-MM-DD HH24:MI:SS\").label(\n \"start\"\n ),\n CirculationEvent.type.label(\"event_type\"),\n Identifier.identifier,\n Identifier.type.label(\"identifier_type\"),\n Edition.sort_title,\n Edition.sort_author,\n case(\n [(Work.fiction == True, literal_column(\"'fiction'\"))],\n else_=literal_column(\"'nonfiction'\"),\n ).label(\"fiction\"),\n Work.id.label(\"work_id\"),\n Work.audience,\n Edition.publisher,\n Edition.imprint,\n Edition.language,\n CirculationEvent.location,\n ],\n )\n .select_from(\n join(\n CirculationEvent,\n LicensePool,\n CirculationEvent.license_pool_id == LicensePool.id,\n )\n .join(Identifier, LicensePool.identifier_id == Identifier.id)\n .join(Work, Work.id == LicensePool.work_id)\n .join(Edition, Work.presentation_edition_id == Edition.id)\n )\n .where(and_(*clauses))\n .order_by(CirculationEvent.start.asc())\n .alias(\"events_alias\")\n )\n\n # A subquery can hook into the main query by referencing its\n # 'work_id' field in its WHERE clause.\n work_id_column = literal_column(\n events_alias.name + \".\" + events_alias.c.work_id.name\n )\n\n # This subquery gets the names of a Work's genres as a single\n # comma-separated string.\n #\n\n # This Alias selects some number of rows, each containing one\n # string column (Genre.name). Genres with higher affinities with\n # this work go first.\n genres_alias = (\n select([Genre.name.label(\"genre_name\")])\n .select_from(join(WorkGenre, Genre, WorkGenre.genre_id == Genre.id))\n .where(WorkGenre.work_id == work_id_column)\n .order_by(WorkGenre.affinity.desc(), Genre.name)\n .alias(\"genres_subquery\")\n )\n\n # Use array_agg() to consolidate the rows into one row -- this\n # gives us a single value, an array of strings, for each\n # Work. Then use array_to_string to convert the array into a\n # single comma-separated string.\n genres = select(\n [func.array_to_string(func.array_agg(genres_alias.c.genre_name), \",\")]\n ).select_from(genres_alias)\n\n # This subquery gets the a Work's target age as a single string.\n #\n\n # This Alias selects two fields: the lower and upper bounds of\n # the Work's target age. This reuses code originally written\n # for Work.to_search_documents().\n target_age = Work.target_age_query(work_id_column).alias(\"target_age_subquery\")\n\n # Concatenate the lower and upper bounds with a dash in the\n # middle. If both lower and upper bound are empty, just give\n # the empty string. 
This simulates the behavior of\n # Work.target_age_string.\n target_age_string = select(\n [\n case(\n [\n (\n or_(target_age.c.lower != None, target_age.c.upper != None),\n func.concat(target_age.c.lower, \"-\", target_age.c.upper),\n )\n ],\n else_=literal_column(\"''\"),\n )\n ]\n ).select_from(target_age)\n\n # Build the main query out of the subqueries.\n events = events_alias.c\n query = select(\n [\n events.start,\n events.event_type,\n events.identifier,\n events.identifier_type,\n events.sort_title,\n events.sort_author,\n events.fiction,\n events.audience,\n events.publisher,\n events.imprint,\n events.language,\n target_age_string.label(\"target_age\"),\n genres.label(\"genres\"),\n events.location,\n ]\n ).select_from(events_alias)\n return query\n", "id": "5917075", "language": "Python", "matching_score": 1.5914324522018433, "max_stars_count": 0, "path": "api/local_analytics_exporter.py" }, { "content": "# encoding: utf-8\n\"\"\"Test functionality of util/flask_util.py.\"\"\"\n\nimport datetime\nimport time\nfrom wsgiref.handlers import format_date_time\n\nfrom flask import Response as FlaskResponse\n\nfrom core.util.datetime_helpers import utc_now\nfrom core.util.flask_util import OPDSEntryResponse, OPDSFeedResponse, Response\nfrom core.util.opds_writer import OPDSFeed\n\n\nclass TestResponse(object):\n def test_constructor(self):\n response = Response(\n \"content\",\n 401,\n dict(Header=\"value\"),\n \"mime/type\",\n \"content/type\",\n True,\n 1002,\n )\n assert 1002 == response.max_age\n assert isinstance(response, FlaskResponse)\n assert 401 == response.status_code\n assert \"content\" == str(response)\n assert True == response.direct_passthrough\n\n # Response.headers is tested in more detail below.\n headers = response.headers\n assert \"value\" == headers[\"Header\"]\n assert \"Cache-Control\" in headers\n assert \"Expires\" in headers\n\n def test_headers(self):\n # First, test cases where the response should be private and\n # not cached. 
These are the kinds of settings used for error\n # messages.\n def assert_not_cached(max_age):\n headers = Response(max_age=max_age).headers\n assert \"private, no-cache\" == headers[\"Cache-Control\"]\n assert \"Authorization\" == headers[\"Vary\"]\n assert \"Expires\" not in headers\n\n assert_not_cached(max_age=None)\n assert_not_cached(max_age=0)\n assert_not_cached(max_age=\"Not a number\")\n\n # Test the case where the response is public but should not be cached.\n headers = Response(max_age=0, private=False).headers\n assert \"public, no-cache\" == headers[\"Cache-Control\"]\n assert \"Vary\" not in headers\n\n # Test the case where the response is private but may be\n # cached privately.\n headers = Response(max_age=300, private=True).headers\n assert \"private, no-transform, max-age=300\" == headers[\"Cache-Control\"]\n assert \"Authorization\" == headers[\"Vary\"]\n\n # Test the case where the response is public and may be cached,\n # including by intermediaries.\n max_age = 60 * 60 * 24 * 12\n obj = Response(max_age=max_age)\n\n headers = obj.headers\n cc = headers[\"Cache-Control\"]\n assert cc == \"public, no-transform, max-age=1036800, s-maxage=518400\"\n\n # We expect the Expires header to look basically like this.\n expect_expires = utc_now() + datetime.timedelta(seconds=max_age)\n expect_expires_string = format_date_time(\n time.mktime(expect_expires.timetuple())\n )\n\n # We'll only check the date part of the Expires header, to\n # minimize the changes of spurious failures based on\n # unfortunate timing.\n expires = headers[\"Expires\"]\n assert expires[:17] == expect_expires_string[:17]\n\n # It's possible to have a response that is private but should\n # be cached. The feed of a patron's current loans is a good\n # example.\n response = Response(max_age=30, private=True)\n cache_control = response.headers[\"Cache-Control\"]\n assert \"private\" in cache_control\n assert \"max-age=30\" in cache_control\n assert \"Authorization\" == response.headers[\"Vary\"]\n\n def test_unicode(self):\n # You can easily convert a Response object to Unicode\n # for use in a test.\n obj = Response(\"some data\")\n assert \"some data\" == str(obj)\n\n\nclass TestOPDSFeedResponse(object):\n \"\"\"Test the OPDS feed-specific specialization of Response.\"\"\"\n\n def test_defaults(self):\n # OPDSFeedResponse provides reasonable defaults for\n # `mimetype` and `max_age`.\n c = OPDSFeedResponse\n\n use_defaults = c(\"a feed\")\n assert OPDSFeed.ACQUISITION_FEED_TYPE == use_defaults.content_type\n assert OPDSFeed.DEFAULT_MAX_AGE == use_defaults.max_age\n\n # Flask Response.mimetype is the same as content_type but\n # with parameters removed.\n assert OPDSFeed.ATOM_TYPE == use_defaults.mimetype\n\n # These defaults can be overridden.\n override_defaults = c(\n \"a feed\", 200, dict(Header=\"value\"), \"mime/type\", \"content/type\", True, 1002\n )\n assert 1002 == override_defaults.max_age\n\n # In Flask code, if mimetype and content_type conflict,\n # content_type takes precedence.\n assert \"content/type\" == override_defaults.content_type\n assert \"content/type\" == override_defaults.mimetype\n\n # A max_age of zero is retained, not replaced by the default.\n do_not_cache = c(max_age=0)\n assert 0 == do_not_cache.max_age\n\n\nclass TestOPDSEntryResponse(object):\n \"\"\"Test the OPDS entry-specific specialization of Response.\"\"\"\n\n def test_defaults(self):\n # OPDSEntryResponse provides a reasonable defaults for\n # `mimetype`.\n c = OPDSEntryResponse\n\n use_defaults = c(\"an entry\")\n 
assert OPDSFeed.ENTRY_TYPE == use_defaults.content_type\n\n # Flask Response.mimetype is the same as content_type but\n # with parameters removed.\n assert OPDSFeed.ATOM_TYPE == use_defaults.mimetype\n\n # These defaults can be overridden.\n override_defaults = c(\"an entry\", content_type=\"content/type\")\n assert \"content/type\" == override_defaults.content_type\n assert \"content/type\" == override_defaults.mimetype\n", "id": "2378200", "language": "Python", "matching_score": 1.6233181953430176, "max_stars_count": 0, "path": "tests/core/util/test_flask_util.py" }, { "content": "import logging\n\nimport flask\nimport pytest\nfrom flask import Response\nfrom werkzeug.exceptions import MethodNotAllowed\n\nfrom api import routes\nfrom api.controller import CirculationManager\nfrom api.routes import exception_handler\nfrom api.routes import h as error_handler_object\nfrom core.app_server import ErrorHandler\n\nfrom .test_controller import ControllerTest\n\n\nclass MockApp(object):\n \"\"\"Pretends to be a Flask application with a configured\n CirculationManager.\n \"\"\"\n\n def __init__(self):\n self.manager = MockManager()\n\n\nclass MockManager(object):\n \"\"\"Pretends to be a CirculationManager with configured controllers.\"\"\"\n\n def __init__(self):\n self._cache = {}\n\n # This is used by the allows_patron_web annotator.\n self.patron_web_domains = set([\"http://patron/web\"])\n\n def __getattr__(self, controller_name):\n return self._cache.setdefault(controller_name, MockController(controller_name))\n\n\nclass MockControllerMethod(object):\n \"\"\"Pretends to be one of the methods of a controller class.\"\"\"\n\n def __init__(self, controller, name):\n \"\"\"Constructor.\n\n :param controller: A MockController.\n :param name: The name of this method.\n \"\"\"\n self.controller = controller\n self.name = name\n self.callable_name = name\n\n def __call__(self, *args, **kwargs):\n \"\"\"Simulate a successful method call.\n\n :return: A Response object, as required by Flask, with this\n method smuggled out as the 'method' attribute.\n \"\"\"\n self.args = args\n self.kwargs = kwargs\n response = Response(\"I called %s\" % repr(self), 200)\n response.method = self\n return response\n\n def __repr__(self):\n return \"<MockControllerMethod %s.%s>\" % (self.controller.name, self.name)\n\n\nclass MockController(MockControllerMethod):\n \"\"\"Pretends to be a controller.\n\n A controller has methods, but it may also be called _as_ a method,\n so this class subclasses MockControllerMethod.\n \"\"\"\n\n AUTHENTICATED_PATRON = \"i am a mock patron\"\n\n def __init__(self, name):\n \"\"\"Constructor.\n\n :param name: The name of the controller.\n \"\"\"\n self.name = name\n\n # If this controller were to be called as a method, the method\n # name would be __call__, not the name of the controller.\n self.callable_name = \"__call__\"\n\n self._cache = {}\n self.authenticated = False\n self.csrf_token = False\n self.authenticated_problem_detail = False\n\n def authenticated_patron_from_request(self):\n if self.authenticated:\n patron = object()\n flask.request.patron = self.AUTHENTICATED_PATRON\n return self.AUTHENTICATED_PATRON\n else:\n return Response(\n \"authenticated_patron_from_request called without authorizing\", 401\n )\n\n def __getattr__(self, method_name):\n \"\"\"Locate a method of this controller as a MockControllerMethod.\"\"\"\n return self._cache.setdefault(\n method_name, MockControllerMethod(self, method_name)\n )\n\n def __repr__(self):\n return \"<MockControllerMethod %s>\" 
% self.name\n\n\nclass RouteTestFixtures(object):\n def request(self, url, method=\"GET\"):\n \"\"\"Simulate a request to a URL without triggering any code outside\n routes.py.\n \"\"\"\n # Map an incoming URL to the name of a function within routes.py\n # and a set of arguments to the function.\n function_name, kwargs = self.resolver.match(url, method)\n # Locate the corresponding function in our mock app.\n mock_function = getattr(self.routes, function_name)\n\n # Call it in the context of the mock app.\n with self.app.test_request_context():\n return mock_function(**kwargs)\n\n def assert_request_calls(self, url, method, *args, **kwargs):\n \"\"\"Make a request to the given `url` and assert that\n the given controller `method` was called with the\n given `args` and `kwargs`.\n \"\"\"\n http_method = kwargs.pop(\"http_method\", \"GET\")\n response = self.request(url, http_method)\n assert response.method == method\n assert response.method.args == args\n assert response.method.kwargs == kwargs\n\n # Make sure the real controller has a method by the name of\n # the mock method that was called. We won't call it, because\n # it would slow down these tests dramatically, but we can make\n # sure it exists.\n if self.real_controller:\n real_method = getattr(self.real_controller, method.callable_name)\n\n # TODO: We could use inspect.getarcspec to verify that the\n # argument names line up with the variables passed in to\n # the mock method. This might remove the need to call the\n # mock method at all.\n\n def assert_request_calls_method_using_identifier(\n self, url, method, *args, **kwargs\n ):\n # Call an assertion method several times, using different\n # types of identifier in the URL, to make sure the identifier\n # is always passed through correctly.\n #\n # The url must contain the string '<identifier>' standing in\n # for the place where an identifier should be plugged in, and\n # the *args list must include the string '<identifier>'.\n authenticated = kwargs.pop(\"authenticated\", False)\n if authenticated:\n assertion_method = self.assert_authenticated_request_calls\n else:\n assertion_method = self.assert_request_calls\n assert \"<identifier>\" in url\n args = list(args)\n identifier_index = args.index(\"<identifier>\")\n for identifier in (\n \"<identifier>\",\n \"an/identifier/\",\n \"http://an-identifier/\",\n \"http://an-identifier\",\n ):\n modified_url = url.replace(\"<identifier>\", identifier)\n args[identifier_index] = identifier\n assertion_method(modified_url, method, *args, **kwargs)\n\n def assert_authenticated_request_calls(self, url, method, *args, **kwargs):\n \"\"\"First verify that an unauthenticated request fails. 
Then make an\n authenticated request to `url` and verify the results, as with\n assert_request_calls\n \"\"\"\n authentication_required = kwargs.pop(\"authentication_required\", True)\n\n http_method = kwargs.pop(\"http_method\", \"GET\")\n response = self.request(url, http_method)\n if authentication_required:\n assert 401 == response.status_code\n assert (\n \"authenticated_patron_from_request called without authorizing\"\n == response.get_data(as_text=True)\n )\n else:\n assert 200 == response.status_code\n\n # Set a variable so that authenticated_patron_from_request\n # will succeed, and try again.\n self.manager.index_controller.authenticated = True\n try:\n kwargs[\"http_method\"] = http_method\n self.assert_request_calls(url, method, *args, **kwargs)\n finally:\n # Un-set authentication for the benefit of future\n # assertions in this test function.\n self.manager.index_controller.authenticated = False\n\n def assert_supported_methods(self, url, *methods):\n \"\"\"Verify that the given HTTP `methods` are the only ones supported\n on the given `url`.\n \"\"\"\n # The simplest way to do this seems to be to try each of the\n # other potential methods and verify that MethodNotAllowed is\n # raised each time.\n check = set([\"GET\", \"POST\", \"PUT\", \"DELETE\"]) - set(methods)\n # Treat HEAD specially. Any controller that supports GET\n # automatically supports HEAD. So we only assert that HEAD\n # fails if the method supports neither GET nor HEAD.\n if \"GET\" not in methods and \"HEAD\" not in methods:\n check.add(\"HEAD\")\n for method in check:\n logging.debug(\"MethodNotAllowed should be raised on %s\", method)\n pytest.raises(MethodNotAllowed, self.request, url, method)\n logging.debug(\"And it was.\")\n\n\nclass RouteTest(ControllerTest, RouteTestFixtures):\n \"\"\"Test what happens when an HTTP request is run through the\n routes we've registered with Flask.\n \"\"\"\n\n # The first time setup_method() is called, it will instantiate a real\n # CirculationManager object and store it in REAL_CIRCULATION_MANAGER.\n # We only do this once because it takes about a second to instantiate\n # this object. 
Calling any of this object's methods could be problematic,\n # since it's probably left over from a previous test, but we won't be\n # calling any methods -- we just want to verify the _existence_,\n # in a real CirculationManager, of the methods called in\n # routes.py.\n @classmethod\n def setup_class(cls):\n super(RouteTest, cls).setup_class()\n cls.REAL_CIRCULATION_MANAGER = None\n\n def setup_method(self):\n self.setup_circulation_manager = False\n super(RouteTest, self).setup_method()\n if not self.REAL_CIRCULATION_MANAGER:\n library = self._default_library\n # Set up the necessary configuration so that when we\n # instantiate the CirculationManager it gets an\n # adobe_vendor_id controller -- this wouldn't normally\n # happen because most circulation managers don't need such a\n # controller.\n self.initialize_adobe(library, [library])\n self.adobe_vendor_id.password = <PASSWORD>\n manager = CirculationManager(self._db, testing=True)\n self.REAL_CIRCULATION_MANAGER = manager\n app = MockApp()\n self.routes = routes\n self.manager = app.manager\n self.original_app = self.routes.app\n self.resolver = self.original_app.url_map.bind(\"\", \"/\")\n\n # For convenience, set self.controller to a specific controller\n # whose routes are being tested.\n controller_name = getattr(self, \"CONTROLLER_NAME\", None)\n if controller_name:\n self.controller = getattr(self.manager, controller_name)\n\n # Make sure there's a controller by this name in the real\n # CirculationManager.\n self.real_controller = getattr(\n self.REAL_CIRCULATION_MANAGER, controller_name\n )\n else:\n self.real_controller = None\n\n self.routes.app = app\n\n def teardown_method(self):\n super(RouteTest, self).teardown_method()\n self.routes.app = self.original_app\n\n\nclass TestAppConfiguration(object):\n\n # Test the configuration of the real Flask app.\n def test_configuration(self):\n assert False == routes.app.url_map.merge_slashes\n\n\nclass TestIndex(RouteTest):\n\n CONTROLLER_NAME = \"index_controller\"\n\n def test_index(self):\n for url in \"/\", \"\":\n self.assert_request_calls(url, self.controller)\n\n def test_authentication_document(self):\n url = \"/authentication_document\"\n self.assert_request_calls(url, self.controller.authentication_document)\n\n def test_public_key_document(self):\n url = \"/public_key_document\"\n self.assert_request_calls(url, self.controller.public_key_document)\n\n\nclass TestOPDSFeed(RouteTest):\n\n CONTROLLER_NAME = \"opds_feeds\"\n\n def test_acquisition_groups(self):\n # An incoming lane identifier is passed in to the groups()\n # method.\n method = self.controller.groups\n self.assert_request_calls(\"/groups\", method, None)\n self.assert_request_calls(\n \"/groups/<lane_identifier>\", method, \"<lane_identifier>\"\n )\n\n def test_feed(self):\n # An incoming lane identifier is passed in to the feed()\n # method.\n url = \"/feed\"\n self.assert_request_calls(url, self.controller.feed, None)\n url = \"/feed/<lane_identifier>\"\n self.assert_request_calls(url, self.controller.feed, \"<lane_identifier>\")\n\n def test_navigation_feed(self):\n # An incoming lane identifier is passed in to the navigation_feed()\n # method.\n url = \"/navigation\"\n self.assert_request_calls(url, self.controller.navigation, None)\n url = \"/navigation/<lane_identifier>\"\n self.assert_request_calls(url, self.controller.navigation, \"<lane_identifier>\")\n\n def test_crawlable_library_feed(self):\n url = \"/crawlable\"\n self.assert_request_calls(url, self.controller.crawlable_library_feed)\n\n def 
test_crawlable_list_feed(self):\n url = \"/lists/<list_name>/crawlable\"\n self.assert_request_calls(\n url, self.controller.crawlable_list_feed, \"<list_name>\"\n )\n\n def test_crawlable_collection_feed(self):\n url = \"/collections/<collection_name>/crawlable\"\n self.assert_request_calls(\n url, self.manager.opds_feeds.crawlable_collection_feed, \"<collection_name>\"\n )\n\n def test_lane_search(self):\n url = \"/search\"\n self.assert_request_calls(url, self.controller.search, None)\n\n url = \"/search/<lane_identifier>\"\n self.assert_request_calls(url, self.controller.search, \"<lane_identifier>\")\n\n def test_qa_feed(self):\n url = \"/feed/qa\"\n self.assert_authenticated_request_calls(url, self.controller.qa_feed)\n\n def test_qa_series_feed(self):\n url = \"/feed/qa/series\"\n self.assert_authenticated_request_calls(url, self.controller.qa_series_feed)\n\n\nclass TestMARCRecord(RouteTest):\n CONTROLLER_NAME = \"marc_records\"\n\n def test_marc_page(self):\n url = \"/marc\"\n self.assert_request_calls(url, self.controller.download_page)\n\n\nclass TestSharedCollection(RouteTest):\n\n CONTROLLER_NAME = \"shared_collection_controller\"\n\n def test_shared_collection_info(self):\n url = \"/collections/<collection_name>\"\n self.assert_request_calls(url, self.controller.info, \"<collection_name>\")\n\n def test_shared_collection_register(self):\n url = \"/collections/<collection_name>/register\"\n self.assert_request_calls(\n url, self.controller.register, \"<collection_name>\", http_method=\"POST\"\n )\n self.assert_supported_methods(url, \"POST\")\n\n def test_shared_collection_borrow_identifier(self):\n url = \"/collections/<collection_name>/<identifier_type>/<identifier>/borrow\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.borrow,\n \"<collection_name>\",\n \"<identifier_type>\",\n \"<identifier>\",\n None,\n )\n self.assert_supported_methods(url, \"GET\", \"POST\")\n\n def test_shared_collection_borrow_hold_id(self):\n url = \"/collections/<collection_name>/holds/<hold_id>/borrow\"\n self.assert_request_calls(\n url, self.controller.borrow, \"<collection_name>\", None, None, \"<hold_id>\"\n )\n self.assert_supported_methods(url, \"GET\", \"POST\")\n\n def test_shared_collection_loan_info(self):\n url = \"/collections/<collection_name>/loans/<loan_id>\"\n self.assert_request_calls(\n url, self.controller.loan_info, \"<collection_name>\", \"<loan_id>\"\n )\n\n def test_shared_collection_revoke_loan(self):\n url = \"/collections/<collection_name>/loans/<loan_id>/revoke\"\n self.assert_request_calls(\n url, self.controller.revoke_loan, \"<collection_name>\", \"<loan_id>\"\n )\n\n def test_shared_collection_fulfill_no_mechanism(self):\n url = \"/collections/<collection_name>/loans/<loan_id>/fulfill\"\n self.assert_request_calls(\n url, self.controller.fulfill, \"<collection_name>\", \"<loan_id>\", None\n )\n\n def test_shared_collection_fulfill_with_mechanism(self):\n url = \"/collections/<collection_name>/loans/<loan_id>/fulfill/<mechanism_id>\"\n self.assert_request_calls(\n url,\n self.controller.fulfill,\n \"<collection_name>\",\n \"<loan_id>\",\n \"<mechanism_id>\",\n )\n\n def test_shared_collection_hold_info(self):\n url = \"/collections/<collection_name>/holds/<hold_id>\"\n self.assert_request_calls(\n url, self.controller.hold_info, \"<collection_name>\", \"<hold_id>\"\n )\n\n def test_shared_collection_revoke_hold(self):\n url = \"/collections/<collection_name>/holds/<hold_id>/revoke\"\n self.assert_request_calls(\n url, 
self.controller.revoke_hold, \"<collection_name>\", \"<hold_id>\"\n )\n\n\nclass TestProfileController(RouteTest):\n\n CONTROLLER_NAME = \"profiles\"\n\n def test_patron_profile(self):\n url = \"/patrons/me\"\n self.assert_authenticated_request_calls(\n url,\n self.controller.protocol,\n )\n\n\nclass TestLoansController(RouteTest):\n\n CONTROLLER_NAME = \"loans\"\n\n def test_active_loans(self):\n url = \"/loans\"\n self.assert_authenticated_request_calls(\n url,\n self.controller.sync,\n )\n self.assert_supported_methods(url, \"GET\", \"HEAD\")\n\n def test_borrow(self):\n url = \"/works/<identifier_type>/<identifier>/borrow\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.borrow,\n \"<identifier_type>\",\n \"<identifier>\",\n None,\n authenticated=True,\n )\n self.assert_supported_methods(url, \"GET\", \"PUT\")\n\n url = \"/works/<identifier_type>/<identifier>/borrow/<mechanism_id>\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.borrow,\n \"<identifier_type>\",\n \"<identifier>\",\n \"<mechanism_id>\",\n authenticated=True,\n )\n self.assert_supported_methods(url, \"GET\", \"PUT\")\n\n def test_fulfill(self):\n # fulfill does *not* require authentication, because this\n # controller is how a no-authentication library fulfills\n # open-access titles.\n url = \"/works/<license_pool_id>/fulfill\"\n self.assert_request_calls(\n url, self.controller.fulfill, \"<license_pool_id>\", None, None\n )\n\n url = \"/works/<license_pool_id>/fulfill/<mechanism_id>\"\n self.assert_request_calls(\n url, self.controller.fulfill, \"<license_pool_id>\", \"<mechanism_id>\", None\n )\n\n url = \"/works/<license_pool_id>/fulfill/<mechanism_id>/<part>\"\n self.assert_request_calls(\n url,\n self.controller.fulfill,\n \"<license_pool_id>\",\n \"<mechanism_id>\",\n \"<part>\",\n )\n\n def test_revoke_loan_or_hold(self):\n url = \"/loans/<license_pool_id>/revoke\"\n self.assert_authenticated_request_calls(\n url, self.controller.revoke, \"<license_pool_id>\"\n )\n\n # TODO: DELETE shouldn't be in here, but \"DELETE\n # /loans/<license_pool_id>/revoke\" is interpreted as an attempt\n # to match /loans/<identifier_type>/<path:identifier>, the\n # method tested directly below, which does support DELETE.\n self.assert_supported_methods(url, \"GET\", \"PUT\", \"DELETE\")\n\n def test_loan_or_hold_detail(self):\n url = \"/loans/<identifier_type>/<identifier>\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.detail,\n \"<identifier_type>\",\n \"<identifier>\",\n authenticated=True,\n )\n self.assert_supported_methods(url, \"GET\", \"DELETE\")\n\n\nclass TestAnnotationsController(RouteTest):\n\n CONTROLLER_NAME = \"annotations\"\n\n def test_annotations(self):\n url = \"/annotations/\"\n self.assert_authenticated_request_calls(url, self.controller.container)\n self.assert_supported_methods(url, \"HEAD\", \"GET\", \"POST\")\n\n def test_annotation_detail(self):\n url = \"/annotations/<annotation_id>\"\n self.assert_authenticated_request_calls(\n url, self.controller.detail, \"<annotation_id>\"\n )\n self.assert_supported_methods(url, \"HEAD\", \"GET\", \"DELETE\")\n\n def test_annotations_for_work(self):\n url = \"/annotations/<identifier_type>/<identifier>\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.container_for_work,\n \"<identifier_type>\",\n \"<identifier>\",\n authenticated=True,\n )\n self.assert_supported_methods(url, \"GET\")\n\n\nclass TestURNLookupController(RouteTest):\n\n 
CONTROLLER_NAME = \"urn_lookup\"\n\n def test_work(self):\n url = \"/works\"\n self.assert_request_calls(url, self.controller.work_lookup, \"work\")\n\n\nclass TestWorkController(RouteTest):\n\n CONTROLLER_NAME = \"work_controller\"\n\n def test_contributor(self):\n url = \"/works/contributor/<contributor_name>\"\n self.assert_request_calls(\n url, self.controller.contributor, \"<contributor_name>\", None, None\n )\n\n def test_contributor_language(self):\n url = \"/works/contributor/<contributor_name>/<languages>\"\n self.assert_request_calls(\n url, self.controller.contributor, \"<contributor_name>\", \"<languages>\", None\n )\n\n def test_contributor_language_audience(self):\n url = \"/works/contributor/<contributor_name>/<languages>/<audiences>\"\n self.assert_request_calls(\n url,\n self.controller.contributor,\n \"<contributor_name>\",\n \"<languages>\",\n \"<audiences>\",\n )\n\n def test_series(self):\n url = \"/works/series/<series_name>\"\n self.assert_request_calls(\n url, self.controller.series, \"<series_name>\", None, None\n )\n\n def test_series_language(self):\n url = \"/works/series/<series_name>/<languages>\"\n self.assert_request_calls(\n url, self.controller.series, \"<series_name>\", \"<languages>\", None\n )\n\n def test_series_language_audience(self):\n url = \"/works/series/<series_name>/<languages>/<audiences>\"\n self.assert_request_calls(\n url, self.controller.series, \"<series_name>\", \"<languages>\", \"<audiences>\"\n )\n\n def test_permalink(self):\n url = \"/works/<identifier_type>/<identifier>\"\n self.assert_request_calls_method_using_identifier(\n url, self.controller.permalink, \"<identifier_type>\", \"<identifier>\"\n )\n\n def test_recommendations(self):\n url = \"/works/<identifier_type>/<identifier>/recommendations\"\n self.assert_request_calls_method_using_identifier(\n url, self.controller.recommendations, \"<identifier_type>\", \"<identifier>\"\n )\n\n def test_related_books(self):\n url = \"/works/<identifier_type>/<identifier>/related_books\"\n self.assert_request_calls_method_using_identifier(\n url, self.controller.related, \"<identifier_type>\", \"<identifier>\"\n )\n\n def test_report(self):\n url = \"/works/<identifier_type>/<identifier>/report\"\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.report,\n \"<identifier_type>\",\n \"<identifier>\",\n )\n self.assert_supported_methods(url, \"GET\", \"POST\")\n\n\nclass TestAnalyticsController(RouteTest):\n CONTROLLER_NAME = \"analytics_controller\"\n\n def test_track_analytics_event(self):\n url = \"/analytics/<identifier_type>/<identifier>/<event_type>\"\n\n # This controller can be called either authenticated or\n # unauthenticated.\n self.assert_request_calls_method_using_identifier(\n url,\n self.controller.track_event,\n \"<identifier_type>\",\n \"<identifier>\",\n \"<event_type>\",\n authenticated=True,\n authentication_required=False,\n )\n\n\nclass TestAdobeVendorID(RouteTest):\n\n CONTROLLER_NAME = \"adobe_vendor_id\"\n\n def test_adobe_vendor_id_get_token(self):\n url = \"/AdobeAuth/authdata\"\n self.assert_authenticated_request_calls(\n url,\n self.controller.create_authdata_handler,\n self.controller.AUTHENTICATED_PATRON,\n )\n # TODO: test what happens when vendor ID is not configured.\n\n def test_adobe_vendor_id_signin(self):\n url = \"/AdobeAuth/SignIn\"\n self.assert_request_calls(\n url, self.controller.signin_handler, http_method=\"POST\"\n )\n self.assert_supported_methods(url, \"POST\")\n\n def test_adobe_vendor_id_accountinfo(self):\n url = 
\"/AdobeAuth/AccountInfo\"\n self.assert_request_calls(\n url, self.controller.userinfo_handler, http_method=\"POST\"\n )\n self.assert_supported_methods(url, \"POST\")\n\n def test_adobe_vendor_id_status(self):\n url = \"/AdobeAuth/Status\"\n self.assert_request_calls(\n url,\n self.controller.status_handler,\n )\n\n\nclass TestAdobeDeviceManagement(RouteTest):\n CONTROLLER_NAME = \"adobe_device_management\"\n\n def test_adobe_drm_devices(self):\n url = \"/AdobeAuth/devices\"\n self.assert_authenticated_request_calls(\n url, self.controller.device_id_list_handler\n )\n self.assert_supported_methods(url, \"GET\", \"POST\")\n\n def test_adobe_drm_device(self):\n url = \"/AdobeAuth/devices/<device_id>\"\n self.assert_authenticated_request_calls(\n url, self.controller.device_id_handler, \"<device_id>\", http_method=\"DELETE\"\n )\n self.assert_supported_methods(url, \"DELETE\")\n\n\nclass TestOAuthController(RouteTest):\n # TODO: We might be able to do a better job of checking that\n # flask.request.args are propagated through, instead of checking\n # an empty dict.\n CONTROLLER_NAME = \"oauth_controller\"\n\n def test_oauth_authenticate(self):\n url = \"/oauth_authenticate\"\n _db = self.manager._db\n self.assert_request_calls(\n url, self.controller.oauth_authentication_redirect, {}, _db\n )\n\n def test_oauth_callback(self):\n url = \"/oauth_callback\"\n _db = self.manager._db\n self.assert_request_calls(\n url, self.controller.oauth_authentication_callback, _db, {}\n )\n\n\nclass TestODLNotificationController(RouteTest):\n CONTROLLER_NAME = \"odl_notification_controller\"\n\n def test_odl_notify(self):\n url = \"/odl_notify/<loan_id>\"\n self.assert_request_calls(url, self.controller.notify, \"<loan_id>\")\n self.assert_supported_methods(url, \"GET\", \"POST\")\n\n\nclass TestHeartbeatController(RouteTest):\n CONTROLLER_NAME = \"heartbeat\"\n\n def test_heartbeat(self):\n url = \"/heartbeat\"\n self.assert_request_calls(url, self.controller.heartbeat)\n\n\nclass TestHealthCheck(RouteTest):\n # This code isn't in a controller, and it doesn't really do anything,\n # so we check that it returns a specific result.\n def test_health_check(self):\n response = self.request(\"/healthcheck.html\")\n assert 200 == response.status_code\n\n # This is how we know we actually called health_check() and\n # not a mock method -- the Response returned by the mock\n # system would have an explanatory message in its .data.\n assert \"\" == response.get_data(as_text=True)\n\n\nclass TestExceptionHandler(RouteTest):\n def test_exception_handling(self):\n # The exception handler deals with most exceptions by running them\n # through ErrorHandler.handle()\n assert isinstance(error_handler_object, ErrorHandler)\n\n # Temporarily replace the ErrorHandler used by the\n # exception_handler function -- this is what we imported as\n # error_handler_object.\n class MockErrorHandler(object):\n def handle(self, exception):\n self.handled = exception\n return Response(\"handled it\", 500)\n\n routes.h = MockErrorHandler()\n\n # Simulate a request that causes an unhandled exception.\n with self.app.test_request_context():\n value_error = ValueError()\n result = exception_handler(value_error)\n\n # The exception was passed into MockErrorHandler.handle.\n assert value_error == routes.h.handled\n\n # The Response is created was passed along.\n assert \"handled it\" == result.get_data(as_text=True)\n assert 500 == result.status_code\n\n # werkzeug HTTPExceptions are _not_ run through\n # handle(). 
werkzeug handles the conversion to a Response\n # object representing a more specific (and possibly even\n # non-error) HTTP response.\n with self.app.test_request_context():\n exception = MethodNotAllowed()\n response = exception_handler(exception)\n assert 405 == response.status_code\n\n # Restore the normal error handler.\n routes.h = error_handler_object\n", "id": "5923033", "language": "Python", "matching_score": 2.741502285003662, "max_stars_count": 0, "path": "tests/api/test_routes.py" }, { "content": "import base64\nimport json\n\nimport pytest\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\n\nfrom api.circulation import FulfillmentInfo\nfrom api.circulation_exceptions import *\nfrom api.odl import ODLAPI\nfrom api.shared_collection import BaseSharedCollectionAPI, SharedCollectionAPI\nfrom core.config import CannotLoadConfiguration\nfrom core.model import (\n ConfigurationSetting,\n Hold,\n IntegrationClient,\n Loan,\n create,\n get_one,\n)\nfrom core.testing import DatabaseTest, MockRequestsResponse\n\n\nclass MockAPI(BaseSharedCollectionAPI):\n def __init__(self, _db, collection):\n self.checkouts = []\n self.returns = []\n self.fulfills = []\n self.holds = []\n self.released_holds = []\n self.fulfillment = None\n\n def checkout_to_external_library(self, client, pool, hold=None):\n self.checkouts.append((client, pool))\n\n def checkin_from_external_library(self, client, loan):\n self.returns.append((client, loan))\n\n def fulfill_for_external_library(self, client, loan, mechanism):\n self.fulfills.append((client, loan, mechanism))\n return self.fulfillment\n\n def release_hold_from_external_library(self, client, hold):\n self.released_holds.append((client, hold))\n\n\nclass TestSharedCollectionAPI(DatabaseTest):\n def setup_method(self):\n super(TestSharedCollectionAPI, self).setup_method()\n self.collection = self._collection(protocol=\"Mock\")\n self.shared_collection = SharedCollectionAPI(\n self._db, api_map={\"Mock\": MockAPI}\n )\n self.api = self.shared_collection.api(self.collection)\n ConfigurationSetting.for_externalintegration(\n BaseSharedCollectionAPI.EXTERNAL_LIBRARY_URLS,\n self.collection.external_integration,\n ).value = json.dumps([\"http://library.org\"])\n self.client, ignore = IntegrationClient.register(self._db, \"http://library.org\")\n edition, self.pool = self._edition(\n with_license_pool=True, collection=self.collection\n )\n [self.delivery_mechanism] = self.pool.delivery_mechanisms\n\n def test_initialization_exception(self):\n class MisconfiguredAPI(object):\n def __init__(self, _db, collection):\n raise CannotLoadConfiguration(\"doomed!\")\n\n api_map = {self._default_collection.protocol: MisconfiguredAPI}\n shared_collection = SharedCollectionAPI(self._db, api_map=api_map)\n # Although the SharedCollectionAPI was created, it has no functioning\n # APIs.\n assert {} == shared_collection.api_for_collection\n\n # Instead, the CannotLoadConfiguration exception raised by the\n # constructor has been stored in initialization_exceptions.\n e = shared_collection.initialization_exceptions[self._default_collection.id]\n assert isinstance(e, CannotLoadConfiguration)\n assert \"doomed!\" == str(e)\n\n def test_api_for_licensepool(self):\n collection = self._collection(protocol=ODLAPI.NAME)\n edition, pool = self._edition(with_license_pool=True, collection=collection)\n shared_collection = SharedCollectionAPI(self._db)\n assert isinstance(shared_collection.api_for_licensepool(pool), ODLAPI)\n\n def test_api_for_collection(self):\n 
collection = self._collection()\n shared_collection = SharedCollectionAPI(self._db)\n # The collection isn't a shared collection, so looking up its API\n # raises an exception.\n pytest.raises(CirculationException, shared_collection.api, collection)\n\n collection.protocol = ODLAPI.NAME\n shared_collection = SharedCollectionAPI(self._db)\n assert isinstance(shared_collection.api(collection), ODLAPI)\n\n def test_register(self):\n # An auth document URL is required to register.\n pytest.raises(\n InvalidInputException,\n self.shared_collection.register,\n self.collection,\n None,\n )\n\n # If the url doesn't return a valid auth document, there's an exception.\n auth_response = \"not json\"\n\n def do_get(*args, **kwargs):\n return MockRequestsResponse(200, content=auth_response)\n\n pytest.raises(\n RemoteInitiatedServerError,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n # The auth document also must have a link to the library's catalog.\n auth_response = json.dumps({\"links\": []})\n pytest.raises(\n RemoteInitiatedServerError,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n # If no external library URLs are configured, no one can register.\n auth_response = json.dumps(\n {\"links\": [{\"href\": \"http://library.org\", \"rel\": \"start\"}]}\n )\n ConfigurationSetting.for_externalintegration(\n BaseSharedCollectionAPI.EXTERNAL_LIBRARY_URLS,\n self.collection.external_integration,\n ).value = None\n pytest.raises(\n AuthorizationFailedException,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n # If the library's URL isn't in the configuration, it can't register.\n auth_response = json.dumps(\n {\"links\": [{\"href\": \"http://differentlibrary.org\", \"rel\": \"start\"}]}\n )\n ConfigurationSetting.for_externalintegration(\n BaseSharedCollectionAPI.EXTERNAL_LIBRARY_URLS,\n self.collection.external_integration,\n ).value = json.dumps([\"http://library.org\"])\n pytest.raises(\n AuthorizationFailedException,\n self.shared_collection.register,\n self.collection,\n \"http://differentlibrary.org/auth\",\n do_get=do_get,\n )\n\n # Or if the public key is missing from the auth document.\n auth_response = json.dumps(\n {\"links\": [{\"href\": \"http://library.org\", \"rel\": \"start\"}]}\n )\n pytest.raises(\n RemoteInitiatedServerError,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n auth_response = json.dumps(\n {\n \"public_key\": {\"type\": \"not RSA\", \"value\": \"123\"},\n \"links\": [{\"href\": \"http://library.org\", \"rel\": \"start\"}],\n }\n )\n pytest.raises(\n RemoteInitiatedServerError,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n auth_response = json.dumps(\n {\n \"public_key\": {\"type\": \"RSA\"},\n \"links\": [{\"href\": \"http://library.org\", \"rel\": \"start\"}],\n }\n )\n pytest.raises(\n RemoteInitiatedServerError,\n self.shared_collection.register,\n self.collection,\n \"http://library.org/auth\",\n do_get=do_get,\n )\n\n # Here's an auth document with a valid key.\n key = RSA.generate(2048)\n public_key = key.publickey().exportKey().decode(\"utf-8\")\n encryptor = PKCS1_OAEP.new(key)\n auth_response = json.dumps(\n {\n \"public_key\": {\"type\": \"RSA\", \"value\": public_key},\n \"links\": [{\"href\": \"http://library.org\", \"rel\": \"start\"}],\n }\n )\n response 
= self.shared_collection.register(\n self.collection, \"http://library.org/auth\", do_get=do_get\n )\n\n # An IntegrationClient has been created.\n client = get_one(\n self._db,\n IntegrationClient,\n url=IntegrationClient.normalize_url(\"http://library.org/\"),\n )\n decrypted_secret = encryptor.decrypt(\n base64.b64decode(response.get(\"metadata\", {}).get(\"shared_secret\"))\n )\n assert client.shared_secret == decrypted_secret.decode(\"utf-8\")\n\n def test_borrow(self):\n # This client is registered, but isn't one of the allowed URLs for the collection\n # (maybe it was registered for a different shared collection).\n other_client, ignore = IntegrationClient.register(\n self._db, \"http://other_library.org\"\n )\n\n # Trying to borrow raises an exception.\n pytest.raises(\n AuthorizationFailedException,\n self.shared_collection.borrow,\n self.collection,\n other_client,\n self.pool,\n )\n\n # A client that's registered with the collection can borrow.\n self.shared_collection.borrow(self.collection, self.client, self.pool)\n assert [(self.client, self.pool)] == self.api.checkouts\n\n # If the client's checking out an existing hold, the hold must be for that client.\n hold, ignore = create(\n self._db, Hold, integration_client=other_client, license_pool=self.pool\n )\n pytest.raises(\n CannotLoan,\n self.shared_collection.borrow,\n self.collection,\n self.client,\n self.pool,\n hold=hold,\n )\n\n hold.integration_client = self.client\n self.shared_collection.borrow(\n self.collection, self.client, self.pool, hold=hold\n )\n assert [(self.client, self.pool)] == self.api.checkouts[1:]\n\n def test_revoke_loan(self):\n other_client, ignore = IntegrationClient.register(\n self._db, \"http://other_library.org\"\n )\n loan, ignore = create(\n self._db, Loan, integration_client=other_client, license_pool=self.pool\n )\n pytest.raises(\n NotCheckedOut,\n self.shared_collection.revoke_loan,\n self.collection,\n self.client,\n loan,\n )\n\n loan.integration_client = self.client\n self.shared_collection.revoke_loan(self.collection, self.client, loan)\n assert [(self.client, loan)] == self.api.returns\n\n def test_fulfill(self):\n other_client, ignore = IntegrationClient.register(\n self._db, \"http://other_library.org\"\n )\n loan, ignore = create(\n self._db, Loan, integration_client=other_client, license_pool=self.pool\n )\n pytest.raises(\n CannotFulfill,\n self.shared_collection.fulfill,\n self.collection,\n self.client,\n loan,\n self.delivery_mechanism,\n )\n\n loan.integration_client = self.client\n\n # If the API does not return content or a content link, the loan can't be fulfilled.\n pytest.raises(\n CannotFulfill,\n self.shared_collection.fulfill,\n self.collection,\n self.client,\n loan,\n self.delivery_mechanism,\n )\n assert [(self.client, loan, self.delivery_mechanism)] == self.api.fulfills\n\n self.api.fulfillment = FulfillmentInfo(\n self.collection,\n self.pool.data_source.name,\n self.pool.identifier.type,\n self.pool.identifier.identifier,\n \"http://content\",\n \"text/html\",\n None,\n None,\n )\n fulfillment = self.shared_collection.fulfill(\n self.collection, self.client, loan, self.delivery_mechanism\n )\n assert [(self.client, loan, self.delivery_mechanism)] == self.api.fulfills[1:]\n assert self.delivery_mechanism == loan.fulfillment\n\n def test_revoke_hold(self):\n other_client, ignore = IntegrationClient.register(\n self._db, \"http://other_library.org\"\n )\n hold, ignore = create(\n self._db, Hold, integration_client=other_client, license_pool=self.pool\n )\n\n 
pytest.raises(\n CannotReleaseHold,\n self.shared_collection.revoke_hold,\n self.collection,\n self.client,\n hold,\n )\n\n hold.integration_client = self.client\n self.shared_collection.revoke_hold(self.collection, self.client, hold)\n assert [(self.client, hold)] == self.api.released_holds\n", "id": "1174478", "language": "Python", "matching_score": 3.061508893966675, "max_stars_count": 0, "path": "tests/api/test_shared_collection.py" }, { "content": "import os\nimport sys\n\nbin_dir = os.path.split(__file__)[0]\npackage_dir = os.path.join(bin_dir, \"..\")\nsys.path.append(os.path.abspath(package_dir))\n\nfrom axis import Axis360API\nfrom circulation_exceptions import *\nfrom overdrive import OverdriveAPI\nfrom threem import ThreeMAPI\n\nfrom circulation import CirculationAPI\nfrom core.model import Identifier, Patron, get_one_or_create, production_session\n\nbarcode, pin, borrow_urn, hold_urn = sys.argv[1:5]\nemail = os.environ.get(\n \"DEFAULT_NOTIFICATION_EMAIL_ADDRESS\", \"<EMAIL>\"\n)\n\n_db = production_session()\npatron, ignore = get_one_or_create(_db, Patron, authorization_identifier=barcode)\n\nborrow_identifier = Identifier.parse_urn(_db, borrow_urn, True)[0]\nhold_identifier = Identifier.parse_urn(_db, hold_urn, True)[0]\nborrow_pool = borrow_identifier.licensed_through\nhold_pool = hold_identifier.licensed_through\n\nif any(x.type == Identifier.THREEM_ID for x in [borrow_identifier, hold_identifier]):\n threem = ThreeMAPI(_db)\nelse:\n threem = None\n\nif any(x.type == Identifier.OVERDRIVE_ID for x in [borrow_identifier, hold_identifier]):\n overdrive = OverdriveAPI(_db)\nelse:\n overdrive = None\n\nif any(x.type == Identifier.AXIS_360_ID for x in [borrow_identifier, hold_identifier]):\n axis = Axis360API(_db)\nelse:\n axis = None\n\ncirculation = CirculationAPI(_db, overdrive=overdrive, threem=threem, axis=axis)\n\nactivity = circulation.patron_activity(patron, pin)\nprint(\"-\" * 80)\nfor i in activity:\n print(i)\nprint(\"-\" * 80)\n\nlicensepool = borrow_pool\nmechanism = licensepool.delivery_mechanisms[0]\ntry:\n circulation.fulfill(patron, pin, licensepool, mechanism)\nexcept NoActiveLoan as e:\n print(\" No active loan...\")\ncirculation.borrow(patron, pin, licensepool, mechanism, email)\nprint(\"Attempting to borrow\", licensepool.work)\nprint(\"Initial revoke loan\")\nprint(circulation.revoke_loan(patron, pin, licensepool))\nprint(\"Fulfill with no loan\")\ntry:\n circulation.fulfill(patron, pin, licensepool, mechanism)\nexcept NoActiveLoan as e:\n print(\" Exception as expected.\")\nprint(\"Borrow\")\nprint(circulation.borrow(patron, pin, licensepool, mechanism, email))\nprint(\"Borrow again!\")\nprint(circulation.borrow(patron, pin, licensepool, mechanism, email))\nprint(\"Fulfill with loan\")\nprint(circulation.fulfill(patron, pin, licensepool, mechanism))\n\n\nlicensepool = hold_pool\nprint(\"Attempting to place hold on\", licensepool.work)\nprint(\"Initial release hold\")\nprint(\"\", circulation.release_hold(patron, pin, licensepool))\nprint(\"Creating hold.\")\nprint(\"\", circulation.borrow(patron, pin, licensepool, mechanism, email))\nprint(\"Creating hold again!\")\ntry:\n print(circulation.borrow(patron, pin, licensepool, mechanism, email))\nexcept CannotLoan as e:\n print(\" Exception as expected.\")\nprint(\"Attempt to fulfill hold.\")\ntry:\n print(circulation.fulfill(patron, pin, licensepool, mechanism))\nexcept NoActiveLoan as e:\n print(\" Exception as expected\")\n\nactivity = circulation.patron_activity(patron, pin)\nprint(\"-\" * 80)\nfor i in 
activity:\n print(i)\nprint(\"-\" * 80)\n\nprint(\"Revoke loan\")\nprint(circulation.revoke_loan(patron, pin, licensepool))\nprint(\"Revoke already revoked loan\")\nprint(circulation.revoke_loan(patron, pin, licensepool))\n\nprint(\"Release hold.\")\nprint(circulation.release_hold(patron, pin, licensepool))\nprint(\"Release nonexistent hold.\")\nprint(circulation.release_hold(patron, pin, licensepool))\n", "id": "3242324", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "integration_tests/test_circulation.py" }, { "content": "import sys\n\nfrom api import app\n\nurl = None\nif len(sys.argv) > 1:\n url = sys.argv[1]\n\nif __name__ == \"__main__\":\n app.run(url)\n", "id": "4778826", "language": "Python", "matching_score": 0.048779651522636414, "max_stars_count": 0, "path": "app.py" } ]
2.525696
Cam-ille
[ { "content": "# aguest.py\nfrom dbbank import DB\n\nclass Customer:\n def __init__(self):\n self.menu_title = '投資人'\n self.account = ''\n self.menu = {\n 'a':'登入.註冊',\n 'b':'買進/賣出',\n 'c':'個人交易紀錄',\n 'q':'離開',\n }\n self.menu_func = {\n 'a': lambda c, s: self.login_or_enroll(c, s),\n 'b': lambda c, s: self.currency_buy_sell(c, s),\n 'c': lambda c, s: self.currency_transaction_record(c, s)\n }\n self.divider = '='*20\n\n def show_menu(self, account=''):\n \"\"\" 主選單\n \"\"\"\n print(self.divider)\n if self.account == '':\n print(self.menu_title, '尚未登入')\n else:\n print(self.menu_title, self.account)\n print(self.divider)\n for fid, fname in self.menu.items():\n print('%s:%s' % (fid, fname))\n print(self.divider)\n opt = input('請選擇: ').lower()\n if opt in self.menu.keys():\n return opt, self.menu[opt]\n else:\n return '', '無此功能!'\n\n def login_or_enroll(self, db, func_title):\n \"\"\" 登入.註冊\n \"\"\"\n account_input = input('請輸入帳號: ')\n if db.check_if_customer_enrolled(account_input):\n self.account = account_input\n print(db.get_customer_info(self.account))\n else:\n if db.insert_or_update_customer(account_input, 'insert'):\n print('註冊成功')\n\n def currency_buy_sell(self, db, func_title):\n \"\"\" 買進賣出\n \"\"\"\n import random\n while True:\n print('幣別 ',' 買入 ',' 賣出')\n currency=db.list_all_currency()\n opt=input('Which currency? (1.美元 2.港幣 3.歐元 4.日圓 5.人民幣 exit.離開):')\n if opt == 'exit':\n break\n else:\n currency_code = currency[int(opt)-1]\n buy=currency_code[2]\n sold=currency_code[1]\n buy_in=0\n sold_out=0\n choose=input('1.BUY IN 2.SOLD OUT:')\n balance = db.get_latest_balance(self.account, currency_code)\n print('BALANCE:',balance)\n if choose=='1':\n money=int(input('How much(use NTD)?'))\n buy_in= money//buy\n balance=balance+buy_in\n print('BUY:',buy_in)\n elif choose=='2':\n money=int(input('How much?'))\n sold_out= money\n balance=balance-sold_out\n if balance<0:\n print('餘額不足')\n break\n else:\n print('SOLD:',sold_out)\n db.insert_record(self.account, currency[int(opt)-1][0], buy_in, sold_out, balance, db.get_date())\n # 1. 從其他顧客中任選一些,範圍從1~n-1\n # 1.1 選的方式,先將所有顧客帳號查詢出來,放在一個 tuple or list\n # 1.2 承上,再做隨機取樣,sample(other_customers, randint(1, n-1))\n # 2. 假設9個裡面取出5個\n # 2.1 這5個,逐一讀取每個 account,然後幫他亂數決定要買或要賣\n # 2.2 先做到一次買賣一種貨幣\n # 2.3 買的部分範圍自訂,賣的部分範圍最高要限定目前該貨幣的餘額\n # 2.4 可以考慮,亂數範圍,以10或50或100取整\n # 2.5 如果完成,再回頭調整成可以一次買賣多種貨幣\n list_other_customers=db.get_other_customer()\n n = len(list_other_customers)\n # 先用3個以內測試驗算,等沒問題後再將 3 改成 n\n other_customers=random.sample(list_other_customers,random.randint(1, n))\n # print(other_customers)\n for customer in other_customers:\n print(customer[0])\n choose=random.choice(['buy in','sold out'])\n opt = random.choice(['1','2','3','4','5'])\n currency_code = currency[int(opt)-1]\n buy=currency_code[2]\n sold=currency_code[1]\n if choose=='buy in':\n money=random.randint(10,100000)\n buy_in= money//buy\n balance=balance+buy_in\n print('BUY:',buy_in)\n elif choose=='sold out':\n money=random.randint(10,balance)\n sold_out= money\n balance=balance-sold_out\n if balance<0:\n print('餘額不足')\n break\n else:\n print('SOLD:',sold_out)\n db.random_records(customer[0], currency_code[0], buy_in, sold_out, balance, db.get_date())\n print()\n return func_title\n\n def currency_transaction_record(self, db, func_title):\n \"\"\" 交易紀錄 \n 1. 查詢個人所有紀錄\n 2. 依區間查詢個人紀錄\n \"\"\"\n import datetime\n current_date = datetime.date.today()\n while True :\n subopt = input('1.查詢個人所有紀錄 2.依區間查詢個人紀錄(exit.離開): ')\n # 1. 最近一月、最近一季、最近半年、最近一年\n # 2. 
輸入前後日期條件\n # 上述方法都要用 BETWEEN 語法\n # SELECT * FROM RECORD WHERE DATE_TIME BETWEEN ? AND ?\n if subopt == 'exit':\n break\n elif subopt=='1':\n db.list_record_by_account(self.account)\n elif subopt=='2':\n while True:\n opt = input('1.最近一月 2.最近一季 3.最近半年 4.最近一年 5.輸入日期(exit.離開):')\n days_opts = (30,90,180,365)\n if opt == 'exit':\n break\n elif opt =='5':\n date1=input('請輸入之前日期')\n date2=input('請輸入最近日期')\n db.account_input_date(self.account,date1,date2)\n else:\n days=days_opts[int(opt)-1]\n date_ago=current_date - datetime.timedelta(days=days)\n db.records_date(date_ago,current_date,self.account)\n return func_title\n\n# entry point\nwith DB() as db:\n acustomer = Customer()\n while True:\n func_id, func_name = acustomer.show_menu()\n if func_id == 'q':\n break\n elif func_id == '':\n print(func_name)\n else:\n if acustomer.account == '':\n func_id = 'a'\n print('請先登入或註冊')\n acustomer.menu_func[func_id](db, func_name)\n print()\n", "id": "9559106", "language": "Python", "matching_score": 4.768630027770996, "max_stars_count": 0, "path": "code/acustomer.py" }, { "content": "# abank.py\nfrom dbbank import DB\nclass Bank:\n def __init__(self):\n self.menu_title = '銀行'\n self.menu = {\n 'a':'匯率查詢',\n 'b':'幣別匯率變動參數設定',\n 'c':'顧客交易紀錄',\n 'q':'離開',\n }\n self.menu_func = {\n 'a': lambda c, s: self.exchange_rate_inquiry(c, s),\n 'b': lambda c, s: self.parameter_setting(c, s),\n 'c': lambda c, s: self.guest_transaction_record(c, s)\n }\n self.divider = '='*20\n\n def show_menu(self):\n \"\"\" 主選單\n \"\"\"\n print(self.divider)\n print(self.menu_title)\n print(self.divider)\n for fid, fname in self.menu.items():\n print('%s:%s' % (fid, fname))\n print(self.divider)\n opt = input('請選擇: ').lower()\n if opt in self.menu.keys():\n return opt, self.menu[opt]\n else:\n return '', '無此功能!'\n\n def exchange_rate_inquiry(self, db, func_title):\n \"\"\" 匯率查詢\n \"\"\"\n print('{:3} {:3} {:3}'.format('幣別','買入','賣出'))\n db.list_all_currency()\n return func_title\n\n def parameter_setting(self, db, func_title):\n \"\"\" 幣別匯率變動參數設定\n 1. 設定各種幣別的上下限範圍\n 2. 設定每次變動的範圍\n \"\"\"\n import random\n while True:\n currency=db.list_currency_settings()\n subopt = input('1.美元 2.港幣 3.歐元 4.日圓 5.人民幣 exit.離開: ')\n if subopt == 'exit':\n break\n else:\n set_range1 = float(input('請輸入上限範圍: '))\n set_range2 = float(input('請輸入下限範圍: '))\n change_limit= float(input('請輸入變動幅度: '))\n \n if set_range2>set_range1:\n continue\n else:\n db.insert_or_update_currency_rate(currency[int(subopt)-1][0],set_range2, set_range1, change_limit) \n return func_title\n\n def guest_transaction_record(self, db, func_title):\n \"\"\" 顧客交易紀錄\n 1. 預設可以查詢全部\n 2. 不分客戶依區間查詢\n 3. 查詢個人交易紀錄\n 3a. 查詢個人所有紀錄\n 3b. 
依區間查詢個人紀錄\n \"\"\"\n while True:\n import datetime\n current_date = datetime.date.today()\n # db.list_all_customer()\n db.all_records()\n subopt = input('1.依區間查詢 2.查詢個人交易紀錄 exit.離開: ')\n if subopt == 'exit':\n break\n else:\n if subopt=='1':\n opt = input('1.最近一月 2.最近一季 3.最近半年 4.最近一年(exit.離開):')\n days_opts=(30,90,180,365)\n if opt == 'exit':\n break\n else:\n days=days_opts[int(opt)-1]\n date_ago=current_date - datetime.timedelta(days=days)\n db.records_date(date_ago,current_date)\n else:\n while True:\n account = input('請輸入帳號 (exit.離開): ')\n if account == 'exit':\n break\n else:\n db.list_record_by_account(account)\n print('-'*60)\n return func_title\n\n# entry point\nwith DB() as db:\n abank = Bank()\n while True:\n func_id, func_name = abank.show_menu()\n if func_id == 'q':\n break\n elif func_id == '':\n print(func_name)\n else:\n abank.menu_func[func_id](db, func_name)\n print()\n", "id": "11554112", "language": "Python", "matching_score": 2.2883141040802, "max_stars_count": 0, "path": "code/abank.py" }, { "content": "class DB:\n def __init__(self):\n self.conn = None\n self.cur = None\n self.title_side = '-'*12\n\n def __enter__(self):\n self.open()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False\n\n def open(self):\n \"\"\" 開啟資料庫連線\n \"\"\"\n if self.conn is None:\n import sqlite3\n self.conn = sqlite3.connect('bank.db')\n self.cur = self.conn.cursor()\n return True\n\n def close(self):\n \"\"\" 關閉資料庫連線\n \"\"\"\n if self.conn is not None:\n self.conn.close()\n self.conn = None\n return True\n\n def get_max_id(self, arg_table):\n \"\"\" 取得資料最新編號\n \"\"\"\n self.cur.execute(\"SELECT MAX(ID) FROM {}\".format(arg_table))\n return self.cur.fetchone()[0] + 1\n\n def list_all_currency(self):\n \"\"\" 匯率表\n \"\"\"\n import random\n self.cur.execute(\"SELECT * FROM CURRENCY_RATE\")\n all_rows = self.cur.fetchall()\n for row in all_rows:\n currency = row[0]\n # SELECT\n self.cur.execute(\"SELECT * FROM CURRENCY_RATE_SETTINGS WHERE CURRENCY=?\",(currency,))\n currency_set = self.cur.fetchone()\n set_limit = currency_set[3]\n # range1\n set_range1 = currency_set[1]\n range1 = row[1]\n range1_top = range1 + set_limit\n range1_bottom = range1 - set_limit\n if range1_top > set_range1:\n range1_top = set_range1\n changed_range1=round(random.uniform(range1_bottom, range1_top),3)\n # range2\n set_range2 = currency_set[2]\n range2 = row[2]\n range2_top = range2 + set_limit\n range2_bottom = range2 - set_limit\n if range2_top > set_range2:\n range2_top = set_range2\n changed_range2=round(random.uniform(range2_bottom, range2_top),3)\n # UPDATE\n self.cur.execute(\"UPDATE CURRENCY_RATE SET BUY_IN=?, SOLD_OUT=? 
WHERE CURRENCY=?\",(changed_range1,changed_range2, currency))\n\n\n self.cur.execute(\"SELECT * FROM CURRENCY_RATE\")\n all_rows = self.cur.fetchall()\n for row in all_rows:\n print('{:3} {:6} {:6}'.format(*row))\n return all_rows\n\n def list_currency_settings(self):\n \"\"\" 匯率設定表\n \"\"\"\n self.cur.execute(\"SELECT * FROM CURRENCY_RATE_SETTINGS\")\n all_rows = self.cur.fetchall()\n for row in all_rows:\n print('{:3} {:6} {:6}'.format(*row))\n return all_rows\n\n def insert_or_update_currency_rate(self, currency, set_range1, set_range2, change_limit):\n \"\"\" 增修匯率\n \"\"\"\n self.cur.execute(\"SELECT COUNT(*) FROM CURRENCY_RATE_SETTINGS WHERE CURRENCY=?\", (currency,))\n count_result = self.cur.fetchone()[0]\n if count_result == 0:\n self.cur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS (?, ?, ?, ?)\", \n (currency, set_range1, set_range2, change_limit))\n elif count_result > 0:\n self.cur.execute(\"UPDATE CURRENCY_RATE_SETTINGS SET RANGE1=?, RANGE2=?, CHANGE_LIMIT=? WHERE CURRENCY=?\", (set_range1, set_range2, change_limit, currency))\n return self.conn.commit()\n\n def list_all_customer(self):\n \"\"\" 顧客資料\n \"\"\"\n self.cur.execute(\"SELECT * FROM CUSTOMER\")\n all_rows = self.cur.fetchall()\n for row in all_rows:\n print(row)\n print()\n\n def check_if_customer_enrolled(self, arg_account):\n \"\"\" 檢查是否註冊\n \"\"\"\n self.cur.execute(\"SELECT COUNT(*) FROM CUSTOMER WHERE ACCOUNT=?\", (arg_account,))\n if self.cur.fetchone()[0] == 1:\n return True\n else:\n return False\n\n def insert_or_update_customer(self, account_id, action):\n \"\"\" 增修顧客\n \"\"\"\n data_ok = True\n full_name = input('全名: ')\n if full_name == 'q':\n return False\n else:\n data_ok = False\n\n # 資料無誤,准許註冊 \n if data_ok:\n if action == 'insert':\n customer_max_id = self.get_max_id('CUSTOMER')\n self.cur.execute(\"INSERT INTO CUSTOMER VALUES (?, ?, ?)\", \n (customer_max_id, account_id, full_name))\n elif action == 'update':\n self.cur.execute(\"UPDATE CUSTOMER SET NAME=? 
WHERE ACCOUNT=?\", \n (full_name, account_id))\n self.conn.commit()\n return True\n else:\n return False\n\n def get_other_customer(self):\n \"\"\" 查詢其他顧客資訊\n \"\"\"\n self.cur.execute(\"SELECT ACCOUNT FROM CUSTOMER WHERE ID>1\")\n other_customer_info = self.cur.fetchall()\n return other_customer_info\n \n def get_customer_info(self, account):\n \"\"\" 查詢顧客資訊\n \"\"\"\n self.cur.execute(\"SELECT * FROM CUSTOMER WHERE ACCOUNT=?\", (account,))\n customer_info = self.cur.fetchone()\n return customer_info\n\n def insert_record(self, account, currency, buy_in, sold_out, balance, date):\n \"\"\" 增修紀錄\n \"\"\"\n record_max_id = self.get_max_id('RECORD')\n self.cur.execute(\"INSERT INTO RECORD VALUES (?, ?, ?, ?, ?, ?, ?)\", \n (record_max_id, account, currency, buy_in, sold_out, balance, self.get_date()))\n return self.conn.commit()\n\n def random_records(self, account, currency, buy_in, sold_out, balance, date):\n record_max_id = self.get_max_id('RECORD')\n self.cur.execute(\"INSERT INTO RECORD VALUES (?, ?, ?, ?, ?, ?, ?)\",(record_max_id, account, currency, buy_in, sold_out, balance, self.get_date()))\n return self.conn.commit()\n\n def list_record_by_account(self, account):\n \"\"\" 個人交易紀錄查詢\n \"\"\"\n account_like = ''.join(('%', account, '%'))\n self.cur.execute(\"SELECT * FROM RECORD WHERE ACCOUNT LIKE ?\", (account_like,))\n all_rows = self.cur.fetchall()\n if len(all_rows) > 0:\n for row in all_rows:\n print(row)\n else:\n print(account, '查無紀錄')\n\n def get_date(self):\n from datetime import datetime\n date_time=datetime.now()\n return date_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n def get_latest_balance(self, account, currency):\n self.cur.execute(\"SELECT BALANCE FROM RECORD WHERE ACCOUNT=? AND CURRENCY=? AND DATE_TIME < ? ORDER BY DATE_TIME DESC LIMIT 1 OFFSET 0\",\n (account,currency[0],self.get_date()))\n balance=self.cur.fetchone()\n if balance is not None:\n return balance[0]\n else:\n return 0\n\n def all_records(self):\n self.cur.execute(\"SELECT * FROM RECORD\")\n all_rows = self.cur.fetchall()\n for row in all_rows:\n print('{:3} {:10} {:3} {:10} {:10} {:10} {:20}'.format(*row))\n\n def records_date(self, date, current_date, account=''):\n if account == '':\n self.cur.execute(\"SELECT * FROM RECORD WHERE SUBSTR(DATE_TIME,1,10) BETWEEN ? AND ?\",(date,current_date,))\n else:\n self.cur.execute(\"SELECT * FROM RECORD WHERE ACCOUNT=? AND SUBSTR(DATE_TIME,1,10) BETWEEN ? AND ?\",(account,date,current_date,))\n all_rows=self.cur.fetchall()\n currency_types = ['USD', 'HKD', 'EUR', 'JPY', 'CNY']\n records_count = [0 for i in range(len(currency_types))]\n # records_count = []\n # for i in range(len(currency_types)):\n # records_count.append(0)\n for row in all_rows:\n records_count[currency_types.index(row[2])]+=1\n print(row)\n print(currency_types)\n print(records_count)\n \n def account_input_date(self, account, date1, date2):\n self.cur.execute(\"SELECT * FROM RECORD WHERE SUBSTR(DATE_TIME,1,10) BETWEEN ? 
AND ?\",(date1,date2,))\n all_rows=self.cur.fetchall()\n for row in all_rows:\n print(row)", "id": "10722152", "language": "Python", "matching_score": 4.182739734649658, "max_stars_count": 0, "path": "code/dbbank.py" }, { "content": "import os\n\ntry:\n os.unlink('bank.db')\nexcept:\n print('首次建檔')\n\nimport sqlite3\nconn = sqlite3.connect('bank.db')\ncur = conn.cursor()\n\ndef show_all_rows(all_rows):\n for row in all_rows:\n print(row)\n print()\n\n# 匯率表\ncur.execute('''CREATE TABLE CURRENCY_RATE(CURRENCY text,BUY_IN real,SOLD_OUT real)''')\ncur.execute(\"INSERT INTO CURRENCY_RATE VALUES ('USD', 30.455, 31.094)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE VALUES ('HKD', 3.761, 3.977)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE VALUES ('EUR', 34.150, 35.490)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE VALUES ('JPY', 0.269, 0.082)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE VALUES ('CNY', 4.501, 4.633)\")\nconn.commit()\n\ncur.execute(\"SELECT * FROM CURRENCY_RATE\")\nshow_all_rows(cur.fetchall())\n\n# 匯率設定\ncur.execute(\"CREATE TABLE CURRENCY_RATE_SETTINGS(CURRENCY text,RANGE1 real,RANGE2 real,CHANGE_LIMIT real)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS VALUES ('USD', 29.100, 32.000, 0.1)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS VALUES ('HKD', 2.001, 4.001, 0.02)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS VALUES ('EUR', 34.100, 37.150, 0.3)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS VALUES ('JPY', 0.251, 0.321, 0.01)\")\ncur.execute(\"INSERT INTO CURRENCY_RATE_SETTINGS VALUES ('CNY', 4.321, 5.032, 0.03)\")\nconn.commit()\n\ncur.execute(\"SELECT * FROM CURRENCY_RATE_SETTINGS\")\nshow_all_rows(cur.fetchall())\n\n# 顧客資料\ncur.execute('''CREATE TABLE CUSTOMER (ID integer, ACCOUNT text, NAME text)''')\ncur.execute(\"INSERT INTO CUSTOMER VALUES (1, 'camille', 'Camille')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (2, 'chris', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (3, 'gal', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (4, 'bill', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (5, 'steve', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (6, 'tom', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (7, 'anne', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (8, 'trump', '<NAME>')\")\ncur.execute(\"INSERT INTO CUSTOMER VALUES (9, 'mark', '<NAME>')\")\nconn.commit()\n\ncur.execute(\"SELECT * FROM CUSTOMER\")\nshow_all_rows(cur.fetchall())\n\n# 交易紀錄\ncur.execute('''CREATE TABLE RECORD (ID integer, ACCOUNT text, CURRENCY text, BUY_IN integer, SOLD_OUT integer, BALANCE integer, DATE_TIME text)''')\ncur.execute(\"INSERT INTO RECORD VALUES (1, 'chris', 'USD', 10000, 0, 10000, '2017-10-01 00:01:02')\")\ncur.execute(\"INSERT INTO RECORD VALUES (2, 'gal', 'JPY', 50000, 0, 50000, '2017-10-01 10:19:55')\")\ncur.execute(\"INSERT INTO RECORD VALUES (3, 'camille', 'HKD', 30000, 0, 30000, '2019-05-01 09:31:43')\")\ncur.execute(\"INSERT INTO RECORD VALUES (4, 'bill', 'EUR', 70000, 0, 70000, '2019-05-01 15:36:27')\")\ncur.execute(\"INSERT INTO RECORD VALUES (5, 'steve', 'USD', 25000, 0, 25000, '2019-05-01 11:05:22')\")\nconn.commit()\n\ncur.execute(\"SELECT * FROM RECORD\")\nshow_all_rows(cur.fetchall())\n\n\nconn.close()\n", "id": "5615283", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "code/dbbank-init.py" }, { "content": "import time\nfin = open('words_in.txt', 'rt', encoding='utf-8-sig')\nlines = fin.readlines()\nfin.close()\n\nfout = open('words_out.txt', 'wt', 
encoding='utf-8-sig')\nfor line in lines:\n # time.sleep(0.2)\n for i in range(len(line)):\n al = line[i]\n if not al.lower().islower() and al not in (' ', '-') :\n print(':'.join((line[0:i-1:],line[i:-1:])), file=fout)\n break\nfout.close()", "id": "12049083", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "code/cleanup.py" }, { "content": "\"\"\"\n題目:奇偶秘差 (APCS-1060304-1)\n輸入:任意數 n\n處理:產生 n 個數值,範圍 10 的 12~15 次方\n(+3) 分別計算奇位數和及偶位數和\n(+1) 奇位數和相減偶位數和,取絕對值,即為秘密差\n輸出:\n(+1) 數字、奇位數和、偶位數和、秘密差\n(+2) 最大及最小秘密差的數值\n\"\"\"\nimport random\nn=int(input('n:'))\nfor i in range(n):\n number = str(random.randint(10**12,10**15))\n print(number)\n number1 = number[0::2]\n number2 = number[1::2]\n sum1 = 0\n sum2 = 0\n for c in number1:\n sum1 += int(c)\n for c in number2:\n sum2 += int(c)\n print(sum1, sum2)\n print(abs(sum1-sum2))", "id": "7990792", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "code/ex1.py" }, { "content": "atuple = (7, 4, 3, 9)\nprint(atuple.index(3))", "id": "2590092", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "code/ex2.py" } ]
1
abi-ba-hacka
[ { "content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2017-04-09 16:15\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('auth', '0008_alter_user_username_max_length'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Owner',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),\n ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),\n ('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n ('is_verified', models.BooleanField(default=False, help_text='Designates whether this user has completed the email verification process to allow login.', verbose_name='verified')),\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n ),\n migrations.CreateModel(\n name='Beer',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=64)),\n ],\n options={\n 'ordering': ('created',),\n 'verbose_name': 'Beer',\n 'verbose_name_plural': 'Beers',\n },\n ),\n migrations.CreateModel(\n name='Growler',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('code', models.CharField(max_length=64, unique=True)),\n ],\n options={\n 'ordering': ('created',),\n 'verbose_name': 'Growler',\n 'verbose_name_plural': 'Growlers',\n },\n ),\n migrations.CreateModel(\n name='Prize',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=64)),\n ],\n options={\n 'ordering': ('created',),\n 'verbose_name': 'Prize',\n 'verbose_name_plural': 'Prizes',\n },\n ),\n migrations.CreateModel(\n name='Refill',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('beer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Beer')),\n ('growler', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='refills', to='api.Growler')),\n ],\n options={\n 'ordering': ('created',),\n 'verbose_name': 'Refill',\n 'verbose_name_plural': 'Refills',\n },\n ),\n migrations.CreateModel(\n name='Shelter',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=64)),\n ('address', models.CharField(blank=True, max_length=64)),\n ('city', models.CharField(blank=True, max_length=64)),\n ('state', models.CharField(blank=True, max_length=64)),\n ('country', models.CharField(blank=True, max_length=64)),\n ],\n options={\n 'ordering': ('created',),\n 'verbose_name': 'Shelter',\n 'verbose_name_plural': 'Shelters',\n },\n ),\n migrations.AddField(\n model_name='refill',\n name='location',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Shelter'),\n ),\n migrations.AddField(\n model_name='refill',\n name='prize',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Prize'),\n ),\n migrations.AddField(\n model_name='growler',\n name='origin',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Shelter'),\n ),\n migrations.AddField(\n model_name='growler',\n 
name='owner',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='growlers', to=settings.AUTH_USER_MODEL),\n ),\n ]\n", "id": "5345358", "language": "Python", "matching_score": 5.918423652648926, "max_stars_count": 0, "path": "api/migrations/0001_initial.py" }, { "content": "from __future__ import unicode_literals\n\nimport uuid\nimport json\nimport random\nfrom django.db import models\nfrom authemail.models import EmailUserManager, EmailAbstractUser\n\n\nclass Owner(EmailAbstractUser):\n # Required\n objects = EmailUserManager()\n\n\nclass Shelter(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=64)\n address = models.CharField(max_length=64, blank=True)\n city = models.CharField(max_length=64, blank=True)\n state = models.CharField(max_length=64, blank=True)\n country = models.CharField(max_length=64, blank=True)\n\n class Meta:\n verbose_name = 'Refugio'\n verbose_name_plural = 'Refugios'\n ordering = ('created',)\n\n def __unicode__(self):\n return ', '.join([self.name, self.city])\n\n\nclass Growler(models.Model):\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n origin = models.ForeignKey(Shelter)\n owner = models.ForeignKey(Owner, related_name='growlers')\n code = models.CharField(max_length=64, unique=True)\n\n class Meta:\n verbose_name = 'Growler'\n verbose_name_plural = 'Growlers'\n ordering = ('created',)\n\n def __init__(self, *args, **kwargs):\n super(Growler, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.code = self.code or str(uuid.uuid4()).replace('-', '')[:8]\n super(Growler, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s from %s' % (self.code, self.owner.email)\n\n\nclass Prize(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=64)\n\n class Meta:\n verbose_name = 'Premio'\n verbose_name_plural = 'Premios'\n ordering = ('created',)\n\n def __unicode__(self):\n return self.name\n\n\nclass Beer(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n name = models.CharField(max_length=64)\n\n class Meta:\n verbose_name = 'Cerveza'\n verbose_name_plural = 'Cervezas'\n ordering = ('created',)\n\n def __unicode__(self):\n return self.name\n\n\nclass Refill(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n location = models.ForeignKey(Shelter)\n beer = models.ForeignKey(Beer)\n prize = models.ForeignKey(Prize)\n growler = models.ForeignKey(Growler, related_name='refills')\n\n class Meta:\n verbose_name = 'Recarga'\n verbose_name_plural = 'Recargas'\n ordering = ('created',)\n\n def __unicode__(self):\n return self.beer.name\n", "id": "799580", "language": "Python", "matching_score": 1.4011496305465698, "max_stars_count": 0, "path": "api/models.py" }, { "content": "from rest_framework import serializers\nimport api.models as 
models\nimport json\n\n\nclass GrowlerSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Growler\n fields = '__all__'\n\n\nclass RefillSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.Refill\n fields = '__all__'\n", "id": "9714216", "language": "Python", "matching_score": 1.2862911224365234, "max_stars_count": 0, "path": "api/serializers.py" }, { "content": "\"\"\"Growler URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom api import views\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nadmin.site.site_header = 'Growler Mania Admin'\n\n\nclass SettingsViewSet(viewsets.GenericViewSet):\n def list(self, request, *args, **kwargs):\n return Response(settings.EXPORTED_SETTINGS)\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'settings', SettingsViewSet, base_name='settings')\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^api/v1/', include(router.urls)),\n\n url(r'^$', views.index, name='refill_index'),\n url(r'^refill/$', views.index, name='refill_index'),\n url(r'^refill/(?P<refill_id>[0-9a-zA-Z_-]+)/$', views.show, name='refill_show'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "id": "6618537", "language": "Python", "matching_score": 1.5236636400222778, "max_stars_count": 0, "path": "growler/urls.py" }, { "content": "from django.contrib import admin\nfrom .models import *\n# Register your models here.\n\n\n@admin.register(Shelter)\nclass ShelterAdmin(admin.ModelAdmin):\n list_display = ['name', 'address', 'city', 'state', 'country']\n search_fields = ['name', 'city', 'state']\n\n\n@admin.register(Beer)\nclass BeerAdmin(admin.ModelAdmin):\n list_display = ['name']\n search_fields = ['name']\n\n\n@admin.register(Prize)\nclass PrizeAdmin(admin.ModelAdmin):\n list_display = ['name']\n search_fields = ['name']\n\n\n@admin.register(Growler)\nclass GrowlerAdmin(admin.ModelAdmin):\n list_display = ['owner', 'created', 'code']\n readonly_fields = ['code']\n search_fields = ['code']\n\n\n@admin.register(Refill)\nclass RefillAdmin(admin.ModelAdmin):\n list_display = ['user', 'created', 'beer', 'prize']\n\n def beer(self, obj):\n return obj.get_beer_display()\n\n def prize(self, obj):\n return obj.get_prize_display()\n\n def user(self, obj):\n owner = obj.growler.owner\n return \"%s %s\" % (owner.first_name, owner.last_name)\n", "id": "2198248", "language": "Python", "matching_score": 1.593691110610962, "max_stars_count": 0, "path": "api/admin.py" }, { "content": "import api.models as models\nimport random\nfrom django.shortcuts 
import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\n\n\ndef show(request, refill_id):\n refill = get_object_or_404(models.Refill, pk=refill_id)\n total = models.Refill.objects.filter(growler=refill.growler).count()\n return render(request, 'refill.html', {'refill': refill, 'total': total, 'prize': refill.prize.name})\n\n\ndef index(request):\n shelters = [{'id': s.id, 'name': s.name} for s in models.Shelter.objects.all()]\n beers = [{'id': b.id, 'name': b.name} for b in models.Beer.objects.all()]\n if request.method == 'GET':\n return render(request, 'add_refill.html', {'beers': beers, 'shelters': shelters})\n try:\n code = request.POST['code']\n growler = models.Growler.objects.get(code=code)\n except Exception:\n return render(request, 'add_refill.html', {\n 'err': \"ID %s invalido.\" % (code or ''),\n 'beers': beers,\n 'shelters': shelters,\n })\n else:\n refill = models.Refill()\n refill.growler = growler\n refill.prize = random.choice(models.Prize.objects.all())\n refill.location = models.Shelter.objects.get(pk=request.POST['shelter'])\n refill.beer = models.Beer.objects.get(pk=request.POST['beer'])\n refill.save()\n return HttpResponseRedirect(reverse('refill_show', args=(refill.id,)))\n", "id": "9827231", "language": "Python", "matching_score": 0.46186551451683044, "max_stars_count": 0, "path": "api/views.py" } ]
1.462407
farzadramezani
[ { "content": "import logging\nimport random\nimport time\n\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.templatetags.static import static\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import TemplateView\n\nfrom djangopwa import version\n\n\nlogger = logging.getLogger('djpwa.pwa.views')\n\n\ndef videos(request):\n return render(request, 'videos/videos.html')\n\ndef offline(request):\n return render(request, 'pwa/offline.html')\n\n\ndef my_page(request):\n routes = {\n 'Home': reverse('home'),\n 'Say hi': reverse('say_something', kwargs={'key': 'hi'}),\n 'Say bye': reverse('say_something', kwargs={'key': 'bye'}),\n 'Say something invalid': reverse('say_something', kwargs={'key': 'invalid'}),\n 'Response in random time': reverse('random_response'),\n 'Fill dynamic cache': reverse('fill_dynamic_cache', kwargs={'id': 1}),\n 'Must not cache': reverse('must_not_cache'),\n }\n\n return render(request, 'pwa/my_page.html', context={'routes': routes})\n\n\ndef say_something(request, key):\n things_to_say = {\n 'hi': 'Hello world',\n 'bye': 'Have a nice day',\n }\n\n if key not in things_to_say:\n raise Http404(f'{key} is not a valid thing to say')\n\n return render(request, 'pwa/say_something.html', context={'thing': things_to_say[key]})\n\n\ndef random_response(request):\n response_time_ms = random.choice((0, 10, 50, 100, 1_000, 10_000))\n response_time = response_time_ms / 1_000\n print(f'Selected response time {response_time}')\n time.sleep(response_time)\n return render(request, 'pwa/random_response.html', context={'response_time': response_time})\n\n\ndef fill_dynamic_cache(request, id):\n return render(request, 'pwa/fill_dynamic_cache.html', context={'id': id})\n\n\n@never_cache\ndef must_not_cache(request):\n return render(request, 'pwa/must_not_cache.html', context={'requested_at': timezone.now()})\n\n\nclass ServiceWorkerView(TemplateView):\n template_name = 'sw.js'\n content_type = 'application/javascript'\n name = 'sw.js'\n\n def get_context_data(self, **kwargs):\n return {\n 'version': version,\n 'icon_url': static('icons/aurss.512x512.png'),\n 'manifest_url': static('manifest.json'),\n 'style_url': static('style.css'),\n 'home_url': reverse('home'),\n 'offline_url': reverse('offline'),\n }\n", "id": "8632166", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "djangopwa/apps/pwa/views.py" }, { "content": "\"\"\"djangopwa URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom . 
import version\nfrom .apps.pwa import views as pwa_views\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('', pwa_views.videos, name='videos'),\n path('offline/', pwa_views.offline, name='offline'),\n path('videos/', pwa_views.videos, name='videos'),\n path('say-something/<str:key>', pwa_views.say_something, name='say_something'),\n path('random-response', pwa_views.random_response, name='random_response'),\n path('fill-dynamic-cache/<int:id>', pwa_views.fill_dynamic_cache, name='fill_dynamic_cache'),\n path('must-not-cache', pwa_views.must_not_cache, name='must_not_cache'),\n\n # The service worker cannot be in /static because its scope will be limited to /static.\n # Since we want it to have a scope of the full application, we rely on this TemplateView\n # trick to make it work.\n path(\n 'sw.js',\n pwa_views.ServiceWorkerView.as_view(),\n name=pwa_views.ServiceWorkerView.name,\n ),\n]\n", "id": "11616339", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "djangopwa/urls.py" } ]
0
Niharika-Sharma
[ { "content": "#################################################################################################################\n#### GUI Interface for users\n#################################################################################################################\nfrom tkinter import *\nimport tkinter as tk\nimport tkinter.messagebox\n\nimport keras\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nroot = Tk()\nroot.geometry('1500x1500')\nroot.title(\"Prediction Form\")\n\nlabel = Label(root, text=\"Prediction form\",width=20,font=(\"bold\", 20))\nlabel.place(x=375,y=20)\n\n\nlabel_0 = Label(root, text=\"Full Name\",width=20,font=(\"bold\", 10))\nlabel_0.place(x=0,y=50)\nentry_0 = Entry(root)\nentry_0.place(x=200,y=50)\n\n\nlabel_1 = Label(root, text=\"Age\",width=10,font=(\"bold\", 10))\nlabel_1.place(x=750,y=50)\nentry_1 = Entry(root)\nentry_1.place(x=950,y=50)\n\n\nlabel_2 = Label(root, text=\"Gender\",width=20,font=(\"bold\", 10))\nlabel_2.place(x=0,y=80)\nglobal var2 \nvar2 = IntVar()\nRadiobutton(root, text=\"Male\",padx = 5, variable=var2, value=1).place(x=375,y=80)\nRadiobutton(root, text=\"Female\",padx = 20, variable=var2, value=0).place(x=750,y=80)\n\n\nlabel_3 = Label(root, text=\"Address\",width=20,font=(\"bold\", 10))\nlabel_3.place(x=0,y=110)\nglobal var3\nvar3 = IntVar()\nRadiobutton(root, text=\"Urban\",padx = 5, variable=var3, value=0).place(x=375,y=110)\nRadiobutton(root, text=\"Rural\",padx = 20, variable=var3, value=1).place(x=750,y=110)\n\n\nlabel_4 = Label(root, text=\"Parent's Cohabitation Status\",width=24,font=(\"bold\", 10))\nlabel_4.place(x=0,y=140)\nglobal var4\nvar4 = IntVar()\nRadiobutton(root, text=\"Apart\",padx = 5, variable=var4, value=1).place(x=375,y=140)\nRadiobutton(root, text=\"Together\",padx = 20, variable=var4, value=0).place(x=750,y=140)\n\n\nlabel_5 = Label(root, text=\"Mother's Education\",width=20,font=(\"bold\", 10))\nlabel_5.place(x=0,y=170)\nglobal var5\nvar5 = IntVar()\nRadiobutton(root, text=\"none\",padx = 5, variable=var5, value=0).place(x=250,y=170)\nRadiobutton(root, text=\"primary education\",padx = 20, variable=var5, value=1).place(x=400,y=170)\nRadiobutton(root, text=\"5th to 9th grade\",padx = 5, variable=var5, value=2).place(x=630,y=170)\nRadiobutton(root, text=\"secondary education\",padx = 20, variable=var5, value=3).place(x=820,y=170)\nRadiobutton(root, text=\"higher education\",padx = 20, variable=var5, value=4).place(x=1010,y=170)\n\n\nlabel_6 = Label(root, text=\"Father's Education\",width=20,font=(\"bold\", 10))\nlabel_6.place(x=0,y=200)\nglobal var6\nvar6 = IntVar()\nRadiobutton(root, text=\"none\",padx = 5, variable=var6, value=0).place(x=250,y=200)\nRadiobutton(root, text=\"primary education\",padx = 20, variable=var6, value=1).place(x=400,y=200)\nRadiobutton(root, text=\"5th to 9th grade\",padx = 5, variable=var6, value=2).place(x=630,y=200)\nRadiobutton(root, text=\"secondary education\",padx = 20, variable=var6, value=3).place(x=820,y=200)\nRadiobutton(root, text=\"higher education\",padx = 20, variable=var6, value=4).place(x=1010,y=200)\n\n\nlabel_7 = Label(root, text=\"<NAME>\",width=20,font=(\"bold\", 10))\nlabel_7.place(x=0,y=230)\nglobal var7\nvar7 = IntVar()\nRadiobutton(root, text=\"teacher\",padx = 5, variable=var7, value=4).place(x=250,y=230)\nRadiobutton(root, text=\"health care related\",padx = 20, variable=var7, value=1).place(x=400,y=230)\nRadiobutton(root, text=\"services\",padx = 5, variable=var7, value=3).place(x=630,y=230)\nRadiobutton(root, text=\"at_home\",padx = 
20, variable=var7, value=0).place(x=820,y=230)\nRadiobutton(root, text=\"other\",padx = 20, variable=var7, value=2).place(x=1010,y=230)\n\n\n\nlabel_8 = Label(root, text=\"<NAME>\",width=20,font=(\"bold\", 10))\nlabel_8.place(x=0,y=260)\nglobal var8\nvar8 = IntVar()\nRadiobutton(root, text=\"teacher\",padx = 5, variable=var8, value=4).place(x=250,y=260)\nRadiobutton(root, text=\"health care related\",padx = 20, variable=var8, value=1).place(x=400,y=260)\nRadiobutton(root, text=\"services\",padx = 5, variable=var8, value=3).place(x=630,y=260)\nRadiobutton(root, text=\"at_home\",padx = 20, variable=var8, value=0).place(x=820,y=260)\nRadiobutton(root, text=\"other\",padx = 20, variable=var8, value=2).place(x=1010,y=260)\n\n\nlabel_9 = Label(root, text=\"Travel Time\",width=20,font=(\"bold\", 10))\nlabel_9.place(x=0,y=290)\nglobal var9\nvar9 = IntVar()\nRadiobutton(root, text=\"<15 min\",padx = 5, variable=var9, value=1).place(x=270,y=290)\nRadiobutton(root, text=\"15-30 min\",padx = 20, variable=var9, value=2).place(x=550,y=290)\nRadiobutton(root, text=\"30-60 min\",padx = 5, variable=var9, value=3).place(x=830,y=290)\nRadiobutton(root, text=\">60 min\",padx = 20, variable=var9, value=4).place(x=1110,y=290)\n\n\nlabel_10 = Label(root, text=\"Study Time\",width=20,font=(\"bold\", 10))\nlabel_10.place(x=0,y=320)\nglobal var10\nvar10 = IntVar()\nRadiobutton(root, text=\"<2 hours\",padx = 5, variable=var10, value=1).place(x=270,y=320)\nRadiobutton(root, text=\"2 to 5 hours\",padx = 20, variable=var10, value=2).place(x=550,y=320)\nRadiobutton(root, text=\"5 to 10 hours\",padx = 5, variable=var10, value=3).place(x=830,y=320)\nRadiobutton(root, text=\">10 hours\",padx = 20, variable=var10, value=4).place(x=1110,y=320)\n\n\nlabel_11 = Label(root, text=\"number of past class failures\",width=24,font=(\"bold\", 10))\nlabel_11.place(x=0,y=350)\nglobal var11\nvar11 = IntVar()\nRadiobutton(root, text=\"0\",padx = 5, variable=var11, value=0).place(x=270,y=350)\nRadiobutton(root, text=\"1\",padx = 20, variable=var11, value=1).place(x=550,y=350)\nRadiobutton(root, text=\"2\",padx = 5, variable=var11, value=2).place(x=830,y=350)\nRadiobutton(root, text=\"higher\",padx = 20, variable=var11, value=3).place(x=1110,y=350)\n\n\nlabel_12 = Label(root, text=\"Extra Education Support\",width=24,font=(\"bold\", 10))\nlabel_12.place(x=0,y=380)\nglobal var12\nvar12 = IntVar()\nRadiobutton(root, text=\"NO\",padx = 5, variable=var12, value=0).place(x=200,y=380)\nRadiobutton(root, text=\"Yes\",padx = 20, variable=var12, value=1).place(x=250,y=380)\n\n\nlabel_13 = Label(root, text=\"Extra Paid Classes\",width=20,font=(\"bold\", 10))\nlabel_13.place(x=420,y=380)\nglobal var13\nvar13 = IntVar()\nRadiobutton(root, text=\"NO\",padx = 5, variable=var13, value=0).place(x=620,y=380)\nRadiobutton(root, text=\"Yes\",padx = 20, variable=var13, value=1).place(x=670,y=380)\n\n\nlabel_14 = Label(root, text=\"Want higher Education\",width=20,font=(\"bold\", 10))\nlabel_14.place(x=910,y=380)\nglobal var14\nvar14 = IntVar()\nRadiobutton(root, text=\"NO\",padx = 5, variable=var14, value=0).place(x=1110,y=380)\nRadiobutton(root, text=\"Yes\",padx = 20, variable=var14, value=1).place(x=1160,y=380)\n\n\nlabel_15 = Label(root, text=\"Internet Access\",width=20,font=(\"bold\", 10))\nlabel_15.place(x=0,y=410)\nglobal var15\nvar15 = IntVar()\nRadiobutton(root, text=\"NO\",padx = 5, variable=var15, value=0).place(x=200,y=410)\nRadiobutton(root, text=\"Yes\",padx = 20, variable=var15, value=1).place(x=250,y=410)\n\n\nlabel_16 = Label(root, 
text=\"Romantic Relationship\",width=20,font=(\"bold\", 10))\nlabel_16.place(x=750,y=410)\nvar16 = IntVar()\nRadiobutton(root, text=\"NO\",padx = 5, variable=var16, value=0).place(x=950,y=410)\nRadiobutton(root, text=\"Yes\",padx = 20, variable=var16, value=1).place(x=1000,y=410)\n\n\nlabel_17 = Label(root, text=\"Quality of family relationship\",width=24,font=(\"bold\", 10))\nlabel_17.place(x=0,y=440)\nglobal var17\nvar17 = IntVar()\nRadiobutton(root, text=\"1(Very Bad)\",padx = 5, variable=var17, value=1).place(x=250,y=440)\nRadiobutton(root, text=\"2\",padx = 20, variable=var17, value=2).place(x=410,y=440)\nRadiobutton(root, text=\"3\",padx = 5, variable=var17, value=3).place(x=560,y=440)\nRadiobutton(root, text=\"4\",padx = 20, variable=var17, value=4).place(x=710,y=440)\nRadiobutton(root, text=\"5(Excellent)\",padx = 20, variable=var17, value=5).place(x=860,y=440)\n\n\nlabel_18 = Label(root, text=\"Free time after school\",width=24,font=(\"bold\", 10))\nlabel_18.place(x=0,y=470)\nglobal var18\nvar18 = IntVar()\nRadiobutton(root, text=\"1(Very low)\",padx = 5, variable=var18, value=1).place(x=250,y=470)\nRadiobutton(root, text=\"2\",padx = 20, variable=var18, value=2).place(x=410,y=470)\nRadiobutton(root, text=\"3\",padx = 5, variable=var18, value=3).place(x=560,y=470)\nRadiobutton(root, text=\"4\",padx = 20, variable=var18, value=4).place(x=710,y=470)\nRadiobutton(root, text=\"5(Very High)\",padx = 20, variable=var18, value=5).place(x=860,y=470)\n\n\nlabel_19 = Label(root, text=\"Going out with friends\",width=24,font=(\"bold\", 10))\nlabel_19.place(x=0,y=500)\nglobal var19\nvar19 = IntVar()\nRadiobutton(root, text=\"1(Very low)\",padx = 5, variable=var19, value=1).place(x=250,y=500)\nRadiobutton(root, text=\"2\",padx = 20, variable=var19, value=2).place(x=410,y=500)\nRadiobutton(root, text=\"3\",padx = 5, variable=var19, value=3).place(x=560,y=500)\nRadiobutton(root, text=\"4\",padx = 20, variable=var19, value=4).place(x=710,y=500)\nRadiobutton(root, text=\"5(Very high)\",padx = 20, variable=var19, value=5).place(x=860,y=500)\n\n\nlabel_20 = Label(root, text=\"Workday alcohol consumption\",width=24,font=(\"bold\", 10))\nlabel_20.place(x=0,y=530)\nglobal var20\nvar20 = IntVar()\nRadiobutton(root, text=\"1(Very low)\",padx = 5, variable=var20, value=1).place(x=250,y=530)\nRadiobutton(root, text=\"2\",padx = 20, variable=var20, value=2).place(x=410,y=530)\nRadiobutton(root, text=\"3\",padx = 5, variable=var20, value=3).place(x=560,y=530)\nRadiobutton(root, text=\"4\",padx = 20, variable=var20, value=4).place(x=710,y=530)\nRadiobutton(root, text=\"5(Very High)\",padx = 20, variable=var20, value=5).place(x=860,y=530)\n\n\nlabel_21 = Label(root, text=\"Weekend alcohol consumption\",width=24,font=(\"bold\", 10))\nlabel_21.place(x=0,y=560)\nglobal var21\nvar21 = IntVar()\nRadiobutton(root, text=\"1(Very low)\",padx = 5, variable=var21, value=1).place(x=250,y=560)\nRadiobutton(root, text=\"2\",padx = 20, variable=var21, value=2).place(x=410,y=560)\nRadiobutton(root, text=\"3\",padx = 5, variable=var21, value=3).place(x=560,y=560)\nRadiobutton(root, text=\"4\",padx = 20, variable=var21, value=4).place(x=710,y=560)\nRadiobutton(root, text=\"5(Very high)\",padx = 20, variable=var21, value=5).place(x=860,y=560)\n\n\nlabel_22 = Label(root, text=\"Current health status\",width=24,font=(\"bold\", 10))\nlabel_22.place(x=0,y=590)\nglobal var22\nvar22 = IntVar()\nRadiobutton(root, text=\"1(Very Bad)\",padx = 5, variable=var22, value=1).place(x=250,y=590)\nRadiobutton(root, text=\"2\",padx = 20, 
variable=var22, value=2).place(x=410,y=590)\nRadiobutton(root, text=\"3\",padx = 5, variable=var22, value=3).place(x=560,y=590)\nRadiobutton(root, text=\"4\",padx = 20, variable=var22, value=4).place(x=710,y=590)\nRadiobutton(root, text=\"5(Very Good)\",padx = 20, variable=var22, value=5).place(x=860,y=590)\n\n\nlabel_23 = Label(root, text=\"Absences (Range: 0 to 93) \",width=24,font=(\"bold\", 10))\nlabel_23.place(x=0,y=610)\nentry_23 = Entry(root)\nentry_23.place(x=375,y=610)\n\n\n\ndef client_exit():\n root.destroy()\n\ndef show_result():\n \n i1 = int(entry_1.get())\n i2 = int(var2.get())\n i3 = int(var3.get())\n i4 = int(var4.get())\n i5 = int(var5.get())\n i6 = int(var6.get())\n i7 = int(var7.get())\n i8 = int(var8.get())\n i9 = int(var9.get())\n i10 = int(var10.get())\n i11 = int(var11.get())\n i12 = int(var12.get())\n i13 = int(var13.get())\n i14 = int(var14.get())\n i15 = int(var15.get())\n i16 = int(var16.get())\n i17 = int(var17.get())\n i18 = int(var18.get())\n i19 = int(var19.get())\n i20 = int(var20.get())\n i21 = int(var21.get())\n i22 = int(var22.get())\n i23 = int(entry_23.get())\n print(i2,i1,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23)\n np.array([[i2,i1,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23]])\n sc = StandardScaler()\n classifier = keras.models.load_model('/home/niharika/Desktop/ML_Project/ANN_student(2).model')\n\n new_prediction = classifier.predict(sc.fit_transform(np.array([[i2,i1,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23]])))\n a = float(new_prediction)\n print(a)\n if a > 0.5:\n pred = \"You will PASS the Examination\"\n else:\n pred = \"You will FAIL the Examination\"\n\n print(pred)\n tk.messagebox.showinfo( \"Prediction\", pred )\n \nButton(root, text='Submit',width=20,bg='brown',fg='white', command = show_result).place(x=550,y=670)\nButton(root, text='EXIT',width=20,bg='brown',fg='white', command = client_exit).place(x=550,y=700)\n\n\n\nroot.mainloop()\n\n\n", "id": "160004", "language": "Python", "matching_score": 4.273364543914795, "max_stars_count": 0, "path": "Model/GUI.py" }, { "content": "import matplotlib.pyplot as plt\nimport numpy as np\n\ndef display(raw_code,title,time_interval=10,time_type=None):\n code=[]\n time=[]\n new_code=[]\n new_time=[]\n beg=1\n end=beg+time_interval\n \n for i in range(len(raw_code)):\n time.append(np.arange(beg,end))\n if i==0:\n code.append(np.array([raw_code[i]]*time_interval))\n else:\n code.append(np.array([raw_code[i]]*(time_interval+1)))\n\n beg=end-1\n end+=time_interval\n \n \n time=np.array(time)\n code=np.array(code)\n\n for i in range(len(raw_code)):\n for j,k in zip(code[i],time[i]):\n new_code.append(j)\n new_time.append(k)\n \n if time_type=='half':\n length=len(raw_code)//2\n else:\n length=len(raw_code)\n \n for i in range(length):\n plt.plot([(i+1)*10,(i+1)*10],[-1.5,1.5],linestyle=':',color='k')\n \n \n plt.plot([1,1],[-2,2],color = 'k')\n plt.plot([1,len(new_time)],[0,0],color ='k')\n plt.plot(new_time,new_code,color = 'r')\n plt.title(title)\n plt.show()\n \ndef unipolar(code_string):\n code=np.array(list(code_string),dtype=np.int)\n display(code,title='Unipolar')\n \ndef polar_NRZ_L(code_string):\n code=np.array(list(code_string),dtype=np.int)\n code[code==0]=-1\n display(code,title='Polar NRZ-L')\n \ndef polar_NRZ_I(code_string):\n code=np.array(list(code_string),dtype=np.int)\n \n for i in range(1,len(code)):\n if (code[0]=='0'or code[0]==0):\n code[0]=-1 \n \n if code[i]==0:\n 
code[i]=code[i-1]\n else:\n if code[i-1]==-1:\n code[i]=1\n else:\n code[i]=-1\n display(code,title='Polar NRZ-I')\n \ndef AMI(code_string):\n code=np.array(list(code_string),dtype=np.int)\n temp=[]\n flag=1\n for i in range(len(code)):\n if code[i]==1:\n temp.append(flag)\n #temp.append(0)\n flag=-1*flag\n else:\n temp.append(0)\n #temp.append(0)\n code=temp\n display(code,title='AMI')\n \n\ndef B8ZS(code_string):\n s1=code_string.replace(\"00000000\",\"000vb0vb\")\n #print(s1)\n code=np.array(list(s1))\n #print(code)\n temp=[]\n flag=1\n for i in range(len(code)):\n if code[i]=='1'or code[i]=='b' :\n temp.append(flag)\n m = flag\n flag=-1*flag\n \n elif code[i]=='v':\n temp.append(m)\n \n \n \n else:\n temp.append(0)\n \n code=temp\n display(code,title='B8ZS')\n \ndef HDB3(code_string):\n s=code_string.replace(\"0000\",\"xxxx\")\n code1=np.array(list(s))\n \n m=0\n for i in range(len(code1)):\n if code1[i]=='1':\n m=m+1\n \n elif code1[i]=='x':\n if m%2 == 0:\n code1[i]='b'\n code1[i+1]='0'\n code1[i+2]='0'\n code1[i+3]='v'\n m=m+2\n else :\n code1[i]='0'\n code1[i+1]='0'\n code1[i+2]='0'\n code1[i+3]='v'\n m=m+1\n else:\n continue\n #print(code1)\n code = code1\n #s1=s \n \n #breaks after counting the no of ones before 1st set \n \n #code=np.array(list(s1))\n #print(code)\n temp=[]\n flag=1\n for i in range(len(code)):\n if code[i]=='1' or code[i]=='b' :\n temp.append(flag)\n m = flag\n flag=-1*flag\n \n elif code[i]=='v':\n temp.append(m)\n \n \n \n else:\n temp.append(0)\n \n code=temp\n \n display(code,title='HDB3') \n\n\ndef manchester(code_string):\n code=np.array(list(code_string),dtype=np.int)\n temp=[]\n \n for i in range(len(code)):\n if code[i]==1:\n temp.append(-1)\n temp.append(1)\n else:\n temp.append(1)\n temp.append(-1)\n code=temp\n display(code,title='Manchester',time_interval=5,time_type='half')\n\ndef differential_manchester(code_string):\n code=np.array(list(code_string),dtype=np.int)\n temp=[]\n flag1=1\n flag2=-1\n for i in range(len(code)):\n \n if i==0 and code[0]==0:\n flag1=1\n flag2=-1\n temp.append(flag1)\n temp.append(flag2)\n i=i+1\n \n elif i==0 and code[0]==1:\n flag1=-1\n flag2=1\n temp.append(flag1)\n temp.append(flag2)\n i=i+1\n \n elif code[i]==1:\n flag1 = -1*flag1\n flag2 = -1*flag2\n temp.append(flag1)\n temp.append(flag2)\n \n elif code[i]==0:\n temp.append(flag1)\n temp.append(flag2)\n \n \n \n \n code=temp\n display(code,title='Differential Manchester',time_interval=5,time_type='half')\n\n \nfrom tkinter import *\nimport tkinter as tk\nimport tkinter.messagebox\n\nimport keras\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nroot = Tk()\nroot.geometry('1500x1500')\nroot.title(\"Prediction Form\")\n\nlabel = Label(root, text=\"Line Coding\",width=20,font=(\"bold\", 30))\nlabel.place(x=375,y=20)\n\n\nlabel_1 = Label(root, text=\"DATA BITS: \",width=20,font=(\"bold\", 12))\nlabel_1.place(x=350,y=100)\nentry_1 = Entry(root)\nentry_1.place(x=550,y=100)\n\n\n\nlabel_5 = Label(root, text=\"Type of Encoding\",width=20,font=(\"bold\", 12))\nlabel_5.place(x=350,y=170)\nglobal var5\nvar5 = IntVar()\nRadiobutton(root, text=\"Unipolar NRZ\",padx = 5, variable=var5, value=0).place(x=650,y=170)\nRadiobutton(root, text=\"NRZ-I\",padx = 5, variable=var5, value=1).place(x=650,y=200)\nRadiobutton(root, text=\"NRZ-L\",padx = 20, variable=var5, value=2).place(x=635,y=230)\nRadiobutton(root, text=\"Manchester\",padx = 5, variable=var5, value=3).place(x=650,y=260)\nRadiobutton(root, text=\"Differential Manchester\",padx = 20, variable=var5, 
value=4).place(x=635,y=290)\nRadiobutton(root, text=\"AMI\",padx = 20, variable=var5, value=5).place(x=635,y=320)\n\n\n\nlabel_9 = Label(root, text=\"Scrambling Schemes(For AMI):\",width=24,font=(\"bold\", 12))\nlabel_9.place(x=350,y=370)\nglobal var9\nvar9 = IntVar()\nRadiobutton(root, text=\"None\",padx = 5, variable=var9, value=0).place(x=650,y=370)\nRadiobutton(root, text=\"B8ZS\",padx = 20, variable=var9, value=1).place(x=635,y=400)\nRadiobutton(root, text=\"HDB3\",padx = 5, variable=var9, value=2).place(x=650,y=430)\n\ndef client_exit():\n root.destroy()\n\ndef show_result():\n \n i1 = str(entry_1.get())\n i5 = int(var5.get())\n i9 = int(var9.get())\n print(i1)\n if i5 == 1:\n polar_NRZ_I(i1)\n elif i5 == 2:\n polar_NRZ_L(i1)\n elif i5 == 3:\n manchester(i1)\n elif i5 == 4:\n differential_manchester(i1)\n elif i5 == 5:\n if i9 == 0:\n AMI(i1)\n elif i9 == 1:\n B8ZS(i1)\n elif i9 == 2:\n HDB3(i1)\n elif i5 == 0:\n unipolar(i1)\n root.destroy()\n \n \n \n #tk.messagebox.showinfo( \"Prediction\", pred )\n #tk.graph.showgraph( polar_NRZ_I(i1))\n \nButton(root, text='Submit',width=20,bg='brown',fg='white', command = show_result).place(x=550,y=550)\n#Button(root, text='EXIT',width=20,bg='brown',fg='white', command = client_exit).place(x=550,y=600)\n\n\n\nroot.mainloop()\n", "id": "12342389", "language": "Python", "matching_score": 0.8150379061698914, "max_stars_count": 0, "path": "code/Encoding-Scrambling.py" }, { "content": "###############################################################################################################\n#### Data Refining and buildin an ANN (Artificial Neural Network) Model\n###############################################################################################################\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv(\"/home/niharika/Desktop/ML_Project/student/data.csv\")\n\ndf.drop(['G1', 'G2'], axis=1, inplace=True)\ndf = df.drop(['school', 'famsize', 'reason',\n 'guardian', 'famsup', 'activities', 'nursery',\n ], axis=1)\n\n\n\n#0 stands for F and 1 stands for M\ndf['sex'] = df['sex'].apply(lambda x: 0 if x == 'F' else 1)\n# 0 stands for U and 1 stands for R. [U=Urban, R=Rural]\ndf['address'] = df['address'].apply(lambda x: 0 if x == 'U' else 1)\n# LE3 = Less than 3. 
[0], GE3 = Greater than 3.[1]\ndf['Pstatus'] = df['Pstatus'].apply(lambda x: 0 if x == 'T' else 1)\n# 0 = no and 1 = yes\ndf['paid'] = df['paid'].apply(lambda x: 0 if x == 'no' else 1)\n# 0 = no and 1 = yes\ndf['higher'] = df['higher'].apply(lambda x: 0 if x == 'no' else 1)\ndf['internet'] = df['internet'].apply(lambda x: 0 if x == 'no' else 1)\ndf['romantic'] = df['romantic'].apply(lambda x: 0 if x == 'no' else 1)\ndf['Mjob'] = df['Mjob'].apply(lambda x: 0 if x == 'at_home' else (1 if x=='health' else (2 if x=='other' else (3 if x=='services' else 4) )))\ndf['Fjob'] = df['Fjob'].apply(lambda x: 0 if x == 'at_home' else (1 if x=='health' else (2 if x=='other' else (3 if x=='services' else 4) )))\ndf['schoolsup'] = df['schoolsup'].apply(lambda x: 0 if x == 'no' else 1)\n\ndf['grade_status'] = df['G3'].apply(lambda x: 'Fail' if x < 12 else 'Pass')\ndf['grade_status'] = df['grade_status'].apply(lambda x: 0 if x == 'Fail' else 1)\n\ndf_concat = df\n\n'''\ndf_Mjob = pd.get_dummies(df['Mjob']).iloc[:, 1:]\ndf_Fjob = pd.get_dummies(df['Fjob']).iloc[:, 1:]\n\n\ndf_concat = pd.concat([df_Mjob,df_Fjob,df], axis=1)\n'''\nX = df_concat.iloc[:, :-2].values\ny = df_concat.iloc[:, -1].values\n\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2, random_state =0)\n\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.fit_transform(X_test)\n\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initialising the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 23))\n\n# Adding the second hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n\n# Adding the output layer\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting the ANN to the Training set\nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 100)\n\n# Part 3 - Making predictions and evaluating the model\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n\n# Predicting a single new observation\n\"\"\"Predict if the customer with the following informations will leave the bank:\nGeography: France\nCredit Score: 600\nGender: Male\nAge: 40\nTenure: 3\nBalance: 60000\nNumber of Products: 2\nHas Credit Card: Yes\nIs Active Member: Yes\nEstimated Salary: 50000\"\"\"\nclassifier.save('ANN_student(2).model')\n'''\nnew_prediction = classifier.predict(sc.transform(np.array([[0,15,0,0,1,1,0,3,1,2,0,1,0,1,1,0,4,3,2,2,3,3,6]])))\nif (new_prediction > 0.5) :\n print(\"Fail\")\nelse:\n print(\"Pass\")\n'''\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n\n", "id": "5123753", "language": "Python", "matching_score": 4.020877361297607, "max_stars_count": 0, "path": "Model/training_model.py" }, { "content": "from keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\n\n# Initialising the CNN\nclassifier = Sequential()\n\n# Step 1 - Convolution\nclassifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), 
activation = 'relu'))\n\n# Step 2 - Pooling\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Adding a second convolutional layer\nclassifier.add(Conv2D(32, (3, 3), activation = 'relu'))\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Step 3 - Flattening\nclassifier.add(Flatten())\n\n# Step 4 - Full connection\nclassifier.add(Dense(units = 128, activation = 'relu'))\nclassifier.add(Dense(units = 1, activation = 'sigmoid'))\n\n# Compiling the CNN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Part 2 - Fitting the CNN to the images\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntraining_set = train_datagen.flow_from_directory('/home/niharika/Desktop/ML_Project/CNN/dataset/training_set',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\ntest_set = test_datagen.flow_from_directory('/home/niharika/Desktop/ML_Project/CNN/dataset/test_set',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\n\nclassifier.fit_generator(training_set,\n steps_per_epoch = 800,\n epochs = 15,\n validation_data = test_set,\n validation_steps = 200)\n\n\nclassifier.save('CNN_trained_model(1).model')\n\n\n\n", "id": "1944807", "language": "Python", "matching_score": 0.8786776661872864, "max_stars_count": 0, "path": "Model/training_model.py" }, { "content": "import tkinter as tk\nfrom tkinter import filedialog\nimport keras\nimport numpy as np\nfrom keras.preprocessing import image\nmodel = keras.models.load_model('/home/niharika/Desktop/ML_Project/CNN/CNN_trained_model(2).model')\n\n\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\nclass Window(Frame):\n def __init__(self,master = None):\n Frame.__init__(self,master)\n self.master = master\n self.init_window()\n def init_window(self):\n self.master.title(\"CNN Model\")\n self.pack(fill=BOTH, expand=1)\n \n button = tk.Button(root, text='Upload Image', width=25, command=self.upload_img)\n button.pack()\n \n button = tk.Button(root, text='Exit', width=25, command=self.client_exit)\n button.pack()\n \n def upload_img(self):\n file_path = filedialog.askopenfilename()\n test_image = image.load_img(file_path, target_size = (64, 64))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = model.predict(test_image)\n \n if result[0][0] == 1:\n prediction = 'dog'\n else:\n prediction = 'cat'\n \n \n self.showImg(file_path,prediction)\n \n \n def showImg(self,file_path,prediction):\n \n load = Image.open(file_path)\n load = load.resize((250, 250), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(self, image = render)\n img.image = render\n img.place(x = 75, y=0)\n text = Label(self,text =\"PREDICTION: \" + prediction)\n text.pack()\n \n \n \n def client_exit(self):\n root.destroy()\n \nroot = Tk()\nroot.geometry(\"400x300\")\napp = Window(root)\nroot.mainloop()\n \n\n\n", "id": "1188318", "language": "Python", "matching_score": 5.579843521118164, "max_stars_count": 0, "path": "Model/gui.py" }, { "content": "from tkinter import *\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\nimport keras\nimport numpy as np\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nimport 
PIL.Image, PIL.ImageTk\nimport time\nimport cv2\nimport os\n\n\ndef run(runfile):\n with open(runfile,\"r\") as rnf:\n exec(rnf.read())\n\nmodel = keras.models.load_model('CNN_face-mask.model')\n#Provide path to your model\n\n\n\nclass Window(Frame):\n def __init__(self,master = None):\n Frame.__init__(self,master)\n self.master = master\n self.init_window()\n def init_window(self):\n self.master.title(\"CNN Model\")\n self.pack(fill=BOTH, expand=1)\n \n button = tk.Button(root, text='Upload Photo', width=25, command=self.upload_img)\n button.pack()\n \n button = tk.Button(root, text='Webcam', width=25, command=self.webcam)\n button.pack()\n \n button = tk.Button(root, text='Exit', width=25, command=self.client_exit)\n button.pack()\n \n \n \n \n \n def upload_img(self):\n \n file_path = filedialog.askopenfilename()\n test_image = load_img(file_path,target_size = (150, 150))\n test_image = img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = model.predict(test_image)\n \n if result[0][0] == 1:\n prediction = 'Without Mask'\n else:\n prediction = 'With Mask'\n \n \n self.showImg(file_path,prediction)\n \n \n\n \n def webcam(self):\n run('Webcam.py')\n \n \n \n \n \n def showImg(self,file_path,prediction):\n \n load = Image.open(file_path)\n load = load.resize((250, 250), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n img = Label(self, image = render)\n img.image = render\n img.place(x = 75, y=0)\n text = Label(self,text =\"PREDICTION: \" + prediction)\n text.pack()\n \n \n \n def client_exit(self):\n root.destroy()\n\n\n\n\n\nroot = Tk()\nroot.geometry(\"400x300\")\napp = Window(root)\nroot.mainloop()\n \n\n\n", "id": "2057729", "language": "Python", "matching_score": 2.5461385250091553, "max_stars_count": 0, "path": "GUI_Code/GUI.py" }, { "content": "import keras\nfrom keras.preprocessing.image import img_to_array\nimport imutils\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport keras\nimport numpy as np\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nimport tkinter\n\n\nwindow = tkinter.Tk()\n\nmodel = keras.models.load_model('CNN_face-mask.model')\n#Provide path to your model\n\nface_clsfr=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nlabels_dict={0:'without_mask',1:'with_mask'}\ncolor_dict={0:(0,0,255),1:(0,255,0)}\n#size = 4\n\nwebcam = cv2.VideoCapture(0) #Use camera 0\n\n # We load the xml file\nclassifier = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n\n\nwhile True:\n (rval, im) = webcam.read()\n im=cv2.flip(im,1,1) #Flip to act as a mirror\n\n # Resize the image to speed up detection\n mini = cv2.resize(im, (im.shape[1] // 4, im.shape[0] // 4))\n\n # detect MultiScale / faces \n faces = classifier.detectMultiScale(mini)\n\n # Draw rectangles around each face\n for f in faces:\n (x, y, w, h) = [v * 4 for v in f] #Scale the shapesize backup\n #Save just the rectangle faces in SubRecFaces\n face_img = im[y:y+h, x:x+w]\n resized=cv2.resize(face_img,(150,150))\n test_image = img_to_array(resized)\n test_image = np.expand_dims(test_image, axis = 0)\n result = model.predict(test_image)\n \n \n #print(result, result[0][0])\n if result[0][0] == 1:\n label = 1\n else:\n label = 0\n \n cv2.rectangle(im,(x,y),(x+w,y+h),color_dict[label],2)\n cv2.rectangle(im,(x,y-40),(x+w,y),color_dict[label],-1)\n cv2.putText(im, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)\n \n # Show the image\n cv2.imshow('Webcam', 
im)\n key = cv2.waitKey(10)\n # if Esc key is press then break out of the loop \n if key == 27:\n window.destroy()\n #The Esc key\n break\n# Stop video\nwebcam.release()\n\n# Close all started windows\ncv2.destroyAllWindows()\n\nwindow.mainloop() \n", "id": "11366481", "language": "Python", "matching_score": 2.0804314613342285, "max_stars_count": 0, "path": "GUI_Code/Webcam.py" } ]
2.546139
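The record above bundles a Tkinter prediction GUI (Model/GUI.py) that reloads a saved Keras model and then calls sc.fit_transform on the single input row, i.e. it re-fits a fresh StandardScaler on one sample instead of reusing the scaler fitted on the training data. A minimal sketch of the usual pattern follows; the file names and the dummy training matrix are assumptions for illustration only, not part of the original project.

import joblib
import numpy as np
from sklearn.preprocessing import StandardScaler

# Training side (conceptually next to Model/training_model.py):
X_train = np.random.rand(100, 23)      # stand-in for the 23 GUI features
sc = StandardScaler().fit(X_train)     # fit once, on the training data
joblib.dump(sc, "scaler.joblib")       # persist next to the saved Keras model

# Prediction side (conceptually inside show_result in Model/GUI.py):
sc = joblib.load("scaler.joblib")      # reload the fitted scaler, do not re-fit
new_row = np.random.rand(1, 23)        # one 23-feature input row
scaled = sc.transform(new_row)         # transform only; never fit on a single sample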
tigicion
[ { "content": "# 找出所有相加之和为 n 的 k 个数的组合。组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。\n\n# 说明:\n\n# 所有数字都是正整数。\n# 解集不能包含重复的组合。\nfrom typing import List\nclass Solution:\n def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n res=[]\n def bt(l, cur, s):\n if s>=n:\n if s==n and len(l)==k:\n res.append(list(l))\n else:\n return\n for i in range(cur, 10):\n if s+i>n:\n return\n l.append(i)\n bt(l, i+1, s+i)\n l.pop()\n\n bt([], 1, 0)\n return res\n\nif __name__==\"__main__\":\n s=Solution()\n print(s.combinationSum3(3, 9))\n \n", "id": "12485344", "language": "Python", "matching_score": 1, "max_stars_count": 1, "path": "backtracking/lc216.py" }, { "content": "# 城市的天际线是从远处观看该城市中所有建筑物形成的轮廓的外部轮廓。给你所有建筑物的位置和高度,请返回由这些建筑物形成的 天际线 。\n\n# 每个建筑物的几何信息由数组 buildings 表示,其中三元组 buildings[i] = [lefti, righti, heighti] 表示:\n\n# lefti 是第 i 座建筑物左边缘的 x 坐标。\n# righti 是第 i 座建筑物右边缘的 x 坐标。\n# heighti 是第 i 座建筑物的高度。\n# 天际线 应该表示为由 “关键点” 组成的列表,格式 [[x1,y1],[x2,y2],...] ,并按 x 坐标 进行 排序 。关键点是水平线段的左端点。列表中最后一个点是最右侧建筑物的终点,y 坐标始终为 0 ,仅用于标记天际线的终点。此外,任何两个相邻建筑物之间的地面都应被视为天际线轮廓的一部分。\n\n# 注意:输出天际线中不得有连续的相同高度的水平线。例如 [...[2 3], [4 5], [7 5], [11 5], [12 7]...] 是不正确的答案;三条高度为 5 的线应该在最终输出中合并为一个:[...[2 3], [4 5], [12 7], ...]\n# 输入:buildings = [[2,9,10],[3,7,15],[5,12,12],[15,20,10],[19,24,8]]\n# 输出:[[2,10],[3,15],[7,12],[12,0],[15,10],[20,8],[24,0]]\nfrom bisect import bisect_left as bi\nfrom typing import List\nclass Solution:\n def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:\n t = []\n for i in buildings:\n t.append([i[0], i[2], 1])\n t.append([i[1], i[2], -1])\n t.sort(key=lambda a: a[0])\n s=[]\n res=[]\n def minus(a, b):\n t=bi(a, b)\n a.pop(t)\n def add(a, b):\n t=bi(a,b)\n a.insert(t, b)\n for k, i in enumerate(t):\n if i[2]==1:\n add(s, i[1])\n else:\n minus(s, i[1])\n if (k==len(t)-1 or t[k][0]!=t[k+1][0]) and (len(res)==0 or len(s)==0 or s[-1]!=res[-1][1]):\n if len(s)==0:\n res.append([i[0], 0])\n else:\n res.append([i[0], s[-1]])\n print(s, res)\n return res\n\nif __name__ == \"__main__\":\n a= [[2,9,10],[3,7,15],[5,12,12],[15,20,10],[19,24,8]]\n print(Solution().getSkyline(a))", "id": "7456122", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "skyline/lc218.py" }, { "content": "# 给你一个整数数组 nums 和两个整数 k 和 t 。请你判断是否存在 两个不同下标 i 和 j,使得 abs(nums[i] - nums[j]) <= t ,同时又满足 abs(i - j) <= k 。\n\n# 如果存在则返回 true,不存在返回 false。\n\nfrom typing import List\n\nclass Solution:\n def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:\n def getk(v):\n return v//(t+1)\n a={}\n for i, j in enumerate(nums):\n b = getk(j)\n # print(i, j, b, a)\n if b in a:\n return True\n elif b-1 in a and abs(j - a[b-1])<=t:\n return True\n elif b+1 in a and abs(j - a[b+1])<=t:\n return True\n else:\n a[b]=j\n if i-k >=0:\n del a[getk(nums[i-k])]\n return False\n\nif __name__ ==\"__main__\":\n print(Solution().containsNearbyAlmostDuplicate([1,5,9,1,5,9],2,3))", "id": "5853595", "language": "Python", "matching_score": 2.4047915935516357, "max_stars_count": 1, "path": "slide_window/lc220.py" }, { "content": "# 给定一个整数数组和一个整数 k,判断数组中是否存在两个不同的索引 i 和 j,使得 nums [i] = nums [j],并且 i 和 j 的差的 绝对值 至多为 k。\n\n\nfrom typing import List\nclass Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n a={}\n for j, i in enumerate(nums):\n if i in a.keys():\n return True\n else:\n a[i]=1\n if j-k>=0:\n del a[nums[j-k]] \n return False\n \nif __name__==\"__main__\":\n print(Solution().containsNearbyDuplicate([1,2,3,1,2,3],3))", "id": "6892048", "language": "Python", "matching_score": 1, 
"max_stars_count": 1, "path": "slide_window/lc219.py" }, { "content": "# 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,\n# 这意味着第一个房屋和最后一个房屋是紧挨着的。同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,\n# 系统会自动报警 。\n\n# 给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,今晚能够偷窃到的最高金额。\nfrom typing import List\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n def rob_range(nums, s, e):\n l=e-s+1\n dp=[0 for _ in range(l)]\n dp[0]=nums[s]\n dp[1]=max(nums[s],nums[s+1])\n for i in range(2, l):\n dp[i]=max(dp[i-1], dp[i-2]+nums[s+i])\n return dp[-1]\n \n l=len(nums)\n if l<=3:\n return max(nums)\n return max(rob_range(nums, 0, l-2), rob_range(nums, 1, l-1))\n \nif __name__==\"__main__\":\n s=Solution()\n print(s.rob([1,2,3,1]))\n", "id": "7920343", "language": "Python", "matching_score": 1, "max_stars_count": 1, "path": "dynamic_programming/lc213.py" }, { "content": "# 给定整数数组 nums 和整数 k,请返回数组中第 k 个最大的元素。\n\n# 请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。\nfrom typing import List\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n _len=len(nums)\n k=_len-k\n if _len<=k:\n return min(nums)\n def part(l, r):\n if l>=r:\n return l\n t=nums[l]\n s=l\n l=l+1\n while l<=r:\n if nums[l]<=t:\n l+=1\n elif nums[r]>=t:\n r-=1\n else:\n nums[l], nums[r] = nums[r], nums[l]\n nums[s], nums[l-1] = nums[l-1], nums[s]\n return l-1\n\n def qfind(l, r):\n m=part(l,r)\n if m==k:\n return m\n elif m>k:\n return qfind(l, m-1)\n else:\n return qfind(m+1, r)\n\n res=qfind(0, _len-1)\n return nums[res]\n\nif __name__==\"__main__\":\n s=Solution()\n print(s.findKthLargest([3,2,1,5,6,4],2))\n ", "id": "11768666", "language": "Python", "matching_score": 1, "max_stars_count": 1, "path": "sort/lc215.py" } ]
1
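In the lc220 solution of the record above, candidates are grouped into buckets of width t + 1 via v // (t + 1): two values falling into the same bucket differ by at most t, only the two adjacent buckets still need the explicit |v - stored| <= t check, and the index constraint is enforced by evicting the element that leaves the window of size k. A small standalone illustration of the bucketing invariant; the values below are chosen only for the example.

t = 3
width = t + 1
for v in (0, 3, 4, 7, 8):
    print(v, "-> bucket", v // width)
# 0 and 3 share bucket 0, so |0 - 3| <= t holds automatically;
# 8 lands in bucket 2, adjacent to bucket 1, so it still needs the explicit check against 7.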
andiyan1
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Test Products and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass EstimateTool(Document):\n\tdef get_bom_item_button(self):\n\t\tdl = frappe.db.sql(\"\"\"select b1.item_code, b1.item_name, b1.qty, b1.stock_uom, b1.rate, b1.amount\n\t\t\tfrom\n\t\t\t\t`tabBOM Item` b1\n\t\t\twhere\n\t\t\t\tb1.parent = %s\n\t\t\torder by b1.idx ASC\"\"\", self.bom, as_dict=1)\n\n\t\tself.set('estimate_tool_item', [])\n\n\t\tfor d in dl:\n\t\t\tnl = self.append('estimate_tool_item', {})\n\t\t\tnl.item_code = d.item_code\n\t\t\tnl.item_name = d.item_name\n\t\t\tnl.item_quantity = d.qty\n\t\t\tnl.item_uom = d.stock_uom\n\t\t\tnl.price_list_rate = d.rate\n\t\t\tnl.amounted_total = d.amount\n\t\t\tnl.invoiced_amount = d.amount\n\t\t\tnl.factor_1 = \"1\"\n\t\t\tnl.factor_2 = \"1\"\n\t\t\tnl.factor_3 = \"1\"\n\t\t\tnl.factor_4 = \"1\"\n\t\t\tnl.factor_5 = \"1\"\n\n\tpass\n", "id": "1012735", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "estimate/estimate/doctype/estimate_tool/estimate_tool.py" } ]
0
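The EstimateTool controller in the record above fills its estimate_tool_item child table from the BOM selected on the document. A hypothetical usage sketch from a bench console follows; the document name "EST-0001" and BOM name "BOM-0001" are placeholders, not values taken from the original app.

import frappe

doc = frappe.get_doc("Estimate Tool", "EST-0001")
doc.bom = "BOM-0001"
doc.get_bom_item_button()   # repopulates estimate_tool_item rows from the BOM items
doc.save()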
andro2157
[ { "content": "# License: BSD 3 clause\n\nimport unittest\n\nimport numpy as np\n\nfrom tick.hawkes.inference import HawkesSumGaussians\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n self.int_1 = 4\n self.int_2 = 6\n self.float_1 = 0.3\n self.float_2 = 0.2\n\n def test_hawkes_sumgaussians_solution(self):\n \"\"\"...Test solution obtained by HawkesSumGaussians on toy timestamps\n \"\"\"\n events = [[\n np.array([1, 1.2, 3.4, 5.8, 10.3, 11, 13.4]),\n np.array([2, 5, 8.3, 9.10, 15, 18, 20, 33])\n ], [\n np.array([2, 3.2, 11.4, 12.8, 45]),\n np.array([2, 3, 8.8, 9, 15.3, 19])\n ]]\n\n n_nodes = len(events[0])\n n_gaussians = 3\n max_mean_gaussian = 5\n step_size = 1e-3\n C = 10\n lasso_grouplasso_ratio = 0.7\n\n baseline_start = np.zeros(n_nodes) + .2\n amplitudes_start = np.zeros((n_nodes, n_nodes, n_gaussians)) + .2\n\n learner = HawkesSumGaussians(\n n_gaussians=n_gaussians, max_mean_gaussian=max_mean_gaussian,\n step_size=step_size, C=C,\n lasso_grouplasso_ratio=lasso_grouplasso_ratio, n_threads=3,\n max_iter=10, verbose=False, em_max_iter=3)\n learner.fit(events[0], baseline_start=baseline_start,\n amplitudes_start=amplitudes_start)\n\n baseline = np.array([0.0979586, 0.15552228])\n\n amplitudes = np.array([[[0.20708954, -0.00627318, 0.08388442],\n [-0.00341803, 0.34805652, -0.00687372]],\n [[-0.00341635, 0.1608013, 0.05531324],\n [-0.00342652, -0.00685425, 0.19046195]]])\n\n np.testing.assert_array_almost_equal(learner.baseline, baseline,\n decimal=6)\n np.testing.assert_array_almost_equal(learner.amplitudes, amplitudes,\n decimal=6)\n\n kernel_values = np.array([\n -0.00068796, 0.01661161, 0.08872543, 0.21473618, 0.25597692,\n 0.15068586, 0.04194497, 0.00169372, -0.00427233, -0.00233042\n ])\n kernels_norm = np.array([[0.28470077, 0.33776477],\n [0.21269818, 0.18018118]])\n\n np.testing.assert_almost_equal(\n learner.get_kernel_values(0, 1, np.linspace(0, 4, 10)),\n kernel_values)\n np.testing.assert_almost_equal(learner.get_kernel_norms(),\n kernels_norm)\n\n means_gaussians = np.array([0., 1.66666667, 3.33333333])\n std_gaussian = 0.5305164769729844\n np.testing.assert_array_almost_equal(learner.means_gaussians,\n means_gaussians)\n self.assertEqual(learner.std_gaussian, std_gaussian)\n\n learner.n_gaussians = learner.n_gaussians + 1\n means_gaussians = np.array([0., 1.25, 2.5, 3.75])\n std_gaussian = 0.3978873577297384\n np.testing.assert_array_almost_equal(learner.means_gaussians,\n means_gaussians)\n self.assertEqual(learner.std_gaussian, std_gaussian)\n\n def test_hawkes_sumgaussians_set_data(self):\n \"\"\"...Test set_data method of Hawkes SumGaussians\n \"\"\"\n events = [[\n np.array([1, 1.2, 3.4, 5.8, 10.3, 11, 13.4]),\n np.array([2, 5, 8.3, 9.10, 15, 18, 20, 33])\n ], [\n np.array([2, 3.2, 11.4, 12.8, 45]),\n np.array([2, 3, 8.8, 9, 15.3, 19])\n ]]\n\n learner = HawkesSumGaussians(1)\n learner._set_data(events)\n self.assertEqual(learner.n_nodes, 2)\n\n events = [\n np.array([1, 1.2, 3.4, 5.8, 10.3, 11, 13.4]),\n np.array([2, 5, 8.3, 9.10, 15, 18, 20, 33])\n ]\n\n learner = HawkesSumGaussians(1)\n learner._set_data(events)\n self.assertEqual(learner.n_nodes, 2)\n\n msg = \"All realizations should have 2 nodes, but realization 1 has \" \\\n \"1 nodes\"\n with self.assertRaisesRegex(RuntimeError, msg):\n events = [[\n np.array([1, 1.2, 3.4, 5.8, 10.3, 11, 13.4]),\n np.array([2, 5, 8.3, 9.10, 15, 18, 20, 33])\n ], [np.array([2, 3.2, 11.4, 12.8, 45])]]\n learner._set_data(events)\n\n def test_hawkes_sumgaussians_parameters(self):\n \"\"\"...Test that hawkes 
sumgaussians parameters are correctly linked\n \"\"\"\n learner = HawkesSumGaussians(1, n_gaussians=self.int_1)\n self.assertEqual(learner.n_gaussians, self.int_1)\n self.assertEqual(learner._learner.get_n_gaussians(), self.int_1)\n learner.n_gaussians = self.int_2\n self.assertEqual(learner.n_gaussians, self.int_2)\n self.assertEqual(learner._learner.get_n_gaussians(), self.int_2)\n\n learner = HawkesSumGaussians(max_mean_gaussian=self.float_1)\n self.assertEqual(learner.max_mean_gaussian, self.float_1)\n self.assertEqual(learner._learner.get_max_mean_gaussian(),\n self.float_1)\n learner.max_mean_gaussian = self.float_2\n self.assertEqual(learner.max_mean_gaussian, self.float_2)\n self.assertEqual(learner._learner.get_max_mean_gaussian(),\n self.float_2)\n\n learner = HawkesSumGaussians(1, step_size=self.float_1)\n self.assertEqual(learner.step_size, self.float_1)\n self.assertEqual(learner._learner.get_step_size(), self.float_1)\n learner.step_size = self.float_2\n self.assertEqual(learner.step_size, self.float_2)\n self.assertEqual(learner._learner.get_step_size(), self.float_2)\n\n def test_hawkes_sumgaussians_lasso_grouplasso_ratio_parameter(self):\n \"\"\"...Test that hawkes sumgaussians lasso_grouplasso_ratio parameter is \n correctly linked\n \"\"\"\n # First learner initialization\n C = 5e-3\n learner = HawkesSumGaussians(1, lasso_grouplasso_ratio=self.float_1,\n C=C)\n strength_lasso = self.float_1 / learner.C\n strength_grouplasso = (1. - self.float_1) / learner.C\n self.assertEqual(learner.strength_lasso, strength_lasso)\n self.assertEqual(learner.strength_grouplasso, strength_grouplasso)\n self.assertEqual(learner._learner.get_strength_lasso(), strength_lasso)\n self.assertEqual(learner._learner.get_strength_grouplasso(),\n strength_grouplasso)\n self.assertEqual(learner.C, C)\n\n # change lasso_grouplasso_ratio\n learner.lasso_grouplasso_ratio = self.float_2\n strength_lasso = self.float_2 / learner.C\n strength_grouplasso = (1. - self.float_2) / learner.C\n self.assertEqual(learner.strength_lasso, strength_lasso)\n self.assertEqual(learner.strength_grouplasso, strength_grouplasso)\n self.assertEqual(learner._learner.get_strength_lasso(), strength_lasso)\n self.assertEqual(learner._learner.get_strength_grouplasso(),\n strength_grouplasso)\n self.assertEqual(learner.lasso_grouplasso_ratio, self.float_2)\n self.assertEqual(learner.C, C)\n\n def test_hawkes_sumgaussians_C_parameter(self):\n \"\"\"...Test that hawkes sumgaussians C parameter is correctly linked\n \"\"\"\n # First leaner initialization\n lasso_grouplasso_ratio = 0.3\n learner = HawkesSumGaussians(\n 1, C=self.float_1, lasso_grouplasso_ratio=lasso_grouplasso_ratio)\n strength_lasso = learner.lasso_grouplasso_ratio / self.float_1\n strength_grouplasso = (1. - learner.lasso_grouplasso_ratio) / \\\n self.float_1\n self.assertEqual(learner.strength_lasso, strength_lasso)\n self.assertEqual(learner.strength_grouplasso, strength_grouplasso)\n self.assertEqual(learner._learner.get_strength_lasso(), strength_lasso)\n self.assertEqual(learner._learner.get_strength_grouplasso(),\n strength_grouplasso)\n self.assertEqual(learner.lasso_grouplasso_ratio,\n lasso_grouplasso_ratio)\n\n # Change C\n learner.C = self.float_2\n strength_lasso = learner.lasso_grouplasso_ratio / self.float_2\n strength_grouplasso = (1. 
- learner.lasso_grouplasso_ratio) / \\\n self.float_2\n self.assertEqual(learner.strength_lasso, strength_lasso)\n self.assertEqual(learner.strength_grouplasso, strength_grouplasso)\n self.assertEqual(learner._learner.get_strength_lasso(), strength_lasso)\n self.assertEqual(learner._learner.get_strength_grouplasso(),\n strength_grouplasso)\n self.assertAlmostEqual(learner.C, self.float_2)\n self.assertEqual(learner.lasso_grouplasso_ratio,\n lasso_grouplasso_ratio)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "210252", "language": "Python", "matching_score": 3.0114855766296387, "max_stars_count": 0, "path": "tick/hawkes/inference/tests/hawkes_sumgaussians_test.py" }, { "content": "# License: BSD 3 clause\n\nimport unittest\n\nimport numpy as np\n\nfrom tick.hawkes.inference import HawkesEM\nfrom tick.hawkes.model.tests.model_hawkes_test_utils import (\n hawkes_intensities, hawkes_log_likelihood)\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n np.random.seed(123269)\n self.n_nodes = 3\n self.n_realizations = 2\n\n self.events = [[\n np.cumsum(np.random.rand(4 + i)) for i in range(self.n_nodes)\n ] for _ in range(self.n_realizations)]\n\n def test_hawkes_em_attributes(self):\n \"\"\"...Test attributes of HawkesEM are correctly inherited\n \"\"\"\n em = HawkesEM(kernel_support=10)\n em.fit(self.events)\n self.assertEqual(em.n_nodes, self.n_nodes)\n self.assertEqual(em.n_realizations, self.n_realizations)\n\n def test_hawkes_em_fit(self):\n \"\"\"...Test fit method of HawkesEM\n \"\"\"\n kernel_support = 3\n kernel_size = 3\n baseline = np.zeros(self.n_nodes) + .2\n kernel = np.zeros((self.n_nodes, self.n_nodes, kernel_size)) + .4\n\n em = HawkesEM(kernel_support=kernel_support, kernel_size=kernel_size,\n n_threads=2, max_iter=10, verbose=False)\n em.fit(self.events, baseline_start=baseline, kernel_start=kernel)\n\n np.testing.assert_array_almost_equal(\n em.baseline, [1.2264, 0.2164, 1.6782], decimal=4)\n\n expected_kernel = [[[2.4569e-02, 2.5128e-06,\n 0.0000e+00], [1.8072e-02, 5.4332e-11, 0.0000e+00],\n [2.7286e-03, 4.0941e-08, 3.5705e-15]],\n [[8.0077e-01, 2.2624e-02,\n 6.7577e-10], [2.7503e-02, 3.1840e-05, 0.0000e+00],\n [1.4984e-01, 7.8428e-06, 2.8206e-12]],\n [[1.2163e-01, 1.0997e-02,\n 5.4724e-05], [4.7348e-02, 6.6093e-03, 5.5433e-12],\n [1.0662e-03, 5.3920e-05, 1.4930e-08]]]\n\n np.testing.assert_array_almost_equal(em.kernel, expected_kernel,\n decimal=4)\n\n em2 = HawkesEM(\n kernel_discretization=np.array([0., 1., 2., 3.]), n_threads=1,\n max_iter=10, verbose=False)\n em2.fit(self.events, baseline_start=baseline, kernel_start=kernel)\n np.testing.assert_array_almost_equal(em2.kernel, expected_kernel,\n decimal=4)\n\n np.testing.assert_array_almost_equal(\n em.get_kernel_values(1, 0, np.linspace(0, 3, 5)),\n [0.0000e+00, 8.0077e-01, 2.2624e-02, 6.7577e-10, 0.0000e+00],\n decimal=4)\n\n np.testing.assert_array_almost_equal(\n em.get_kernel_norms(),\n [[0.0246, 0.0181, 0.0027], [0.8234, 0.0275, 0.1499],\n [0.1327, 0.054, 0.0011]], decimal=3)\n\n np.testing.assert_array_equal(\n em.get_kernel_supports(),\n np.ones((self.n_nodes, self.n_nodes)) * 3)\n\n def test_hawkes_em_score(self):\n \"\"\"...Test score (ie. 
likelihood) function of Hawkes EM\n \"\"\"\n\n def approximate_likelihood(em, events, end_times, precision=2):\n n_total_jumps = sum(map(len, events))\n kernels_func = [[\n lambda t, i=i, j=j: em.get_kernel_values(i, j, np.array([t]))[0]\n for j in range(n_nodes)\n ] for i in range(n_nodes)]\n intensities = hawkes_intensities(events, em.baseline, kernels_func)\n return hawkes_log_likelihood(intensities, events, end_times,\n precision=precision) / n_total_jumps\n\n # We use only 2 nodes otherwise integral approximation might be very\n # slow\n n_nodes = 2\n kernel_support = 1\n kernel_size = 3\n baseline = np.random.rand(n_nodes) + .2\n kernel = np.random.rand(n_nodes, n_nodes, kernel_size) + .4\n\n train_events = \\\n [np.cumsum(np.random.rand(2 + i)) for i in range(n_nodes)]\n\n test_events = \\\n [2 + np.cumsum(np.random.rand(2 + i)) for i in range(n_nodes)]\n\n # Test for 2 kind of discretization\n train_kwargs = [{\n 'kernel_support': 1,\n 'kernel_size': 3\n }, {\n 'kernel_discretization': np.array([0., 1., 1.5, 3.])\n }]\n\n # Test with and without fitting\n fits = [True, False]\n\n for kwargs, fit in zip(train_kwargs, fits):\n em = HawkesEM(**kwargs)\n end_times = max(map(max, train_events)) + 0.2 * kernel_support\n\n msg = '^You must either call `fit` before `score` or provide events'\n with self.assertRaisesRegex(ValueError, msg):\n em.score()\n\n if fit:\n em.fit(train_events, end_times=end_times,\n baseline_start=baseline, kernel_start=kernel)\n else:\n em.baseline = baseline\n em.kernel = kernel\n\n # Score on em train data\n if fit:\n em_train_score = em.score()\n else:\n em_train_score = em.score(train_events, end_times=end_times)\n self.assertAlmostEqual(\n em_train_score,\n approximate_likelihood(em, train_events, end_times, 2),\n delta=1e-1, msg='Failed on train for {}'.format(kwargs))\n\n # Score on test data\n em_test_score = em.score(events=test_events)\n test_end_times = max(map(max, test_events))\n self.assertAlmostEqual(\n em_test_score,\n approximate_likelihood(em, test_events, test_end_times, 4),\n delta=1e-3, msg='Failed on test for {}'.format(kwargs))\n\n def test_hawkes_em_kernel_support(self):\n \"\"\"...Test that Hawkes em kernel support parameter is correctly\n synchronized\n \"\"\"\n kernel_support_1 = 4.4\n learner = HawkesEM(kernel_support_1)\n self.assertEqual(learner.kernel_support, kernel_support_1)\n self.assertEqual(learner._learner.get_kernel_support(),\n kernel_support_1)\n expected_kernel_discretization = [\n 0.0, 0.44, 0.88, 1.32, 1.76, 2.2, 2.64, 3.08, 3.52, 3.96, 4.4\n ]\n np.testing.assert_array_almost_equal(learner.kernel_discretization,\n expected_kernel_discretization)\n\n kernel_support_2 = 6.2\n learner.kernel_support = kernel_support_2\n self.assertEqual(learner.kernel_support, kernel_support_2)\n self.assertEqual(learner._learner.get_kernel_support(),\n kernel_support_2)\n\n expected_kernel_discretization = [\n 0.0, 0.62, 1.24, 1.86, 2.48, 3.1, 3.72, 4.34, 4.96, 5.58, 6.2\n ]\n np.testing.assert_array_almost_equal(learner.kernel_discretization,\n expected_kernel_discretization)\n\n def test_hawkes_em_kernel_size(self):\n \"\"\"...Test that Hawkes em kernel size parameter is correctly\n synchronized\n \"\"\"\n kernel_size_1 = 4\n learner = HawkesEM(4., kernel_size=kernel_size_1)\n self.assertEqual(learner.kernel_size, kernel_size_1)\n self.assertEqual(learner._learner.get_kernel_size(), kernel_size_1)\n expected_kernel_discretization = [0., 1., 2., 3., 4.]\n np.testing.assert_array_almost_equal(learner.kernel_discretization,\n 
expected_kernel_discretization)\n\n kernel_size_2 = 5\n learner.kernel_size = kernel_size_2\n self.assertEqual(learner.kernel_size, kernel_size_2)\n self.assertEqual(learner._learner.get_kernel_size(), kernel_size_2)\n expected_kernel_discretization = [0.0, 0.8, 1.6, 2.4, 3.2, 4]\n np.testing.assert_array_almost_equal(learner.kernel_discretization,\n expected_kernel_discretization)\n\n def test_hawkes_em_kernel_dt(self):\n \"\"\"...Test that Hawkes em kernel dt parameter is correctly\n synchronized\n \"\"\"\n kernel_support = 4\n kernel_size = 10\n learner = HawkesEM(kernel_support, kernel_size=kernel_size)\n self.assertEqual(learner.kernel_dt, 0.4)\n self.assertEqual(learner._learner.get_kernel_fixed_dt(), 0.4)\n\n kernel_dt_1 = 0.2\n learner.kernel_dt = kernel_dt_1\n self.assertEqual(learner.kernel_dt, kernel_dt_1)\n self.assertEqual(learner._learner.get_kernel_fixed_dt(), kernel_dt_1)\n self.assertEqual(learner.kernel_size, 20)\n expected_kernel_discretization = [\n 0., 0.2, 0.4, 0.6, 0.8, 1., 1.2, 1.4, 1.6, 1.8, 2., 2.2, 2.4, 2.6,\n 2.8, 3., 3.2, 3.4, 3.6, 3.8, 4.\n ]\n np.testing.assert_array_almost_equal(learner.kernel_discretization,\n expected_kernel_discretization)\n\n kernel_dt_1 = 0.199\n learner.kernel_dt = kernel_dt_1\n self.assertEqual(learner.kernel_dt, 0.19047619047619047)\n self.assertEqual(learner._learner.get_kernel_fixed_dt(),\n 0.19047619047619047)\n self.assertEqual(learner.kernel_size, 21)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "12680899", "language": "Python", "matching_score": 2.872570276260376, "max_stars_count": 0, "path": "tick/hawkes/inference/tests/hawkes_em_test.py" }, { "content": "# License: BSD 3 clause\n\nimport numpy as np\n\nfrom tick.hawkes.inference.base import LearnerHawkesNoParam\nfrom tick.hawkes.inference.build.hawkes_inference import (HawkesBasisKernels as\n _HawkesBasisKernels)\nfrom tick.solver.base.utils import relative_distance\n\n\nclass HawkesBasisKernels(LearnerHawkesNoParam):\n \"\"\"This class is used for performing non parametric estimation of\n multi-dimensional Hawkes processes based on expectation maximization\n algorithm and the hypothesis that kernels are linear\n combinations of some basis kernels.\n\n Hawkes processes are point processes defined by the intensity:\n\n .. math::\n \\\\forall i \\\\in [1 \\\\dots D], \\\\quad\n \\\\lambda_i = \\\\mu_i + \\\\sum_{j=1}^D \\\\int \\\\phi_{ij} dN_j\n\n where\n\n * :math:`D` is the number of nodes\n * :math:`\\mu_i` are the baseline intensities\n * :math:`\\phi_{ij}` are the kernels\n\n The basis kernel hypothesis translates to:\n\n .. math::\n \\\\phi_{ij}(t) = \\\\sum_{u}^U a_{ij}^u g^u(t)\n\n where\n\n * :math:`U` is the number of basis kernels.\n * :math:`g^u` is a basis kernel\n * :math:`a_{ij}^u` is the amplitude of basis kernel :math:`u` in kernel\n :math:`\\phi_{ij}`\n\n Finally we also suppose that basis kernels :math:`g^u` are piecewise\n constant on a given support and number of intervals.\n\n Parameters\n ----------\n kernel_support : `float`\n The support size common to all the kernels.\n\n n_basis : `int`, default=`None`\n Number of non parametric basis kernels to be used.\n If `None` or 0, it will be set to `n_nodes`\n\n kernel_size : `int`, default=10\n Number of discretizations of the kernel\n\n C : `float`, default=1e-1\n The penalization parameter. 
It penalizes both the amplitudes\n squared values and the basis kernels smoothness through the\n integral of their squared derivative.\n\n tol : `float`, default=1e-5\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it). If not reached the solver does ``max_iter``\n iterations\n\n max_iter : `int`, default=100\n Maximum number of iterations of the solver\n\n verbose : `bool`, default=False\n If `True`, we verbose things, otherwise the solver does not\n print anything (but records information in history anyway)\n\n print_every : `int`, default=10\n Print history information when ``n_iter`` (iteration number) is\n a multiple of ``print_every``\n\n record_every : `int`, default=10\n Record history information when ``n_iter`` (iteration number) is\n a multiple of ``record_every``\n\n n_threads : `int`, default=1\n Number of threads used for parallel computation.\n\n * if `int <= 0`: the number of physical cores available on the CPU\n * otherwise the desired number of threads\n\n Other Parameters\n ----------------\n ode_max_iter : `int`, default=100\n Maximum number of loop for inner ODE (ordinary differential equation)\n algorithm.\n\n ode_tol : `float`, default=1e-5\n Tolerance of loop for inner inner ODE (ordinary differential equation)\n algorithm.\n\n Attributes\n ----------\n n_nodes : `int`\n Number of nodes of the estimated Hawkes process\n\n n_realizations : `int`\n Number of given realizations`\n\n baseline : `np.array` shape=(n_nodes)\n The estimated baseline\n\n amplitudes : `np.array` shape=(n_nodes, n_nodes, n_basis)\n Amplitudes of all basis kernels for all kernels.\n\n basis_kernels : `np.array` shape=(n_basis, kernel_size)\n Estimated basis kernels\n\n kernel_dt : `float`\n Kernel discretization step. It is equal to\n `kernel_support` / `kernel_size`\n\n kernel_discretization : `np.ndarray`, shape=(kernel_size + 1, )\n Kernel discretizations points, denotes the interval on which basis\n kernels are piecewise constant.\n\n References\n ----------\n <NAME>., <NAME>. and <NAME>., 2013, June. Learning Triggering Kernels for\n Multi-dimensional Hawkes Processes. In `ICML (3) (pp. 1301-1309)`_.\n\n Some rewriting notes for implementing the algorithm can be found in the\n doc/tex directory.\n\n .. _ICML (3) (pp. 1301-1309): http://jmlr.org/proceedings/papers/v28/zhou13.html\n \"\"\"\n\n _attrinfos = {\n 'baseline': {\n 'writable': False\n },\n 'amplitudes': {\n 'writable': False\n },\n 'basis_kernels': {\n 'writable': False\n },\n '_amplitudes_2d': {\n 'writable': False\n },\n }\n\n def __init__(self, kernel_support, n_basis=None, kernel_size=10, tol=1e-5,\n C=1e-1, max_iter=100, verbose=False, print_every=10,\n record_every=10, n_threads=1, ode_max_iter=100, ode_tol=1e-5):\n\n LearnerHawkesNoParam.__init__(self, max_iter=max_iter, verbose=verbose,\n tol=tol, print_every=print_every,\n record_every=record_every,\n n_threads=n_threads)\n\n self.ode_max_iter = ode_max_iter\n self.ode_tol = ode_tol\n\n alpha = 1. 
/ C\n if n_basis is None:\n n_basis = 0\n\n self._learner = _HawkesBasisKernels(kernel_support, kernel_size,\n n_basis, alpha, n_threads)\n self._amplitudes_2d = None\n\n self.history.print_order = [\n \"n_iter\", \"rel_baseline\", \"rel_amplitudes\", \"rel_basis_kernels\"\n ]\n\n def fit(self, events, end_times=None, baseline_start=None,\n amplitudes_start=None, basis_kernels_start=None):\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n events : `list` of `list` of `np.ndarray`\n List of Hawkes processes realizations.\n Each realization of the Hawkes process is a list of n_node for\n each component of the Hawkes. Namely `events[i][j]` contains a\n one-dimensional `numpy.array` of the events' timestamps of\n component j of realization i.\n If only one realization is given, it will be wrapped into a list\n\n end_times : `np.ndarray` or `float`, default = None\n List of end time of all hawkes processes that will be given to the\n model. If None, it will be set to each realization's latest time.\n If only one realization is provided, then a float can be given.\n\n baseline_start : `None` or `np.ndarray`, shape=(n_nodes)\n Used to force start values for baseline attribute\n If `None` starts with uniform 1 values\n\n amplitudes_start : `None` or `np.ndarray`, shape=(n_nodes,n_nodes,D)\n Used to force start values for amplitude parameter\n If `None` starts with random values uniformly sampled between\n 0.5 and 0.9\n\n basis_kernels_start : `None` or `np.darray`, shape=(D,kernel_size)\n Used to force start values for the basis kernels\n If `None` starts with random values uniformly sampled between\n 0 and 0.1\n \"\"\"\n LearnerHawkesNoParam.fit(self, events, end_times=end_times)\n self.solve(baseline_start=baseline_start,\n amplitudes_start=amplitudes_start,\n basis_kernels_start=basis_kernels_start)\n return self\n\n def _solve(self, baseline_start=None, amplitudes_start=None,\n basis_kernels_start=None):\n \"\"\"Perform nonparametric estimation\n\n Parameters\n ----------\n baseline_start : `None` or `np.ndarray`, shape=(n_nodes)\n Used to force start values for baseline attribute\n If `None` starts with uniform 1 values\n\n amplitudes_start : `None` or `np.ndarray', shape=(n_nodes,n_nodes,D)\n Used to force start values for amplitude parameter\n If `None` starts with random values uniformly sampled between\n 0.5 and 0.9\n\n basis_kernels_start : `None` or `p.andarray, shape=(D,kernel_size)\n Used to force start values for the basis kernels\n If `None` starts with random values uniformly sampled between\n 0 and 0.1\n \"\"\"\n if baseline_start is None:\n self._set(\"baseline\", np.zeros(self.n_nodes) + 1)\n else:\n self._set(\"baseline\", baseline_start.copy())\n\n if amplitudes_start is None:\n self._set(\n \"amplitudes\",\n np.random.uniform(\n 0.5, 0.9, size=(self.n_nodes, self.n_nodes, self.n_basis)))\n else:\n self._set(\"amplitudes\", amplitudes_start.copy())\n\n if basis_kernels_start is None:\n self._set(\n \"basis_kernels\",\n 0.1 * np.random.uniform(size=(self.n_basis, self.kernel_size)))\n else:\n self._set(\"basis_kernels\", basis_kernels_start.copy())\n\n self._set(\n '_amplitudes_2d',\n self.amplitudes.reshape((self.n_nodes,\n self.n_nodes * self.n_basis)))\n\n for i in range(self.max_iter + 1):\n if self._should_record_iter(i):\n prev_baseline = self.baseline.copy()\n prev_amplitudes = self.amplitudes.copy()\n prev_basis_kernels = self.basis_kernels.copy()\n\n rel_ode = self._learner.solve(self.baseline, self.basis_kernels,\n 
self._amplitudes_2d,\n self.ode_max_iter, self.ode_tol)\n\n if self._should_record_iter(i):\n rel_baseline = relative_distance(self.baseline, prev_baseline)\n rel_amplitudes = relative_distance(self.amplitudes,\n prev_amplitudes)\n rel_basis_kernels = relative_distance(self.basis_kernels,\n prev_basis_kernels)\n\n converged = max(rel_baseline, rel_amplitudes,\n rel_basis_kernels) <= self.tol\n force_print = (i == self.max_iter) or converged\n\n self._handle_history(i, rel_baseline=rel_baseline,\n rel_amplitudes=rel_amplitudes,\n rel_basis_kernels=rel_basis_kernels,\n rel_ode=rel_ode, force=force_print)\n\n if converged:\n break\n\n def get_kernel_supports(self):\n return np.zeros((self.n_nodes, self.n_nodes)) + self.kernel_support\n\n def get_kernel_values(self, i, j, abscissa_array):\n \"\"\"Computes value of the specified kernel on given time values. This\n makes our learner compliant with `tick.plot.plot_hawkes_kernels` API\n\n Parameters\n ----------\n i : `int`\n First index of the kernel\n\n j : `int`\n Second index of the kernel\n\n abscissa_array : `np.ndarray`, shape=(n_points, )\n 1d array containing all the times at which this kernel will\n computes it value\n\n Returns\n -------\n output : `np.ndarray`, shape=(n_points, )\n 1d array containing the values of the specified kernels at the\n given times.\n \"\"\"\n indices_in_support = (abscissa_array > 0) & \\\n (abscissa_array < self.kernel_support)\n index = np.searchsorted(self.kernel_discretization,\n abscissa_array[indices_in_support]) - 1\n\n kernel_values = np.empty_like(abscissa_array)\n kernel_values[np.invert(indices_in_support)] = 0\n\n kernels_ij_sum = np.zeros(self.kernel_size)\n for d in range(self.n_basis):\n kernels_ij_sum += self.amplitudes[i, j, d] * self.basis_kernels[d]\n\n kernel_values[indices_in_support] = kernels_ij_sum[index]\n return kernel_values\n\n def objective(self, coeffs, loss: float = None):\n raise NotImplementedError()\n\n @property\n def kernel_support(self):\n return self._learner.get_kernel_support()\n\n @kernel_support.setter\n def kernel_support(self, val):\n self._learner.set_kernel_support(val)\n\n @property\n def kernel_size(self):\n return self._learner.get_kernel_size()\n\n @kernel_size.setter\n def kernel_size(self, val):\n self._learner.set_kernel_size(val)\n\n @property\n def n_basis(self):\n return self._learner.get_n_basis()\n\n @n_basis.setter\n def n_basis(self, val):\n if val is None:\n val = 0\n self._learner.set_n_basis(val)\n\n @property\n def C(self):\n return 1. / self._learner.get_alpha()\n\n @C.setter\n def C(self, val):\n self._learner.set_alpha(1. 
/ val)\n\n @property\n def kernel_discretization(self):\n return self._learner.get_kernel_discretization()\n\n @property\n def kernel_dt(self):\n return self._learner.get_kernel_dt()\n\n @kernel_dt.setter\n def kernel_dt(self, val):\n self._learner.set_kernel_dt(val)\n", "id": "3529270", "language": "Python", "matching_score": 3.998311758041382, "max_stars_count": 0, "path": "tick/hawkes/inference/hawkes_basis_kernels.py" }, { "content": "# License: BSD 3 clause\n\nimport unittest\n\nimport numpy as np\n\nfrom tick.hawkes.inference import HawkesBasisKernels\n\n\nclass Test(unittest.TestCase):\n def test_em_basis_kernels(self):\n \"\"\"...Test\n \"\"\"\n ticks = [[\n np.array([1, 1.2, 3.4, 5.8, 10.3, 11, 13.4]),\n np.array([2, 5, 8.3, 9.10, 15, 18, 20, 33])\n ], [\n np.array([2, 3.2, 11.4, 12.8, 45]),\n np.array([2, 3, 8.8, 9, 15.3, 19])\n ]]\n\n n_basis = 2\n n_nodes = len(ticks[0])\n\n kernel_support = 4\n kernel_dt = .1\n kernel_size = int(np.ceil(kernel_support / kernel_dt))\n\n C = 5e-2\n\n mu = np.zeros(n_nodes) + .2\n auvd = np.zeros((n_nodes, n_nodes, n_basis)) + .4\n auvd[1, :, :] += .2\n gdm = np.zeros((n_basis, kernel_size))\n for i in range(int(kernel_size)):\n gdm[0, i] = 0.1 * 0.29 * np.exp(-0.29 * i * kernel_dt)\n for i in range(int(kernel_size)):\n gdm[1, i] = 0.8 * 1 * np.exp(-1 * i * kernel_dt)\n\n em = HawkesBasisKernels(kernel_support=kernel_support,\n kernel_size=kernel_size, n_basis=n_basis, C=C,\n n_threads=2, max_iter=4, ode_max_iter=100)\n\n em.fit(ticks, baseline_start=mu, amplitudes_start=auvd,\n basis_kernels_start=gdm)\n\n np.testing.assert_array_almost_equal(em.baseline, [0.153022, 0.179124],\n decimal=4)\n\n np.testing.assert_array_almost_equal(\n em.amplitudes,\n [[[1.21125e-05, 1.744123e-03], [2.267314e-05, 3.287014e-03]], [[\n 1.48773260e-05, 2.06898364e-03\n ], [6.60131078e-06, 7.28397551e-04]]], decimal=4)\n\n basis_kernels = np.array([[\n 0.0001699, 0.00031211, 0.00043944, 0.0005521, 0.00066688,\n 0.00078411, 0.0009040, 0.00101736, 0.001112, 0.00119935,\n 0.00129047, 0.00135828, 0.0014302, 0.00146572, 0.00149012,\n 0.00150987, 0.00152401, 0.00153267, 0.0015464, 0.00156525,\n 0.00157363, 0.00156589, 0.00156298, 0.00155548, 0.0015339,\n 0.00149196, 0.0014178, 0.0013323, 0.00125075, 0.00117292,\n 0.0010985, 0.00100652, 0.00091741, 0.00082029, 0.00071975,\n 0.00062118, 0.0005242, 0.0004228, 0.00029559, 0.00015301\n ], [\n 0.0036240, 0.0066125, 0.00929557, 0.01163643, 0.01404666,\n 0.01653209, 0.0190978, 0.0215138, 0.02351321, 0.02535836,\n 0.02730293, 0.02874743, 0.0302991, 0.03096259, 0.03135936,\n 0.0316486, 0.03182045, 0.03187917, 0.0320631, 0.03237183,\n 0.03243794, 0.03216082, 0.03200454, 0.0317527, 0.0311904,\n 0.03017171, 0.02846015, 0.02650652, 0.02465883, 0.02291335,\n 0.0212655, 0.01931795, 0.01745896, 0.01547088, 0.01346942,\n 0.01154156, 0.0096817, 0.00779345, 0.00543011, 0.00279355\n ]])\n\n np.testing.assert_array_almost_equal(em.basis_kernels, basis_kernels,\n decimal=3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "9238108", "language": "Python", "matching_score": 0.686580240726471, "max_stars_count": 0, "path": "tick/hawkes/inference/tests/hawkes_basis_kernels_test.py" }, { "content": "\"\"\"\n============================\nFit Hawkes power law kernels\n============================\n\nThis Hawkes learner based on conditional laws\n(`tick.inference.HawkesConditionalLaw`) is able to fit Hawkes power law\nkernels commonly found in finance applications.\n\nIt has been introduced in the following paper:\n\n<NAME>., & <NAME>. 
(2014).\nSecond order statistics characterization of Hawkes processes and\nnon-parametric estimation. `arXiv preprint arXiv:1401.0903`_.\n\n.. _arXiv preprint arXiv:1401.0903: https://arxiv.org/pdf/1401.0903.pdf\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tick.hawkes import SimuHawkes, HawkesKernelPowerLaw, HawkesConditionalLaw\nfrom tick.plot import plot_hawkes_kernels\n\nmultiplier = np.array([0.012, 0.008, 0.004, 0.005])\ncutoff = 0.0005\nexponent = 1.3\n\nsupport = 2000\n\nhawkes = SimuHawkes(\n kernels=[[HawkesKernelPowerLaw(multiplier[0], cutoff, exponent, support),\n HawkesKernelPowerLaw(multiplier[1], cutoff, exponent, support)],\n [HawkesKernelPowerLaw(multiplier[2], cutoff, exponent, support),\n HawkesKernelPowerLaw(multiplier[3], cutoff, exponent, support)]],\n baseline=[0.05, 0.05], seed=382, verbose=False)\nhawkes.end_time = 50000\nhawkes.simulate()\n\ne = HawkesConditionalLaw(claw_method=\"log\",\n delta_lag=0.1, min_lag=0.002, max_lag=100,\n quad_method=\"log\",\n n_quad=50, min_support=0.002, max_support=support,\n n_threads=-1)\n\ne.incremental_fit(hawkes.timestamps)\ne.compute()\n\nfig = plot_hawkes_kernels(e, log_scale=True, hawkes=hawkes, show=False,\n min_support=0.002, support=100)\nfor ax in fig.axes:\n ax.legend(loc=3)\n ax.set_ylim([1e-7, 1e2])\n\nplt.show()\n", "id": "6144949", "language": "Python", "matching_score": 2.7309489250183105, "max_stars_count": 0, "path": "examples/plot_hawkes_conditional_law.py" }, { "content": "\"\"\"\n=========================\nFit exotic Hawkes kernels\n=========================\n\nThis learner assumes Hawkes kernels are linear combinations of a given number\nof kernel basis.\n\nHere it is run on a an exotic data set generated with mixtures of two cosinus\nfunctions. We observe that we can correctly retrieve the kernels and the two\ncosinus basis functions which have generated the kernels. This experiment\nis run on toy datasets in the `original paper`_.\n\nIt could have been more precise if end_time or kernel_size was increased.\n\n.. 
_original paper: http://jmlr.org/proceedings/papers/v28/zhou13.html\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tick.plot import plot_basis_kernels, plot_hawkes_kernels\nfrom tick.hawkes import SimuHawkes, HawkesKernelTimeFunc, HawkesBasisKernels\n\nend_time = 1e9\nC = 1e-3\nkernel_size = 40\nmax_iter = 100\n\n\n# We first simulate a similar Hawkes process\ndef g1(t):\n return np.cos(np.pi * t / 10) + 1.1\n\n\ndef g2(t):\n return np.cos(np.pi * (t / 10 + 1)) + 1.1\n\n\nt_values = np.linspace(0, 20, 1000)\nu_values = [(0.007061, 0.001711),\n (0.005445, 0.003645),\n (0.003645, 0.005445),\n (0.001790, 0.007390)]\n\nhawkes = SimuHawkes(baseline=[1e-5, 1e-5], seed=1093, verbose=False)\nfor i, j in itertools.product(range(2), repeat=2):\n u1, u2 = u_values[2 * i + j]\n y_values = g1(t_values) * u1 + g2(t_values) * u2\n kernel = HawkesKernelTimeFunc(t_values=t_values, y_values=y_values)\n hawkes.set_kernel(i, j, kernel)\n\nhawkes.end_time = end_time\nhawkes.simulate()\nticks = hawkes.timestamps\n\n# And then perform estimation with two basis kernels\nkernel_support = 20\nn_basis = 2\n\nem = HawkesBasisKernels(kernel_support, n_basis=n_basis,\n kernel_size=kernel_size, C=C,\n n_threads=4, max_iter=max_iter,\n verbose=False, ode_tol=1e-5)\nem.fit(ticks)\n\nfig = plot_hawkes_kernels(em, hawkes=hawkes, support=19.9, show=False)\nfor ax in fig.axes:\n ax.set_ylim([0, 0.025])\n\nfig = plot_basis_kernels(em, basis_kernels=[g2, g1], show=False)\nfor ax in fig.axes:\n ax.set_ylim([0, 0.5])\n\nplt.show()\n", "id": "12704084", "language": "Python", "matching_score": 3.3677916526794434, "max_stars_count": 0, "path": "examples/plot_hawkes_basis_kernels.py" }, { "content": "\"\"\"\n=====================================\nHawkes simulation with exotic kernels\n=====================================\n\nSimulation of Hawkes processes with usage of custom kernels\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tick.base import TimeFunction\nfrom tick.hawkes import SimuHawkes, HawkesKernelExp, HawkesKernelTimeFunc\nfrom tick.plot import plot_point_process\n\nt_values = np.array([0, 1, 1.5], dtype=float)\ny_values = np.array([0, .2, 0], dtype=float)\ntf1 = TimeFunction([t_values, y_values],\n inter_mode=TimeFunction.InterConstRight, dt=0.1)\nkernel_1 = HawkesKernelTimeFunc(tf1)\n\nt_values = np.array([0, .1, 2], dtype=float)\ny_values = np.array([0, .4, -0.2], dtype=float)\ntf2 = TimeFunction([t_values, y_values],\n inter_mode=TimeFunction.InterLinear, dt=0.1)\nkernel_2 = HawkesKernelTimeFunc(tf2)\n\nhawkes = SimuHawkes(kernels=[[kernel_1, kernel_1],\n [HawkesKernelExp(.07, 4), kernel_2]],\n baseline=[1.5, 1.5], verbose=False, seed=23983)\n\nrun_time = 40\ndt = 0.01\nhawkes.track_intensity(dt)\nhawkes.end_time = run_time\nhawkes.simulate()\n\nfig, ax = plt.subplots(hawkes.n_nodes, 1, figsize=(14, 8))\nplot_point_process(hawkes, t_max=20, ax=ax)\n\nplt.show()\n", "id": "12718660", "language": "Python", "matching_score": 3.41859769821167, "max_stars_count": 0, "path": "examples/plot_hawkes_time_func_simu.py" }, { "content": "\"\"\"\n========================================\nInhomogeneous Poisson process simulation\n========================================\n\nThis example show how to simulate any inhomogeneous Poisson process. 
Its \nintensity is modeled through `tick.base.TimeFunction`\n\"\"\"\n\nimport numpy as np\nfrom tick.base import TimeFunction\n\nfrom tick.plot import plot_point_process\nfrom tick.hawkes import SimuInhomogeneousPoisson\n\nrun_time = 30\n\nT = np.arange((run_time * 0.9) * 5, dtype=float) / 5\nY = np.maximum(15 * np.sin(T) * (np.divide(np.ones_like(T),\n np.sqrt(T + 1) + 0.1 * T)), 0.001)\n\ntf = TimeFunction((T, Y), dt=0.01)\n\n# We define a 1 dimensional inhomogeneous Poisson process with the\n# intensity function seen above\nin_poi = SimuInhomogeneousPoisson([tf], end_time=run_time, verbose=False)\n\n# We activate intensity tracking and launch simulation\nin_poi.track_intensity(0.1)\nin_poi.simulate()\n\n# We plot the resulting inhomogeneous Poisson process with its\n# intensity and its ticks over time\nplot_point_process(in_poi)\n", "id": "8722000", "language": "Python", "matching_score": 0.30882853269577026, "max_stars_count": 0, "path": "examples/plot_poisson_inhomogeneous.py" }, { "content": "# License: BSD 3 clause\n\nfrom .base import SolverFirstOrderSto\n\nfrom .build.solver import SDCADouble as _SDCADouble\nfrom .build.solver import SDCAFloat as _SDCAFloat\n\nimport numpy as np\n\ndtype_class_mapper = {\n np.dtype('float32'): _SDCAFloat,\n np.dtype('float64'): _SDCADouble\n}\n\n\nclass SDCA(SolverFirstOrderSto):\n \"\"\"Stochastic Dual Coordinate Ascent\n\n For the minimization of objectives of the form\n\n .. math::\n \\\\frac 1n \\\\sum_{i=1}^n f_i(w^\\\\top x_i) + g(w),\n\n where the functions :math:`f_i` have smooth gradients and :math:`g` is\n prox-capable. This solver actually requires more than that, since it is\n working in a Fenchel dual formulation of the primal problem given above.\n First, it requires that some ridge penalization is used, hence the mandatory\n parameter ``l_l2sq`` below: SDCA will actually minimize the objective\n\n .. math::\n \\\\frac 1n \\\\sum_{i=1}^n f_i(x_i^\\\\top w) + g(w) + \\\\frac{\\\\lambda}{2}\n \\\\| w \\\\|_2^2,\n\n where :math:`\\lambda` is tuned with the ``l_l2sq`` (see below). Now, putting\n :math:`h(w) = g(w) + \\lambda \\|w\\|_2^2 / 2`, SDCA maximize\n the Fenchel dual problem\n\n .. math::\n D(\\\\alpha) = \\\\frac 1n \\\\sum_{i=1}^n \\\\Bigg[ - f_i^*(-\\\\alpha_i) -\n \\lambda h^*\\\\Big( \\\\frac{1}{\\\\lambda n} \\\\sum_{i=1}^n \\\\alpha_i x_i)\n \\\\Big) \\\\Bigg],\n\n where :math:`f_i^*` and :math:`h^*` and the Fenchel duals of :math:`f_i`\n and :math:`h` respectively.\n Function :math:`f = \\\\frac 1n \\\\sum_{i=1}^n f_i` corresponds\n to the ``model.loss`` method of the model (passed with ``set_model`` to the\n solver) and :math:`g` corresponds to the ``prox.value`` method of the\n prox (passed with the ``set_prox`` method). One iteration of\n :class:`SDCA <tick.solver.SDCA>` corresponds to the\n following iteration applied ``epoch_size`` times:\n\n .. math::\n \\\\begin{align*}\n \\\\delta_i &\\\\gets \\\\arg\\\\min_{\\\\delta} \\\\Big[ \\\\; f_i^*(-\\\\alpha_i -\n \\\\delta) + w^\\\\top x_i \\\\delta + \\\\frac{1}{2 \\\\lambda n} \\\\| x_i\\\\|_2^2\n \\\\delta^2 \\\\Big] \\\\\\\\\n \\\\alpha_i &\\\\gets \\\\alpha_i + \\\\delta_i \\\\\\\\\n v &\\\\gets v + \\\\frac{1}{\\\\lambda n} \\\\delta_i x_i \\\\\\\\\n w &\\\\gets \\\\nabla g^*(v)\n \\\\end{align*}\n\n where :math:`i` is sampled at random (strategy depends on ``rand_type``) at\n each iteration. 
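A minimal sketch of how this iteration is typically wired up, reusing only calls that appear elsewhere in this file collection (the penalty level, problem sizes and seed are arbitrary choices for the sketch):

from tick.linear_model import ModelLogReg, SimuLogReg
from tick.prox import ProxZero
from tick.simulation import weights_sparse_gauss
from tick.solver import SDCA

# Simulate a small logistic regression problem
weights0 = weights_sparse_gauss(20, nnz=5)
X, y = SimuLogReg(weights0, None, n_samples=2000,
                  verbose=False, seed=123).simulate()

model = ModelLogReg(fit_intercept=False).fit(X, y)

# The ridge term is handled internally through l_l2sq, so ProxZero is enough here
sdca = SDCA(l_l2sq=1e-3, max_iter=50, tol=1e-10, verbose=False)
sdca.set_model(model).set_prox(ProxZero())
sdca.solve()

w = sdca.solution           # primal minimizer
alpha = sdca.dual_solution  # dual vector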
The ridge regularization :math:`\\\\lambda` can be tuned with\n ``l_l2sq``, the seed of the random number generator for generation\n of samples :math:`i` can be seeded with ``seed``. The iterations stop\n whenever tolerance ``tol`` is achieved, or after ``max_iter`` epochs\n (namely ``max_iter`` :math:`\\\\times` ``epoch_size`` iterates).\n The obtained solution :math:`w` is returned by the ``solve`` method, and is\n also stored in the ``solution`` attribute of the solver. The dual solution\n :math:`\\\\alpha` is stored in the ``dual_solution`` attribute.\n\n Internally, :class:`SDCA <tick.solver.SDCA>` has dedicated code when\n the model is a generalized linear model with sparse features, and a\n separable proximal operator: in this case, each iteration works only in the\n set of non-zero features, leading to much faster iterates.\n\n Parameters\n ----------\n l_l2sq : `float`\n Level of L2 penalization. L2 penalization is mandatory for SDCA.\n Convergence properties of this solver are deeply connected to this\n parameter, which should be understood as the \"step\" used by the\n algorithm.\n\n tol : `float`, default=1e-10\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it)\n\n max_iter : `int`, default=10\n Maximum number of iterations of the solver, namely maximum number of\n epochs (by default full pass over the data, unless ``epoch_size`` has\n been modified from default)\n\n verbose : `bool`, default=True\n If `True`, solver verboses history, otherwise nothing is displayed,\n but history is recorded anyway\n\n seed : `int`, default=-1\n The seed of the random sampling. If it is negative then a random seed\n (different at each run) will be chosen.\n\n epoch_size : `int`, default given by model\n Epoch size, namely how many iterations are made before updating the\n variance reducing term. By default, this is automatically tuned using\n information from the model object passed through ``set_model``.\n\n rand_type : {'unif', 'perm'}, default='unif'\n How samples are randomly selected from the data\n\n * if ``'unif'`` samples are uniformly drawn among all possibilities\n * if ``'perm'`` a random permutation of all possibilities is\n generated and samples are sequentially taken from it. Once all of\n them have been taken, a new random permutation is generated\n\n print_every : `int`, default=1\n Print history information every time the iteration number is a\n multiple of ``print_every``. Used only is ``verbose`` is True\n\n record_every : `int`, default=1\n Save history information every time the iteration number is a\n multiple of ``record_every``\n\n Attributes\n ----------\n model : `Model`\n The model used by the solver, passed with the ``set_model`` method\n\n prox : `Prox`\n Proximal operator used by the solver, passed with the ``set_prox``\n method\n\n solution : `numpy.array`, shape=(n_coeffs,)\n Minimizer found by the solver\n\n dual_solution : `numpy.array`\n Dual vector corresponding to the primal solution obtained by the solver\n\n history : `dict`-like\n A dict-type of object that contains history of the solver along\n iterations. It should be accessed using the ``get_history`` method\n\n time_start : `str`\n Start date of the call to ``solve()``\n\n time_elapsed : `float`\n Duration of the call to ``solve()``, in seconds\n\n time_end : `str`\n End date of the call to ``solve()``\n\n dtype : `{'float64', 'float32'}`, default='float64'\n Type of the arrays used. 
This value is set from model and prox dtypes.\n\n References\n ----------\n * <NAME> and <NAME>, Accelerated proximal stochastic dual\n coordinate ascent for regularized loss minimization, *ICML 2014*\n \"\"\"\n\n _attrinfos = {'l_l2sq': {'cpp_setter': 'set_l_l2sq'}}\n\n def __init__(self, l_l2sq: float, epoch_size: int = None,\n rand_type: str = 'unif', tol: float = 1e-10,\n max_iter: int = 10, verbose: bool = True,\n print_every: int = 1, record_every: int = 1, seed: int = -1):\n\n self.l_l2sq = l_l2sq\n SolverFirstOrderSto.__init__(\n self, step=0, epoch_size=epoch_size, rand_type=rand_type, tol=tol,\n max_iter=max_iter, verbose=verbose, print_every=print_every,\n record_every=record_every, seed=seed)\n\n def _set_cpp_solver(self, dtype_or_object_with_dtype):\n self.dtype = self._extract_dtype(dtype_or_object_with_dtype)\n solver_class = self._get_typed_class(dtype_or_object_with_dtype,\n dtype_class_mapper)\n\n epoch_size = self.epoch_size\n if epoch_size is None:\n epoch_size = 0\n\n self._set(\n '_solver',\n solver_class(self.l_l2sq, epoch_size, self.tol, self._rand_type,\n self.seed))\n\n def objective(self, coeffs, loss: float = None):\n \"\"\"Compute the objective minimized by the solver at ``coeffs``\n\n Parameters\n ----------\n coeffs : `numpy.ndarray`, shape=(n_coeffs,)\n The objective is computed at this point\n\n loss : `float`, default=`None`\n Gives the value of the loss if already known (allows to\n avoid its computation in some cases)\n\n Returns\n -------\n output : `float`\n Value of the objective at given ``coeffs``\n \"\"\"\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(coeffs) ** 2\n return SolverFirstOrderSto.objective(self, coeffs,\n loss) + prox_l2_value\n\n def dual_objective(self, dual_coeffs):\n \"\"\"Compute the dual objective at ``dual_coeffs``\n\n Parameters\n ----------\n dual_coeffs : `numpy.ndarray`, shape=(n_samples,)\n The dual objective objective is computed at this point\n\n Returns\n -------\n output : `float`\n Value of the dual objective at given ``dual_coeffs``\n \"\"\"\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value\n\n def _set_rand_max(self, model):\n try:\n # Some model, like Poisreg with linear link, have a special\n # rand_max for SDCA\n model_rand_max = model._sdca_rand_max\n except (AttributeError, NotImplementedError):\n model_rand_max = model._rand_max\n\n self._set(\"_rand_max\", model_rand_max)\n\n @property\n def dual_solution(self):\n return self._solver.get_dual_vector()\n", "id": "8377196", "language": "Python", "matching_score": 7.303974151611328, "max_stars_count": 0, "path": "tick/solver/sdca.py" }, { "content": "# License: BSD 3 clause\n\nimport numpy as np\n\nfrom tick.base_model import ModelGeneralizedLinear\nfrom .base import SolverFirstOrderSto, SolverSto\n\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Last\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Average\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Random\n\nfrom tick.solver.build.solver import SAGADouble as _SAGADouble\nfrom tick.solver.build.solver import SAGAFloat as _SAGAFloat\n\n__author__ = \"<NAME>\"\n\nvariance_reduction_methods_mapper = {\n 'last': SAGA_VarianceReductionMethod_Last,\n 'avg': SAGA_VarianceReductionMethod_Average,\n 'rand': SAGA_VarianceReductionMethod_Random\n}\n\ndtype_class_mapper = {\n np.dtype('float32'): _SAGAFloat,\n 
np.dtype('float64'): _SAGADouble\n}\n\n\nclass SAGA(SolverFirstOrderSto):\n \"\"\"Stochastic Average Gradient solver, for the minimization of objectives\n of the form\n\n .. math::\n \\\\frac 1n \\\\sum_{i=1}^n f_i(w) + g(w),\n\n where the functions :math:`f_i` have smooth gradients and :math:`g` is\n prox-capable. Note that :class:`SAGA <tick.solver.SAGA>` works only\n with linear models, see :ref:`linear_model` and :ref:`robust`, where all\n linear models are listed.\n Function :math:`f = \\\\frac 1n \\\\sum_{i=1}^n f_i` corresponds\n to the ``model.loss`` method of the model (passed with ``set_model`` to the\n solver) and :math:`g` corresponds to the ``prox.value`` method of the\n prox (passed with the ``set_prox`` method).\n One iteration of :class:`SAGA <tick.solver.SAGA>` corresponds to the\n following iteration applied ``epoch_size`` times:\n\n .. math::\n \\\\begin{align*}\n w &\\\\gets \\\\mathrm{prox}_{\\\\eta g} \\\\Big(w - \\\\eta \\\\Big(\\\\nabla f_i(w)\n - \\\\delta_i + \\\\frac 1n \\\\sum_{i'=1}^n \\\\delta_{i'} \\\\Big) \\\\Big) \\\\\\\\\n \\\\delta_i &\\\\gets \\\\nabla f_i(w)\n \\\\end{align*}\n\n where :math:`i` is sampled at random (strategy depends on ``rand_type``) at\n each iteration, and where :math:`\\\\bar w` and :math:`\\\\nabla f(\\\\bar w)`\n are updated at the beginning of each epoch, with a strategy that depend on\n the ``variance_reduction`` parameter. The step-size :math:`\\\\eta` can be\n tuned with ``step``, the seed of the random number generator for generation\n of samples :math:`i` can be seeded with ``seed``. The iterations stop\n whenever tolerance ``tol`` is achieved, or after ``max_iter`` epochs\n (namely ``max_iter`` :math:`\\\\times` ``epoch_size`` iterates).\n The obtained solution :math:`w` is returned by the ``solve`` method, and is\n also stored in the ``solution`` attribute of the solver.\n\n Internally, :class:`SAGA <tick.solver.SAGA>` has dedicated code when\n the model is a generalized linear model with sparse features, and a\n separable proximal operator: in this case, each iteration works only in the\n set of non-zero features, leading to much faster iterates.\n\n Parameters\n ----------\n step : `float`\n Step-size parameter, the most important parameter of the solver.\n Whenever possible, this can be automatically tuned as\n ``step = 1 / model.get_lip_max()``. Otherwise, use a try-an-improve\n approach\n\n tol : `float`, default=1e-10\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it)\n\n max_iter : `int`, default=10\n Maximum number of iterations of the solver, namely maximum number of\n epochs (by default full pass over the data, unless ``epoch_size`` has\n been modified from default)\n\n verbose : `bool`, default=True\n If `True`, solver verboses history, otherwise nothing is displayed,\n but history is recorded anyway\n\n seed : `int`, default=-1\n The seed of the random sampling. If it is negative then a random seed\n (different at each run) will be chosen.\n\n epoch_size : `int`, default given by model\n Epoch size, namely how many iterations are made before updating the\n variance reducing term. By default, this is automatically tuned using\n information from the model object passed through ``set_model``.\n\n variance_reduction : {'last', 'avg', 'rand'}, default='last'\n Strategy used for the computation of the iterate used in\n variance reduction (also called phase iterate). 
A warning will be\n raised if the ``'avg'`` strategy is used when the model is a\n generalized linear model with sparse features, since it is strongly\n sub-optimal in this case\n\n * ``'last'`` : the phase iterate is the last iterate of the previous\n epoch\n * ``'avg``' : the phase iterate is the average over the iterates in the\n past epoch\n * ``'rand'``: the phase iterate is a random iterate of the previous\n epoch\n\n rand_type : {'unif', 'perm'}, default='unif'\n How samples are randomly selected from the data\n\n * if ``'unif'`` samples are uniformly drawn among all possibilities\n * if ``'perm'`` a random permutation of all possibilities is\n generated and samples are sequentially taken from it. Once all of\n them have been taken, a new random permutation is generated\n\n print_every : `int`, default=1\n Print history information every time the iteration number is a\n multiple of ``print_every``. Used only is ``verbose`` is True\n\n record_every : `int`, default=1\n Save history information every time the iteration number is a\n multiple of ``record_every``\n\n Attributes\n ----------\n model : `Model`\n The model used by the solver, passed with the ``set_model`` method\n\n prox : `Prox`\n Proximal operator used by the solver, passed with the ``set_prox``\n method\n\n solution : `numpy.array`, shape=(n_coeffs,)\n Minimizer found by the solver\n\n history : `dict`-like\n A dict-type of object that contains history of the solver along\n iterations. It should be accessed using the ``get_history`` method\n\n time_start : `str`\n Start date of the call to ``solve()``\n\n time_elapsed : `float`\n Duration of the call to ``solve()``, in seconds\n\n time_end : `str`\n End date of the call to ``solve()``\n\n dtype : `{'float64', 'float32'}`, default='float64'\n Type of the arrays used. This value is set from model and prox dtypes.\n\n References\n ----------\n * <NAME>, <NAME>, <NAME>, SAGA: A fast incremental gradient\n method with support for non-strongly convex composite objectives,\n NIPS 2014\n \"\"\"\n _attrinfos = {\"_var_red_str\": {}}\n\n def __init__(self, step: float = None, epoch_size: int = None,\n rand_type: str = \"unif\", tol: float = 0., max_iter: int = 100,\n verbose: bool = True, print_every: int = 10,\n record_every: int = 1, seed: int = -1,\n variance_reduction: str = \"last\"):\n\n # temporary to hold varience reduction type before dtype is known\n self._var_red_str = variance_reduction\n\n SolverFirstOrderSto.__init__(self, step, epoch_size, rand_type, tol,\n max_iter, verbose, print_every,\n record_every, seed=seed)\n\n @property\n def variance_reduction(self):\n return next((k for k, v in variance_reduction_methods_mapper.items()\n if v == self._solver.get_variance_reduction()), None)\n\n @variance_reduction.setter\n def variance_reduction(self, val: str):\n if val not in variance_reduction_methods_mapper:\n raise ValueError(\n 'variance_reduction should be one of \"{}\", got \"{}\".'.format(\n ', '.join(variance_reduction_methods_mapper.keys()), val))\n\n self._var_red_str = val\n self._solver.set_variance_reduction(\n variance_reduction_methods_mapper[val])\n\n def set_model(self, model: ModelGeneralizedLinear):\n \"\"\"Set model in the solver\n\n Parameters\n ----------\n model : `ModelGeneralizedLinear`\n Sets the model in the solver. The model gives the first\n order information about the model (loss, gradient, among\n other things). 
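For concreteness, a minimal sketch of the generalized-linear-model restriction stated here, again reusing only calls already used elsewhere in these files (step and penalty values are arbitrary):

from tick.linear_model import ModelLogReg, SimuLogReg
from tick.prox import ProxL1
from tick.simulation import weights_sparse_gauss
from tick.solver import SAGA

weights0 = weights_sparse_gauss(20, nnz=5)
X, y = SimuLogReg(weights0, None, n_samples=2000,
                  verbose=False, seed=123).simulate()

# ModelLogReg is a child of ModelGeneralizedLinear, so SAGA accepts it
model = ModelLogReg(fit_intercept=False).fit(X, y)
saga = SAGA(step=1. / model.get_lip_max(), max_iter=50, verbose=False)
saga.set_model(model).set_prox(ProxL1(strength=1e-3))
saga.solve()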
SAGA only accepts childs of `ModelGeneralizedLinear`\n\n Returns\n -------\n output : `Solver`\n The `Solver` with given model\n \"\"\"\n if not isinstance(model, ModelGeneralizedLinear):\n raise ValueError(\"SAGA accepts only childs of \"\n \"`ModelGeneralizedLinear`\")\n\n return SolverFirstOrderSto.set_model(self, model)\n\n def _set_cpp_solver(self, dtype_or_object_with_dtype):\n self.dtype = self._extract_dtype(dtype_or_object_with_dtype)\n solver_class = self._get_typed_class(dtype_or_object_with_dtype,\n dtype_class_mapper)\n\n # Construct the wrapped C++ SAGA solver\n step = self.step\n if step is None:\n step = 0.\n epoch_size = self.epoch_size\n if epoch_size is None:\n epoch_size = 0\n\n self._set(\n '_solver',\n solver_class(epoch_size, self.tol, self._rand_type, step,\n self.seed))\n self.variance_reduction = self._var_red_str\n", "id": "6581751", "language": "Python", "matching_score": 3.749513864517212, "max_stars_count": 0, "path": "tick/solver/saga.py" }, { "content": "# License: BSD 3 clause\n\nimport unittest\n\nimport numpy as np\n\nfrom tick.solver import SAGA\n\nfrom tick.solver.tests import TestSolver\nfrom tick.solver.build.solver import SAGADouble as _SAGA\n\nfrom tick.linear_model import ModelLogReg, SimuLogReg\n\nfrom tick.survival import SimuCoxReg, ModelCoxRegPartialLik\n\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Last\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Average\nfrom tick.solver.build.solver import SAGA_VarianceReductionMethod_Random\n\nfrom tick.simulation import weights_sparse_gauss\n\n\nclass SAGATest(object):\n def test_solver_saga(self):\n \"\"\"...Check SAGA solver for a Logistic Regression with Ridge penalization\"\"\"\n solver = SAGA(step=1e-3, max_iter=100, verbose=False, tol=0)\n self.check_solver(solver, fit_intercept=True, model=\"logreg\",\n decimal=1)\n\n def test_saga_sparse_and_dense_consistency(self):\n \"\"\"...SolverTest SAGA can run all glm models and is consistent with sparsity\"\"\"\n\n def create_solver():\n return SAGA(max_iter=1, verbose=False, step=1e-5,\n seed=TestSolver.sto_seed)\n\n self._test_solver_sparse_and_dense_consistency(create_solver)\n\n def test_variance_reduction_setting(self):\n \"\"\"...SolverTest SAGA variance_reduction parameter is correctly set\"\"\"\n svrg = SAGA()\n\n coeffs0 = weights_sparse_gauss(20, nnz=5, dtype=self.dtype)\n interc0 = None\n\n X, y = SimuLogReg(coeffs0, interc0, n_samples=3000, verbose=False,\n seed=123, dtype=self.dtype).simulate()\n\n model = ModelLogReg().fit(X, y)\n svrg.set_model(model)\n svrg.astype(self.dtype)\n self.assertEqual(svrg.variance_reduction, 'last')\n self.assertEqual(svrg._solver.get_variance_reduction(),\n SAGA_VarianceReductionMethod_Last)\n\n svrg = SAGA(variance_reduction='rand')\n svrg.set_model(model)\n svrg.astype(self.dtype)\n self.assertEqual(svrg.variance_reduction, 'rand')\n self.assertEqual(svrg._solver.get_variance_reduction(),\n SAGA_VarianceReductionMethod_Random)\n\n svrg.variance_reduction = 'avg'\n self.assertEqual(svrg.variance_reduction, 'avg')\n self.assertEqual(svrg._solver.get_variance_reduction(),\n SAGA_VarianceReductionMethod_Average)\n\n svrg.variance_reduction = 'rand'\n self.assertEqual(svrg.variance_reduction, 'rand')\n self.assertEqual(svrg._solver.get_variance_reduction(),\n SAGA_VarianceReductionMethod_Random)\n\n svrg.variance_reduction = 'last'\n self.assertEqual(svrg.variance_reduction, 'last')\n self.assertEqual(svrg._solver.get_variance_reduction(),\n 
SAGA_VarianceReductionMethod_Last)\n\n with self.assertRaises(ValueError):\n svrg.variance_reduction = 'wrong_name'\n\n def test_set_model(self):\n \"\"\"...SolverTest set_model of saga, should only accept childs of\n ModelGeneralizedLinear\"\"\"\n # We try to pass a ModelCoxRegPartialLik which is not a generalized\n # linear model to SAGA to check that the error is raised\n msg = '^SAGA accepts only childs of `ModelGeneralizedLinear`$'\n with self.assertRaisesRegex(ValueError, msg):\n w = weights_sparse_gauss(n_weights=2, nnz=0, dtype=self.dtype)\n X, T, C = SimuCoxReg(w, dtype=self.dtype, verbose=False).simulate()\n model = ModelCoxRegPartialLik().fit(X, T, C)\n SAGA().set_model(model)\n\n def test_saga_dtype_can_change(self):\n \"\"\"...Test saga astype method\n \"\"\"\n\n def create_solver():\n return SAGA(max_iter=100, verbose=False, step=0.01,\n seed=TestSolver.sto_seed)\n\n self._test_solver_astype_consistency(create_solver)\n\n\nclass SAGATestFloat32(TestSolver, SAGATest):\n def __init__(self, *args, **kwargs):\n TestSolver.__init__(self, *args, dtype=\"float32\", **kwargs)\n\n\nclass SAGATestFloat64(TestSolver, SAGATest):\n def __init__(self, *args, **kwargs):\n TestSolver.__init__(self, *args, dtype=\"float64\", **kwargs)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "6093902", "language": "Python", "matching_score": 4.1505889892578125, "max_stars_count": 0, "path": "tick/solver/tests/saga_test.py" }, { "content": "# License: BSD 3 clause\n\nimport io, unittest\nimport numpy as np\n\nimport pickle\nfrom scipy.sparse import csr_matrix\n\nfrom tick.solver.tests import TestSolver\n\nfrom tick.prox import ProxL1\nfrom tick.linear_model import ModelLinReg, SimuLinReg\nfrom tick.linear_model import ModelLogReg, SimuLogReg\nfrom tick.linear_model import ModelPoisReg, SimuPoisReg\nfrom tick.linear_model import ModelHinge, ModelQuadraticHinge, ModelSmoothedHinge\n\nfrom tick.simulation import weights_sparse_gauss\n\n\nclass Test(TestSolver):\n def test_linear_model_serialization(self):\n \"\"\"...Test serialization of linear models\n \"\"\"\n model_map = {\n ModelLinReg: SimuLinReg,\n ModelLogReg: SimuLogReg,\n ModelPoisReg: SimuPoisReg,\n ModelHinge: SimuLogReg,\n ModelQuadraticHinge: SimuLogReg,\n ModelSmoothedHinge: SimuLogReg,\n }\n\n for mod in model_map:\n model = mod(fit_intercept=False)\n\n coeffs0 = weights_sparse_gauss(20, nnz=5)\n interc0 = None\n\n features, labels = model_map[mod](coeffs0, interc0, n_samples=100,\n verbose=False,\n seed=123).simulate()\n model.fit(features, labels)\n\n pickled = pickle.loads(pickle.dumps(model))\n\n self.assertTrue(model._model.compare(pickled._model))\n self.assertEqual(\n model.loss(features[0]), pickled.loss(features[0]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "1721025", "language": "Python", "matching_score": 2.1933932304382324, "max_stars_count": 0, "path": "tick/linear_model/tests/serializing_test.py" }, { "content": "\"\"\"\n==============================\nAsynchronous stochastic solver\n==============================\n\nThis example illustrates the convergence speed of the asynchronous version of\nSVRG solver. This solver called KroMagnon has been introduced in\n\n<NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2015.\nPerturbed iterate analysis for asynchronous stochastic optimization.\n`arXiv preprint arXiv:1507.06970.`_.\n\n.. 
_arXiv preprint arXiv:1507.06970.: https://arxiv.org/abs/1507.06970\n\nTo obtain good speedup in a relative short time example we have designed very\nsparse and ill-conditonned problem.\n\"\"\"\n\n\nfrom scipy import sparse\nimport matplotlib.pyplot as plt\nfrom tick.plot import plot_history\nimport numpy as np\nfrom tick.linear_model import SimuLogReg, ModelLogReg\nfrom tick.simulation import weights_sparse_gauss\nfrom tick.solver import SVRG\nfrom tick.prox import ProxElasticNet\n\n\nseed = 1398\nnp.random.seed(seed)\n\nn_samples = 40000\nn_features = 20000\nsparsity = 1e-4\npenalty_strength = 1e-6\n\nweights = weights_sparse_gauss(n_features, nnz=10)\nintercept = 0.2\nfeatures = sparse.rand(n_samples, n_features, density=sparsity, format='csr')\n\nsimulator = SimuLogReg(weights, n_samples=n_samples, features=features,\n verbose=False, intercept=intercept)\nfeatures, labels = simulator.simulate()\n\nmodel = ModelLogReg(fit_intercept=True)\nmodel.fit(features, labels)\nprox = ProxElasticNet(penalty_strength, ratio=0.5, range=(0, n_features))\nsvrg_step = 1. / model.get_lip_max()\n\ntest_n_threads = [1, 2, 4]\n\nsvrg_list = []\nsvrg_labels = []\n\nfor n_threads in test_n_threads:\n svrg = SVRG(step=svrg_step, seed=seed, max_iter=30, verbose=False,\n n_threads=n_threads)\n svrg.set_model(model).set_prox(prox)\n svrg.solve()\n\n svrg_list += [svrg]\n if n_threads == 1:\n svrg_labels += ['SVRG']\n else:\n svrg_labels += ['ASVRG {}'.format(n_threads)]\n\nplot_history(svrg_list, x=\"time\", dist_min=True, log_scale=True,\n labels=svrg_labels, show=False)\nplt.ylim([3e-3, 0.3])\nplt.ylabel('log distance to optimal objective', fontsize=14)\nplt.tight_layout()\nplt.show()\n\n", "id": "10836842", "language": "Python", "matching_score": 3.94575572013855, "max_stars_count": 0, "path": "examples/plot_asynchronous_stochastic_solver.py" }, { "content": "\"\"\"\n============================================\nGeneralized linear models solver convergence\n============================================\n\nThis example illustrates the optimization of three linear models:\n * Linear regression (`tick.optim.model.ModelLinReg`)\n * Logistic regression (`tick.optim.model.ModelLogReg`)\n * Poisson regression (`tick.optim.model.ModelPoisReg`)\n\nwith five different solvers:\n * LBFGS (`tick.solver.BFGS`)\n * SVRG (`tick.solver.SVRG`)\n * SDCA (`tick.solver.SDCA`)\n * GD (`tick.solver.GD`)\n * AGD (`tick.solver.AGD`)\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom tick.plot import plot_history\nimport numpy as np\nfrom itertools import product\nfrom tick.linear_model import SimuLinReg, SimuLogReg, SimuPoisReg, \\\n ModelLinReg, ModelLogReg, ModelPoisReg\nfrom tick.solver import SDCA, SVRG, BFGS, GD, AGD\nfrom tick.prox import ProxZero, ProxL2Sq\n\nseed = 1398\nnp.random.seed(seed)\n\n\ndef create_model(model_type, n_samples, n_features, with_intercept=True):\n weights = np.random.randn(n_features)\n intercept = None\n if with_intercept:\n intercept = np.random.normal()\n\n if model_type == 'Poisson':\n # we need to rescale features to avoid overflows\n weights /= n_features\n if intercept is not None:\n intercept /= n_features\n\n if model_type == 'Linear':\n simulator = SimuLinReg(weights, intercept=intercept,\n n_samples=n_samples, verbose=False)\n elif model_type == 'Logistic':\n simulator = SimuLogReg(weights, intercept=intercept,\n n_samples=n_samples, verbose=False)\n elif model_type == 'Poisson':\n simulator = SimuPoisReg(weights, intercept=intercept,\n n_samples=n_samples, verbose=False)\n\n labels, features 
= simulator.simulate()\n\n if model_type == 'Linear':\n model = ModelLinReg(fit_intercept=with_intercept)\n elif model_type == 'Logistic':\n model = ModelLogReg(fit_intercept=with_intercept)\n elif model_type == 'Poisson':\n model = ModelPoisReg(fit_intercept=with_intercept)\n\n model.fit(labels, features)\n return model\n\n\ndef run_solvers(model, l_l2sq):\n try:\n svrg_step = 1. / model.get_lip_max()\n except AttributeError:\n svrg_step = 1e-3\n try:\n gd_step = 1. / model.get_lip_best()\n except AttributeError:\n gd_step = 1e-1\n\n bfgs = BFGS(verbose=False, tol=1e-13)\n bfgs.set_model(model).set_prox(ProxL2Sq(l_l2sq))\n bfgs.solve()\n bfgs.history.set_minimizer(bfgs.solution)\n bfgs.history.set_minimum(bfgs.objective(bfgs.solution))\n bfgs.solve()\n\n svrg = SVRG(step=svrg_step, verbose=False, tol=1e-10, seed=seed)\n svrg.set_model(model).set_prox(ProxL2Sq(l_l2sq))\n svrg.history.set_minimizer(bfgs.solution)\n svrg.history.set_minimum(bfgs.objective(bfgs.solution))\n svrg.solve()\n\n sdca = SDCA(l_l2sq, verbose=False, seed=seed, tol=1e-10)\n sdca.set_model(model).set_prox(ProxZero())\n sdca.history.set_minimizer(bfgs.solution)\n sdca.history.set_minimum(bfgs.objective(bfgs.solution))\n sdca.solve()\n\n gd = GD(verbose=False, tol=1e-10, step=gd_step, linesearch=False)\n gd.set_model(model).set_prox(ProxL2Sq(l_l2sq))\n gd.history.set_minimizer(bfgs.solution)\n gd.history.set_minimum(bfgs.objective(bfgs.solution))\n gd.solve()\n\n agd = AGD(verbose=False, tol=1e-10, step=gd_step, linesearch=False)\n agd.set_model(model).set_prox(ProxL2Sq(l_l2sq))\n agd.history.set_minimizer(bfgs.solution)\n agd.history.set_minimum(bfgs.objective(bfgs.solution))\n agd.solve()\n\n return bfgs, svrg, sdca, gd, agd\n\n\nmodel_types = ['Linear', 'Logistic', 'Poisson']\nl_l2sqs = [1e-3, 1e-2, 1e-1]\n\nfig, axes = plt.subplots(len(model_types), len(l_l2sqs),\n figsize=(4 * len(l_l2sqs), 3 * len(model_types)),\n sharey=True, sharex=True)\n\nn_samples = 1000\nn_features = 20\n\nfor (model_type, l_l2sq), ax in zip(product(model_types, l_l2sqs),\n axes.ravel()):\n model = create_model(model_type, n_samples, n_features)\n\n bfgs, svrg, sdca, gd, agd = run_solvers(model, l_l2sq)\n plot_history([bfgs, svrg, sdca, gd, agd], ax=ax,\n dist_min=True, log_scale=True)\n ax.legend_.remove()\n ax.set_xlabel('')\n ax.set_ylim([1e-9, 1])\n\nfor l_l2sq, ax in zip(l_l2sqs, axes[0]):\n ax.set_title('$\\lambda = %.2g$' % l_l2sq)\n\nfor model_type, ax in zip(model_types, axes):\n ax[0].set_ylabel('%s regression' % model_type, fontsize=17)\n\nfor ax in axes[-1]:\n ax.set_xlabel('epochs')\n\naxes[-1][1].legend(loc=9, bbox_to_anchor=(0.5, -0.2), ncol=5)\nplt.show()\n", "id": "4006567", "language": "Python", "matching_score": 3.1724226474761963, "max_stars_count": 0, "path": "examples/plot_glm_convergence.py" }, { "content": "\"\"\"\n============================================\nComparison of solvers for Poisson regression\n============================================\n\nIn this example, we explain how to try out different solvers for the Poisson\nregression model, using :math:`\\ell_2^2` penalization, namely ridge (which is\nthe default value for the ``penalty`` parameter in\n`tick.inference.PoissonRegression`).\n\nNote that for this learner, the ``step`` of the solver cannot be tuned\nautomatically. 
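One way to carry out that manual search is sketched below (assuming, as the surrounding text suggests, that PoissonRegression forwards a step keyword to its solver; the candidate grid is an arbitrary, hypothetical choice):

import numpy as np
from tick.linear_model import PoissonRegression, SimuPoisReg
from tick.plot import plot_history
from tick.simulation import weights_sparse_gauss

np.random.seed(123)
weight0 = weights_sparse_gauss(100, nnz=99) / 20.
X, y = SimuPoisReg(weight0, -0.1, n_samples=5000,
                   verbose=False, seed=123).simulate()

learners = []
for step in (1e-4, 1e-3, 1e-2):  # hypothetical candidate step sizes
    learner = PoissonRegression(solver='svrg', step=step, tol=1e-8,
                                max_iter=40, verbose=False, record_every=1)
    learner.fit(X, y)
    learners.append(learner)

# Compare the convergence of the three runs to pick a step size
plot_history(learners, log_scale=True, dist_min=True)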
So, the default value might work, or not.\nWe therefore urge users to try out different values of the ``step`` parameter\nuntil getting good concergence properties.\n\nOther penalizations are available in `tick.inference.PoissonRegression`:\n\n* no penalization, using ``penalty='none'``\n* :math:`\\ell_1` penalization, using ``penalty='l1'``\n* elastic-net penalization (combination of :math:`\\ell_1` and :math:`\\ell_2^2`,\n using ``penalty='elasticnet'``, where in this case the ``elastic_net_ratio``\n parameter should be used as well\n* total-variation penalization, using ``penalty='tv'``\n\n**Remark**: we don't use in this example ``solver='sgd'`` (namely vanilla\nstochastic gradient descent, see `tick.solver.SGD`) since it performs\ntoo poorly.\n\nThe plot given below compares the distance to the minimum of each solver along\niterations, on a logarithmic scale.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tick.simulation import weights_sparse_gauss\nfrom tick.linear_model import SimuPoisReg, PoissonRegression\nfrom tick.plot import plot_history\n\n\nn_samples = 50000\nn_features = 100\nnp.random.seed(123)\nweight0 = weights_sparse_gauss(n_features, nnz=int(n_features-1)) / 20.\nintercept0 = -0.1\nX, y = SimuPoisReg(weight0, intercept0, n_samples=n_samples,\n verbose=False, seed=123).simulate()\n\nopts = {'verbose': False, 'record_every': 1, 'tol': 1e-8, 'max_iter': 40}\n\npoisson_regressions = [\n PoissonRegression(solver='gd', **opts),\n PoissonRegression(solver='agd', **opts),\n PoissonRegression(solver='svrg', random_state=1234, **opts),\n PoissonRegression(solver='bfgs', **opts)\n]\n\nfor poisson_regression in poisson_regressions:\n poisson_regression.fit(X, y)\n\nplot_history(poisson_regressions, log_scale=True, dist_min=True)\nplt.title('Solvers comparison for Poisson regression', fontsize=16)\nplt.tight_layout()\n", "id": "9160752", "language": "Python", "matching_score": 3.4044547080993652, "max_stars_count": 0, "path": "examples/plot_poisson_regression.py" }, { "content": "\"\"\"\n===========================\nPrecision vs speed tradeoff\n===========================\n\nIn this example we compare the convergence speed of our learners given the\nfloat precision used.\n\nIn both case the convergence speed in term of number of iterations\n(on the left) is similar up to float 32 precision.\nBut compared to the running time (on the right), we can see that using\nfloat 32 instead of float 64 leads to faster convergence up to\nfloat 32 precision.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom tick.dataset import fetch_tick_dataset\nfrom tick.linear_model import LogisticRegression\nfrom tick.plot import plot_history\n\nX, y = fetch_tick_dataset('binary/adult/adult.trn.bz2')\nX = X.toarray() # It is more visible with dense matrices\n\nmax_iter = 50\nseed = 7108\n\nlearner_64 = LogisticRegression(tol=0, max_iter=max_iter,\n record_every=2, random_state=seed)\nlearner_64.fit(X, y)\n\nX_32, y_32 = X.astype('float32'), y.astype('float32')\nlearner_32 = LogisticRegression(tol=0, max_iter=max_iter,\n record_every=2, random_state=seed)\nlearner_32.fit(X_32, y_32)\n\n# For a fair comparison, we access private attributes to compute both\n# objective with float 64 precision\nlearner_32._solver_obj.history.values['obj'] = [\n learner_64._solver_obj.objective(coeffs.astype('float64'))\n for coeffs in learner_32._solver_obj.history.values['x']\n]\n\nfig, axes = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\nplot_history([learner_32, learner_64], x='n_iter',\n 
labels=['float 32', 'float 64'],\n dist_min=True, log_scale=True,\n ax=axes[0])\nplot_history([learner_32, learner_64], x='time',\n labels=['float 32', 'float 64'],\n dist_min=True, log_scale=True,\n ax=axes[1])\n\naxes[0].set_ylabel(r'$\\frac{f(w^t) - f(w^*)}{f(w^*)}$')\naxes[0].set_xlabel('n epochs')\naxes[1].set_ylabel('')\naxes[1].set_xlabel('time (s)')\n\nplt.show()\n", "id": "10786724", "language": "Python", "matching_score": 0.7427446246147156, "max_stars_count": 0, "path": "examples/plot_low_precision_learner.py" }, { "content": "\"\"\"\n====================================================================\nConvSCCS cross validation on simulated longitudinal features example\n====================================================================\n\nIn this example we simulate longitudinal data with preset relative incidence\nfor each feature. We then perform a cross validation of the ConvSCCS model\nand compare the estimated coefficients to the relative incidences used for\nthe simulation.\n\"\"\"\nfrom time import time\nimport numpy as np\nfrom scipy.sparse import csr_matrix, hstack\nfrom matplotlib import cm\nimport matplotlib.pylab as plt\nfrom tick.survival.simu_sccs import CustomEffects\nfrom tick.survival import SimuSCCS, ConvSCCS\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# Simulation parameters\nseed = 0\nlags = 49\nn_samples = 2000\nn_intervals = 750\nn_corr = 3\n\n# Relative incidence functions used for the simulation\nce = CustomEffects(lags + 1)\nnull_effect = [ce.constant_effect(1)] * 2\nintermediate_effect = ce.bell_shaped_effect(2, 30, 15, 15)\nlate_effects = ce.increasing_effect(2, curvature_type=4)\n\nsim_effects = [*null_effect, intermediate_effect, late_effects]\n\nn_features = len(sim_effects)\nn_lags = np.repeat(lags, n_features).astype('uint64')\n\ncoeffs = [np.log(c) for c in sim_effects]\n\n# Time drift (age effect) used for the simulations.\ntime_drift = lambda t: np.log(8 * np.sin(.01 * t) + 9)\n\n# Simaltion of the features.\nsim = SimuSCCS(n_samples, n_intervals, n_features, n_lags,\n time_drift=time_drift, coeffs=coeffs, seed=seed,\n n_correlations=n_corr, verbose=False)\nfeatures, censored_features, labels, censoring, coeffs = sim.simulate()\n\n# Plot the Hawkes kernel matrix used to generate the features.\nfig, ax = plt.subplots(figsize=(7, 6))\nheatmap = ax.pcolor(sim.hawkes_exp_kernels.adjacency, cmap=cm.Blues)\ndivider = make_axes_locatable(ax)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.5)\nfig.colorbar(heatmap, cax=cax)\nax.set_title('Hawkes adjacency matrix used for the simulation');\nplt.show()\n\n## Add age_groups features to feature matrices.\nagegrps = [0, 125, 250, 375, 500, 625, 750]\nn_agegrps = len(agegrps) - 1\n\nfeat_agegrp = np.zeros((n_intervals, n_agegrps))\nfor i in range(n_agegrps):\n feat_agegrp[agegrps[i]:agegrps[i + 1], i] = 1\n\nfeat_agegrp = csr_matrix(feat_agegrp)\nfeatures = [hstack([f, feat_agegrp]).tocsr() for f in features]\ncensored_features = [hstack([f, feat_agegrp]).tocsr() for f in\n censored_features]\nn_lags = np.hstack([n_lags, np.zeros(n_agegrps)])\n\n# Learning\n# Example code for cross validation\n# start = time()\n# learner = ConvSCCS(n_lags=n_lags.astype('uint64'),\n# penalized_features=np.arange(n_features),\n# random_state=42)\n# C_TV_range = (1, 4)\n# C_L1_range = (2, 5)\n# _, cv_track = learner.fit_kfold_cv(features, labels, censoring,\n# C_TV_range, C_L1_range,\n# confidence_intervals=True,\n# n_samples_bootstrap=20, n_cv_iter=50)\n# elapsed_time = time() - start\n# 
print(\"Elapsed time (model training): %.2f seconds \\n\" % elapsed_time)\n# print(\"Best model hyper parameters: \\n\")\n# print(\"C_tv : %f \\n\" % cv_track.best_model['C_tv'])\n# print(\"C_group_l1 : %f \\n\" % cv_track.best_model['C_group_l1'])\n# cv_track.plot_cv_report(35, 45)\n# plt.show()\n# confidence_intervals = cv_track.best_model['confidence_intervals']\n\n# using the parameters resulting from cross-validation\nlearner = ConvSCCS(n_lags=n_lags.astype('uint64'),\n penalized_features=np.arange(n_features),\n random_state=42, C_tv=270.2722840570933,\n C_group_l1=5216.472772625124)\n\n_, confidence_intervals = learner.fit(features, labels, censoring,\n confidence_intervals=True,\n n_samples_bootstrap=20)\n\n# Plot estimated parameters\n# get bootstrap confidence intervals\nrefitted_coeffs = confidence_intervals['refit_coeffs']\nlower_bound = confidence_intervals['lower_bound']\nupper_bound = confidence_intervals['upper_bound']\n\nn_rows = int(np.ceil(n_features / 2))\nremove_last_plot = (n_features % 2 != 0)\n\nfig, axarr = plt.subplots(n_rows, 2, sharex=True, sharey=True, figsize=(10, 6))\ny = confidence_intervals['refit_coeffs']\nlb = confidence_intervals['lower_bound']\nub = confidence_intervals['upper_bound']\nfor i, c in enumerate(y[:-6]):\n ax = axarr[i // 2][i % 2]\n l = n_lags[i]\n ax.plot(np.exp(coeffs[i]), label=\"True RI\")\n ax.step(np.arange(l + 1), np.exp(c), label=\"Estimated RI\")\n ax.fill_between(np.arange(l + 1), np.exp(lb[i]), np.exp(ub[i]), alpha=.5,\n color='orange', step='pre', label=\"95% boostrap CI\")\nplt.suptitle('Estimated relative risks with 95% confidence bands')\naxarr[0][1].legend(loc='best')\n[ax[0].set_ylabel('Relative incidence') for ax in axarr]\n[ax.set_xlabel('Time after exposure start') for ax in axarr[-1]]\nif remove_last_plot:\n fig.delaxes(axarr[-1][-1])\nplt.show()\n\nnormalize = lambda x: x / np.sum(x)\nm = np.repeat(np.hstack(refitted_coeffs[-6:]), 125)\nlb = np.repeat(np.hstack(lower_bound[-6:]), 125)\nub = np.repeat(np.hstack(upper_bound[-6:]), 125)\nplt.figure()\nplt.plot(np.arange(n_intervals),\n normalize(np.exp(time_drift(np.arange(n_intervals)))))\nplt.step(np.arange(n_intervals), normalize(np.exp(m)))\nplt.fill_between(np.arange(n_intervals), np.exp(lb) / np.exp(m).sum(),\n np.exp(ub) / np.exp(m).sum(), alpha=.5, color='orange',\n step='pre')\nplt.xlabel('Age')\nplt.ylabel('Normalized Age Relative Incidence')\nplt.title(\"Normalized age effect with 95% confidence bands\");\nplt.show()\n", "id": "12751748", "language": "Python", "matching_score": 1.3235222101211548, "max_stars_count": 0, "path": "examples/plot_conv_sccs_cv_results.py" }, { "content": "\"\"\"\n==============================\nExamples of proximal operators\n==============================\n\nPlot examples of proximal operators available in tick\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tick.prox import ProxL1, ProxElasticNet, ProxL2Sq, \\\n ProxPositive, ProxSlope, ProxTV, ProxZero, ProxBinarsity, ProxGroupL1, \\\n ProxEquality, ProxL1w\n\nnp.random.seed(12)\nx = np.random.randn(50)\na, b = x.min() - 1e-1, x.max() + 1e-1\ns = 0.4\n\nproxs = [\n ProxZero(),\n ProxPositive(),\n ProxL2Sq(strength=s),\n ProxL1(strength=s),\n ProxElasticNet(strength=s, ratio=0.5),\n ProxSlope(strength=s),\n ProxTV(strength=s),\n ProxEquality(range=(25, 40)),\n ProxL1w(strength=s, weights=0.1 * np.arange(50, dtype=np.double)),\n ProxGroupL1(strength=2*s, blocks_start=np.arange(0, 50, 10),\n blocks_length=10 * np.ones((5,))),\n ProxBinarsity(strength=s, 
blocks_start=np.arange(0, 50, 10),\n blocks_length=10 * np.ones((5,)))\n]\n\nfig, _ = plt.subplots(3, 4, figsize=(16, 12), sharey=True, sharex=True)\nfig.axes[0].stem(x)\nfig.axes[0].set_title(\"original vector\", fontsize=16)\nfig.axes[0].set_xlim((-1, 51))\nfig.axes[0].set_ylim((a, b))\n\nfor i, prox in enumerate(proxs):\n fig.axes[i + 1].stem(prox.call(x))\n fig.axes[i + 1].set_title(prox.name, fontsize=16)\n fig.axes[i + 1].set_xlim((-1, 51))\n fig.axes[i + 1].set_ylim((a, b))\n\nplt.tight_layout()\nplt.show()\n", "id": "2542207", "language": "Python", "matching_score": 0.6464377641677856, "max_stars_count": 0, "path": "examples/plot_prox_example.py" }, { "content": "# License: BSD 3 clause\n\nimport numpy as np\n\nfrom tick.prox.base import Prox\nfrom .base import SolverFirstOrder\nfrom .base.utils import relative_distance\n\n\nclass CompositeProx(Prox):\n \"\"\"Class of prox that wraps a list of prox\n\n Parameters\n ----------\n prox_list : `list` of `Prox`\n List of prox that are wrapped\n\n Attributes\n ----------\n n_proxs : int\n Number of wrapped Prox\n\n Notes\n -----\n You cannot call globally this Prox, you must call all wrapped Prox one\n by one. Otherwise the order of your prox in the ProxList might change the\n result\n \"\"\"\n _attrinfos = {\n \"prox_list\": {\n \"writable\": False\n },\n \"n_proxs\": {\n \"writable\": False\n }\n }\n\n def __init__(self, prox_list: list):\n if isinstance(prox_list, Prox):\n prox_list = [prox_list]\n\n # Range is stored in each Prox individually\n Prox.__init__(self, range=None)\n if len(prox_list) == 0:\n raise ValueError('prox_list must have at least one Prox')\n self.prox_list = prox_list\n self.n_proxs = len(self.prox_list)\n\n def _call(self, coeffs: np.ndarray, step: object, out: np.ndarray):\n raise ValueError(\"You cannot call globally a CompositeProx\")\n\n def call_i(self, i: int, coeffs: np.ndarray, step: object):\n \"\"\"Calls ith prox\n \"\"\"\n return self.prox_list[i].call(coeffs, step)\n\n def value(self, coeffs: np.ndarray):\n prox_value = 0\n for prox in self.prox_list:\n prox_value += prox.value(coeffs)\n return prox_value\n\n def astype(self, dtype_or_object_with_dtype):\n def cast_prox(prox):\n return prox.astype(dtype_or_object_with_dtype)\n\n return CompositeProx(list(map(cast_prox, self.prox_list)))\n\n\nclass GFB(SolverFirstOrder):\n \"\"\"Generalized Forward-Backward algorithm\n\n For the minimization of objectives of the form\n\n .. math::\n f(x) + \\\\sum_{p=1}^P g_p(x)\n\n where :math:`f` has a smooth gradient and :math:`g_1, \\ldots, g_P` are\n prox-capable. Function :math:`f` corresponds to the ``model.loss`` method\n of the model (passed with ``set_model`` to the solver) and\n :math:`g_1, \\ldots, g_P` correspond to the list of prox passed with the\n ``set_prox`` method.\n One iteration of :class:`GFB <tick.solver.GFB>` is as follows:\n\n .. math::\n \\\\begin{align*}\n &\\\\text{for } p=1, \\\\ldots, P \\\\; \\\\text{ do the following:} \\\\\\\\\n & \\\\quad z_p \\\\gets \\\\mathrm{prox}_{P \\\\eta g_p} \\\\Big(2 w - z_p^{\\\\text{old}}\n - \\\\eta \\\\nabla f(w) \\\\Big) \\\\\\\\\n & \\\\quad z_p \\\\gets z_p^{\\\\text{old}} + \\\\beta (z_p - w) \\\\\\\\\n &w \\\\gets \\\\frac 1P \\\\sum_{p=1}^P z_p\n \\\\end{align*}\n\n where :math:`\\\\nabla f(w)` is the gradient of :math:`f` given by the\n ``model.grad`` method and :math:`\\\\mathrm{prox}_{\\\\eta g_p}` is given by the\n ``prox[p].call`` method. The step-size :math:`\\\\eta` can be tuned with\n ``step``. 
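A minimal sketch of this splitting on a toy problem, combining two proximal operators (the penalty strengths are arbitrary, and GFB is assumed to be importable from tick.solver like the other solvers in these files):

from tick.linear_model import ModelLinReg, SimuLinReg
from tick.prox import ProxL1, ProxL2Sq
from tick.simulation import weights_sparse_gauss
from tick.solver import GFB

weights0 = weights_sparse_gauss(20, nnz=5)
X, y = SimuLinReg(weights0, None, n_samples=2000,
                  verbose=False, seed=123).simulate()

model = ModelLinReg(fit_intercept=False).fit(X, y)
try:
    step = 1. / model.get_lip_best()
except AttributeError:
    step = 1e-2  # fallback, as in the convergence example above

# g_1 + g_2: an l1 term and a ridge term, each handled by its own prox
gfb = GFB(step=step, max_iter=300, tol=1e-10, verbose=False)
gfb.set_model(model).set_prox([ProxL1(strength=1e-3), ProxL2Sq(strength=1e-3)])
gfb.solve()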
The iterations stop whenever tolerance ``tol`` is achieved, or\n after ``max_iter`` iterations. The obtained solution :math:`w` is returned\n by the ``solve`` method, and is also stored in the ``solution`` attribute\n of the solver. The level of sur-relaxation :math:`\\\\beta` can be tuned\n using the ``surrelax`` attribute.\n\n Parameters\n ----------\n step : `float`, default=None\n Step-size parameter, the most important parameter of the solver.\n Whenever possible, this can be tuned as\n ``step = 1 / model.get_lip_best()``\n\n tol : `float`, default=1e-10\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it)\n\n max_iter : `int`, default=500\n Maximum number of iterations of the solver.\n\n surrelax : `float`, default=1\n Level of sur-relaxation to use in the algorithm.\n\n verbose : `bool`, default=True\n If `True`, solver verboses history, otherwise nothing is displayed,\n but history is recorded anyway\n\n print_every : `int`, default=10\n Print history information every time the iteration number is a\n multiple of ``print_every``. Used only is ``verbose`` is True\n\n record_every : `int`, default=1\n Save history information every time the iteration number is a\n multiple of ``record_every``\n\n Attributes\n ----------\n model : `Model`\n The model used by the solver, passed with the ``set_model`` method\n\n prox : `list` of `Prox`\n List of proximal operators used by the solver, passed with the\n ``set_prox`` method\n\n solution : `numpy.array`, shape=(n_coeffs,)\n Minimizer found by the solver\n\n history : `dict`-like\n A dict-type of object that contains history of the solver along\n iterations. It should be accessed using the ``get_history`` method\n\n time_start : `str`\n Start date of the call to ``solve()``\n\n time_elapsed : `float`\n Duration of the call to ``solve()``, in seconds\n\n time_end : `str`\n End date of the call to ``solve()``\n\n References\n ----------\n * <NAME>, <NAME>, <NAME>, A generalized forward-backward splitting,\n *SIAM Journal on Imaging Sciences* (2013)\n \"\"\"\n\n def __init__(self, step: float = None, tol: float = 1e-10,\n max_iter: int = 500, surrelax=1., verbose: bool = True,\n print_every: int = 10, record_every: int = 1):\n SolverFirstOrder.__init__(self, step=step, tol=tol, max_iter=max_iter,\n verbose=verbose, print_every=print_every,\n record_every=record_every)\n self.surrelax = surrelax\n\n def set_prox(self, prox_list: list):\n \"\"\"\n Parameters\n ----------\n prox_list : `list` of `Prox`\n List of all proximal operators of the model\n \"\"\"\n if not isinstance(prox_list, CompositeProx):\n prox_list = CompositeProx(prox_list)\n\n if self.dtype is not None:\n prox_list = prox_list.astype(self.dtype)\n SolverFirstOrder.set_prox(self, prox_list)\n return self\n\n def initialize_values(self, x0, step):\n step, obj, x, x_old = \\\n SolverFirstOrder._initialize_values(self, x0, step,\n n_empty_vectors=1)\n z_list = [np.zeros_like(x) for _ in range(self.prox.n_proxs)]\n z_old_list = [np.zeros_like(x) for _ in range(self.prox.n_proxs)]\n return x, x_old, z_list, z_old_list, obj, step\n\n def _solve(self, x0: np.ndarray, step: float):\n minimizer, prev_minimizer, z_list, z_old_list, obj, step = \\\n self.initialize_values(x0, step)\n\n n_prox = self.prox.n_proxs\n for n_iter in range(self.max_iter + 1):\n # We will record on this iteration and we must be ready\n if self._should_record_iter(n_iter):\n prev_minimizer[:] = minimizer\n prev_obj = self.objective(prev_minimizer)\n\n grad_x = 
self.model.grad(minimizer)\n for i in range(n_prox):\n z = z_list[i]\n z_old = z_old_list[i]\n z[:] = self.prox.call_i(\n i, 2 * prev_minimizer - z_old - step * grad_x,\n n_prox * step)\n # Relaxation step\n z[:] = z_old + self.surrelax * (z - prev_minimizer)\n\n minimizer[:] = 1. / n_prox * sum(z_list)\n\n for i in range(n_prox):\n z_old_list[i][:] = z_list[i]\n\n # Let's record metrics\n if self._should_record_iter(n_iter):\n rel_delta = relative_distance(minimizer, prev_minimizer)\n obj = self.objective(minimizer)\n rel_obj = abs(obj - prev_obj) / abs(prev_obj)\n\n converged = rel_obj < self.tol\n # if converged, we stop the loop and record the last step\n # in history\n self._handle_history(n_iter, force=converged, obj=obj,\n x=minimizer.copy(), rel_delta=rel_delta,\n step=step, rel_obj=rel_obj)\n if converged:\n break\n\n self._set('solution', minimizer)\n", "id": "4730451", "language": "Python", "matching_score": 5.023096561431885, "max_stars_count": 0, "path": "tick/solver/gfb.py" }, { "content": "# License: BSD 3 clause\n\nimport numpy as np\nfrom abc import ABC, abstractmethod\n\nfrom tick.base_model import Model\nfrom tick.prox.base import Prox\nfrom . import SolverFirstOrder, SolverSto\nfrom .utils import relative_distance\n\n__author__ = 'stephanegaiffas'\n\n# TODO: property for step that sets it in the C++\n\n\nclass SolverFirstOrderSto(SolverFirstOrder, SolverSto):\n \"\"\"The base class for a first order stochastic solver.\n It only deals with verbosing information, and setting parameters.\n\n Parameters\n ----------\n epoch_size : `int`\n Epoch size\n\n rand_type : `str`\n Type of random sampling\n\n * if ``\"unif\"`` samples are uniformly drawn among all possibilities\n * if ``\"perm\"`` a random permutation of all possibilities is\n generated and samples are sequentially taken from it. Once all of\n them have been taken, a new random permutation is generated\n\n tol : `float`, default=0\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it). By default the solver does ``max_iter``\n iterations\n\n max_iter : `int`\n Maximum number of iterations of the solver\n\n verbose : `bool`, default=True\n If `True`, we verbose things, otherwise the solver does not\n print anything (but records information in history anyway)\n\n print_every : `int`, default = 10\n Print history information every time the iteration number is a\n multiple of ``print_every``\n\n record_every : `int`, default = 1\n Information along iteration is recorded in history each time the\n iteration number of a multiple of ``record_every``\n\n seed : `int`\n The seed of the random sampling. 
If it is negative then a random seed\n (different at each run) will be chosen.\n\n Attributes\n ----------\n model : `Solver`\n The model to solve\n\n prox : `Prox`\n Proximal operator to solve\n\n time_start : `str`\n Start date of the call to solve()\n\n time_elapsed : `float`\n Duration of the call to solve(), in seconds\n\n time_end : `str`\n End date of the call to solve()\n\n Notes\n -----\n This class should not be used by end-users\n \"\"\"\n\n _attrinfos = {\"_step\": {\"writable\": False}}\n\n def __init__(self, step: float = None, epoch_size: int = None,\n rand_type=\"unif\", tol: float = 0., max_iter=100, verbose=True,\n print_every=10, record_every=1, seed=-1):\n\n self._step = None\n\n # We must first construct SolverSto (otherwise self.step won't\n # work in SolverFirstOrder)\n SolverSto.__init__(self, epoch_size=epoch_size, rand_type=rand_type,\n seed=seed)\n SolverFirstOrder.__init__(self, step=step, tol=tol, max_iter=max_iter,\n verbose=verbose, print_every=print_every,\n record_every=record_every)\n\n self._set_cpp_solver('float64')\n\n def set_model(self, model: Model):\n \"\"\"Set model in the solver\n\n Parameters\n ----------\n model : `Model`\n Sets the model in the solver. The model gives the first\n order information about the model (loss, gradient, among\n other things)\n\n Returns\n -------\n output : `Solver`\n The `Solver` with given model\n \"\"\"\n self.validate_model(model)\n if self.dtype != model.dtype or self._solver is None:\n self._set_cpp_solver(model.dtype)\n\n self.dtype = model.dtype\n SolverFirstOrder.set_model(self, model)\n SolverSto.set_model(self, model)\n return self\n\n def set_prox(self, prox: Prox):\n \"\"\"Set proximal operator in the solver\n\n Parameters\n ----------\n prox : `Prox`\n The proximal operator of the penalization function\n\n Returns\n -------\n output : `Solver`\n The solver with given prox\n\n Notes\n -----\n In some solvers, ``set_model`` must be called before\n ``set_prox``, otherwise and error might be raised\n \"\"\"\n SolverFirstOrder.set_prox(self, prox)\n SolverSto.set_prox(self, prox)\n return self\n\n @property\n def step(self):\n return self._step\n\n @step.setter\n def step(self, val):\n self._set(\"_step\", val)\n if val is None:\n val = 0.\n if self._solver is not None:\n self._solver.set_step(val)\n\n def _solve(self, x0: np.array = None, step: float = None):\n \"\"\"\n Launch the solver\n\n Parameters\n ----------\n x0 : np.array, shape=(n_coeffs,)\n Starting iterate for the solver\n\n step : float\n Step-size or learning rate for the solver\n\n Returns\n -------\n output : np.array, shape=(n_coeffs,)\n Obtained minimizer\n \"\"\"\n from tick.solver import SDCA\n if not isinstance(self, SDCA):\n if step is not None:\n self.step = step\n step, obj, minimizer, prev_minimizer = \\\n self._initialize_values(x0, step, n_empty_vectors=1)\n self._solver.set_starting_iterate(minimizer)\n\n else:\n # In sdca case x0 is a dual vector\n step, obj, minimizer, prev_minimizer = \\\n self._initialize_values(None, step, n_empty_vectors=1)\n if x0 is not None:\n self._solver.set_starting_iterate(x0)\n\n # At each iteration we call self._solver.solve that does a full\n # epoch\n for n_iter in range(self.max_iter + 1):\n\n # We will record on this iteration and we must be ready\n if self._should_record_iter(n_iter):\n prev_minimizer[:] = minimizer\n prev_obj = self.objective(prev_minimizer)\n\n # Launch one epoch using the wrapped C++ solver\n self._solver.solve()\n\n # Let's record metrics\n if 
self._should_record_iter(n_iter):\n self._solver.get_minimizer(minimizer)\n # The step might be modified by the C++ solver\n # step = self._solver.get_step()\n obj = self.objective(minimizer)\n rel_delta = relative_distance(minimizer, prev_minimizer)\n rel_obj = abs(obj - prev_obj) / abs(prev_obj)\n converged = rel_obj < self.tol\n # If converged, we stop the loop and record the last step\n # in history\n self._handle_history(n_iter, force=converged, obj=obj,\n x=minimizer.copy(), rel_delta=rel_delta,\n rel_obj=rel_obj)\n if converged:\n break\n\n self._solver.get_minimizer(minimizer)\n self._set(\"solution\", minimizer)\n return minimizer\n\n def _get_typed_class(self, dtype_or_object_with_dtype, dtype_map):\n import tick.base.dtype_to_cpp_type\n return tick.base.dtype_to_cpp_type.get_typed_class(\n self, dtype_or_object_with_dtype, dtype_map)\n\n def _extract_dtype(self, dtype_or_object_with_dtype):\n import tick.base.dtype_to_cpp_type\n return tick.base.dtype_to_cpp_type.extract_dtype(\n dtype_or_object_with_dtype)\n\n @abstractmethod\n def _set_cpp_solver(self, dtype):\n pass\n\n def astype(self, dtype_or_object_with_dtype):\n if self.model is None:\n raise ValueError(\"Cannot reassign solver without a model\")\n\n import tick.base.dtype_to_cpp_type\n new_solver = tick.base.dtype_to_cpp_type.copy_with(\n self,\n [\"prox\", \"model\", \"_solver\"] # ignore on deepcopy\n )\n new_solver._set_cpp_solver(dtype_or_object_with_dtype)\n new_solver.set_model(self.model.astype(new_solver.dtype))\n if self.prox is not None:\n new_solver.set_prox(self.prox.astype(new_solver.dtype))\n return new_solver\n", "id": "6695625", "language": "Python", "matching_score": 4.947588920593262, "max_stars_count": 0, "path": "tick/solver/base/first_order_sto.py" }, { "content": "# License: BSD 3 clause\n\nimport numpy as np\n\nfrom . import Solver\nfrom tick.base_model import Model\nfrom tick.prox.base import Prox\n\n__author__ = '<NAME>'\n\n\nclass SolverFirstOrder(Solver):\n \"\"\"The base class for a first order solver. It defines methods for\n setting a model (giving first order information) and a proximal\n operator\n\n In only deals with verbosing information, and setting parameters\n\n Parameters\n ----------\n step : `float` default=None\n Step-size of the algorithm\n\n tol : `float`, default=0\n The tolerance of the solver (iterations stop when the stopping\n criterion is below it). By default the solver does ``max_iter``\n iterations\n\n max_iter : `int`\n Maximum number of iterations of the solver\n\n verbose : `bool`, default=True\n If `True`, we verbose things, otherwise the solver does not\n print anything (but records information in history anyway)\n\n print_every : `int`, default = 10\n Print history information every time the iteration number is a\n multiple of ``print_every``\n\n record_every : `int`, default = 1\n Information along iteration is recorded in history each time the\n iteration number of a multiple of ``record_every``\n\n Attributes\n ----------\n model : `Model`\n The model to solve\n\n prox : `Prox`\n Proximal operator to solve\n\n dtype : `{'float64', 'float32'}`, default='float64'\n Type of the arrays used. 
This value is set from model and prox dtypes.\n\n Notes\n -----\n This class should not be used by end-users\n \"\"\"\n\n _attrinfos = {\n \"model\": {\n \"writable\": False\n },\n \"prox\": {\n \"writable\": False\n },\n \"_initial_n_calls_loss_and_grad\": {\n \"writable\": False\n },\n \"_initial_n_calls_loss\": {\n \"writable\": False\n },\n \"_initial_n_calls_grad\": {\n \"writable\": False\n },\n \"_initial_n_passes_over_data\": {\n \"writable\": False\n },\n }\n\n def __init__(self, step: float = None, tol: float = 0.,\n max_iter: int = 100, verbose: bool = True,\n print_every: int = 10, record_every: int = 1):\n\n self.dtype = None\n\n Solver.__init__(self, tol, max_iter, verbose, print_every,\n record_every)\n self.model = None\n self.prox = None\n self.step = step\n # Martin's complicated and useless stuff :)\n self._initial_n_calls_loss_and_grad = 0\n self._initial_n_calls_loss = 0\n self._initial_n_calls_grad = 0\n self._initial_n_passes_over_data = 0\n\n def validate_model(self, model: Model):\n if not isinstance(model, Model):\n raise ValueError('Passed object of class %s is not a '\n 'Model class' % model.name)\n if not model._fitted:\n raise ValueError('Passed object %s has not been fitted. You must '\n 'call ``fit`` on it before passing it to '\n '``set_model``' % model.name)\n\n def set_model(self, model: Model):\n \"\"\"Set model in the solver\n\n Parameters\n ----------\n model : `Model`\n Sets the model in the solver. The model gives the first\n order information about the model (loss, gradient, among\n other things)\n\n Returns\n -------\n output : `Solver`\n The same instance with given model\n \"\"\"\n self.validate_model(model)\n self.dtype = model.dtype\n self._set(\"model\", model)\n return self\n\n def _initialize_values(self, x0: np.ndarray = None, step: float = None,\n n_empty_vectors: int = 0):\n \"\"\"Initialize values\n\n Parameters\n ----------\n x0 : `numpy.ndarray`\n Starting point\n\n step : `float`\n Initial step\n\n n_empty_vectors : `int`\n Number of empty vector of like x0 needed\n\n Returns\n -------\n step : `float`\n Initial step\n\n obj : `float`\n Initial value of objective function\n\n iterate : `numpy.ndarray`\n copy of starting point\n\n empty vectors : `numpy.ndarray`\n n_empty_vectors empty vectors shaped as x0. 
For example, those\n vectors can be used to store previous iterate values during\n a solver execution.\n \"\"\"\n # Initialization\n if step is None:\n if self.step is None:\n raise ValueError(\"No step specified.\")\n else:\n step = self.step\n else:\n self.step = step\n if x0 is None:\n x0 = np.zeros(self.model.n_coeffs, dtype=self.dtype)\n iterate = x0.copy()\n obj = self.objective(iterate)\n\n result = [step, obj, iterate]\n for _ in range(n_empty_vectors):\n result.append(np.zeros_like(x0))\n\n return tuple(result)\n\n def set_prox(self, prox: Prox):\n \"\"\"Set proximal operator in the solver\n\n Parameters\n ----------\n prox : `Prox`\n The proximal operator of the penalization function\n\n Returns\n -------\n output : `Solver`\n The solver with given prox\n\n Notes\n -----\n In some solvers, ``set_model`` must be called before\n ``set_prox``, otherwise and error might be raised\n \"\"\"\n if not isinstance(prox, Prox):\n raise ValueError('Passed object of class %s is not a '\n 'Prox class' % prox.name)\n if self.dtype is None or self.model is None:\n raise ValueError(\"Solver must call set_model before set_prox\")\n if prox.dtype != self.dtype:\n prox = prox.astype(self.dtype)\n self._set(\"prox\", prox)\n return self\n\n def astype(self, dtype_or_object_with_dtype):\n if self.model is None:\n raise ValueError(\"Cannot reassign solver without a model\")\n\n import tick.base.dtype_to_cpp_type\n new_solver = tick.base.dtype_to_cpp_type.copy_with(\n self,\n [\"prox\", \"model\"] # ignore on deepcopy\n )\n new_solver.dtype = tick.base.dtype_to_cpp_type.extract_dtype(\n dtype_or_object_with_dtype)\n new_solver.set_model(self.model.astype(new_solver.dtype))\n if self.prox is not None:\n new_solver.set_prox(self.prox.astype(new_solver.dtype))\n return new_solver\n\n def _as_dict(self):\n dd = Solver._as_dict(self)\n if self.model is not None:\n dd[\"model\"] = self.model._as_dict()\n if self.prox is not None:\n dd[\"prox\"] = self.prox._as_dict()\n return dd\n\n def objective(self, coeffs, loss: float = None):\n \"\"\"Compute the objective function\n\n Parameters\n ----------\n coeffs : `np.array`, shape=(n_coeffs,)\n Point where the objective is computed\n\n loss : `float`, default=`None`\n Gives the value of the loss if already known (allows to\n avoid its computation in some cases)\n\n Returns\n -------\n output : `float`\n Value of the objective at given ``coeffs``\n \"\"\"\n if self.prox is None:\n prox_value = 0\n else:\n prox_value = self.prox.value(coeffs)\n\n if loss is None:\n return self.model.loss(coeffs) + prox_value\n else:\n return loss + prox_value\n\n def solve(self, x0=None, step=None):\n \"\"\"\n Launch the solver\n\n Parameters\n ----------\n x0 : `np.array`, shape=(n_coeffs,), default=`None`\n Starting point of the solver\n\n step : `float`, default=`None`\n Step-size or learning rate for the solver. 
This can be tuned also\n using the ``step`` attribute\n\n Returns\n -------\n output : `np.array`, shape=(n_coeffs,)\n Obtained minimizer for the problem, same as ``solution`` attribute\n \"\"\"\n if x0 is not None and self.dtype is not \"float64\":\n x0 = x0.astype(self.dtype)\n\n if self.model is None:\n raise ValueError('You must first set the model using '\n '``set_model``.')\n if self.prox is None:\n raise ValueError('You must first set the prox using '\n '``set_prox``.')\n solution = Solver.solve(self, x0, step)\n return solution\n\n def _handle_history(self, n_iter: int, force: bool = False, **kwargs):\n \"\"\"Updates the history of the solver.\n\n Parameters\n ----------\n\n Notes\n -----\n This should not be used by end-users.\n \"\"\"\n # self.model.n_calls_loss_and_grad is shared by all\n # solvers using this model\n # hence it might not be at 0 while starting\n # /!\\ beware if parallel computing...\n if n_iter == 0:\n self._set(\"_initial_n_calls_loss_and_grad\",\n self.model.n_calls_loss_and_grad)\n self._set(\"_initial_n_calls_loss\", self.model.n_calls_loss)\n self._set(\"_initial_n_calls_grad\", self.model.n_calls_grad)\n self._set(\"_initial_n_passes_over_data\",\n self.model.n_passes_over_data)\n n_calls_loss_and_grad = \\\n self.model.n_calls_loss_and_grad - \\\n self._initial_n_calls_loss_and_grad\n n_calls_loss = \\\n self.model.n_calls_loss - self._initial_n_calls_loss\n n_calls_grad = \\\n self.model.n_calls_grad - self._initial_n_calls_grad\n n_passes_over_data = \\\n self.model.n_passes_over_data - \\\n self._initial_n_passes_over_data\n Solver.\\\n _handle_history(self, n_iter, force=force,\n n_calls_loss_and_grad=n_calls_loss_and_grad,\n n_calls_loss=n_calls_loss,\n n_calls_grad=n_calls_grad,\n n_passes_over_data=n_passes_over_data,\n **kwargs)\n", "id": "1225652", "language": "Python", "matching_score": 2.5145764350891113, "max_stars_count": 0, "path": "tick/solver/base/first_order.py" }, { "content": "# License: BSD 3 clause\n\nfrom collections import defaultdict\nimport dill\n\nimport numpy as np\nfrom tick.base import Base\nfrom numpy.linalg import norm\n\n\ndef spars_func(coeffs, **kwargs):\n eps = np.finfo(coeffs.dtype).eps\n return np.sum(np.abs(coeffs) > eps, axis=None)\n\n\nclass History(Base):\n \"\"\"A class to manage the history along iterations of a solver\n\n Attributes\n ----------\n print_order : `list` or `str`\n The list of values to print along iterations\n\n values : `dict`\n A `dict` containing the history. Key is the value name and\n values are the values taken along the iterations\n\n last_values : `dict`\n A `dict` containing all the last history values\n\n _minimum_col_width : `int`\n Minimal size of a column when printing the history\n\n _minimizer : `None` or `numpy.ndarray`\n The minimizer of the objective. `None` if not specified.\n This is useful to compute a distance to the optimum.\n\n _minimum : `None` or `float`\n The minimal (optimal) value of the objective. `None` if not\n specified. This is useful to compute a distance to the optimum.\n\n _print_style : `list` or `str`\n The display style of all printed numbers\n\n _history_func : `dict`\n A dict given for all values the function to be applied before\n saving and displaying in history. 
This is useful for computing\n the sparsity, the rank, among other things, of the iterates\n along iterations of the solver\n\n _n_iter : `int`\n The current iteration number\n\n _col_widths : `list` or `int`\n A list containing the computed width of each column used for\n printing the history, based on the name length of the column\n \"\"\"\n\n _attrinfos = {\n \"values\": {\n \"writable\": False\n },\n \"last_values\": {\n \"writable\": False\n },\n }\n\n def __init__(self):\n Base.__init__(self)\n self._minimum_col_width = 9\n self.print_order = [\"n_iter\", \"obj\", \"step\", \"rel_obj\"]\n # Instantiate values of the history\n self._clear()\n\n self._minimizer = None\n self._minimum = None\n self._set(\"values\", None)\n self._col_widths = None\n self._n_iter = None\n\n # History function to compute history values based on parameters\n # used in a solver\n history_func = {}\n # history_func[\"n_iter\"] = n_iter_func\n # history_func[\"obj\"] = obj_func\n # history_func[\"step\"] = step_func\n # history_func[\"rel_obj\"] = rel_obj_func\n # history_func[\"n_epoch\"] = n_epoch_func\n # history_func[\"n_inner_prod\"] = n_inner_prod_func\n # history_func[\"norm\"] = norm_func\n # history_func[\"spars\"] = spars_func\n # history_func[\"rank\"] = rank_func\n self._history_func = history_func\n\n # Default print style of history values. Default is %.2e\n print_style = defaultdict(lambda: \"%.2e\")\n print_style[\"n_iter\"] = \"%d\"\n print_style[\"n_epoch\"] = \"%d\"\n print_style[\"n_inner_prod\"] = \"%d\"\n print_style[\"spars\"] = \"%d\"\n print_style[\"rank\"] = \"%d\"\n self._print_style = print_style\n\n def _clear(self):\n \"\"\"Reset history values\"\"\"\n self._set(\"values\", defaultdict(list))\n\n def _update(self, **kwargs):\n \"\"\"Update the history along the iterations.\n\n For each keyword argument, we apply the history function corresponding\n to this keyword, and use its results in the history\n \"\"\"\n self._n_iter = kwargs[\"n_iter\"]\n history_func = self._history_func\n # We loop on both, history functions and kerword arguments\n keys = set(kwargs.keys()).union(set(history_func.keys()))\n for key in keys:\n # Either it has a corresponding history function which we\n # apply on all keywords\n if key in history_func:\n func = history_func[key]\n self.values[key].append(func(**kwargs))\n # Either we only record the value\n else:\n value = kwargs[key]\n self.values[key].append(value)\n\n # def set_print_order(self, *args):\n # \"\"\"Allows to set the print order of the solver's history\n # \"\"\"\n # self.set_params(print_order=list(args))\n # self.clear()\n # return self\n\n # def update_history_func(self, **kwargs):\n # self._history_func.update(**kwargs)\n # self._clear()\n # return self\n\n # def update_print_style(self, **kwargs):\n # self.print_style.update(**kwargs)\n # return self\n\n def _format_last(self, name):\n try:\n formatted_str = self._print_style[name] % \\\n self.values[name][-1]\n except TypeError:\n formatted_str = str(self.values[name][-1])\n return formatted_str\n\n def _print_history(self):\n \"\"\"Verbose the current line of history\n \"\"\"\n values = self.values\n n_iter = self._n_iter\n print_order = self.print_order\n # If this is the first iteration, plot the history's column\n # names\n if n_iter == 0:\n min_width = self._minimum_col_width\n line = ' | '.join(\n list([\n name.center(min_width) for name in print_order\n if name in values\n ]))\n names = [name.center(min_width) for name in print_order]\n self._col_widths = list(map(len, 
names))\n print(line)\n col_widths = self._col_widths\n line = ' | '.join(\n list([\n self._format_last(name).rjust(col_widths[i])\n for i, name in enumerate(print_order) if name in values\n ]))\n print(line)\n\n @property\n def last_values(self):\n last_values = {}\n for key, hist in self.values.items():\n last_values[key] = hist[-1]\n return last_values\n\n def set_minimizer(self, minimizer: np.ndarray):\n \"\"\"Set the minimizer of the objective, to compute distance\n to it along iterations\n\n Parameters\n ----------\n minimizer : `numpy.ndarray`, shape=(n_coeffs,)\n The minimizer of the objective\n\n Notes\n -----\n This adds dist_coeffs in history (distance to the minimizer)\n which is printed along iterations\n \"\"\"\n self._minimizer = minimizer.copy()\n self._history_func[\"dist_coeffs\"] = \\\n lambda x, **kwargs: norm(x - self._minimizer)\n print_order = self.print_order\n if \"dist_coeffs\" not in print_order:\n print_order.append(\"dist_coeffs\")\n\n def set_minimum(self, minimum: float):\n \"\"\"Set the minimum of the objective, to compute distance to the\n optimum along iterations\n\n Parameters\n ----------\n minimum : `float`\n The minimizer of the objective\n\n Notes\n -----\n This adds dist_obj in history (distance to the minimum) which\n is printed along iterations\n \"\"\"\n self._minimum = minimum\n self._history_func[\"dist_obj\"] = \\\n lambda obj, **kwargs: obj - self._minimum\n print_order = self.print_order\n if \"dist_obj\" not in print_order:\n print_order.append(\"dist_obj\")\n\n def _as_dict(self):\n dd = Base._as_dict(self)\n dd.pop(\"values\", None)\n return dd\n\n # We use dill for serialization because history uses lambda functions\n def __getstate__(self):\n return dill.dumps(self.__dict__)\n\n def __setstate__(self, state):\n object.__setattr__(self, '__dict__', dill.loads(state))\n", "id": "12375275", "language": "Python", "matching_score": 0.8425568342208862, "max_stars_count": 0, "path": "tick/solver/history/history.py" }, { "content": "# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_point_process(point_process, plot_intensity=None, n_points=10000,\n plot_nodes=None, t_min=None, t_max=None, max_jumps=None,\n show=True, ax=None):\n \"\"\"Plot point process realization\n\n Parameters\n ----------\n point_process : `SimuPointProcess`\n Point process that will be plotted\n\n plot_intensity : `bool`, default=`None`\n Flag saying if intensity should be plotted. If `None`, intensity will\n be plotted if it has been tracked.\n\n n_points : `int`, default=10000\n Number of points used for intensity plot.\n\n plot_nodes : `list` of `int`, default=`None`\n List of nodes that will be plotted. If `None`, all nodes are considered\n\n t_min : `float`, default=`None`\n If not `None`, time at which plot will start\n\n t_max : `float`, default=`None`\n If not `None`, time at which plot will stop\n\n max_jumps : `int`, default=`None`\n If not `None`, maximum of jumps per coordinate that will be plotted.\n This is useful when plotting big point processes to ensure a only\n readable part of them will be plotted\n\n show : `bool`, default=`True`\n if `True`, show the plot. Otherwise an explicit call to the show\n function is necessary. 
Useful when superposing several plots.\n\n ax : `list` of `matplotlib.axes`, default=None\n If not None, the figure will be plot on this axis and show will be\n set to False.\n \"\"\"\n if plot_nodes is None:\n plot_nodes = range(point_process.n_nodes)\n\n labels = []\n for node in plot_nodes:\n label = 'ticks #{}'.format(node)\n if t_min is not None or t_max is not None:\n if t_min is None:\n label_t_min = '0'\n else:\n label_t_min = '{:.3g}'.format(t_min)\n if t_max is None:\n label_t_max = '{:.3g}'.format(point_process.simulation_time)\n else:\n label_t_max = '{:.3g}'.format(t_max)\n\n label += ', $t \\in [{}, {}]$'.format(label_t_min, label_t_max)\n\n if max_jumps is not None:\n label += ', max jumps={}'.format(max_jumps)\n\n labels += [label]\n\n if ax is None:\n fig, ax = plt.subplots(\n len(plot_nodes), 1, sharex=True, sharey=True,\n figsize=(12, 4 * len(plot_nodes)))\n else:\n show = False\n\n if len(plot_nodes) == 1:\n ax = [ax]\n\n if plot_intensity is None:\n plot_intensity = point_process.is_intensity_tracked()\n\n timestamps = point_process.timestamps\n if plot_intensity:\n intensity_times = point_process.intensity_tracked_times\n intensities = point_process.tracked_intensity\n else:\n intensity_times, intensities = None, None\n\n timestamps, intensity_times, intensities = _extract_process_interval(\n plot_nodes, point_process.end_time, timestamps,\n intensity_times=intensity_times, intensities=intensities, t_min=t_min,\n t_max=t_max, max_jumps=max_jumps)\n\n for count, i in enumerate(plot_nodes):\n if not plot_intensity:\n _plot_tick_bars(timestamps[i], ax[count], labels[count])\n\n else:\n _plot_tick_intensity(timestamps[i], intensity_times,\n intensities[i], ax[count], labels[count],\n n_points)\n\n ax[-1].set_xlabel(r'$t$', fontsize=18)\n\n if show is True:\n plt.show()\n\n return ax[0].figure\n\n\ndef _plot_tick_bars(timestamps_i, ax, label):\n for t in timestamps_i:\n ax.axvline(x=t)\n ax.set_title(label, fontsize=20)\n ax.get_yaxis().set_visible(False)\n\n\ndef _plot_tick_intensity(timestamps_i, intensity_times, intensity_i, ax, label,\n n_points):\n x_intensity = np.linspace(intensity_times.min(), intensity_times.max(),\n n_points)\n y_intensity = np.interp(x_intensity, intensity_times, intensity_i, left=0)\n ax.plot(x_intensity, y_intensity)\n\n x_ticks = timestamps_i\n y_ticks = np.interp(x_ticks, intensity_times, intensity_i)\n\n ax.scatter(x_ticks, y_ticks)\n ax.set_title(label)\n\n\ndef _extract_process_interval(plot_nodes, end_time, timestamps,\n intensities=None, intensity_times=None,\n t_min=None, t_max=None, max_jumps=None):\n t_min_is_specified = t_min is not None\n if not t_min_is_specified:\n t_min = 0\n t_max_is_specified = t_max is not None\n if not t_max_is_specified:\n t_max = end_time\n\n if t_min >= end_time:\n raise ValueError('`t_min` should be smaller than `end_time`')\n if t_max <= 0:\n raise ValueError('`t_max` should be positive')\n\n if max_jumps is not None:\n # if neither t_min or t_ax is given, we act as if t_min=0 was given\n if t_min_is_specified or not t_max_is_specified:\n for i in plot_nodes:\n timestamps_i = timestamps[i]\n i_t_min = np.searchsorted(timestamps_i, t_min, side=\"left\")\n # This might happen if max_points = 0\n last_index = i_t_min + max_jumps - 1\n if last_index < 0:\n t_max = 0\n elif last_index < len(timestamps_i) \\\n and timestamps_i[last_index] < t_max:\n t_max = timestamps_i[last_index]\n\n elif t_max_is_specified:\n for i in plot_nodes:\n timestamps_i = timestamps[i]\n i_t_max = np.searchsorted(timestamps_i, 
t_max, side=\"left\")\n # This might happen if max_points = 0\n first_index = i_t_max - max_jumps\n if first_index >= len(timestamps_i) - 1:\n t_min = end_time\n elif first_index >= 0 and timestamps_i[first_index] > t_min:\n t_min = timestamps_i[first_index]\n\n extracted_timestamps = [\n timestamps_i[(timestamps_i >= t_min) & (timestamps_i <= t_max)]\n for timestamps_i in timestamps\n ]\n\n if intensity_times is not None:\n intensity_extracted_points = (intensity_times >= t_min) \\\n & (intensity_times <= t_max)\n extracted_intensity_times = intensity_times[intensity_extracted_points]\n\n extracted_intensity = [\n intensity[intensity_extracted_points] for intensity in intensities\n ]\n else:\n extracted_intensity_times, extracted_intensity = None, None\n\n return extracted_timestamps, extracted_intensity_times, extracted_intensity\n", "id": "3390354", "language": "Python", "matching_score": 1.9036180973052979, "max_stars_count": 0, "path": "tick/plot/plot_point_processes.py" }, { "content": "# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom tick.plot.plot_utilities import share_x, share_y\n\n\ndef plot_hawkes_kernel_norms(kernel_object, show=True, pcolor_kwargs=None,\n node_names=None):\n \"\"\"Generic function to plot Hawkes kernel norms.\n\n Parameters\n ----------\n kernel_object : `Object`\n An object that must have the following API :\n\n * `kernel_object.n_nodes` : a field that stores the number of nodes\n of the associated Hawkes process (thus the number of kernels is\n this number squared)\n * `kernel_object.get_kernel_norms()` : must return a 2d numpy\n array with the norm of each kernel\n\n show : `bool`, default=`True`\n if `True`, show the plot. Otherwise an explicit call to the show\n function is necessary. 
Useful when superposing several plots.\n\n pcolor_kwargs : `dict`, default=`None`\n Extra pcolor kwargs such as cmap, vmin, vmax\n\n node_names : `list` of `str`, shape=(n_nodes, ), default=`None`\n node names that will be displayed on axis.\n If `None`, node index will be used.\n\n Notes\n -----\n Kernels are displayed such that it shows norm of column influence's\n on row.\n \"\"\"\n n_nodes = kernel_object.n_nodes\n\n if node_names is None:\n node_names = range(n_nodes)\n elif len(node_names) != n_nodes:\n ValueError('node_names must be a list of length {} but has length {}'\n .format(n_nodes, len(node_names)))\n\n row_labels = ['${} \\\\rightarrow$'.format(i) for i in node_names]\n column_labels = ['$\\\\rightarrow {}$'.format(i) for i in node_names]\n\n norms = kernel_object.get_kernel_norms()\n fig, ax = plt.subplots()\n\n if pcolor_kwargs is None:\n pcolor_kwargs = {}\n\n if norms.min() >= 0:\n pcolor_kwargs.setdefault(\"cmap\", plt.cm.Blues)\n else:\n # In this case we want a diverging colormap centered on 0\n pcolor_kwargs.setdefault(\"cmap\", plt.cm.RdBu)\n max_abs_norm = np.max(np.abs(norms))\n pcolor_kwargs.setdefault(\"vmin\", -max_abs_norm)\n pcolor_kwargs.setdefault(\"vmax\", max_abs_norm)\n\n heatmap = ax.pcolor(norms, **pcolor_kwargs)\n\n # put the major ticks at the middle of each cell\n ax.set_xticks(np.arange(norms.shape[0]) + 0.5, minor=False)\n ax.set_yticks(np.arange(norms.shape[1]) + 0.5, minor=False)\n\n # want a more natural, table-like display\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n\n ax.set_xticklabels(row_labels, minor=False, fontsize=17)\n ax.set_yticklabels(column_labels, minor=False, fontsize=17)\n\n fig.subplots_adjust(right=0.8)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.5)\n fig.colorbar(heatmap, cax=cax)\n\n if show:\n plt.show()\n\n return fig\n\n\ndef plot_hawkes_kernels(kernel_object, support=None, hawkes=None, n_points=300,\n show=True, log_scale=False, min_support=1e-4, ax=None):\n \"\"\"Generic function to plot Hawkes kernels.\n\n Parameters\n ----------\n kernel_object : `Object`\n An object that must have the following API :\n\n * `kernel_object.n_nodes` : a field that stores the number of nodes\n of the associated Hawkes process (thus the number of kernels is\n this number squared)\n * `kernel_object.get_kernel_supports()` : must return a 2d numpy\n array with the size of the support of each kernel\n * `kernel_object.get_kernel_values(self, i, j, abscissa_array)` :\n must return as a numpy 1d array the sampled `(i,j)` kernel values\n corresponding to the abscissa `abscissa_array`\n\n support : `float`, default=None\n the size of the support that will be used to plot all the kernels.\n If None or non positive then the maximum kernel supports is used\n\n hawkes : `SimuHawkes`, default=None\n If a `SimuHawkes` object is given then the kernels plots are superposed\n with those of this object (considered as the `True` kernels). This is\n used to plot on the same plots the estimated kernels along with\n the true kernels.\n\n n_points : `int`, default=300\n Number of points that will be used in abscissa. More points will lead\n to a more precise graph.\n\n show : `bool`, default=`True`\n if `True`, show the plot. Otherwise an explicit call to the show\n function is necessary. Useful when superposing several plots.\n\n log_scale : `bool`, default=`False`\n If `True`, then x-axis and y-axis are on a log-scale. 
This is useful\n to plot power-law kernels.\n\n min_support : `float`, default=1e-4\n Start value of the plot. Only used if log_scale is `True`.\n \n ax : `np.ndarray` of `matplotlib.axes`, default=None\n If not None, the figure will be plot on these axes and show will be\n set to False.\n \"\"\"\n if support is None or support <= 0:\n plot_supports = kernel_object.get_kernel_supports()\n support = plot_supports.max() * 1.2\n\n n_nodes = kernel_object.n_nodes\n\n if log_scale:\n x_values = np.logspace(\n np.log10(min_support), np.log10(support), n_points)\n else:\n x_values = np.linspace(0, support, n_points)\n\n if ax is None:\n fig, ax_list_list = plt.subplots(n_nodes, n_nodes, sharex=True,\n sharey=True)\n else:\n if ax.shape != (n_nodes, n_nodes):\n raise ValueError('Given ax has shape {} but should have shape {}'\n .format(ax.shape, (n_nodes, n_nodes)))\n ax_list_list = ax\n show = False\n\n if n_nodes == 1:\n ax_list_list = np.array([[ax_list_list]])\n\n for i, ax_list in enumerate(ax_list_list):\n for j, ax in enumerate(ax_list):\n y_values = kernel_object.get_kernel_values(i, j, x_values)\n ax.plot(x_values, y_values, label=\"Kernel (%d, %d)\" % (i, j))\n\n if hawkes:\n y_true_values = hawkes.kernels[i, j].get_values(x_values)\n ax.plot(x_values, y_true_values,\n label=\"True Kernel (%d, %d)\" % (i, j))\n\n # set x_label for last line\n if i == n_nodes - 1:\n ax.set_xlabel(r\"$t$\", fontsize=18)\n\n ax.set_ylabel(r\"$\\phi^{%g,%g}(t)$\" % (i, j), fontsize=18)\n\n if log_scale:\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n legend = ax.legend()\n for label in legend.get_texts():\n label.set_fontsize(12)\n\n if show:\n plt.show()\n\n return ax_list_list.ravel()[0].figure\n\n\ndef plot_hawkes_baseline_and_kernels(\n hawkes_object, kernel_support=None, hawkes=None, n_points=300,\n show=True, log_scale=False, min_support=1e-4, ax=None):\n \"\"\"Generic function to plot Hawkes baseline and kernels.\n\n Parameters\n ----------\n hawkes_object : `Object`\n An object that must have the following API :\n\n * `kernel_object.n_nodes` : a field that stores the number of nodes\n of the associated Hawkes process (thus the number of kernels is\n this number squared)\n * `kernel_object.get_baseline_values(self, i, abscissa_array)` :\n must return as a numpy 1d array the sampled `i` baseline values\n corresponding to the abscissa `abscissa_array`\n * `kernel_object.period_length` : a field that stores the size of the \n baseline period\n * `kernel_object.get_kernel_supports()` : must return a 2d numpy\n array with the size of the support of each kernel\n * `kernel_object.get_kernel_values(self, i, j, abscissa_array)` :\n must return as a numpy 1d array the sampled `(i,j)` kernel values\n corresponding to the abscissa `abscissa_array`\n\n kernel_support : `float`, default=None\n the size of the support that will be used to plot all the kernels.\n If None or non positive then the maximum kernel supports is used\n\n hawkes : `SimuHawkes`, default=None\n If a `SimuHawkes` object is given then the baseline and kernels plots \n are superposed with those of this object (considered as the `True` \n baseline and kernels). This is used to plot on the same plots the \n estimated value along with the true values.\n\n n_points : `int`, default=300\n Number of points that will be used in abscissa. More points will lead\n to a more precise graph.\n\n show : `bool`, default=`True`\n if `True`, show the plot. Otherwise an explicit call to the show\n function is necessary. 
Useful when superposing several plots.\n\n log_scale : `bool`, default=`False`\n If `True`, then x-axis and y-axis of kernels are on a log-scale. \n This is useful to plot power-law kernels.\n\n min_support : `float`, default=1e-4\n Start value of the kernels plot. Only used if log_scale is `True`.\n\n ax : `np.ndarray` of `matplotlib.axes`, default=None\n If not None, the figure will be plot on these axes and show will be\n set to False.\n \"\"\"\n n_nodes = hawkes_object.n_nodes\n\n if ax is None:\n fig, ax_list_list = plt.subplots(n_nodes, n_nodes + 1, figsize=(10, 6))\n else:\n ax_list_list = ax\n show = False\n\n # invoke plot_hawkes_kernels\n ax_kernels = ax_list_list[:, 1:]\n plot_hawkes_kernels(hawkes_object, support=kernel_support, hawkes=hawkes,\n n_points=n_points, show=False, log_scale=log_scale,\n min_support=min_support, ax=ax_kernels)\n share_x(ax_kernels)\n share_y(ax_kernels)\n\n # plot hawkes baselines\n ax_baselines = ax_list_list[:, 0]\n t_values = np.linspace(0, hawkes_object.period_length, n_points)\n for i in range(n_nodes):\n ax = ax_baselines[i]\n ax.plot(t_values, hawkes_object.get_baseline_values(i, t_values),\n label='baseline ({})'.format(i))\n ax.plot(t_values, hawkes.get_baseline_values(i, t_values),\n label='true baseline ({})'.format(i))\n ax.set_ylabel(\"$\\mu_{}(t)$\".format(i), fontsize=18)\n\n # set x_label for last line\n if i == n_nodes - 1:\n ax.set_xlabel(r\"$t$\", fontsize=18)\n\n legend = ax.legend()\n for label in legend.get_texts():\n label.set_fontsize(12)\n\n share_x(ax_baselines.reshape(2, 1))\n share_y(ax_baselines.reshape(2, 1))\n\n if show:\n plt.show()\n\n return ax_list_list.ravel()[0].figure\n\n\ndef _normalize_functions(y_values_list, t_values):\n \"\"\"Normalize list of functions by their integral value\n\n Parameters\n ----------\n y_values_list : `list` of np.ndarray\n y values of the list of function we want to normalize\n\n t_values : `np.ndarray`\n t values shared by all functions given with y_values_list\n\n Returns\n -------\n normalized_y_values_list : `list` of np.ndarray\n Normalized y values of the given list of function\n\n normalizations : `np.ndarray`\n Normalization factors that have been used\n \"\"\"\n y_values_list = np.array(y_values_list)\n normalizations = [\n 1. 
/ np.trapz(y_values, t_values) for y_values in y_values_list\n ]\n normalized_y_values_list = (y_values_list.T * normalizations).T\n return normalized_y_values_list, normalizations\n\n\ndef _find_best_match(diff_matrix):\n \"\"\"Find best best possible match by induction\n\n Parameters\n ----------\n diff_matrix : `np.ndarray`, shape=(n_basis, n_basis)\n Matrix containing differences for all pairs of values\n\n Returns\n -------\n matches : `list`, shape=(n_nodes,)\n List of all found matches\n \"\"\"\n diff_matrix = diff_matrix.astype(float)\n matches = []\n n_basis = diff_matrix.shape[0]\n for _ in range(n_basis):\n row, col = np.unravel_index(np.argmin(diff_matrix), (n_basis, n_basis))\n diff_matrix[row, :] = np.inf\n diff_matrix[:, col] = np.inf\n matches += [(row, col)]\n return matches\n\n\ndef plot_basis_kernels(learner, support=None, basis_kernels=None, n_points=300,\n show=True):\n \"\"\"Function used to plot basis of kernels\n \n It is used jointly with `tick.hawkes.inference.HawkesBasisKernels` learner class.\n\n Parameters\n ----------\n learner : `HawkesBasisKernels`\n The given learner which basis kernels are plotted\n\n support : `float`, default=None\n the size of the support that will be used to plot all the kernels.\n If None or non positive then the maximum kernel supports is used\n\n basis_kernels : `list` of `func`, default=None\n True basis kernels. If not `None`, it will find the closest estimated\n basis kernel and will plot it together.\n This basis kernels will be normalized to fit better with their\n estimations.\n\n n_points : `int`, default=300\n Number of points that will be used in abscissa. More points will lead\n to a more precise graph.\n\n show : `bool`, default=`True`\n if `True`, show the plot. Otherwise an explicit call to the show\n function is necessary. 
Useful when superposing several plots.\n \"\"\"\n if support is None or support <= 0:\n support = learner.kernel_support\n\n fig, ax_list = plt.subplots(1, learner.n_basis, figsize=(8, 4),\n sharey=True)\n\n if basis_kernels is not None:\n if len(basis_kernels) != learner.n_basis:\n raise ValueError('Learner has {} basis kernels, cannot '\n 'compare to {} basis kernels'.format(\n learner.n_basis, len(basis_kernels)))\n\n t_values = learner.kernel_discretization[:-1]\n\n basis_values_list = [\n basis_kernel(t_values) for basis_kernel in basis_kernels\n ]\n normalized_basis_kernels, basis_normalizations = \\\n _normalize_functions(basis_values_list, t_values)\n\n normalized_estimates, estimated_normalizations = \\\n _normalize_functions(learner.basis_kernels, t_values)\n\n kernel_diff = np.array([[\n np.trapz(np.abs(nbf - ne), t_values)\n for nbf in normalized_basis_kernels\n ] for ne in normalized_estimates])\n\n matches = _find_best_match(kernel_diff)\n\n t_values = np.linspace(0, support, n_points)\n for estimated_index, basis_index in matches:\n basis_kernel = basis_kernels[basis_index]\n estimated = learner.basis_kernels[estimated_index]\n\n piecewise_y = np.repeat(estimated, 2)\n piecewise_t = np.hstack(\n (learner.kernel_discretization[0],\n np.repeat(learner.kernel_discretization[1:-1], 2),\n learner.kernel_discretization[-1]))\n\n ax_list[basis_index].step(piecewise_t, piecewise_y,\n label=\"estimated %i\" % estimated_index)\n rescaled_basis = basis_kernel(t_values) * \\\n basis_normalizations[basis_index] / \\\n estimated_normalizations[estimated_index]\n ax_list[basis_index].plot(t_values, rescaled_basis,\n label=\"true basis %i\" % basis_index)\n\n legend = ax_list[basis_index].legend()\n for label in legend.get_texts():\n label.set_fontsize(12)\n\n else:\n for estimated_index in range(learner.n_basis):\n estimated = learner.basis_kernels[estimated_index]\n\n piecewise_y = np.repeat(estimated, 2)\n piecewise_t = np.hstack(\n (learner.kernel_discretization[0],\n np.repeat(learner.kernel_discretization[1:-1], 2),\n learner.kernel_discretization[-1]))\n\n ax_list[estimated_index].plot(\n piecewise_t, piecewise_y,\n label=\"estimated %i\" % estimated_index)\n legend = ax_list[estimated_index].legend()\n for label in legend.get_texts():\n label.set_fontsize(12)\n\n if show:\n plt.show()\n\n return fig\n", "id": "1230149", "language": "Python", "matching_score": 2.9919707775115967, "max_stars_count": 0, "path": "tick/plot/plot_hawkes.py" }, { "content": "\"\"\"\n=========================================\nHawkes process with non constant baseline\n=========================================\n\nThis example simulates and then estimates Hawkes kernels with varying\nbaselines. 
In this example the intensity is written the following way\n\n:math:`\\\\lambda_i(t) = \\\\mu_i(t) + \\\\sum_{j=1}^D \\\\int \\\\phi_{ij}(t - s)dN_j(s)`\n\nKernels are sum of exponentials and varying baseline :math:`\\\\mu_i(t)`\npiecewise constant.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom tick.plot import plot_hawkes_baseline_and_kernels\nfrom tick.hawkes import (\n SimuHawkesSumExpKernels, SimuHawkesMulti, HawkesSumExpKern\n)\n\nperiod_length = 300\nbaselines = [[0.3, 0.5, 0.6, 0.4, 0.2, 0],\n [0.8, 0.5, 0.2, 0.3, 0.3, 0.4]]\nn_baselines = len(baselines[0])\ndecays = [.5, 2., 6.]\nadjacency = [[[0, .1, .4], [.2, 0., .2]],\n [[0, 0, 0], [.6, .3, 0]]]\n\n# simulation\nhawkes = SimuHawkesSumExpKernels(baseline=baselines,\n period_length=period_length,\n decays=decays, adjacency=adjacency,\n seed=2093, verbose=False)\nhawkes.end_time = 1000\nhawkes.adjust_spectral_radius(0.5)\n\nmulti = SimuHawkesMulti(hawkes, n_simulations=4)\nmulti.simulate()\n\n# estimation\nlearner = HawkesSumExpKern(decays=decays,\n n_baselines=n_baselines,\n period_length=period_length)\n\nlearner.fit(multi.timestamps)\n\n# plot\nfig = plot_hawkes_baseline_and_kernels(learner, hawkes=hawkes, show=False)\nfig.tight_layout()\n\nplt.show()\n", "id": "5794732", "language": "Python", "matching_score": 3.1860642433166504, "max_stars_count": 0, "path": "examples/plot_hawkes_varying_baseline.py" }, { "content": "\"\"\"\n=======================================\nFit Hawkes kernel norms using cumulants\n=======================================\n\nThis non parametric Hawkes cumulants matching\n(`tick.hawkes.HawkesCumulantMatching`) algorithm estimates directly\nkernels norms without making any assumption on kernel shapes.\n\nIt has been originally described in this paper:\n\n<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.\n(2017, July). Uncovering causality from multivariate Hawkes integrated\ncumulants.\n`In International Conference on Machine Learning (pp. 1-10)`_.\n\n.. _In International Conference on Machine Learning (pp. 
1-10): http://proceedings.mlr.press/v70/achab17a.html\n\"\"\"\n\nimport numpy as np\n\nfrom tick.hawkes import (HawkesCumulantMatching, SimuHawkesExpKernels,\n SimuHawkesMulti)\nfrom tick.plot import plot_hawkes_kernel_norms\n\nnp.random.seed(7168)\n\nn_nodes = 3\nbaselines = 0.3 * np.ones(n_nodes)\ndecays = 0.5 + np.random.rand(n_nodes, n_nodes)\nadjacency = np.array([\n [1, 1, -0.5],\n [0, 1, 0],\n [0, 0, 2],\n], dtype=float)\n\nadjacency /= 4\n\nend_time = 1e5\nintegration_support = 5\nn_realizations = 5\n\nsimu_hawkes = SimuHawkesExpKernels(\n baseline=baselines, adjacency=adjacency, decays=decays,\n end_time=end_time, verbose=False, seed=7168)\nsimu_hawkes.threshold_negative_intensity(True)\n\nmulti = SimuHawkesMulti(simu_hawkes, n_simulations=n_realizations, n_threads=-1)\nmulti.simulate()\n\nnphc = HawkesCumulantMatching(integration_support, cs_ratio=.15, tol=1e-10,\n step=0.3)\n\nnphc.fit(multi.timestamps)\nplot_hawkes_kernel_norms(nphc)\n", "id": "4954011", "language": "Python", "matching_score": 2.1927146911621094, "max_stars_count": 0, "path": "examples/plot_hawkes_cumulants_matching.py" }, { "content": "# License: BSD 3 clause\n\nimport os\nimport pickle\nimport unittest\n\nimport numpy as np\n\nfrom tick.base.inference import InferenceTest\n\nskip = True\ntry:\n import tensorflow\n skip = False\nexcept ImportError:\n print(\"tensorflow not found, skipping HawkesCumulantMatching test\")\n\nif not skip:\n from tick.hawkes import HawkesCumulantMatching\n\n class Test(InferenceTest):\n def setUp(self):\n self.dim = 2\n np.random.seed(320982)\n\n @staticmethod\n def get_train_data(decay):\n saved_train_data_path = os.path.join(\n os.path.dirname(__file__),\n 'hawkes_cumulant_matching_test-train_data.pkl')\n\n with open(saved_train_data_path, 'rb') as f:\n train_data = pickle.load(f)\n\n baseline = train_data[decay]['baseline']\n adjacency = train_data[decay]['adjacency']\n timestamps = train_data[decay]['timestamps']\n\n return timestamps, baseline, adjacency\n\n def test_hawkes_cumulants(self):\n \"\"\"...Test that estimated cumulants are coorect\n \"\"\"\n timestamps, baseline, adjacency = Test.get_train_data(decay=3.)\n\n expected_L = [2.149652, 2.799746, 4.463995]\n\n expected_C = [[15.685827, 16.980316,\n 30.232248], [16.980316, 23.765304, 36.597161],\n [30.232248, 36.597161, 66.271089]]\n\n expected_K = [[49.179092, -959.246309, -563.529052],\n [-353.706952, -1888.600201, -1839.608349],\n [-208.913969, -2103.952235, -150.937999]]\n\n learner = HawkesCumulantMatching(100.)\n learner._set_data(timestamps)\n self.assertFalse(learner._cumulant_computer.cumulants_ready)\n learner.compute_cumulants()\n self.assertTrue(learner._cumulant_computer.cumulants_ready)\n\n np.testing.assert_array_almost_equal(learner.mean_intensity,\n expected_L)\n np.testing.assert_array_almost_equal(learner.covariance,\n expected_C)\n np.testing.assert_array_almost_equal(learner.skewness, expected_K)\n\n self.assertAlmostEqual(learner.approximate_optimal_cs_ratio(),\n 0.999197628503)\n\n learner._set_data(timestamps)\n self.assertTrue(learner._cumulant_computer.cumulants_ready)\n\n def test_hawkes_cumulants_solve(self):\n \"\"\"...Test that hawkes cumulant reached expected value\n \"\"\"\n timestamps, baseline, adjacency = Test.get_train_data(decay=3.)\n learner = HawkesCumulantMatching(100., cs_ratio=0.9, max_iter=299,\n print_every=30, step=1e-2,\n solver='adam', C=1e-3, tol=1e-5)\n learner.fit(timestamps)\n\n expected_R_pred = [[0.423305, -0.559607,\n -0.307212], [-0.30411, 0.27066, -0.347162],\n 
[0.484648, 0.331057, 1.591584]]\n\n np.testing.assert_array_almost_equal(learner.solution,\n expected_R_pred)\n\n expected_baseline = [36.808583, 32.304106, -15.123118]\n\n np.testing.assert_array_almost_equal(learner.baseline,\n expected_baseline)\n\n expected_adjacency = [[-3.34742247, -6.28527387, -2.21012092],\n [-2.51556256, -5.55341413, -1.91501755],\n [1.84706793, 3.2770494, 1.44302449]]\n\n np.testing.assert_array_almost_equal(learner.adjacency,\n expected_adjacency)\n\n np.testing.assert_array_almost_equal(\n learner.objective(learner.adjacency), 149029.4540306161)\n\n np.testing.assert_array_almost_equal(\n learner.objective(R=learner.solution), 149029.4540306161)\n\n # Ensure learner can be fit again\n timestamps_2, baseline, adjacency = Test.get_train_data(decay=2.)\n learner.step = 1e-1\n learner.penalty = 'l2'\n learner.fit(timestamps_2)\n\n expected_adjacency_2 = [[-0.021966, -0.178811, -0.107636],\n [0.775206, 0.384494,\n 0.613925], [0.800584, 0.581281, 0.60177]]\n\n np.testing.assert_array_almost_equal(learner.adjacency,\n expected_adjacency_2)\n\n learner_2 = HawkesCumulantMatching(\n 100., cs_ratio=0.9, max_iter=299, print_every=30, step=1e-1,\n solver='adam', penalty='l2', C=1e-3, tol=1e-5)\n learner_2.fit(timestamps_2)\n\n np.testing.assert_array_almost_equal(learner.adjacency,\n expected_adjacency_2)\n\n # Check cumulants are not computed again\n learner_2.step = 1e-2\n learner_2.fit(timestamps_2)\n\n def test_hawkes_cumulants_unfit(self):\n \"\"\"...Test that HawkesCumulantMatching raises an error if no data is\n given\n \"\"\"\n learner = HawkesCumulantMatching(100., cs_ratio=0.9, max_iter=299,\n print_every=30, step=1e-2,\n solver='adam')\n\n msg = '^Cannot compute cumulants if no realization has been provided$'\n with self.assertRaisesRegex(RuntimeError, msg):\n learner.compute_cumulants()\n\n def test_hawkes_cumulants_solve_l1(self):\n \"\"\"...Test that hawkes cumulant reached expected value with l1\n penalization\n \"\"\"\n timestamps, baseline, adjacency = Test.get_train_data(decay=3.)\n learner = HawkesCumulantMatching(\n 100., cs_ratio=0.9, max_iter=299, print_every=30, step=1e-2,\n solver='adam', penalty='l1', C=1, tol=1e-5)\n learner.fit(timestamps)\n\n expected_R_pred = [[0.434197, -0.552021,\n -0.308883], [-0.299366, 0.272764, -0.347764],\n [0.48448, 0.331059, 1.591587]]\n\n np.testing.assert_array_almost_equal(learner.solution,\n expected_R_pred)\n\n expected_baseline = [32.788801, 29.324684, -13.275885]\n\n np.testing.assert_array_almost_equal(learner.baseline,\n expected_baseline)\n\n expected_adjacency = [[-2.925945, -5.54899, -1.97438],\n [-2.201373, -5.009153,\n -1.740234], [1.652958, 2.939054, 1.334677]]\n\n np.testing.assert_array_almost_equal(learner.adjacency,\n expected_adjacency)\n\n np.testing.assert_array_almost_equal(\n learner.objective(learner.adjacency), 149061.5590630687)\n\n np.testing.assert_array_almost_equal(\n learner.objective(R=learner.solution), 149061.5590630687)\n\n def test_hawkes_cumulants_solve_l2(self):\n \"\"\"...Test that hawkes cumulant reached expected value with l2\n penalization\n \"\"\"\n timestamps, baseline, adjacency = Test.get_train_data(decay=3.)\n learner = HawkesCumulantMatching(\n 100., cs_ratio=0.9, max_iter=299, print_every=30, step=1e-2,\n solver='adam', penalty='l2', C=0.1, tol=1e-5)\n learner.fit(timestamps)\n\n expected_R_pred = [[0.516135, -0.484529,\n -0.323191], [-0.265853, 0.291741, -0.35285],\n [0.482819, 0.331344, 1.591535]]\n\n np.testing.assert_array_almost_equal(learner.solution,\n 
expected_R_pred)\n\n expected_baseline = [17.066997, 17.79795, -6.07811]\n\n np.testing.assert_array_almost_equal(learner.baseline,\n expected_baseline)\n\n expected_adjacency = [[-1.310854, -2.640152, -1.054596],\n [-1.004887, -2.886297,\n -1.065671], [0.910245, 1.610029, 0.913469]]\n\n np.testing.assert_array_almost_equal(learner.adjacency,\n expected_adjacency)\n\n np.testing.assert_array_almost_equal(\n learner.objective(learner.adjacency), 149232.94041039888)\n\n np.testing.assert_array_almost_equal(\n learner.objective(R=learner.solution), 149232.94041039888)\n\n\nif __name__ == \"__main__\":\n if not skip:\n unittest.main()\n", "id": "6140600", "language": "Python", "matching_score": 1.3167937994003296, "max_stars_count": 0, "path": "tick/hawkes/inference/tests/hawkes_cumulant_matching_test.py" }, { "content": "# License: BSD 3 clause\n\nimport os\n\nimport numpy as np\nimport scipy\n\nfrom tick.array.build.array import (\n tick_float_array_to_file,\n tick_float_array2d_to_file,\n tick_float_sparse2d_to_file,\n tick_double_array_to_file,\n tick_double_array2d_to_file,\n tick_double_sparse2d_to_file,\n tick_float_array_from_file,\n tick_float_array2d_from_file,\n tick_float_sparse2d_from_file,\n tick_double_array_from_file,\n tick_double_array2d_from_file,\n tick_double_sparse2d_from_file,\n)\n\n\ndef serialize_array(array, filepath):\n \"\"\"Save an array on disk on a format that tick C++ modules can read\n\n This method is intended to be used by developpers only, mostly for\n benchmarking in C++ on real datasets imported from Python\n\n Parameters\n ----------\n array : `np.ndarray` or `scipy.sparse.csr_matrix`\n 1d or 2d array\n\n filepath : `str`\n Path where the array will be stored\n\n Returns\n -------\n path : `str`\n Global path of the serialized array\n \"\"\"\n if array.dtype not in [np.float32, np.float64]:\n raise ValueError('Only float32/64 arrays can be serrialized')\n\n if array.dtype == \"float32\":\n if isinstance(array, np.ndarray):\n if len(array.shape) == 1:\n serializer = tick_float_array_to_file\n elif len(array.shape) == 2:\n serializer = tick_float_array2d_to_file\n else:\n raise ValueError('Only 1d and 2d arrays can be serrialized')\n else:\n if len(array.shape) == 2:\n serializer = tick_float_sparse2d_to_file\n else:\n raise ValueError('Only 2d sparse arrays can be serrialized')\n elif array.dtype == \"float64\" or array.dtype == \"double\":\n if isinstance(array, np.ndarray):\n if len(array.shape) == 1:\n serializer = tick_double_array_to_file\n elif len(array.shape) == 2:\n serializer = tick_double_array2d_to_file\n else:\n raise ValueError('Only 1d and 2d arrays can be serrialized')\n else:\n if len(array.shape) == 2:\n serializer = tick_double_sparse2d_to_file\n else:\n raise ValueError('Only 2d sparse arrays can be serrialized')\n else:\n raise ValueError('Unhandled serrialization type')\n\n serializer(filepath, array)\n return os.path.abspath(filepath)\n\n\ndef load_array(filepath, array_type='dense', array_dim=1, dtype=\"float64\"):\n \"\"\"Loaf an array from disk from a format that tick C++ modules can read\n\n This method is intended to be used by developpers only, mostly for\n benchmarking in C++ on real datasets imported from Python\n\n Parameters\n ----------\n filepath : `str`\n Path where the array was stored\n\n array_type : {'dense', 'sparse'}, default='dense'\n Expected type of the array\n\n array_dim : `int`\n Expected dimension of the array\n\n Returns\n -------\n array : `np.ndarray` or `scipy.sparse.csr_matrix`\n 1d or 2d array\n 
\"\"\"\n abspath = os.path.abspath(filepath)\n if not os.path.exists(filepath):\n raise FileNotFoundError('File {} does not exists'.format(abspath))\n\n if dtype == \"float32\":\n if array_type == 'dense':\n if array_dim == 1:\n reader = tick_float_array_from_file\n elif array_dim == 2:\n reader = tick_float_array2d_from_file\n else:\n raise ValueError('Only 1d and 2d arrays can be loaded')\n elif array_type == 'sparse':\n if array_dim == 2:\n reader = tick_float_sparse2d_from_file\n else:\n raise ValueError('Only 2d sparse arrays can be loaded')\n else:\n raise ValueError('Cannot load this class of array')\n elif dtype == \"float64\" or dtype == \"double\":\n if array_type == 'dense':\n if array_dim == 1:\n reader = tick_double_array_from_file\n elif array_dim == 2:\n reader = tick_double_array2d_from_file\n else:\n raise ValueError('Only 1d and 2d arrays can be loaded')\n elif array_type == 'sparse':\n if array_dim == 2:\n reader = tick_double_sparse2d_from_file\n else:\n raise ValueError('Only 2d sparse arrays can be loaded')\n else:\n raise ValueError('Cannot load this class of array')\n else:\n raise ValueError('Unhandled serrialization type')\n\n return reader(filepath)\n", "id": "16970", "language": "Python", "matching_score": 0.40205883979797363, "max_stars_count": 0, "path": "tick/array/serialize.py" } ]
2.932271
jwalthour
[ { "content": "import argparse\nimport wave\nimport array\nimport struct\nimport math\nimport winsound\nimport io\nimport sys\n\n# Memberwise multiply of two equal-size arrays\ndef mix_signals(a,b):\n return [a_s * b_s for a_s,b_s in zip(a, b)]\n\ndef gen_tone(sample_indices, freq, amplitude, samples_per_sec):\n return [int(amplitude * math.sin((2 * math.pi)*i*(float(freq)/samples_per_sec))) for i in sample_indices]\n\n# Makes beep sample sets as wav files\nclass Beep:\n SAMPLES_PER_SEC = 44100.0\n NUM_CHANNELS = 1\n SAMPLE_BYTE_WIDTH = 2\n RAMP_FRACTION = 0.05 # percent each end used on ramping\n def __init__(self, f, t, v, t_pad=0):\n buffer = io.BytesIO('')\n note_output = wave.open(buffer, 'wb')\n note_output.setparams((self.NUM_CHANNELS, self.SAMPLE_BYTE_WIDTH, int(self.SAMPLES_PER_SEC), 0, 'NONE', 'not compressed'))\n \n i_range = range(0, int(self.SAMPLES_PER_SEC * (t + t_pad)))\n ramp_samples = int(self.RAMP_FRACTION * t * self.SAMPLES_PER_SEC)\n plateau_samples = int(t * self.SAMPLES_PER_SEC) - 2 * ramp_samples;\n pad_samples = int(t_pad * self.SAMPLES_PER_SEC)\n amplitude = [(x / float(ramp_samples) ) for x in range(0, ramp_samples)] \\\n + [1 for x in range(0, plateau_samples)] \\\n + [(1.0 - (x / float(ramp_samples))) for x in range(0, ramp_samples)] \\\n + [0 for x in range(0, pad_samples)]\n tone = gen_tone(i_range, f, v, self.SAMPLES_PER_SEC)\n note = mix_signals(amplitude, tone)\n packed_values = array.array('h', [int(s) for s in note])\n note_output.writeframes(packed_values.tostring())\n self._buffer = buffer.getvalue()\n def play(self):\n winsound.PlaySound(self._buffer, winsound.SND_MEMORY)\n\n \nclass MorsePlayer:\n _tone_f = 880\n _tone_vol = 30000\n _wpm = 5 # The number of \"PARIS\" transmissions per minute\n _DITS_PER_WORD = 50 # The representative word (\"PARIS\") takes 50 dots\n \n _MORSE_KEY = {\n ' ':[' '],\n '0':['-', '-', '-', '-', '-'],\n '1':['.', '-', '-', '-', '-'],\n '2':['.', '.', '-', '-', '-'],\n '3':['.', '.', '.', '-', '-'],\n '4':['.', '.', '.', '.', '-'],\n '5':['.', '.', '.', '.', '.'],\n '6':['-', '.', '.', '.', '.'],\n '7':['-', '-', '.', '.', '.'],\n '8':['-', '-', '-', '.', '.'],\n '9':['-', '-', '-', '-', '.'],\n '.':['.', '-', '.', '-', '.', '-'],\n ',':['-', '-', '.', '.', '-', '-'],\n ':':['-', '-', '-', '.', '.', '.'],\n '-':['-', '.', '.', '.', '.', '-'],\n \"'\":['.', '-', '-', '-', '-', '.'],\n '/':['-', '.', '.', '-', '.'],\n '?':['.', '.', '-', '-', '.', '.'],\n\t\t\n 'A':['.', '-'],\n 'B':['-', '.', '.', '.'],\n 'C':['-', '.', '-', '.'],\n 'D':['-', '.', '.'],\n 'E':['.'],\n 'F':['.', '.', '-', '.'],\n 'G':['-', '-', '.'],\n 'H':['.', '.', '.', '.'],\n 'I':['.', '.'],\n 'J':['.', '-', '-', '-'],\n 'K':['-', '.', '-'],\n 'L':['.', '-', '.', '.'],\n 'M':['-', '-'],\n 'N':['-', '.'],\n 'O':['-', '-', '-'],\n 'P':['.', '-', '-', '.'],\n 'Q':['-', '-', '.', '-'],\n 'R':['.', '-', '.'],\n 'S':['.', '.', '.'],\n 'T':['-'],\n 'U':['.', '.', '-'],\n 'V':['.', '.', '.', '-'],\n 'W':['.', '-', '-'],\n 'X':['-', '.', '.', '-'],\n 'Y':['-', '.', '-', '-'],\n 'Z':['-', '-', '.', '.']\n }\n \n def setFreq(self, freq):\n if(freq > 0):\n self._tone_f = freq\n self._clearTones()\n \n def setVol(self, vol):\n if(vol > 0 and vol <= 100):\n self._tone_vol = 32768.0 * (vol / 100.0)\n self._clearTones()\n \n def setWpm(self, wpm):\n if(wpm > 0):\n self._wpm = wpm\n self._clearTones()\n\n def setIntercharGap(self, gap):\n if(gap > 0):\n self._interchar_gap_time_dits = gap\n self._clearTones()\n\n def playMorse(self, char_string):\n for c in 
char_string:\n self.playChar(c)\n \n def playChar(self, char):\n self._genTonesIfNeeded();\n if char.upper() in self._MORSE_KEY:\n sys.stdout.write(char + ' ')\n seq = self._MORSE_KEY[char.upper()]\n for symbol in seq:\n sys.stdout.write(symbol)\n if symbol == '.':\n self._dit.play()\n elif symbol == '-':\n self._dah.play()\n elif symbol == ' ':\n self._word_gap.play()\n sys.stdout.write(\"\\n\")\n self._interchar_gap.play()\n \n def _dotDurationS(self): # seconds per dot\n return (60.0 / self._wpm) / self._DITS_PER_WORD \n \n def _clearTones(self): # delete locally stored tones\n self._dit = None\n self._dah = None\n self._interchar_gap = None\n self._word_gap = None\n \n def _genTonesIfNeeded(self):\n if self._dit == None or self._dah == None or self._interchar_gap == None or self._word_gap == None:\n self._genTones() \n \n def _genTones(self): # regenerate locally stored tones\n dit_t = self._dotDurationS()\n dah_t = 3 * dit_t\n self._dit = Beep(self._tone_f, dit_t, self._tone_vol, dit_t)\n self._dah = Beep(self._tone_f, dit_t * 3, self._tone_vol,dit_t)\n # Silence between characters\n self._interchar_gap = Beep(self._tone_f, 0, self._tone_vol,\n dit_t * self._interchar_gap_time_dits)\n # Played as a silent dit, but remember there are two interchar gaps,\n # so this winds up being 7 dits of silence between words.\n self._word_gap = Beep(self._tone_f, 0, self._tone_vol,dit_t)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--wpm\", default=5, type=float, help=\"words per minute at which to play morse code (default 5)\")\n parser.add_argument(\"-f\", \"--freq\", default=880, type=float, help=\"frequency of the tone to play, in Hz (default 880)\")\n parser.add_argument(\"-v\", \"--vol\", default=50, type=float, help=\"volume of the tone to play, in percent (default 50)\")\n parser.add_argument(\"-g\", \"--gap\", default=3, type=float, help=\"time gap between characters, in dits (default 3)\")\n args = parser.parse_args()\n\n mp = MorsePlayer()\n mp.setFreq(args.freq)\n mp.setVol(args.vol)\n mp.setWpm(args.wpm)\n mp.setIntercharGap(args.gap)\n \n in_data = sys.stdin.read()\n while(in_data):\n mp.playMorse(in_data)\n in_data = sys.stdin.read()\n", "id": "11194487", "language": "Python", "matching_score": 5.18081521987915, "max_stars_count": 1, "path": "morse_player.py" }, { "content": "import wave\nimport random\nimport array\nimport struct\nimport math\n\nNOTE_LEN_SEC = 5.0\nSAMPLES_PER_SEC = 44100.0\nNUM_CHANNELS = 1\nSAMPLE_BYTE_WIDTH = 2\n\n# functions that generate ready-to-store toness\ndef gen_tone(sample_indices, freq, amplitude):\n return [int(amplitude * math.sin((2 * math.pi)*i*(float(freq)/SAMPLES_PER_SEC))) for i in i_range]\n\ndef gen_ramped_tone(sample_indices, freq, amplitude):\n return [int(amplitude * (1.0 - float(i) / len(sample_indices)) * math.sin((2 * math.pi)*i*(float(freq)/SAMPLES_PER_SEC))) for i in i_range]\n\ndef gen_exp_tone(sample_indices, freq, amplitude):\n return [int(amplitude * ((1.0 - float(i) / len(sample_indices))**3) * math.sin((2 * math.pi)*i*(float(freq)/SAMPLES_PER_SEC))) for i in i_range]\n\n# functions that generate arrays of floats, which must be truncated prior to storage\ndef gen_float_tone(sample_indices, freq, phase_rad=0):\n return [math.sin(phase_rad + (2 * math.pi)*i*(float(freq)/SAMPLES_PER_SEC)) for i in i_range]\n\n# Memberwise multiply of two equal-size arrays\ndef mix_signals(a,b):\n return [a_s * b_s for a_s,b_s in zip(a, b)]\n\n# Multiply an array by a scalar\ndef 
multiply_signal(scalar, signal):\n return [scalar * s for s in signal]\n\n# Multiply and shift an array by scalars\ndef shift_multiply_signal(coeff, offset, signal):\n return [coeff * s + offset for s in signal]\n\nnote_output = wave.open('note.wav', 'w')\n\nnote_output.setparams((NUM_CHANNELS, SAMPLE_BYTE_WIDTH, int(SAMPLES_PER_SEC), 0, 'NONE', 'not compressed'))\n\n# for i in range(0, SAMPLE_LEN):\n # # value = random.randint(-32767, 32767)\n # value = 1000 * math.sin((2 * math.pi)*i*(440.0/44100.0))\n # packed_value = struct.pack('h', value)\n # note_output.writeframes(packed_value)\n # note_output.writeframes(packed_value)\n\ndef db_to_ratio(db):\n return 10**(db / 10.0)\n\ndef ratio_to_db(ratio):\n return 10 * math.log10(ratio)\n\n# Frequency profiles\ni_range = range(0, int(SAMPLES_PER_SEC * NOTE_LEN_SEC))\nharmonics = range(1,12)\n\nviolin_hs_db = [75,67,42,53,64,47,60,47,41,37,41,39] # from the Da Vinci stradivarius, according to http://www.nagyvaryviolins.com/\nviolin_hs_ratio = [db_to_ratio(h) for h in violin_hs_db]\nviolin_hs_ratio_normalized = multiply_signal(1.0 / max(violin_hs_ratio), violin_hs_ratio)\n\nguitar_hs_ratio_normalized = [0.21, 1, 0.4, 0.19, 0.09, 0.08, 0.15, 0.08, 0.02, 0.09, 0.02, 0.08]# copied from http://img37.imageshack.us/img37/5249/e2fftanalysis.png\n\n# Amplitude profile\nswell_time = 2\nburst_time = 0.5\nfade_time = NOTE_LEN_SEC - swell_time\nswell_samples = int(swell_time * SAMPLES_PER_SEC)\nburst_samples = int(burst_time * SAMPLES_PER_SEC)\nfade_samples = int(fade_time * SAMPLES_PER_SEC)\nswell_ramp = [(x / float(swell_samples) )**3 for x in range(0, swell_samples)] \\\n + [1 for x in range(0, burst_samples)] \\\n + [(1 - (x / float(fade_samples)))**3 for x in range(0, fade_samples)]\n\n\nbase_f = 196 # G3\ntremolo_f = 8\na_violin = 3000.0\na_guitar = 3000.0\n\n# Generate instruments\nviolin_tones = [gen_tone(i_range, base_f * h, a_violin * hs) for h,hs in zip(harmonics,violin_hs_ratio_normalized)]\nguitar_tones = [gen_tone(i_range, base_f * h, a_guitar * hs) for h,hs in zip(harmonics,guitar_hs_ratio_normalized)]\n\nviolin_sound = [sum(column) for column in zip(*violin_tones)]\nguitar_sound = [sum(column) for column in zip(*guitar_tones)]\nnote = mix_signals(swell_ramp, violin_sound)\n\n# Save to file\npacked_values = array.array('h', [int(s) for s in note])\nnote_output.writeframes(packed_values.tostring())\n\n\nnote_output.close()", "id": "4379061", "language": "Python", "matching_score": 0.34095498919487, "max_stars_count": 0, "path": "gen_notes.py" }, { "content": "\"\"\"\nCheck the moisture sensors exactly once. Log to file. If sensor 0 reports moisture below\nthreshold, dispense water for a fixed amount of time.\n\"\"\"\n\nfrom __future__ import print_function\nprint(\"Importing... \", end='')\nimport datetime \nimport sys\nimport time\nimport os.path\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15 # sudo pip install adafruit-ads1x15\nprint(\"done.\")\n\nprint(\"GPIO setup... \", end='')\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nADC_PIN_NUM = 7\nSENSE_PIN_0 = 13\nSENSE_PIN_1 = 29\nPIN_ENA = 22\nPIN_IN1 = 16\nPIN_IN2 = 18\nGPIO.setup([PIN_ENA,\n\t\t\tPIN_IN1,\n\t\t\tPIN_IN2], GPIO.OUT)\nGPIO.output([PIN_ENA,\n\t\t\tPIN_IN1,\n\t\t\tPIN_IN2], GPIO.LOW)\n\nGPIO.setup([ADC_PIN_NUM,SENSE_PIN_0,SENSE_PIN_1], GPIO.OUT)\nprint(\"done.\")\n\nLOGFILE='/home/gardener/watering_log.csv'\nprint(\"Opening log file... 
\", end='')\nexisted_already = os.path.isfile(LOGFILE)\nlogfile = open(LOGFILE, 'a')\nif not existed_already:\n\t# This is our first time opening the file; print CSV header\n\t# logfile.write('time,\"ADC0\",\"ADC1\",\"moisture 0\",\"moisture 1\",\"watered?\"\\r\\n')\n\tlogfile.write('time,\"ADC0\",\"ADC1\"\\r\\n')\nprint(\"done.\")\n\n# Power on sensor and ADC\nprint(\"Connecting to ADC... \", end='')\nGPIO.output([ADC_PIN_NUM],GPIO.HIGH)\nadc = Adafruit_ADS1x15.ADS1015(0x48)\nprint(\"done.\")\n\n\n# Value, in ADC ticks, corresponding to saturated soil\n# ADC_WETTEST_READING=1340.0\nADC_WETTEST_READING=1\n# Moisture fraction at which water is dispensed\nWATERING_THRESHOLD_FRAC = 0.75\n# The number of seconds for which the pump should be run in one watering.\nWATERING_PUMP_DURATION_S = 1.5\n\n# Number of times to sample the sensor\nNUM_SAMPLES = 500\nSAMPLE_INTERVAL_S = 0.1\n\ndef dispense_water():\n\ttry:\n\t\t# Set motor direction\n\t\tGPIO.output(PIN_IN1, GPIO.HIGH)\n\t\tGPIO.output(PIN_IN2, GPIO.LOW)\n\t\t# Enable motor\n\t\tGPIO.output(PIN_ENA, GPIO.HIGH)\n\t\t# Let the motor run\n\t\ttime.sleep(WATERING_PUMP_DURATION_S)\n\tfinally:\n\t\t# Disable motor\n\t\tGPIO.output([PIN_ENA,\n\t\t\t\t\tPIN_IN1,\n\t\t\t\t\tPIN_IN2], GPIO.LOW)\n\t\t\n\ntry:\n\tprint(\"Taking readings... \", end='')\n\tsys.stdout.flush()\n\twater = False\n\taccum_moisture_ticks = [0.0,0.0];\n\t# accum_moisture_frac = 0;\n\tfor i in range(0, NUM_SAMPLES):\n\t\tGPIO.output([SENSE_PIN_0,SENSE_PIN_1],GPIO.HIGH)\n\t\tmoistures_ticks = [adc.read_adc(chan) for chan in [0,1]]\n\t\tGPIO.output([SENSE_PIN_0,SENSE_PIN_1],GPIO.LOW)\n\t\t# not pythonic, i know. we only have two sensors, give me a break.\n\t\taccum_moisture_ticks[0] += moistures_ticks[0]\n\t\taccum_moisture_ticks[1] += moistures_ticks[1]\n\t\t# moistures_frac = [(m / ADC_WETTEST_READING) for m in moistures_ticks]\n\t\t# water = moistures_frac[0] < WATERING_THRESHOLD_FRAC\n\t\t# log_line += str(water) + ','\n\t\t# print(log_line)\n\t\ttime.sleep(SAMPLE_INTERVAL_S);\n\t# Log and flush prior to running the motor, in case something happens during that.\n\tmoistures_ticks_avg = [accum / NUM_SAMPLES for accum in accum_moisture_ticks]\n\tlog_line = datetime.datetime.now().strftime('\"%Y-%m-%d %H:%M:%S.%f\",')\n\tlog_line += str(moistures_ticks_avg[0]) + ',' + str(moistures_ticks_avg[1]) + ','\n\tlog_line += '\\r\\n'\n\tlogfile.write(log_line)\n\tlogfile.close()\n\tlogfile = None\n\tprint(\"done.\")\n\n\tif water:\n\t\tdispense_water()\nfinally:\n\t# print(\"Powering down sensor and ADC.\")\n\t# GPIO.output([ADC_PIN_NUM,SENSE_PIN_0,SENSE_PIN_1],GPIO.LOW)\n\tprint(\"Powering down sensors.\")\n\tGPIO.output([SENSE_PIN_0,SENSE_PIN_1],GPIO.LOW)\n\tif logfile:\n\t\tlogfile.close()\n\n\t# Explicitly do not cleanup. 
This leaves the pins low, and thus the devices unpowered.\n\t#GPIO.cleanup()\n", "id": "9232719", "language": "Python", "matching_score": 6.344841957092285, "max_stars_count": 0, "path": "check_and_water_once.py" }, { "content": "\"\"\"\nCheck the moisture sensors exactly once and log to a file.\n\nThe idea here is to put a call to this in crontab, so it gets called periodically.\n\"\"\"\n\nprint(\"importing\")\nimport datetime \nimport os.path\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15 # sudo pip install adafruit-ads1x15\n\nprint(\"GPIO setup\")\nGPIO.setmode(GPIO.BOARD)\nADC_PIN_NUM = 7\nSENSE_PIN_0 = 13\nSENSE_PIN_1 = 29\nGPIO.setup([ADC_PIN_NUM,SENSE_PIN_0,SENSE_PIN_1], GPIO.OUT)\n\nLOGFILE='/home/gardener/moisture_log.csv'\nprint(\"Opening log file\")\nexisted_already = os.path.isfile(LOGFILE)\nlogfile = open(LOGFILE, 'a')\nif not existed_already:\n\t# This is our first time opening the file; print CSV header\n\tlogfile.write(\"time,3\\r\\n\")\n\n# Power on sensor and ADC\nprint(\"Connecting to ADC\")\nGPIO.output([ADC_PIN_NUM,SENSE_PIN_0,SENSE_PIN_1],GPIO.HIGH)\nadc = Adafruit_ADS1x15.ADS1015(0x48)\n\n# Value, in ADC ticks, corresponding to saturated soil\nADC_WETTEST_READING=1340\n\ntry:\n\tline = datetime.datetime.now().strftime('\"%Y-%m-%d %H:%M:%S\",')\n\tline += str(adc.read_adc(3))\n\tline += '\\r\\n'\n\tlogfile.write(line)\nfinally:\n\tprint(\"Powering down sensor and ADC.\")\n\tGPIO.output([ADC_PIN_NUM,SENSE_PIN_0,SENSE_PIN_1],GPIO.LOW)\n\tlogfile.close()\n\n\t# Explicitly do not cleanup. This leaves the pins low, and thus the devices unpowered.\n\t#GPIO.cleanup()\n", "id": "11682622", "language": "Python", "matching_score": 2.125962734222412, "max_stars_count": 0, "path": "check_moisture_once.py" }, { "content": "import time, math\nimport RPi.GPIO as GPIO\n\nPIN=4\n\ndef set(value):\n\tif value:\n\t\tGPIO.output(PIN, GPIO.HIGH)\n\telse:\n\t\tGPIO.output(PIN, GPIO.LOW)\n\n\t\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(PIN, GPIO.OUT)\n", "id": "4056037", "language": "Python", "matching_score": 2.824528694152832, "max_stars_count": 1, "path": "Pi/Python/gpio_toggler.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\n\nThis application runs an automated roast profile.\nSee farther down for assumptions about pins.\n\n\"\"\"\nimport time, math\nimport RPi.GPIO as GPIO\n\nfrom sensors.max31865 import max31865\n\n# Pin assignments\n\n# High to enable heating element, low to disable.\nPIN_HEATER_ON = 4\n\n# SPI 0 - connection to MAX31865 PTC amplifier\nPIN_THERMISTOR_CS = 8\nPIN_THERMISTOR_MISO = 9\nPIN_THERMISTOR_MOSI = 10\nPIN_THERMISTOR_CLK = 11\n\n# For early demos, use bang-bang control with hardcoded setpoints.\n# SETPOINT_HIGH_C = 220.0\n# SETPOINT_LOW__C = 210.0\nSETPOINT_HIGH_C = 70.0\nSETPOINT_LOW__C = 50.0\n\ndef c_to_f(c_temp):\n\treturn c_temp * 9.0/5.0 + 32.0\n\nif __name__ == \"__main__\":\n\t# Initialize IO\n\tGPIO.setwarnings(False)\n\tGPIO.setmode(GPIO.BCM)\n\tthermistor = max31865(PIN_THERMISTOR_CS ,\n\t\t\t\t\t\t\tPIN_THERMISTOR_MISO,\n\t\t\t\t\t\t\tPIN_THERMISTOR_MOSI,\n\t\t\t\t\t\t\tPIN_THERMISTOR_CLK )\n\tthermistor.setCal(95.104980, 127.539062)\n\tGPIO.setup(PIN_HEATER_ON, GPIO.OUT)\n\tGPIO.output(PIN_HEATER_ON, GPIO.LOW)\n\t\n\tprint(\"Will use bang-bang control loop to seek %f-%f degrees C.\"%(SETPOINT_HIGH_C, SETPOINT_LOW__C))\n\traw_input(\"Turn on fan and press enter.\")\n\t\n\tturnHeaterOn = False\n\t\n\ttry:\n\t\twhile True:\n\t\t\ttempC = thermistor.readTemp()\n\t\t\t\n\t\t\tif tempC > SETPOINT_HIGH_C:\n\t\t\t\tturnHeaterOn = 
False\n\t\t\telif tempC < SETPOINT_LOW__C:\n\t\t\t\tturnHeaterOn = True\n\t\t\telse:\n\t\t\t\tpass # We're between the two setpoints; leave it at whatever it was last iteration\n\t\t\t\n\t\t\tprint(\"Temp is %f degrees C (%f F). Heater: %r\"%(tempC, c_to_f(tempC), turnHeaterOn))\n\t\t\tif turnHeaterOn:\n\t\t\t\tGPIO.output(PIN_HEATER_ON, GPIO.HIGH)\n\t\t\telse:\n\t\t\t\tGPIO.output(PIN_HEATER_ON, GPIO.LOW)\n\tfinally:\n\t\tGPIO.output(PIN_HEATER_ON, GPIO.LOW)\n\t\tGPIO.cleanup()\n\t\t", "id": "448186", "language": "Python", "matching_score": 0.24398177862167358, "max_stars_count": 0, "path": "roast.py" }, { "content": "from numpy import array\nstereo_cal = {'minError': 0.8355805093062012, 'leftProjMat': array([[ 1.36007984e+03, 0.00000000e+00, 5.64734444e+02,\n 0.00000000e+00],\n [ 0.00000000e+00, 1.36007984e+03, -5.05244206e+03,\n 0.00000000e+00],\n [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,\n 0.00000000e+00]]), 'rightProjMat': array([[ 1.36007984e+03, 0.00000000e+00, 5.64734444e+02,\n 0.00000000e+00],\n [ 0.00000000e+00, 1.36007984e+03, -5.05244206e+03,\n 7.93533656e+06],\n [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,\n 0.00000000e+00]])}\n", "id": "4734172", "language": "Python", "matching_score": 4.145265102386475, "max_stars_count": 0, "path": "src/stereo_cal.py" }, { "content": "from numpy import array\nstereo_cal = {'minError': 7.020103378301475, 'leftProjMat': array([[ 1.07157998e+03, 0.00000000e+00, -2.38154042e+02,\n 0.00000000e+00],\n [ 0.00000000e+00, 1.07157998e+03, 2.75471520e+02,\n 0.00000000e+00],\n [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,\n 0.00000000e+00]]), 'rightProjMat': array([[ 1.07157998e+03, 0.00000000e+00, -2.38154042e+02,\n -1.03190287e+06],\n [ 0.00000000e+00, 1.07157998e+03, 2.75471520e+02,\n 0.00000000e+00],\n [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00,\n 0.00000000e+00]])}\n", "id": "11723374", "language": "Python", "matching_score": 0.7080602645874023, "max_stars_count": 0, "path": "src/stereo_cal.py" }, { "content": "import numpy as np\nimport cv2\nimport glob\nimport argparse\n\n\nclass StereoCalibration(object):\n def __init__(self, filepath):\n # termination criteria\n self.criteria = (cv2.TERM_CRITERIA_EPS +\n cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n self.criteria_cal = (cv2.TERM_CRITERIA_EPS +\n cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-5)\n\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n self.objp = np.zeros((9*6, 3), np.float32)\n self.objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n print(\"Object points: \" + repr(self.objp)) # Longer in the first dimension: array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.], ... 
], dtype=float32)\n\n # Arrays to store object points and image points from all the images.\n self.objpoints = [] # 3d point in real world space\n self.imgpoints_l = [] # 2d points in image plane.\n self.imgpoints_r = [] # 2d points in image plane.\n\n self.cal_path = filepath\n self.read_images(self.cal_path)\n\n def read_images(self, cal_path):\n images_right = glob.glob(cal_path + 'right*.JPG')\n images_left = glob.glob(cal_path + 'left*.JPG')\n images_left.sort()\n images_right.sort()\n\n for i, fname in enumerate(images_right):\n img_l = cv2.imread(images_left[i])\n img_r = cv2.imread(images_right[i])\n\n gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)\n gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)\n\n # Find the chess board corners\n ret_l, corners_l = cv2.findChessboardCorners(gray_l, (9, 6), None)\n ret_r, corners_r = cv2.findChessboardCorners(gray_r, (9, 6), None)\n\n # If found, add object points, image points (after refining them)\n self.objpoints.append(self.objp)\n\n if ret_l is True:\n rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11),\n (-1, -1), self.criteria)\n print(\"Left image points in frame %d: %s\" %(i,repr(corners_l))) # array([[[244.40527 , 94.136856]], [[274.3947 , 92.21057 ]], [[305.50098 , 90.3172 ]], [[338.3092 , 88.79298 ]], [[371.72195 , 87.874794]],\n\n self.imgpoints_l.append(corners_l)\n\n # Draw and display the corners\n ret_l = cv2.drawChessboardCorners(img_l, (9, 6),\n corners_l, ret_l)\n cv2.imshow(images_left[i], img_l)\n cv2.waitKey(500)\n\n if ret_r is True:\n rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11),\n (-1, -1), self.criteria)\n self.imgpoints_r.append(corners_r)\n\n # Draw and display the corners\n ret_r = cv2.drawChessboardCorners(img_r, (9, 6),\n corners_r, ret_r)\n cv2.imshow(images_right[i], img_r)\n cv2.waitKey(500)\n img_shape = gray_l.shape[::-1]\n\n print(\"\")\n print(\"----------------\")\n print(\"\")\n \n print(\"Before calibrateCamera: \")\n print(\"img_shape: \" + repr(img_shape)) # (640, 480)\n print(\"self.objpoints: \" + repr(self.objpoints)) # A list: [array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.], [3., 0., 0.], ...\n print(\"self.imgpoints_l: \" + repr(self.imgpoints_l)) # Also a list of arrays\n rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(\n self.objpoints, self.imgpoints_l, img_shape, None, None)\n rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(\n self.objpoints, self.imgpoints_r, img_shape, None, None)\n # These appear unchanged\n print(\"After calibrateCamera: \")\n print(\"img_shape: \" + repr(img_shape))\n print(\"self.objpoints: \" + repr(self.objpoints))\n print(\"self.imgpoints_l: \" + repr(self.imgpoints_l))\n\n self.camera_model = self.stereo_calibrate(img_shape)\n\n def stereo_calibrate(self, dims):\n flags = 0\n flags |= cv2.CALIB_FIX_INTRINSIC\n # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT\n flags |= cv2.CALIB_USE_INTRINSIC_GUESS\n flags |= cv2.CALIB_FIX_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_ASPECT_RATIO\n flags |= cv2.CALIB_ZERO_TANGENT_DIST\n # flags |= cv2.CALIB_RATIONAL_MODEL\n # flags |= cv2.CALIB_SAME_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_K3\n # flags |= cv2.CALIB_FIX_K4\n # flags |= cv2.CALIB_FIX_K5\n\n stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +\n cv2.TERM_CRITERIA_EPS, 100, 1e-5)\n ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(\n self.objpoints, self.imgpoints_l,\n self.imgpoints_r, self.M1, self.d1, self.M2,\n self.d2, dims,\n criteria=stereocalib_criteria, flags=flags)\n\n print('Intrinsic_mtx_1', M1)\n print('dist_1', d1)\n 
print('Intrinsic_mtx_2', M2)\n print('dist_2', d2)\n print('R', R)\n print('T', T)\n print('E', E)\n print('F', F)\n (leftRectXform, rightRectXform, leftProjMat, rightProjMat, Q, leftRoi, rightRoi) = cv2.stereoRectify(M1, d1, M2, d2, dims, R, T)\n print(\"leftRectXform : \", leftRectXform )\n print(\"rightRectXform: \", rightRectXform)\n print(\"leftProjMat : \", leftProjMat )\n print(\"rightProjMat : \", rightProjMat )\n print(\"Q: \", Q)\n print(\"leftRoi: \", leftRoi)\n print(\"rightRoi: \", rightRoi)\n\n # for i in range(len(self.r1)):\n # print(\"--- pose[\", i+1, \"] ---\")\n # self.ext1, _ = cv2.Rodrigues(self.r1[i])\n # self.ext2, _ = cv2.Rodrigues(self.r2[i])\n # print('Ext1', self.ext1)\n # print('Ext2', self.ext2)\n\n print('')\n\n camera_model = dict([('M1', M1), ('M2', M2), ('dist1', d1),\n ('dist2', d2), ('rvecs1', self.r1),\n ('rvecs2', self.r2), ('R', R), ('T', T),\n ('E', E), ('F', F)])\n\n cv2.destroyAllWindows()\n return camera_model\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('filepath', help='String Filepath')\n args = parser.parse_args()\n cal_data = StereoCalibration(args.filepath)\n", "id": "11258257", "language": "Python", "matching_score": 1.463797688484192, "max_stars_count": 0, "path": "src/camera_calibrate.py" }, { "content": "import cv2\n\ndef readVideoStream(url):\n cap = cv2.VideoCapture(url)\n\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"No frame.\")\n return\n cv2.imshow('Video', frame)\n \n if cv2.waitKey(1) == 27:\n return\n\nif __name__ == '__main__':\n # Make a file that defines these two variables\n from auth import USERNAME,PASSWORD\n # Left camera in my setup. Replace with yours.\n urlPattern = 'http://%s:%s@192.168.0.253/mjpg/video.mjpg'\n url = urlPattern%(USERNAME,PASSWORD)\n readVideoStream(url)\n\n ", "id": "4986433", "language": "Python", "matching_score": 1.467435359954834, "max_stars_count": 0, "path": "src/demoReadStream.py" }, { "content": "\"\"\"\nUser-interactive stereo calibration capture and computation application\n\"\"\"\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.debug('Importing')\nfrom datetime import datetime\nimport sys\nimport os\nimport cv2\nimport argparse\nimport numpy as np\nfrom stereo_calibrator import StereoCalibrator\nfrom cal_target_defs import calibrationTargets\nlogger.debug('Done')\n\ndef readStereoData(leftUrl, rightUrl, savePath, leftFormat = 'left_%d.png', rightFormat = 'right_%d.png'):\n \"\"\"\n User-interactive function, presents a window for esc/s/c\n Returns a list of pairs of paths like: [('savePath/left0.png','savePath/right0.png'), ...]\n that lists off all the files saved, or None if the user asked to exit\n leftUrl, rightUrl : string, URL to each camera stream, including authentication, compatible with cv2.VideoCapture(url)\n savePath : directory in which to save data files\n leftFormat, rightFormat : strings, formats with exactly one %d, which will be populated with an index starting at 0\n \"\"\"\n leftCap = cv2.VideoCapture(leftUrl)\n rightCap = cv2.VideoCapture(rightUrl)\n \n paths = []\n imgNum = 0\n while True:\n leftRet, leftFrame = leftCap.read()\n # leftTimestamp = leftCap.get(cv2.CAP_PROP_POS_MSEC)\n rightRet, rightFrame = rightCap.read()\n # rightTimestamp = leftCap.get(cv2.CAP_PROP_POS_MSEC)\n # Bail out if either capture failed to deliver a frame\n if not (leftRet and rightRet):\n print(\"No frame.\")\n return paths\n \n # logger.info(\"Frame timestamps: %f, %f\"%(leftTimestamp,rightTimestamp))\n \n frame = np.hstack((leftFrame, rightFrame))\n cv2.imshow('Press escape 
to exit, s to save, c to calibrate based on saved', frame)\n \n key = cv2.waitKey(1)\n if key == 27:\n return None\n elif key == ord('c'):\n return paths\n elif key == ord('s'):\n leftFn = os.path.join(savePath, leftFormat%imgNum)\n rightFn = os.path.join(savePath, rightFormat%imgNum)\n try:\n os.makedirs(savePath, exist_ok=True)\n cv2.imwrite(leftFn, leftFrame)\n cv2.imwrite(rightFn, rightFrame)\n paths += [(leftFn, rightFn)]\n logger.info(\"Saved pairs at %s,%s\"%(leftFn, rightFn))\n except Exception as e:\n logger.error(\"Failed to save frames.\", exc_info=True)\n imgNum += 1\n \n # else:\n # logger.debug('Got key: %d'%key)\n \n \nif __name__ == '__main__':\n logging.basicConfig(level=logging.WARNING)\n logger.setLevel(logging.DEBUG)\n # This is super verbose\n logging.getLogger('stereo_calibrator').setLevel(logging.DEBUG)\n \n parser = argparse.ArgumentParser(description='Capture and/or process stereo calibration data.')\n parser.add_argument(\"-c\", \"--capture\", help=\"Perform user-interactive image capture prior to processing\", action=\"store_true\", default=False)\n parser.add_argument(\"-p\", \"--process\", help=\"Process images to compute stereo calibration\", action=\"store_true\", default=False)\n parser.add_argument(\"-l\", \"--leftUrl\", metavar='URL', help=\"Left camera URL pattern, first %%s is replaced with username, second with password\", default='http://%s:%s@192.168.0.253/mjpg/video.mjpg')\n parser.add_argument(\"-r\", \"--rightUrl\", metavar='URL', help=\"Right camera URL pattern, first %%s is replaced with username, second with password\", default='http://%s:%s@192.168.0.252/mjpg/video.mjpg')\n parser.add_argument(\"-f\", \"--folderNameFormat\", metavar='Folder', help=\"Folder name full of images. Will run strftime on this string, so you can use strftime escape characters.\", default='stereo_cal_data_%Y-%d-%m_%H-%M-%S')\n parser.add_argument(\"-lf\", \"--leftFilenameFormat\", metavar='Filename', help=\"Pattern for left-camera filenames within the specified folder. Must have one %%d in it, which will receive an integer counting from 0.\", default='left_%d.png')\n parser.add_argument(\"-rf\", \"--rightFilenameFormat\", metavar='Filename', help=\"Pattern for right-camera filenames within the specified folder. 
Must have one %%d in it, which will receive an integer counting from 0.\", default='right_%d.png')\n parser.add_argument(\"-u\", \"--username\", help=\"Username with which to log into camera\")\n parser.add_argument(\"-pw\", \"--password\", help=\"Password with which to log into camera\")\n parser.add_argument(\"-t\", \"--target\", metavar=\"index\", help=\"Calibration target index (omit for user-interactive)\")\n args = parser.parse_args()\n\n if not args.capture and not args.process:\n print(\"Need at least one of --capture or --process.\")\n parser.print_help()\n sys.exit(0)\n \n # Application settings\n # leftUrlPattern = 'http://%s:%s@192.168.0.253/mjpg/video.mjpg'\n # rightUrlPattern = 'http://%s:%s@192.168.0.252/mjpg/video.mjpg'\n # folderNameFormat = 'stereo_cal_data_%Y-%d-%m_%H-%M-%S'\n leftUrlPattern = args.leftUrl\n rightUrlPattern = args.rightUrl\n folderNameFormat = args.folderNameFormat\n \n dataDir = datetime.now().strftime(folderNameFormat)\n if args.process:\n if args.target is not None:\n try:\n chosenTargetIdx = int(args.target)\n except:\n print(\"Must provide an integer for calibration target index, instead got %s.\"%args.target)\n sys.exit(0)\n if chosenTargetIdx >= len(calibrationTargets):\n print(\"Invalid calibration target index %d; exiting.\"%chosenTargetIdx)\n sys.exit(0)\n else:\n print(\"This program can seek the following targets: \")\n for i,target in enumerate(calibrationTargets):\n print('%2d: (%2dx%2d) \"%s\"s'%(i, target['dims'][0], target['dims'][1], target['desc']))\n chosenTargetIdx = input(\"Enter number of target to use: \")\n try:\n chosenTargetIdx = int(chosenTargetIdx)\n except:\n pass\n if type(chosenTargetIdx) is not int or chosenTargetIdx >= len(calibrationTargets):\n print(\"Invalid ca selection; exiting.\")\n sys.exit(0)\n calTarget = calibrationTargets[chosenTargetIdx]\n if args.capture:\n print(\"Will connect to (where %s:%s is the username and password):\")\n print(\"Left cam: %s\"%leftUrlPattern)\n print(\"Right cam: %s\"%rightUrlPattern)\n print(\"Will save data to this directory: %s\"%dataDir)\n print(\"During video stream, press the 's' key when the image is clear, the scene is stationary, and the target is visible in both images.\")\n print(\"Press escape to exit.\")\n if args.process:\n print(\"Press 'c' to process the captured images.\")\n\n \n leftUrl = leftUrlPattern%(args.username,args.password)\n rightUrl = rightUrlPattern%(args.username,args.password)\n paths = readStereoData(leftUrl, rightUrl, dataDir, args.leftFilenameFormat, args.rightFilenameFormat)\n else:\n # Look through the indicated directory to find files\n allFilesInDir = [f for f in os.listdir(dataDir) if os.path.isfile(os.path.join(dataDir, f))]\n # print(\"allFilesInDir: \" + repr(allFilesInDir))\n # We expect a pretty strict filename format here. 
\n # Stop looping as soon as we run out of files.\n keepLooking = True\n i = 0\n paths = []\n while keepLooking:\n leftPath = os.path.join(dataDir, args.leftFilenameFormat%i)\n rightPath = os.path.join(dataDir, args.rightFilenameFormat%i)\n if os.path.isfile(leftPath) and os.path.isfile(rightPath):\n paths += [(leftPath, rightPath)]\n i += 1\n else:\n keepLooking = False\n print(\"Found %d stereo pairs in %s.\"%(i, dataDir))\n\n if args.process:\n if paths is not None and len(paths) > 0:\n print(\"Got paths: \" + repr(paths))\n sc = StereoCalibrator(calTarget['dims'], calTarget['dotSpacingMm'], calTarget['simpleBlobDet'])\n \n stereo_cal = sc.find_stereo_pair_calibration(paths)\n if stereo_cal is not None:\n print(\"Have cal: \" + repr(stereo_cal))\n else:\n print(\"Failed to find cal.\")\n else:\n print(\"Commanded to process a calibration, but have no images to process.\")\n print(\"Exiting.\")\n", "id": "163138", "language": "Python", "matching_score": 8.971487045288086, "max_stars_count": 0, "path": "src/calibrate_stereo_pair.py" }, { "content": "\"\"\"\nUser-interactive stereo calibration capture and computation application\n\"\"\"\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.debug('Importing')\nfrom datetime import datetime\nimport os\nimport cv2\nimport numpy as np\nfrom stereo_calibrator import StereoCalibrator\nlogger.debug('Done')\n\n\ndef readVideoStream(url):\n cap = cv2.VideoCapture(url)\n\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"No frame.\")\n return\n cv2.imshow('Video', frame)\n \n if cv2.waitKey(1) == 27:\n return\n\ndef readStereoData(leftUrl, rightUrl, savePath):\n \"\"\"\n User-interactive function, presents a window for esc/s/c\n Returns a list of pairs of paths like: [('savePath/left0.png','savePath/right0.png'), ...]\n that lists off all the files saved, or None if the user asked to exit\n leftUrl, rightUrl : string, URL to each camera stream, including authentication, compatible with cv2.VideoCapture(url)\n savePath : directory in which to save data files\n \"\"\"\n leftCap = cv2.VideoCapture(leftUrl)\n rightCap = cv2.VideoCapture(rightUrl)\n \n paths = []\n imgNum = 0\n while True:\n leftRet, leftFrame = leftCap.read()\n # leftTimestamp = leftCap.get(cv2.CAP_PROP_POS_MSEC)\n rightRet, rightFrame = rightCap.read()\n # rightTimestamp = leftCap.get(cv2.CAP_PROP_POS_MSEC)\n if not leftRet and rightRet:\n print(\"No frame.\")\n return paths\n \n # logger.info(\"Frame timestamps: %f, %f\"%(leftTimestamp,rightTimestamp))\n \n frame = np.hstack((leftFrame, rightFrame))\n cv2.imshow('Press escape to exit, s to save, c to calibrate based on saved', frame)\n \n key = cv2.waitKey(1)\n if key == 27:\n return None\n elif key == ord('c'):\n return paths\n elif key == ord('s'):\n leftFn = os.path.join(savePath, 'left_%d.png'%imgNum)\n rightFn = os.path.join(savePath, 'right_%d.png'%imgNum)\n try:\n os.makedirs(savePath, exist_ok=True)\n cv2.imwrite(leftFn, leftFrame)\n cv2.imwrite(rightFn, rightFrame)\n paths += [(leftFn, rightFn)]\n logger.info(\"Saved pairs at %s,%s\"%(leftFn, rightFn))\n except Exception as e:\n logger.error(\"Failed to save frames.\", exc_info=True)\n imgNum += 1\n \n # else:\n # logger.debug('Got key: %d'%key)\n \n \nif __name__ == '__main__':\n logging.basicConfig(level=logging.WARNING)\n logger.setLevel(logging.DEBUG)\n # This is super verbose\n logging.getLogger('stereo_calibrator').setLevel(logging.DEBUG)\n\n from auth import USERNAME,PASSWORD # Make a file that defines these two strings\n # Application settings\n 
leftUrlPattern = 'http://%s:%s@192.168.0.253/mjpg/video.mjpg'\n rightUrlPattern = 'http://%s:%s@192.168.0.252/mjpg/video.mjpg'\n folderNameFormat = 'stereo_cal_data_%Y-%d-%m_%H-%M-%S'\n \n dataDir = datetime.now().strftime(folderNameFormat)\n \n print(\"Welcome to stereo pair calibration. Will connect to (where %s:%s is the username and password):\")\n print(\"Left cam: %s\"%leftUrlPattern)\n print(\"Right cam: %s\"%rightUrlPattern)\n print(\"Will save data to this directory: %s\"%dataDir)\n print(\"During video stream, press the 's' key when the image is clear, the scene is stationary, and the target is visible in both images.\")\n print(\"Press escape to exit.\")\n print(\"Press 'c' to process the captured images.\")\n\n \n leftUrl = leftUrlPattern%(USERNAME,PASSWORD)\n rightUrl = rightUrlPattern%(USERNAME,PASSWORD)\n paths = readStereoData(leftUrl, rightUrl, dataDir)\n if paths is not None:\n print(\"Got paths: \" + repr(paths))\n sc = StereoCalibrator()\n \n stereo_cal = sc.find_stereo_pair_calibration(paths)\n if stereo_cal is not None:\n print(\"Have cal: \" + repr(stereo_cal))\n else:\n print(\"Failed to find cal.\")\n print(\"Exiting.\")\n", "id": "5648113", "language": "Python", "matching_score": 2.661069393157959, "max_stars_count": 0, "path": "src/calibrate_stereo_pair.py" }, { "content": "#!/usr/bin/python3\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.debug('Importing')\nimport cv2\nimport cv2.aruco\nimport numpy as np\nlogger.debug('Done')\n\nclass StereoCalibrator:\n def __init__(self, calPatternDims=(8, 8), calDotSpacingMm=(25.877, 25.877), detector=None):\n \"\"\"\n calPatternDims : (int, int) - the rows,cols indicating the number of dots on the target\n calDotSpacingMm : (float,float) - in x,y, the number of millimeters between dots in x and y\n detector : a cv2.SimpleBlobDetector, or None for the default\n \"\"\"\n self._cal_target_dot_det = self.make_detector()\n \n # Set up the calibration pattern \n self._calPatternDims = calPatternDims\n # self._calPatternDims = (24, 48) # in dots, row,col\n self._calDotSpacingMm = calDotSpacingMm\n # self._calDotSpacingMm = (25.4, 25.4) # in mm, x,y\n self._IMAGE_SIZE = (800,600) # in px, x,y\n self._SENSOR_DIMS = (4*0.707107,4*0.707107) # in mm, row,col\n self._cal_3space_pattern = [] #[(x,y), ...]\n # OpenCV coordinate convention: x+ rightward, y+ downward, z+ out away from camera.\n for y in range(0, self._calPatternDims[0]):\n for x in range(0, self._calPatternDims[1]):\n self._cal_3space_pattern += [(x * self._calDotSpacingMm[0], y * self._calDotSpacingMm[1], 0)]\n # logger.debug(\"self._cal_3space_pattern: \" + repr(self._cal_3space_pattern))\n \n def make_detector(self):\n # Setup SimpleBlobDetector parameters.\n parms = cv2.SimpleBlobDetector_Params()\n \n # Change thresholds\n parms.minThreshold = 0;\n parms.maxThreshold = 128;\n \n # Filter by Area.\n parms.filterByArea = True\n parms.minArea = 5\n \n # Filter by Circularity\n parms.filterByCircularity = True\n parms.minCircularity = 0.25\n \n # Filter by Convexity\n parms.filterByConvexity = False\n parms.minConvexity = 0.9\n parms.maxConvexity = 1\n \n # Filter by Inertia\n parms.filterByInertia = True\n parms.minInertiaRatio = 0.5\n \n # logger.debug(\"Orig minDistBetweenBlobs: \" + str(parms.minDistBetweenBlobs))\n parms.minDistBetweenBlobs = 5\n parms.blobColor = 0\n \n # Create a detector with the parameters\n return cv2.SimpleBlobDetector_create(parms)\n\n def find_single_cam_calibration(self, image_paths):\n \"\"\"\n image_paths : list of image file 
paths\n return : cameraMatrix,distCoeffs if successful, or None,None if not\n \"\"\"\n all_points_in_3space, all_points_in_images = self._find_point_vectors(image_paths)\n if len(all_points_in_3space) > 0:\n # logger.debug(\"np.array(all_points_in_3space) = \" + repr(np.array(all_points_in_3space)))\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # logger.debug(\"all_points_in_3space = \" + str(all_points_in_3space))\n # logger.debug(\"all_points_in_images = \" + str(all_points_in_images))\n found,cameraMatrix,distCoeffs,rvecs,tvecs = cv2.calibrateCamera(all_points_in_3space, all_points_in_images, self._IMAGE_SIZE, None, None)\n \n # Debug by projecting the points in the calibration pattern onto the image\n # for img_path,points,rvec,tvec in zip(image_paths, all_points_in_3space, rvecs, tvecs):\n # img = cv2.imread(img_path)\n # imagePoints, jacobian = cv2.projectPoints(points, rvec, tvec, cameraMatrix, distCoeffs)\n # self._draw_points_on_image(img, imagePoints)\n # cv2.imshow('reprojected on %s'%img_path, img)\n # cv2.waitKey()\n \n # logger.debug(\"found: \" + repr(found) + \",\\n cameraMatrix: \" + repr(cameraMatrix) + \",\\n distCoeffs: \" + repr(distCoeffs) + \",\\n rvecs: \" + repr(rvecs) + \",\\n tvecs: \" + repr(tvecs))\n # logger.debug(\"found: \" + repr(found) + \",\\n rvecs: \" + repr(rvecs) + \",\\n tvecs: \" + repr(tvecs))\n return cameraMatrix,distCoeffs\n else: \n logger.error(\"Can't find any calibration patterns in any of the supplied images. Can't compute single camera calibration.\")\n return None,None\n \n def _draw_points_on_image(self, image, points): \n \"\"\"\n Annotate an image with points for debugging\n image : color opencv image\n points : list of coordinates in image\n \"\"\"\n RADIUS = 1\n COLOR = (0,0,255)\n i = 0\n for point in points:\n # point is x,y, like : np.array([[697.77185, 396.0037 ]], dtype=float32\n # logger.debug(\"point: %s\"%repr(point))\n cv2.circle(image, tuple(point[0]), RADIUS, COLOR, -1)\n cv2.putText(image, '%d'%i, tuple(point[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.33, COLOR)\n i += 1\n \n def _find_point_vectors(self, image_paths, rowCol=False):\n \"\"\"\n Get the coorinates of the dots on the calibration target\n \n image_paths : list of N image file paths\n rowCol : true to return points in row,col convention. False to use x,y convention.\n returns : (<list of N copies of self._cal_3space_pattern>, <list of arrays of dot coordinates in images>)\n \"\"\"\n all_points_in_images = []\n all_points_in_3space = []\n \n first_loop = True\n for image_path in image_paths:\n img = cv2.imread(image_path)\n points = np.array([[]])\n found,points = cv2.findCirclesGrid(img, self._calPatternDims, points, cv2.CALIB_CB_SYMMETRIC_GRID, self._cal_target_dot_det)\n if found:\n # logger.debug(\"points.shape: %s\"%repr(points.shape))\n points = points[:,0,:] # This doesn't seem to actually change anything, it seems to be just a spare dimension?\n # findCirclesGrid returns x,y convention. 
Convert to row,col\n if rowCol:\n points = points[:,[1,0]]\n # logger.debug(\"points.shape: %s\"%repr(points.shape))\n # logger.debug((\"Found \" + str(len(points)) + \" cal points in \" + image_path) if found else \"No cal pattern found in \" + image_path)\n all_points_in_images += [points]\n all_points_in_3space += [self._cal_3space_pattern]\n \n # self._draw_points_on_image(img, points)\n # cv2.imshow(image_path, img)\n else:\n logger.warning(\"Didn't find calibration pattern in this image: %s\"%image_path)\n # cv2.waitKey()\n \n return all_points_in_3space, all_points_in_images\n \n def find_stereo_pair_calibration(self, pair_image_paths):\n \"\"\"\n \n left_image_paths : list of strings, each of which is a path to an image from the left camera\n right_image_paths : list of strings, each of which is a path to an image from the right camera\n pair_image_paths : list of twoples, of the form (\"/path/to/one/left/image\", \"/path/to/one/right/image\"),\n \n returns : If failure, None. If successs, projection matrices for cv2.triangulatePoints, stored in a dictionary like this:\n {\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n \"\"\"\n left_image_paths = [pair[0] for pair in pair_image_paths]\n right_image_paths = [pair[1] for pair in pair_image_paths]\n\n # First must calibrate individual cameras\n logger.info(\"Computing left camera calibration\")\n lCameraMatrix, lDistCoeffs = self.find_single_cam_calibration(left_image_paths)\n logger.info(\"lCameraMatrix: \" + repr(lCameraMatrix))\n logger.info(\"Computing right camera calibration\")\n rCameraMatrix, rDistCoeffs = self.find_single_cam_calibration(right_image_paths)\n logger.info(\"rCameraMatrix: \" + repr(rCameraMatrix))\n \n if lCameraMatrix is None or rCameraMatrix is None:\n logger.error(\"Failed to find one or both camera matrices.\")\n return None\n \n # Find individual dots in all the images\n logger.info(\"Finding points in left images from pairs\")\n all_points_in_3space, all_points_in_left_images = self._find_point_vectors(left_image_paths, True)\n logger.info(\"Finding points in right images from pairs\")\n all_points_in_3space, all_points_in_right_images = self._find_point_vectors(right_image_paths, True)\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # logger.debug(\"all_points_in_3space: \" + repr(all_points_in_3space))\n logger.debug(\"all_points_in_left_images: \" + repr(all_points_in_left_images))\n logger.debug(\"all_points_in_right_images: \" + repr(all_points_in_right_images))\n # logger.debug(\"self._IMAGE_SIZE: \" + repr(self._IMAGE_SIZE))\n logger.debug(\"len(all_points_in_3space): \" + str(len(all_points_in_3space)))\n logger.debug(\"len(all_points_in_left_images): \" + str(len(all_points_in_left_images)))\n logger.debug(\"len(all_points_in_right_images): \" + str(len(all_points_in_right_images)))\n logger.info(\"Computing stereo calibration\")\n flags = 0\n flags |= cv2.CALIB_FIX_INTRINSIC\n # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT\n flags |= cv2.CALIB_USE_INTRINSIC_GUESS\n flags |= cv2.CALIB_FIX_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_ASPECT_RATIO\n flags |= cv2.CALIB_ZERO_TANGENT_DIST\n # flags |= cv2.CALIB_RATIONAL_MODEL\n # flags |= cv2.CALIB_SAME_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_K3\n # flags |= cv2.CALIB_FIX_K4\n # flags |= cv2.CALIB_FIX_K5\n\n stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +\n cv2.TERM_CRITERIA_EPS, 100, 1e-5)\n try:\n minError, lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, R, T, E, F = 
cv2.stereoCalibrate(all_points_in_3space, all_points_in_left_images, all_points_in_right_images, lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, self._IMAGE_SIZE, criteria=stereocalib_criteria, flags=flags)\n except:\n logger.error(\"Failed to find stereo calibration.\", exc_info=True)\n return None\n logger.debug(\"minError: \" + repr(minError))\n logger.debug(\"lCameraMatrix: \" + repr(lCameraMatrix))\n logger.debug(\"lDistCoeffs: \" + repr(lDistCoeffs))\n logger.debug(\"rCameraMatrix: \" + repr(rCameraMatrix))\n logger.debug(\"rDistCoeffs: \" + repr(rDistCoeffs))\n logger.debug(\"R: \" + repr(R))\n logger.debug(\"T: \" + repr(T))\n logger.debug(\"E: \" + repr(E))\n logger.debug(\"F: \" + repr(F))\n \n # For debugging only\n # imageL = cv2.imread(left_image_paths[0])\n # imageR = cv2.imread(right_image_paths[0])\n # lUd = cv2.undistort(imageL, lCameraMatrix, lDistCoeffs)\n # rUd = cv2.undistort(imageR, rCameraMatrix, rDistCoeffs)\n # cv2.imshow('left', imageL)\n # cv2.imshow('right', imageR)\n # cv2.imshow('left undistorted', lUd)\n # cv2.imshow('right undistorted', rUd)\n # cv2.waitKey()\n \n # Compute projection matrices\n #https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereorectify\n (leftRectXform, rightRectXform, leftProjMat, rightProjMat, Q, leftRoi, rightRoi) = cv2.stereoRectify(lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, self._IMAGE_SIZE, R, T)\n Txf = rightProjMat[0,3]\n Tyf = rightProjMat[1,3]\n if(Txf != 0 and Tyf == 0):\n logger.info(\"Horizontal stereo configuration detected.\")\n elif(Txf == 0 and Tyf != 0):\n logger.info(\"Vertical stereo configuration detected.\")\n else:\n logger.error(\"Invalid stereo configuration detected. Txf=%f, Tyf=%f\"%(Txf,Tyf))\n \n # TODO: ROI rectangles are x,y,width,height. 
Check if we got a reasonable fraction of each image\n \n logger.debug(\"leftRectXform : \" + repr(leftRectXform ))\n logger.debug(\"rightRectXform: \" + repr(rightRectXform))\n logger.debug(\"leftProjMat : \" + repr(leftProjMat ))\n logger.debug(\"rightProjMat : \" + repr(rightProjMat ))\n logger.debug(\"Q: \" + repr(Q))\n logger.debug(\"leftRoi: \" + repr(leftRoi))\n logger.debug(\"rightRoi: \" + repr(rightRoi))\n\n retDict = {\n 'minError':minError,\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n \n return retDict\n \n def find_cal_pattern_in_3space(self, stereo_cal, pair_image_path):\n \"\"\"\n stereo_cal: Projection matrices for cv2.triangulatePoints, stored in a dictionary like this:\n {\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n pair_image_path : a twople, of the form (\"/path/to/one/left/image\", \"/path/to/one/right/image\"),\n \n returns: a list of [x,y,z] coordinates in real-world space, in the form:\n np.array([[ 7549.84 , -184252.69 , 40687.215 ],\n [ 7626.0737, -185671.55 , 41133.258 ],\n [ 7643.9023, -186005.36 , 41351.223 ]])\n \"\"\"\n \n # Find individual dots in all the images\n logger.info(\"Finding points in left images from pairs\")\n all_points_in_3space, all_points_in_left_images = self._find_point_vectors([pair_image_path[0]])\n logger.info(\"Finding points in right images from pairs\")\n all_points_in_3space, all_points_in_right_images = self._find_point_vectors([pair_image_path[1]])\n \n # for image_path,points in zip(pair_image_path, [all_points_in_left_images[0], all_points_in_right_images[0]]):\n # img = cv2.imread(image_path)\n # self._draw_points_on_image(img, points)\n # cv2.imshow(image_path, img)\n all_points_in_left_images = all_points_in_left_images[0]\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n # all_points_in_left_images = all_points_in_left_images[:,0,:]\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n all_points_in_right_images = all_points_in_right_images[0]\n # all_points_in_right_images = all_points_in_right_images[:,0,:]\n # Switch from x,y to row,col\n # all_points_in_left_images = all_points_in_left_images[:,[1,0]]\n # all_points_in_right_images = all_points_in_right_images[:,[1,0]]\n all_points_in_left_images = all_points_in_left_images.transpose()\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n all_points_in_right_images = all_points_in_right_images.transpose()\n \n # logger.debug('all_points_in_left_images: ' + repr(all_points_in_left_images))\n \n points4d = cv2.triangulatePoints(stereo_cal['leftProjMat'], stereo_cal['rightProjMat'], all_points_in_left_images, all_points_in_right_images)\n points3d = cv2.convertPointsFromHomogeneous(points4d.transpose())\n \n # logger.debug('points4d: ' + repr(points4d))\n logger.debug('points3d: ' + repr(points3d))\n return points3d[:,0,:]\n \n\ndef mark_dots(infilepath, outfilepath, detector):\n \"\"\"\n Test routine for debugging blob finder params\n \"\"\"\n image = cv2.imread(infilepath)\n blobs = detector.detect(image)\n print(\"Found \"+str(len(blobs))+\" blobs \" + infilepath + \" -> \" + outfilepath)\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\n annotated = cv2.drawKeypoints(image, blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n cv2.imwrite(outfilepath, annotated)\n\n \nif __name__ == '__main__':\n logging.basicConfig(level=logging.WARNING)\n 
logger.setLevel(logging.DEBUG)\n\n sc = StereoCalibrator()\n cal_img_dir = 'test images/2019-10-18 stereo cal images/'\n pair_image_names = [\n ('left/left-00001.png','right/right-00001.png'),\n ('left/left-00002.png','right/right-00002.png'),\n # ('left/left-00003.png','right/right-00003.png'),\n ('left/left-00004.png','right/right-00004.png'),\n ('left/left-00005.png','right/right-00005.png'),\n ('left/left-00006.png','right/right-00006.png'),\n # ('left/left-00007.png','right/right-00007.png'),\n # ('left/left-00008.png','right/right-00008.png'),\n # ('left/left-00009.png','right/right-00009.png'),\n ('left/left-00010.png','right/right-00010.png'),\n ('left/left-00011.png','right/right-00011.png'),\n ('left/left-00012.png','right/right-00012.png'),\n ('left/left-00013.png','right/right-00013.png'),\n # ('left/left-00014.png','right/right-00014.png'),\n # ('left/left-00015.png','right/right-00015.png'),\n ('left/left-00019.png','right/right-00019.png'),\n # ('left/left-00020.png','right/right-00020.png'),\n ('left/left-00021.png','right/right-00021.png'),\n ('left/left-00022.png','right/right-00022.png'),\n ('left/left-00023.png','right/right-00023.png'),\n # ('left/left-00025.png','right/right-00025.png'),\n ]\n pair_cal_images = [(cal_img_dir + pair[0], cal_img_dir + pair[1]) for pair in pair_image_names]\n # det = sc.make_detector()\n # for img in all_images:\n # outfile = 'dotted_' + img;\n # mark_dots(cal_img_dir + img, cal_img_dir + outfile, det)\n \n if False:\n # Progressively include more pairs\n for numPairs in range(1, len(pair_cal_images) + 1):\n logger.debug(\"=============================================================================\")\n logger.debug(\"Trying with %d pairs.\"%numPairs)\n pairsToUse = pair_cal_images[0:numPairs]\n stereo_cal = sc.find_stereo_pair_calibration(pairsToUse )\n input('Press enter to continue...')\n elif False:\n # Do just the first two. 
Those are in the same relative position, so if it doesn't work on these, it never will\n stereo_cal = sc.find_stereo_pair_calibration(pair_cal_images[0:2])\n elif True:\n # Just use two images\n stereo_cal = sc.find_stereo_pair_calibration(\n [(cal_img_dir + 'left/left-00012.png', cal_img_dir + 'right/right-00012.png')])\n else:\n # Do em all at once\n stereo_cal = sc.find_stereo_pair_calibration( pair_cal_images)\n \n output_fn = 'src/stereo_cal.py'\n with open(output_fn, 'w+') as outfile:\n outfile.write('from numpy import array\\nstereo_cal = ' + repr(stereo_cal) + '\\n')\n logger.info('Done, saved calibration to ' + output_fn)\n\n # test\n points3d = sc.find_cal_pattern_in_3space(stereo_cal, pair_cal_images[0])\n \n if True:\n # https://matplotlib.org/3.1.1/gallery/mplot3d/scatter3d.html\n # This import registers the 3D projection, but is otherwise unused.\n from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\n\n import matplotlib.pyplot as plt\n import numpy as np\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.scatter(points3d[:,0], points3d[:,1], points3d[:,2], marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n plt.show()\n\n cv2.waitKey()\n", "id": "6409211", "language": "Python", "matching_score": 8.668669700622559, "max_stars_count": 0, "path": "src/stereo_calibrator.py" }, { "content": "#!/usr/bin/python3\nprint('Importing')\nimport cv2\nimport cv2.aruco\nimport numpy as np\n\ndef make_detector():\n # Setup SimpleBlobDetector parameters.\n parms = cv2.SimpleBlobDetector_Params()\n \n # Change thresholds\n parms.minThreshold = 0;\n parms.maxThreshold = 128;\n \n # Filter by Area.\n parms.filterByArea = True\n parms.minArea = 5\n \n # Filter by Circularity\n parms.filterByCircularity = True\n parms.minCircularity = 0.25\n \n # Filter by Convexity\n parms.filterByConvexity = False\n parms.minConvexity = 0.9\n parms.maxConvexity = 1\n \n # Filter by Inertia\n parms.filterByInertia = True\n parms.minInertiaRatio = 0.5\n \n print(\"Orig minDistBetweenBlobs: \" + str(parms.minDistBetweenBlobs))\n parms.minDistBetweenBlobs = 5\n parms.blobColor = 0\n \n # Create a detector with the parameters\n return cv2.SimpleBlobDetector_create(parms)\n\nclass Markerfinder:\n def __init__(self, marker_dict=None):\n \"\"\"\n marker_dict : an aruco marker dictionary to use for finding markers\n \"\"\"\n if marker_dict is None:\n marker_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_7X7_1000)\n self._marker_dict = marker_dict\n \n # Courtesy https://www.learnopencv.com/blob-detection-using-opencv-python-c/\n # Setup SimpleBlobDetector parameters.\n self._params = cv2.SimpleBlobDetector_Params()\n \n # Change thresholds\n self._params.minThreshold = 0;\n self._params.maxThreshold = 32;\n \n # Filter by Area.\n self._params.filterByArea = True\n self._params.minArea = 5\n \n # Filter by Circularity\n self._params.filterByCircularity = True\n self._params.minCircularity = 0.25\n \n # Filter by Convexity\n self._params.filterByConvexity = True\n self._params.minConvexity = 0.9\n self._params.maxConvexity = 1\n \n # Filter by Inertia\n self._params.filterByInertia = True\n self._params.minInertiaRatio = 0.5\n \n print(\"Orig minDistBetweenBlobs: \" + str(self._params.minDistBetweenBlobs))\n self._params.minDistBetweenBlobs = 1\n self._params.blobColor = 0\n \n # Create a detector with the parameters\n self._dot_detector = cv2.SimpleBlobDetector_create(self._params)\n \n self._cal_target_dot_det = make_detector()\n \n # Set up 
the calibration pattern \n self.CAL_PATTERN_DIMS = (8, 8) # in dots\n self.CAL_DOT_SPACING = (25.8, 25.8) # in mm\n self._IMAGE_SIZE = (800,600) # in px\n self._cal_3space_pattern = []\n for x in range(0, self.CAL_PATTERN_DIMS[0]):\n for y in range(0, self.CAL_PATTERN_DIMS[1]):\n self._cal_3space_pattern += [(x * self.CAL_DOT_SPACING[0], y * self.CAL_DOT_SPACING[1], 0)]\n self._cal_3space_pattern\n \n def find_single_cam_calibration(self, image_paths):\n \"\"\"\n images : list of image file paths\n \"\"\"\n all_points_in_3space, all_points_in_images = self._find_point_vectors(image_paths)\n if len(all_points_in_3space) > 0:\n # print(\"np.array(all_points_in_3space) = \" + repr(np.array(all_points_in_3space)))\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # print(\"all_points_in_3space = \" + str(all_points_in_3space))\n found,cameraMatrix,distCoeffs,rvecs,tvecs = cv2.calibrateCamera(all_points_in_3space, all_points_in_images, self._IMAGE_SIZE, np.array([]), np.array([]))\n # print(\"found: \" + repr(found) + \",\\n cameraMatrix: \" + repr(cameraMatrix) + \",\\n distCoeffs: \" + repr(distCoeffs) + \",\\n rvecs: \" + repr(rvecs) + \",\\n tvecs: \" + repr(tvecs))\n return cameraMatrix,distCoeffs\n \n def _find_point_vectors(self, image_paths):\n all_points_in_images = []\n all_points_in_3space = []\n for image_path in image_paths:\n img = cv2.imread(image_path)\n # print(\"img : \" + repr(img))\n points = np.array([[]])\n found,points = cv2.findCirclesGrid(img, self.CAL_PATTERN_DIMS, points, cv2.CALIB_CB_SYMMETRIC_GRID, self._cal_target_dot_det)\n print((\"Found \" + str(len(points)) + \" cal points in \" + image_path) if found else \"No cal pattern found in \" + image_path)\n if found:\n all_points_in_images += [points]\n all_points_in_3space += [self._cal_3space_pattern]\n return all_points_in_3space, all_points_in_images\n \n def find_stereo_pair_calibration(self, left_image_paths, right_image_paths, pair_image_paths):\n \"\"\"\n \n left_image_paths : list of strings, each of which is a path to an image from the left camera\n right_image_paths : list of strings, each of which is a path to an image from the right camera\n pair_image_paths : list of twoples, of the form (\"/path/to/one/left/image\", \"/path/to/one/right/image\"),\n \n \"\"\"\n # First must calibrate individual cameras\n lCameraMatrix, lDistCoeffs = mf.find_single_cam_calibration(left_image_paths)\n rCameraMatrix, rDistCoeffs = mf.find_single_cam_calibration(right_image_paths)\n \n # redefine these \n left_image_paths = [pair[0] for pair in pair_cal_images]\n right_image_paths = [pair[1] for pair in pair_cal_images]\n \n # Find individual dots in all the images\n all_points_in_3space, all_points_in_left_images = self._find_point_vectors(left_image_paths)\n all_points_in_3space, all_points_in_right_images = self._find_point_vectors(right_image_paths)\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # print(\"all_points_in_3space: \" + repr(all_points_in_3space))\n # print(\"all_points_in_left_images: \" + repr(all_points_in_left_images))\n # print(\"all_points_in_right_images: \" + repr(all_points_in_right_images))\n # print(\"self._IMAGE_SIZE: \" + repr(self._IMAGE_SIZE))\n print(\"len(all_points_in_3space): \" + str(len(all_points_in_3space)))\n print(\"len(all_points_in_left_images): \" + str(len(all_points_in_left_images)))\n print(\"len(all_points_in_right_images): \" + str(len(all_points_in_right_images)))\n retval, lCameraMatrix, lDistCoeffs, rCameraMatrix, 
rDistCoeffs, R, T, E, F = cv2.stereoCalibrate(all_points_in_3space, all_points_in_left_images, all_points_in_right_images, lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, self._IMAGE_SIZE)\n print(\"retval: \" + repr(retval))\n print(\"lCameraMatrix: \" + repr(lCameraMatrix))\n print(\"lDistCoeffs: \" + repr(lDistCoeffs))\n print(\"rCameraMatrix: \" + repr(rCameraMatrix))\n print(\"rDistCoeffs: \" + repr(rDistCoeffs))\n print(\"R: \" + repr(R))\n print(\"T: \" + repr(T))\n print(\"E: \" + repr(E))\n print(\"F: \" + repr(F))\n \n\n def find_markers_single_image(self, image):\n \"\"\"\n Locates markers that consist of an aruco marker surrounded by 20 dots\n \n image : An opencv image\n returns : A dictionary, whose keys are aruco marker IDs, and values are twoples indicating marker centerpoint\n \"\"\"\n corners, ids, rejects = cv2.aruco.detectMarkers(image, self._marker_dict)\n\n markedup = cv2.aruco.drawDetectedMarkers(image, corners, ids)\n\n print(\"corners: \" + repr(corners))\n markers = {}\n for i in range(0, len(ids)):\n # print(str(i) + \": corners[i]: \" + repr(corners[i]))\n # print(str(i) + \": vector_ul_lr: \" + repr(vector_ul_lr))\n # print(str(i) + \": 0.25 * vector_ul_lr: \" + repr(0.25 * vector_ul_lr))\n # print(str(i) + \": corners[i][0][2] + 0.25 * vector_ul_lr: \" + repr(corners[i][0][2] + 0.25 * vector_ul_lr))\n # Compute a box 25% wider in every dimension.\n # I sorted this out in longform and then condensed it. Sorry.\n grown_box = np.array([corners[i][0][(c + 2) % 4] + 0.25 * (corners[i][0][(c + 2) % 4] - corners[i][0][c]) for c in range(0,4)], dtype=np.int32)\n # print(str(i) + \": grown_box: \" + repr(grown_box))\n poly_pts = grown_box.reshape((-1,1,2))\n \n # Create a mask that will select just the dots\n mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)\n cv2.fillConvexPoly(mask, poly_pts, 1, 0)\n cv2.fillConvexPoly(mask, corners[i].astype(np.int32), 0, 0)\n masked = cv2.bitwise_and(image, image, mask=mask)\n cv2.polylines(markedup, [poly_pts], True, (0,255,0))\n \n dots = self._get_dots(masked)\n # print(\"Dots at: \" + repr(dots))\n # For debugging, we're just gonna return a marked up image for now \n # return masked\n return dots\n\n def _get_dots(self, masked_image):\n \"\"\"\n Locates dot centerpoints in image. Assumes image is masked down to just one marker's worth of dots.\n returns : list of twoples, representing (x,y) coordinates in image of the \n \"\"\"\n blobs = self._dot_detector.detect(masked_image)\n print(\"Found \"+str(len(blobs))+\" blobs. 
\")\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\n # return cv2.drawKeypoints(masked_image, blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n return [blob.pt for blob in blobs]\n\n\ndef mark_dots(infilepath, outfilepath, detector):\n \"\"\"\n Test routine for debugging blob finder params\n \"\"\"\n \n image = cv2.imread(infilepath)\n blobs = detector.detect(image)\n print(\"Found \"+str(len(blobs))+\" blobs \" + infilepath + \" -> \" + outfilepath)\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\n annotated = cv2.drawKeypoints(image, blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n cv2.imwrite(outfilepath, annotated)\n\nif __name__ == '__main__':\n from stereo_cal import stereo_cal\n mf = Markerfinder();\n marker_img_dir = 'test images/2019-10-18 stereo cal images/'\n pair_image_names = [\n ('left/left-00001.png','right/right-00001.png'),\n ('left/left-00002.png','right/right-00002.png'),\n # ('left/left-00003.png','right/right-00003.png'),\n ('left/left-00004.png','right/right-00004.png'),\n ('left/left-00005.png','right/right-00005.png'),\n ('left/left-00006.png','right/right-00006.png'),\n # ('left/left-00007.png','right/right-00007.png'),\n # ('left/left-00008.png','right/right-00008.png'),\n # ('left/left-00009.png','right/right-00009.png'),\n ('left/left-00010.png','right/right-00010.png'),\n ('left/left-00011.png','right/right-00011.png'),\n ('left/left-00012.png','right/right-00012.png'),\n ('left/left-00013.png','right/right-00013.png'),\n # ('left/left-00014.png','right/right-00014.png'),\n # ('left/left-00015.png','right/right-00015.png'),\n ('left/left-00019.png','right/right-00019.png'),\n # ('left/left-00020.png','right/right-00020.png'),\n ('left/left-00021.png','right/right-00021.png'),\n ('left/left-00022.png','right/right-00022.png'),\n ('left/left-00023.png','right/right-00023.png'),\n # ('left/left-00025.png','right/right-00025.png'),\n ]\n left_cal_images = [cal_img_dir + img for img in left_image_names]\n right_cal_images = [cal_img_dir + img for img in right_image_names]\n pair_cal_images = [(cal_img_dir + pair[0], cal_img_dir + pair[1]) for pair in pair_image_names]\n all_images = left_image_names + right_image_names\n det = make_detector()\n for img in all_images:\n outfile = 'dotted_' + img;\n mark_dots(cal_img_dir + img, cal_img_dir + outfile, det)\n mf.find_stereo_pair_calibration(left_cal_images, right_cal_images, pair_cal_images)\n mf.find_markers_single_image(img)\n ", "id": "12014089", "language": "Python", "matching_score": 3.6768908500671387, "max_stars_count": 0, "path": "src/markerfinder.py" }, { "content": "#!/usr/bin/python3\n\"\"\"\nDefinitions for some simple calibration targets\n\"\"\"\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.debug('Importing')\nimport cv2\nlogger.debug('Done')\n\nprinted8x8BlobParams = cv2.SimpleBlobDetector_Params()\nprinted8x8BlobParams.minThreshold = 0;\nprinted8x8BlobParams.maxThreshold = 128;\nprinted8x8BlobParams.filterByArea = True\nprinted8x8BlobParams.minArea = 5\nprinted8x8BlobParams.filterByCircularity = True\nprinted8x8BlobParams.minCircularity = 0.25\nprinted8x8BlobParams.filterByConvexity = False\nprinted8x8BlobParams.minConvexity = 0.9\nprinted8x8BlobParams.maxConvexity = 1\nprinted8x8BlobParams.filterByInertia = 
True\nprinted8x8BlobParams.minInertiaRatio = 0.5\nprinted8x8BlobParams.minDistBetweenBlobs = 5\nprinted8x8BlobParams.blobColor = 0\n\n\n\n\nperfboard24x48BlobParams = cv2.SimpleBlobDetector_Params()\nperfboard24x48BlobParams.minThreshold = 0;\nperfboard24x48BlobParams.maxThreshold = 128;\nperfboard24x48BlobParams.filterByArea = True\nperfboard24x48BlobParams.minArea = 2\nperfboard24x48BlobParams.filterByCircularity = True\nperfboard24x48BlobParams.minCircularity = 0.75\nperfboard24x48BlobParams.filterByConvexity = False\nperfboard24x48BlobParams.minConvexity = 0.5\nperfboard24x48BlobParams.maxConvexity = 1\nperfboard24x48BlobParams.filterByInertia = True\nperfboard24x48BlobParams.minInertiaRatio = 0.5\nperfboard24x48BlobParams.minDistBetweenBlobs = 1\nperfboard24x48BlobParams.blobColor = 0\n\n# Data structure to import\ncalibrationTargets = [{\n 'desc':'Printed 8x8 grid of dots',\n 'simpleBlobDetParams':printed8x8BlobParams,\n 'simpleBlobDet': cv2.SimpleBlobDetector_create(printed8x8BlobParams),\n 'dims': (8, 8), #rows,cols\n 'dotSpacingMm': (25.877, 25.877), #x,y\n}, { \n 'desc':'Perfboard 2ft x 4ft target',\n 'simpleBlobDetParams':perfboard24x48BlobParams,\n 'simpleBlobDet': cv2.SimpleBlobDetector_create(perfboard24x48BlobParams),\n 'dims': (24, 48), #rows,cols\n 'dotSpacingMm': (25.4, 25.4), #x,y\n}]", "id": "7169936", "language": "Python", "matching_score": 0.005389196332544088, "max_stars_count": 0, "path": "src/cal_target_defs.py" }, { "content": "#!/usr/bin/python\n\nimport smbus\n\n\"\"\"\nClass to interface with the Melexis MLX90614 family of infrared thermometers.\n\"\"\"\nclass MLX90614:\n _TEMP_RESOLUTION_K = 0.02\n _K_TO_C = 273.15\n \n _AMBIENT_TEMP_REGISTER = 0x06\n _OBJECT_1_TEMP_REGISTER = 0x07\n _OBJECT_2_TEMP_REGISTER = 0x08 # Only present in dual-zone models\n \n \n def __init__(self, bus = 1, addr = 0x5a):\n \"\"\"\n bus: I2C bus number - eg 1 means /dev/i2c-1, etc\n addr: I2C device address for the sensor\n \"\"\"\n self._bus = smbus.SMBus(bus)\n self._addr = addr\n\n @staticmethod\n def _c_to_f(c_temp):\n \"\"\"\n c: temperature in celsius\n returns: temperature in fahrenheit\n \"\"\"\n return c_temp * 9.0/5.0 + 32.0\n \n @staticmethod\n def _word_to_c(word):\n \"\"\"\n Convert a word from the MLX90614 to a temperature\n word: an integer as read from a temperature register on the device\n returns: indicated temperature in celsius\n \"\"\"\n temp_k = MLX90614._TEMP_RESOLUTION_K * word\n return temp_k - MLX90614._K_TO_C\n \n def read_ambient_temp_f(self):\n \"\"\"\n returns: ambient temperature (case of sensor) in Fahrenheit\n \"\"\"\n return MLX90614._c_to_f(self.read_ambient_temp_c())\n \n def read_ambient_temp_c(self):\n \"\"\"\n returns: ambient temperature (case of sensor) in Celsius\n \"\"\"\n return MLX90614._word_to_c(self._bus.read_word_data(self._addr, MLX90614._AMBIENT_TEMP_REGISTER))\n \n def read_object1_temp_f(self):\n \"\"\"\n returns: temperature in first zone of sensor, in Fahrenheit\n \"\"\"\n return MLX90614._c_to_f(self.read_object1_temp_c())\n \n def read_object1_temp_c(self):\n \"\"\"\n returns: temperature in first zone of sensor, in Celsius\n \"\"\"\n return MLX90614._word_to_c(self._bus.read_word_data(self._addr, MLX90614._OBJECT_1_TEMP_REGISTER))\n \n def read_object2_temp_f(self):\n \"\"\"\n returns: temperature in second zone of sensor, in Fahrenheit - or 0K if this is a single-zone sensor.\n \"\"\"\n return MLX90614._c_to_f(self.read_object2_temp_c())\n \n def read_object2_temp_c(self):\n \"\"\"\n returns: temperature in second zone of 
sensor, in Celsius - or 0K if this is a single-zone sensor.\n \"\"\"\n return MLX90614._word_to_c(self._bus.read_word_data(self._addr, MLX90614._OBJECT_1_TEMP_REGISTER))\n\nif __name__ == \"__main__\":\n sensor = MLX90614()\n print(\"Object: %3.2f Ambient: %3.2f\"%(sensor.read_object1_temp_f(), sensor.read_ambient_temp_f()))", "id": "1380257", "language": "Python", "matching_score": 1.5560280084609985, "max_stars_count": 1, "path": "Pi/Python/mlx90614.py" }, { "content": "#!/usr/bin/env python\n\n# Be sure to enable SPI on your Pi before running.\n# See the following resources:\n# http://raspberrypi-aa.github.io/session3/spi.html\n# https://learn.sparkfun.com/tutorials/raspberry-pi-spi-and-i2c-tutorial\n\n# Requires py-spidev, please run the following:\n# sudo apt-get update sudo apt-get install python-dev\n# git clone git://github.com/doceme/py-spidev\n# cd py-spidev\n# sudo python setup.py install\nimport spidev\n\nclass Max31855:\n def __init__(self, bus=0, device=0):\n self._spi = spidev.SpiDev()\n self._spi.open(bus, device)\n self._spi.max_speed_hz = 5000\n self._spi.mode = 0b01\n self._have_reading = False\n\n def take_reading(self):\n \"\"\"\n Reads the sensor and stores its measurements internal to this object.\n After calling, use accessors to read data.\n \"\"\"\n data = self._spi.xfer2([0, 0, 0, 0])\n self._parse_data(data)\n self._have_reading = True;\n\n def thermocouple_temp_c(self):\n \"\"\" Returns the temperature measured at the end of the thermocouple \"\"\"\n if(not self._have_reading):\n raise Exception(\"Need to take a reading first\")\n else:\n return self._thermocouple_temp_c\n def thermocouple_temp_f(self):\n \"\"\" Returns the temperature measured at the end of the thermocouple \"\"\"\n return self._c_to_f(self.thermocouple_temp_c())\n\n def internal_temp_c(self):\n \"\"\" Returns the temperature of the Max31855 die \"\"\"\n if(not self._have_reading):\n raise Exception(\"Need to take a reading first\")\n else:\n return self._internal_temp_c\n def internal_temp_f(self):\n \"\"\" Returns the temperature of the Max31855 die \"\"\"\n return self._c_to_f(self.internal_temp_c())\n\n def is_faulted(self):\n \"\"\"\n Returns true if the Max31855 reports a problem with\n the thermocouple connection\n \"\"\"\n if(not self._have_reading):\n raise Exception(\"Need to take a reading first\")\n else:\n return self._fault\n\n def fault_reason(self):\n \"\"\"\n Return a human-readable string describing the\n fault reported by the Max31855\n \"\"\"\n if(not self._have_reading):\n raise Exception(\"Need to take a reading first\")\n else:\n if not self._fault:\n return \"No fault\"\n elif self._short_to_vcc:\n return \"Short to Vcc\"\n elif self._short_to_gnd:\n return \"Short to Gnd\"\n elif self._open_circuit:\n return \"Open circuit\"\n else:\n return \"Unknown\"\n\n @classmethod\n def _c_to_f(cls, c_temp):\n return c_temp * 9.0/5.0 + 32.0\n\n def _parse_data(self, arr):\n \"\"\"\n Takes a 4B string from the Max31855 and\n breaks out its constituents.\n Argument must be an array of 4 byte values.\n \"\"\"\n # See datasheet for explanation\n first_half = (arr[0]<<8) + arr[1]\n second_half = (arr[2]<<8) + arr[3]\n self._thermocouple_temp_c = ((first_half >> 2) & 0x1fff) * 0.25\n if(((first_half >> 2) & 0x2000) > 0):\n self._thermocouple_temp_c *= -1\n self._internal_temp_c = ((second_half >> 4) & 0x7ff) * 0.0625\n if(((second_half >> 4) & 0x800) > 0):\n self._internal_temp_c *= -1\n self._fault = ((first_half & 0x0001) > 0)\n self._short_to_vcc = ((second_half & 0x0004) > 0)\n 
self._short_to_gnd = ((second_half & 0x0002) > 0)\n self._open_circuit = ((second_half & 0x0001) > 0)\n", "id": "5765846", "language": "Python", "matching_score": 3.436204671859741, "max_stars_count": 0, "path": "sensors/max31855.py" }, { "content": "#!/usr/bin/python\nimport max31855\nimport time\nm = max31855.Max31855()\n\nwhile True:\n\tm.take_reading()\n\tprint(\"Fault? %s Therm: %f Chip: %f Fault: %s\"%(str(m.is_faulted()), m.thermocouple_temp_f(), m.internal_temp_f(), m.fault_reason()))\n\ttime.sleep(0.1)\n", "id": "5437914", "language": "Python", "matching_score": 1.7865667343139648, "max_stars_count": 1, "path": "Pi/Python/tester.py" } ]
2.393516
bertrandvidal
[ { "content": "import string\n\n\nknown_digits = string.digits + string.ascii_uppercase\n\n\ndef to_decimal(number, base):\n return sum(known_digits.index(digit) * base ** idx\n for idx, digit in enumerate(number[::-1]))\n\n\nassert to_decimal(\"10\", 10) == 10\nassert to_decimal(\"10\", 2) == 2\nassert to_decimal(\"FF\", 16) == 255\nassert to_decimal(\"A1\", 16) == 161\n\n\ndef from_decimal(number, base):\n result = []\n while number:\n result.append(known_digits[number % base])\n number = number / base\n return \"\".join(result[::-1])\n\n\nassert from_decimal(10, 16) == \"A\"\nassert from_decimal(15, 16) == \"F\"\nassert from_decimal(19, 16) == \"13\"\nassert from_decimal(10, 2) == \"1010\"\n\n\ndef convert(number, base, new_base):\n return from_decimal(to_decimal(number, base), new_base)\n\n\nassert convert(\"1010\", 2, 16) == \"A\"\nassert convert(\"1010\", 10, 16) == \"3F2\"\nassert convert(\"1010\", 2, 10) == \"10\"\nassert convert(\"1010\", 16, 16) == \"1010\"\nassert convert(\"1010\", 16, 2) == \"1000000010000\"\nassert convert(\"1010\", 16, 10) == \"4112\"\n", "id": "5075523", "language": "Python", "matching_score": 1.4307904243469238, "max_stars_count": 0, "path": "base_to_base/convert.py" }, { "content": "d = {\"I\":1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n\ndef compute(left, right):\n if left < right:\n return right - left\n else:\n return left + right\n\ndef convert(number):\n return map(lambda x: d[x], number)\n\ndef to_decimal(number):\n components = convert(number)\n total = 0\n while components:\n if len(components) == 1:\n total += components.pop()\n else:\n first, second = components[0:2:1]\n if first < second:\n total += second - first\n components = components[2:]\n else:\n total += first\n components = components[1:]\n return total\n\n\ndef rec_to_decimal(components, total=0):\n if not components:\n return total\n if len(components) == 1:\n return total + components.pop()\n else:\n first, second = components[0:2:1]\n if first < second:\n return rec_to_decimal(components[2:], total + (second - first))\n else:\n return rec_to_decimal(components[1:], total + first)\n\nassert to_decimal(\"XLVIII\") == 48\nassert to_decimal(\"CCVII\") == 207\nassert to_decimal(\"MLXVI\") == 1066\nassert to_decimal(\"MCMLIV\") == 1954\nassert to_decimal(\"MCMXC\") == 1990\nassert to_decimal(\"MMXIV\") == 2014\n\nassert rec_to_decimal(convert(\"XLVIII\")) == 48\nassert rec_to_decimal(convert(\"CCVII\")) == 207\nassert rec_to_decimal(convert(\"MLXVI\")) == 1066\nassert rec_to_decimal(convert(\"MCMLIV\")) == 1954\nassert rec_to_decimal(convert(\"MCMXC\")) == 1990\nassert rec_to_decimal(convert(\"MMXIV\")) == 2014\n\n", "id": "6940059", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "roman/roman.py" }, { "content": "import unittest\n\nfrom parse_this.exception import ParseThisException\nfrom parse_this.types import _check_types\n\n\nclass TestTypes(unittest.TestCase):\n def test_check_types_not_enough_types_provided(self):\n self.assertRaises(\n ParseThisException, _check_types, \"function\", {}, [\"i_dont_have_a_type\"], ()\n )\n\n def test_check_types_too_many_types_provided(self):\n self.assertRaises(\n ParseThisException,\n _check_types,\n \"function\",\n {\"a\": int, \"b\": int},\n [\"i_am_alone\"],\n (),\n )\n\n def test_check_types_with_default(self):\n func_args = [\"i_am_alone\", \"i_have_a_default_value\"]\n self.assertEqual(\n _check_types(\n \"function\",\n {\"i_am_an_int\": int, \"i_have_a_default_value\": str},\n 
func_args,\n (\"default_value\",),\n ),\n func_args,\n )\n\n def test_check_types_with_default_type_not_specified(self):\n func_args = [\"i_am_an_int\", \"i_have_a_default_value\"]\n self.assertEqual(\n _check_types(\n \"function\", {\"i_am_an_int\": int}, func_args, (\"default_value\",)\n ),\n func_args,\n )\n\n def test_check_types_remove_self(self):\n func_args = [\"i_am_an_int\", \"i_have_a_default_value\"]\n self.assertEqual(\n _check_types(\n \"function\",\n {\"i_am_an_int\": int},\n [\"self\"] + func_args,\n (\"default_value\",),\n ),\n func_args,\n )\n\n def test_check_types_remove_class(self):\n func_args = [\"i_am_an_int\", \"i_have_a_default_value\"]\n self.assertEqual(\n _check_types(\n \"function\",\n {\"i_am_an_int\": int},\n [\"cls\"] + func_args,\n (\"default_value\",),\n ),\n func_args,\n )\n\n def test_check_types_no_args(self):\n self.assertEqual(_check_types(\"function\", {}, [], ()), [])\n\n def test_check_types_return_annotation(self):\n self.assertEqual(_check_types(\"function\", {\"return\": int}, [], ()), [])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "5272585", "language": "Python", "matching_score": 2.9252586364746094, "max_stars_count": 1, "path": "test/types_test.py" }, { "content": "from typing import Callable, Dict, List, Tuple\n\nfrom parse_this.exception import ParseThisException\n\n\ndef _check_types(\n func_name: str,\n annotations: Dict[str, Callable],\n func_args: List[str],\n defaults: Tuple,\n):\n \"\"\"Make sure that enough types were given to ensure conversion. Also remove\n potential 'self'/'cls' from the function arguments.\n\n Args:\n func_name: name of the decorated function\n annotations: annotations extract from a function signature\n func_args: list of function arguments name\n defaults: tuple of default values for the function argument\n\n Raises:\n ParseThisException: we cannot infer the type of all of the arguments using\n the annotations and the default values\n \"\"\"\n defaults = defaults or ()\n types_annotations = dict(annotations)\n\n if \"return\" in types_annotations:\n del types_annotations[\"return\"]\n\n if func_args and func_args[0] in (\"self\", \"cls\"):\n func_args = func_args[1:]\n\n if len(types_annotations) > len(func_args):\n raise ParseThisException(\n \"Too many types provided for conversion for '{}'.\".format(func_name)\n )\n if len(types_annotations) < len(func_args) - len(defaults):\n raise ParseThisException(\n \"Not enough types provided for conversion for '{}'\".format(func_name)\n )\n return func_args\n", "id": "6834439", "language": "Python", "matching_score": 2.152944326400757, "max_stars_count": 1, "path": "parse_this/types.py" }, { "content": "import logging\nimport typing\nfrom argparse import ArgumentParser\nfrom functools import wraps\nfrom inspect import getfullargspec\nfrom typing import Callable, Dict, Optional, Type\n\nfrom parse_this.args import _get_args_and_defaults, _get_args_to_parse\nfrom parse_this.call import _call, _call_method_from_namespace, _get_parser_call_method\nfrom parse_this.exception import ParseThisException\nfrom parse_this.help.action import FullHelpAction\nfrom parse_this.parsing import (\n _add_log_level_argument,\n _get_arg_parser,\n _get_parseable_methods,\n)\nfrom parse_this.types import _check_types\n\n_LOG = logging.getLogger(__name__)\n\n\nclass FunctionParser(object):\n \"\"\"Parse command line arguments, transform them to the appropriate type and\n delegate the call to a given callable.\n \"\"\"\n\n def __call__(\n self,\n func: Callable,\n args: 
typing.List[str] = None,\n delimiter_chars: str = \":\",\n log_level: bool = False,\n ):\n \"\"\"Create an ArgParser for the given function converting the command line\n arguments and passing them to the function, return the result of the\n function call.\n\n Args:\n func: the function for which the command line arguments to be parsed\n args: a list of arguments to be parsed if None sys.argv is used\n delimiter_chars: characters used to separate the parameters from their\n help message in the docstring. Defaults to ':'\n log_level: indicate whether or not a '--log-level' argument should be\n handled to set the log level during the execution\n \"\"\"\n _LOG.debug(\"Creating parser for %s\", func.__name__)\n (func_args, _, _, defaults, _, _, annotations) = getfullargspec(func)\n func_args = _check_types(func.__name__, annotations, func_args, defaults)\n args_and_defaults = _get_args_and_defaults(func_args, defaults)\n parser = _get_arg_parser(\n func, annotations, args_and_defaults, delimiter_chars, log_level\n )\n self._set_function_parser(func, parser)\n arguments = parser.parse_args(_get_args_to_parse(args))\n return _call(func, func_args, arguments)\n\n @typing.no_type_check\n def _set_function_parser(self, func: Callable, parser: ArgumentParser):\n func.parser = parser\n\n\nclass MethodParser(object):\n \"\"\"Creates an argument parser for the decorated function.\n\n Note:\n The method '__init__' can not be decorated if the class is not\n decorated with 'parse_class'\n \"\"\"\n\n _name: Optional[str]\n _delimiter_chars: str\n _log_level: bool\n\n def __init__(\n self, delimiter_chars: str = \":\", name: str = None, log_level: bool = False\n ):\n \"\"\"\n Args:\n delimiter_chars: characters used to separate the parameters from their\n help message in the docstring.\n name: name that will be used for the parser when used in a class\n decorated with `parse_class`. 
If not provided the name of the method will\n be used\n log_level: indicate whether or not a '--log-level' argument should be\n handled to set the log level during the execution\n \"\"\"\n self._delimiter_chars = delimiter_chars\n self._name = name\n self._log_level = log_level\n\n def __call__(self, func: Callable):\n \"\"\"Add an argument parser attribute `parser` to the decorated function.\n\n Args:\n func: the function for which we want to create an argument parser\n \"\"\"\n if not hasattr(func, \"parser\"):\n _LOG.debug(\n \"Creating parser for '%s'%s\",\n func.__name__,\n \"/%s\" % self._name if self._name else \"\",\n )\n (func_args, _, _, defaults, _, _, annotations) = getfullargspec(func)\n func_args = _check_types(func.__name__, annotations, func_args, defaults)\n args_and_defaults = _get_args_and_defaults(func_args, defaults)\n parser = _get_arg_parser(\n func,\n annotations,\n args_and_defaults,\n self._delimiter_chars,\n self._log_level,\n )\n parser.get_name = lambda: self._name or func.__name__\n self._set_method_parser(func, parser)\n\n @wraps(func)\n def decorated(*args, **kwargs):\n return func(*args, **kwargs)\n\n return decorated\n\n @typing.no_type_check\n def _set_method_parser(self, func: Callable, parser: ArgumentParser):\n func.parser = parser\n func.parser.call = _get_parser_call_method(func)\n\n\nclass ClassParser(object):\n \"\"\"Allows to create a global argument parser for a class along with\n subparsers with each if its properly decorated methods.\"\"\"\n\n _parse_private: bool\n _description: Optional[str]\n _cls: Type = None\n _log_level: bool\n\n def __init__(\n self,\n description: str = None,\n parse_private: bool = False,\n log_level: bool = False,\n ):\n \"\"\"\n\n Args:\n description: give a specific description for the top level parser,\n if not specified it will be the class docstring.\n parse_private: specifies whether or not 'private' methods should be\n parsed, defaults to False\n log_level: indicate whether or not a '--log-level' argument should be\n handled to set the log level during the execution\n \"\"\"\n self._description = description\n self._parse_private = parse_private\n self._log_level = log_level\n\n def __call__(self, cls: Type):\n \"\"\"\n Args:\n cls: class to be decorated\n \"\"\"\n _LOG.debug(\"Creating parser for class '%s'\", cls.__name__)\n self._cls = cls\n init_parser, methods_to_parse = _get_parseable_methods(cls)\n self._set_class_parser(init_parser, methods_to_parse, cls)\n return cls\n\n def _add_sub_parsers(\n self,\n top_level_parser,\n methods_to_parse: Dict[str, ArgumentParser],\n class_name: str,\n ):\n \"\"\"Add all the sub-parsers to the top_level_parser.\n\n Args:\n top_level_parser: the top level parser\n methods_to_parse: dict of method name pointing to their associated\n argument parser\n class_name: name of the decorated class\n\n Returns:\n a dict of registered name of the parser i.e. sub command name\n pointing to the method real name\n \"\"\"\n description = \"Accessible methods of {}\".format(class_name)\n sub_parsers = top_level_parser.add_subparsers(\n description=description, dest=\"method\"\n )\n # Holds the mapping between the name registered for the parser\n # and the method real name. 
It is useful in the 'inner_call'\n # method retrieve the real method\n parser_to_method = {}\n for method_name, parser in methods_to_parse.items():\n # We use the name provided in 'create_parser` or the name of the\n # decorated method\n parser_name = parser.get_name() # type: ignore[attr-defined]\n # Make the method name compatible for the argument parsing\n if parser_name.startswith(\"_\"):\n if not self._parse_private:\n # We skip private methods if the caller asked not to\n # parse them\n continue\n # 'Private' methods are exposed without their leading or\n # trailing '_'s. Also works for 'special' methods.\n parser_name = parser_name.strip(\"_\")\n parser_name = parser_name.replace(\"_\", \"-\")\n parser_to_method[parser_name] = method_name\n sub_parsers.add_parser(\n parser_name,\n parents=[parser],\n add_help=False,\n description=parser.description,\n )\n return parser_to_method\n\n def _set_class_parser(\n self,\n init_parser: ArgumentParser,\n methods_to_parse: Dict[str, ArgumentParser],\n cls: Type,\n ):\n \"\"\"Creates the complete argument parser for the decorated class.\n\n Args:\n init_parser: argument parser for the __init__ method or None\n methods_to_parse: dict of method name pointing to their associated\n argument parser\n cls: the class we are decorating\n\n Returns:\n The decorated class with an added attribute 'parser'\n \"\"\"\n top_level_parents = [init_parser] if init_parser else []\n description = self._description or cls.__doc__\n top_level_parser = ArgumentParser(\n description=description,\n parents=top_level_parents,\n add_help=False,\n conflict_handler=\"resolve\",\n )\n top_level_parser.add_argument(\n \"-h\", \"--help\", action=FullHelpAction, help=\"Display this help message\"\n )\n if self._log_level:\n _add_log_level_argument(top_level_parser)\n parser_to_method = self._add_sub_parsers(\n top_level_parser, methods_to_parse, cls.__name__\n )\n # Update the dict with the __init__ method so we can instantiate\n # the decorated class\n if init_parser:\n parser_to_method[\"__init__\"] = \"__init__\"\n self._set_parser_call_method(parser_to_method, top_level_parser)\n cls.parser = top_level_parser\n\n @typing.no_type_check\n def _set_parser_call_method(\n self, parser_to_method: Dict[str, str], top_level_parser: ArgumentParser\n ):\n top_level_parser.call = self._get_parser_call_method(parser_to_method)\n\n def _get_parser_call_method(self, parser_to_method: Dict[str, Callable]):\n \"\"\"Return the parser special method 'call' that handles sub-command\n calling.\n\n Args:\n parser_to_method: mapping of the parser registered name\n to the method it is linked to\n \"\"\"\n\n def inner_call(args=None, instance=None):\n \"\"\"Allows to call the method invoked from the command line or\n provided argument.\n\n Args:\n args: list of arguments to parse, defaults to command line arguments\n instance: an instance of the decorated class. If instance is None,\n the default, and __init__ is decorated the object will be\n instantiated on the fly from the command line arguments\n \"\"\"\n parser = self._cls.parser\n namespace = parser.parse_args(_get_args_to_parse(args))\n if instance is None:\n # If the __init__ method is not part of the method to\n # decorate we cannot instantiate the class\n if \"__init__\" not in parser_to_method:\n raise ParseThisException(\n (\n \"'__init__' method is not decorated. 
\"\n \"Please provide an instance to \"\n \"'{}.parser.call' or decorate the \"\n \"'__init___' method with \"\n \"'create_parser'\".format(self._cls.__name__)\n )\n )\n # We instantiate the class from the command line arguments\n instance = _call_method_from_namespace(self._cls, \"__init__\", namespace)\n method_name = parser_to_method[namespace.method]\n return _call_method_from_namespace(instance, method_name, namespace)\n\n return inner_call\n", "id": "11866194", "language": "Python", "matching_score": 5.7724409103393555, "max_stars_count": 1, "path": "parse_this/parsers.py" }, { "content": "import logging\nfrom argparse import ArgumentParser, _HelpAction\nfrom typing import Any, Callable, Dict, List, Tuple, Type\n\nfrom parse_this.args import _NO_DEFAULT\nfrom parse_this.exception import ParseThisException\nfrom parse_this.help.description import prepare_doc\n\n_LOG = logging.getLogger(__name__)\n\n\ndef _get_parseable_methods(cls: Type):\n \"\"\"Return all methods of cls that are parseable i.e. have been decorated\n by '@create_parser'.\n\n Args:\n cls: the class currently being decorated\n\n Note:\n classmethods will not be included as they can only be referenced once\n the class has been defined\n Returns:\n a 2-tuple with the parser of the __init__ method if any and a dict\n of the form {'method_name': associated_parser}\n \"\"\"\n _LOG.debug(\"Retrieving parseable methods for '%s'\", cls.__name__)\n init_parser = None\n methods_to_parse = {}\n for name, obj in vars(cls).items():\n # Every callable object that has a 'parser' attribute will be\n # added as a subparser.\n # This won't work for classmethods because reference to\n # classmethods are only possible once the class has been defined\n if callable(obj) and hasattr(obj, \"parser\"):\n _LOG.debug(\"Found method '%s'\", name)\n if name == \"__init__\":\n # If we find the decorated __init__ method it will be\n # used as the top level parser\n init_parser = obj.parser\n else:\n methods_to_parse[obj.__name__] = obj.parser\n return init_parser, methods_to_parse\n\n\ndef _add_log_level_argument(parser: ArgumentParser):\n parser.add_argument(\n \"--log-level\", required=False, choices=list(logging._nameToLevel.keys())\n )\n\n\ndef _get_arg_parser(\n func: Callable,\n annotations: Dict[str, Callable],\n args_and_defaults: List[Tuple[str, Any]],\n delimiter_chars: str,\n log_level: bool = False,\n):\n \"\"\"Return an ArgumentParser for the given function. 
Arguments are defined\n from the function arguments and their associated defaults.\n\n Args:\n func: function for which we want an ArgumentParser\n annotations: is a dictionary mapping parameter names to annotations\n args_and_defaults: list of 2-tuples (arg_name, arg_default)\n delimiter_chars: characters used to separate the parameters from their\n help message in the docstring\n log_level: indicate whether or not a '--log-level' argument should be\n handled to set the log level during the execution\n \"\"\"\n _LOG.debug(\"Creating ArgumentParser for '%s'\", func.__name__)\n (description, arg_help) = prepare_doc(\n func, [x for (x, _) in args_and_defaults], delimiter_chars\n )\n parser = ArgumentParser(description=description)\n if log_level:\n _add_log_level_argument(parser)\n for (arg, default) in args_and_defaults:\n help_msg = arg_help[arg]\n arg_type = annotations.get(arg)\n if default is _NO_DEFAULT:\n arg_type = arg_type or (lambda x: x)\n if arg_type == bool:\n _LOG.debug(\n \"Adding optional flag %s.%s (default: True)\", func.__name__, arg\n )\n parser.add_argument(\n \"--%s\" % arg,\n default=True,\n required=False,\n action=\"store_false\",\n help=\"%s. Defaults to True if not specified\" % help_msg,\n )\n else:\n _LOG.debug(\n \"Adding positional argument %s.%s: %s\", func.__name__, arg, arg_type\n )\n parser.add_argument(arg, help=help_msg, type=arg_type)\n else:\n if default is None and arg_type is None:\n raise ParseThisException(\n \"To use default value of 'None' you need \"\n \"to specify the type of the argument '{}' \"\n \"for the method '{}'\".format(arg, func.__name__)\n )\n arg_type = arg_type or type(default)\n if arg_type == bool:\n action = \"store_false\" if default else \"store_true\"\n _LOG.debug(\n \"Adding optional flag %s.%s (default: %s)\",\n func.__name__,\n arg,\n default,\n )\n parser.add_argument(\n \"--%s\" % arg, help=help_msg, default=default, action=action\n )\n else:\n _LOG.debug(\n \"Adding optional argument %s.%s: %s (default: %s)\",\n func.__name__,\n arg,\n arg_type,\n default,\n )\n parser.add_argument(\n \"--%s\" % arg, help=help_msg, default=default, type=arg_type\n )\n return parser\n\n\ndef _get_args_name_from_parser(parser: ArgumentParser):\n \"\"\"Retrieve the name of the function argument linked to the given parser.\n\n Args:\n parser: a function parser\n \"\"\"\n # Retrieve the 'action' destination of the method parser i.e. its\n # argument name. 
The HelpAction is ignored.\n return [\n action.dest\n for action in parser._actions\n if not isinstance(action, _HelpAction) and action.dest != \"log_level\"\n ]\n", "id": "5707793", "language": "Python", "matching_score": 2.685972213745117, "max_stars_count": 1, "path": "parse_this/parsing.py" }, { "content": "from parse_this.exception import ParseThisException\nfrom parse_this.parsers import ClassParser, FunctionParser, MethodParser\n\n__all__ = [\n \"ParseThisException\",\n \"parse_this\",\n \"create_parser\",\n \"parse_class\",\n]\n\nparse_this = FunctionParser()\n\ncreate_parser = MethodParser\n\nparse_class = ClassParser\n", "id": "6693566", "language": "Python", "matching_score": 2.7110960483551025, "max_stars_count": 1, "path": "parse_this/__init__.py" }, { "content": "class ParseThisException(Exception):\n \"\"\"Error base class raised by this module.\"\"\"\n", "id": "9262677", "language": "Python", "matching_score": 0.2780153453350067, "max_stars_count": 1, "path": "parse_this/exception/__init__.py" }, { "content": "import unittest\nfrom collections import namedtuple\n\nfrom parse_this.call import (\n _call,\n _call_method_from_namespace,\n _get_parser_call_method,\n)\nfrom parse_this.exception import ParseThisException\nfrom test.helpers import Parseable, concatenate_string, parse_me_no_docstring\n\n\nclass TestCall(unittest.TestCase):\n def test_get_parser_call_method_returns_callable(self):\n call_method = _get_parser_call_method(concatenate_string)\n self.assertTrue(callable(call_method))\n\n def test_get_parser_call_method_raise_on_init(self):\n call_method = _get_parser_call_method(Parseable.__init__)\n self.assertRaises(ParseThisException, call_method, None)\n\n def test_get_parser_call_method_execution(self):\n call_method = _get_parser_call_method(Parseable.parseable)\n self.assertEqual(call_method(Parseable(12), [\"2\"]), 24)\n\n def test_get_parser_call_method_on_function(self):\n call_method = _get_parser_call_method(concatenate_string)\n self.assertEqual(call_method(args=\"yes 2\".split()), \"yesyes\")\n\n def test_call_on_parse_me_no_docstring(self):\n Namespace = namedtuple(\"Namespace\", [\"one\", \"two\", \"three\"])\n fake_namespace = Namespace(**{\"one\": 2, \"two\": 12, \"three\": 3})\n self.assertEqual(\n _call(parse_me_no_docstring, [\"one\", \"two\", \"three\"], fake_namespace),\n (24, 9),\n )\n\n def test_call_method_from_namespace_create_instance(self):\n Namespace = namedtuple(\"Namespace\", [\"a\"])\n fake_namespace = Namespace(a=2)\n parseable = _call_method_from_namespace(Parseable, \"__init__\", fake_namespace)\n self.assertIsInstance(parseable, Parseable)\n self.assertEqual(parseable._a, 2)\n\n def test_call_method_from_namespace_execution(self):\n Namespace = namedtuple(\"Namespace\", [\"d\"])\n fake_namespace = Namespace(d=2)\n self.assertEqual(\n _call_method_from_namespace(Parseable(12), \"parseable\", fake_namespace), 24\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "8942448", "language": "Python", "matching_score": 3.505669116973877, "max_stars_count": 1, "path": "test/call_test.py" }, { "content": "import unittest\n\nfrom parse_this import create_parser\nfrom parse_this.args import _NO_DEFAULT\nfrom parse_this.exception import ParseThisException\nfrom parse_this.parsing import _get_arg_parser, _get_parseable_methods\nfrom test.helpers import (\n Parseable,\n has_bool_arguments,\n has_flags,\n has_none_default_value,\n parse_me_full_docstring,\n)\nfrom test.utils import captured_output\n\n\nclass 
TestParsing(unittest.TestCase):\n def test_get_arg_parser_bool_argument(self):\n self.assertEqual(has_bool_arguments.parser.call(args=[]), True)\n self.assertEqual(has_bool_arguments.parser.call(args=[\"--a\"]), False)\n\n def test_get_arg_parser_bool_default_value(self):\n self.assertEqual(has_flags.parser.call(args=[\"12\"]), (12, False))\n self.assertEqual(has_flags.parser.call(args=[\"12\", \"--b\"]), (12, True))\n\n def test_get_arg_parser_with_none_default_value(self):\n self.assertEqual(has_none_default_value.parser.call(args=[\"12\"]), (12, None))\n self.assertEqual(\n has_none_default_value.parser.call(args=[\"12\", \"--b\", \"yes\"]), (12, \"yes\")\n )\n\n def test_get_arg_parser_none_default_value_without_type(self):\n with self.assertRaises(ParseThisException):\n\n @create_parser(int)\n def have_none_default_value(a: int, b=None):\n pass\n\n def test_get_parseable_methods(self):\n (init_parser, method_to_parser) = _get_parseable_methods(Parseable)\n self.assertIsNotNone(init_parser)\n self.assertListEqual(\n sorted(list(method_to_parser.keys())), [\"_private_method\", \"parseable\"]\n )\n\n def test_get_parseable_methods_do_not_include_classmethod(self):\n (_, method_to_parser) = _get_parseable_methods(Parseable)\n self.assertNotIn(\"cls_method\", method_to_parser.keys())\n\n def test_get_arg_parser_annotation_take_precedence(self):\n parser = _get_arg_parser(\n parse_me_full_docstring,\n {\"one\": int, \"two\": int, \"three\": int},\n [(\"one\", _NO_DEFAULT), (\"two\", _NO_DEFAULT), (\"three\", _NO_DEFAULT)],\n \":\",\n )\n namespace = parser.parse_args(\"1 2 3\".split())\n self.assertEqual(namespace.one, 1)\n self.assertEqual(namespace.two, 2)\n self.assertEqual(namespace.three, 3)\n\n def test_get_arg_parser_with_default_value(self):\n parser = _get_arg_parser(\n parse_me_full_docstring,\n {\"one\": str, \"two\": int, \"three\": int},\n [(\"one\", _NO_DEFAULT), (\"two\", _NO_DEFAULT), (\"three\", 12)],\n \":\",\n )\n namespace = parser.parse_args(\"yes 42\".split())\n self.assertEqual(namespace.one, \"yes\")\n self.assertEqual(namespace.two, 42)\n self.assertEqual(namespace.three, 12)\n\n def test_get_arg_parser_without_default_value(self):\n parser = _get_arg_parser(\n parse_me_full_docstring,\n {\"one\": str, \"two\": int, \"three\": int},\n [(\"one\", _NO_DEFAULT), (\"two\", _NO_DEFAULT), (\"three\", 12)],\n \":\",\n )\n namespace = parser.parse_args(\"no 12 --three=23\".split())\n self.assertEqual(namespace.one, \"no\")\n self.assertEqual(namespace.two, 12)\n self.assertEqual(namespace.three, 23)\n\n def test_get_arg_parser_required_arguments(self):\n parser = _get_arg_parser(\n parse_me_full_docstring,\n {\"one\": str, \"two\": int, \"three\": int},\n [(\"one\", _NO_DEFAULT), (\"two\", _NO_DEFAULT), (\"three\", 12)],\n \":\",\n )\n with captured_output():\n self.assertRaises(\n SystemExit, parser.parse_args, \"we_are_missing_two\".split()\n )\n\n def test_get_arg_parser_argument_type(self):\n parser = _get_arg_parser(\n parse_me_full_docstring,\n {\"one\": str, \"two\": int, \"three\": int},\n [(\"one\", _NO_DEFAULT), (\"two\", _NO_DEFAULT), (\"three\", 12)],\n \":\",\n )\n with captured_output():\n self.assertRaises(\n SystemExit, parser.parse_args, \"yes i_should_be_an_int\".split()\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "5739821", "language": "Python", "matching_score": 2.997004508972168, "max_stars_count": 1, "path": "test/parsing_test.py" }, { "content": "from parse_this import create_parser, parse_class\n\n\ndef no_docstring():\n 
pass\n\n\ndef with_args(a, b):\n pass\n\n\ndef blank_line_in_wrong_place(one: int, two: int):\n \"\"\"I put the blank line after arguments ...\n\n Args:\n one: this help is #1\n\n two: this once won't appear sadly\n \"\"\"\n return one * two\n\n\ndef parse_me_full_docstring(one: str, two: int, three: int = 12):\n \"\"\"Could use some parsing.\n\n Args:\n one: some stuff shouldn't be written down\n two: I can turn 2 syllables words into 6 syllables words\n three: I don't like the number three\n\n Returns:\n the first string argument concatenated with itself 'two' times and the\n last parameters multiplied by itself\n \"\"\"\n return one * two, three * three\n\n\ndef parse_me_no_docstring(one: int, two: int, three: int):\n return one * two, three * three\n\n\ndef multiline_docstring(one: int, two: int, three: int):\n \"\"\"I am a sneaky function.\n\n Args:\n one: this one is a no brainer\n three: noticed you're missing docstring for two and\n I'm multiline too!\n\n Returns:\n the first string argument concatenated with itself 'two' times and the\n last parameters multiplied by itself\n \"\"\"\n return one * two, three * three\n\n\ndef different_delimiter_chars(one: int, two: int, three: int):\n \"\"\"I am a sneaky function.\n\n Args:\n one -- this one is a no brainer even with dashes\n three -- noticed you're missing docstring for two and\n I'm multiline too!\n\n Returns:\n the first string argument concatenated with itself 'two' times and the\n last parameters multiplied by itself\n \"\"\"\n return one * two, three * three\n\n\ndef parse_me(one: str, two: int, three: int = 12):\n \"\"\"Could use some parsing.\n\n Args:\n one: some stuff shouldn't be written down\n two: I can turn 2 syllables words into 6 syllables words\n three: I don't like the number three\n\n Returns:\n the first string argument concatenated with itself 'two' times and the\n last parameters multiplied by itself\n \"\"\"\n return one * two, three * three\n\n\n@create_parser()\ndef concatenate_string(string: str, nb_concat: int):\n return string * nb_concat\n\n\n@create_parser()\ndef has_none_default_value(a: int, b: str = None):\n return a, b\n\n\n@create_parser()\ndef has_flags(a: int, b: bool = False):\n return a, b\n\n\n@create_parser()\ndef has_bool_arguments(a: bool):\n return a\n\n\n@parse_class()\nclass Parseable(object):\n @create_parser()\n def __init__(self, a: int):\n self._a = a\n\n @create_parser()\n def _private_method(self, b: int):\n return self._a * b\n\n def not_parseable(self, c: int):\n return self._a * c\n\n @create_parser()\n def parseable(self, d: int):\n return self._a * d\n\n @classmethod\n @create_parser()\n def cls_method(cls, e: int):\n return e * e\n\n\n@parse_class(parse_private=True)\nclass ParseableWithPrivateMethod(object):\n @create_parser()\n def __init__(self, a: int):\n self._a = a\n\n @create_parser()\n def _private_method(self, b: int):\n return self._a * b\n\n def not_parseable(self, c: int):\n return self._a * c\n\n @create_parser()\n def parseable(self, d: int):\n return self._a * d\n\n @classmethod\n @create_parser()\n def cls_method(cls, e: int):\n return e * e\n\n\n@create_parser()\ndef i_am_parseable(one: str, two: int, three: int = 12):\n \"\"\"I too want to be parseable.\n\n Args:\n one: the one and only\n two: for the money\n three: don't like the number three\n \"\"\"\n return one * two, three * three\n\n\nclass Dummy(object):\n def __init__(self, a):\n self._a = a\n\n @create_parser(delimiter_chars=\"--\")\n def multiply_all(self, b: int, c: int = 2):\n \"\"\"Will 
multiply everything!\n\n Args:\n b -- the Queen B\n c -- a useless value\n\n Returns:\n Everything multiplied\n \"\"\"\n return self._a * b * c\n\n @classmethod\n @create_parser()\n def mult(cls, d: int, e: int = 2):\n return d * e\n\n\nclass NeedParseClassDecorator(object):\n @create_parser()\n def __init__(self, a: int):\n self._a = a\n\n\n@parse_class(description=\"Hello World\", parse_private=True)\nclass NeedParsing(object):\n \"\"\"This will be used as the parser description.\"\"\"\n\n @create_parser()\n def __init__(self, four: int):\n \"\"\"\n Args:\n four: an int that will be used to multiply stuff\n \"\"\"\n self._four = four\n\n @create_parser()\n def multiply_self_arg(self, num: int):\n return self._four * num\n\n @create_parser()\n def _private_method(self, num: int):\n return self._four * num\n\n @create_parser()\n def __str__(self):\n return str(self._four)\n\n @create_parser()\n def could_you_parse_me(self, one: str, two: int, three: int = 12):\n \"\"\"I would like some arg parsing please.\n\n Args:\n one: and only one\n two: will never be first\n three: I don't like the number three\n \"\"\"\n return one * two, three * three\n\n @classmethod\n @create_parser()\n def parse_me_if_you_can(cls, one: str, two: int, three: int = 12):\n return one * two, three * three\n\n @create_parser(name=\"new-name\")\n def rename_me_please(self, one: str, two: int):\n return one * two\n\n\n@parse_class()\nclass ShowMyDocstring(object):\n \"\"\"This should be the parser description\"\"\"\n\n @create_parser()\n def _will_not_appear(self, num: int):\n return num * num\n\n @create_parser()\n def __str__(self):\n return self.__class__.__name__\n\n\n@parse_class()\nclass NeedInitDecorator(object):\n def __init__(self, val: int):\n self._val = val\n\n @create_parser()\n def do_stuff(self, num: int, div: int = 2):\n return self._val * num / div\n", "id": "3946901", "language": "Python", "matching_score": 4.449583053588867, "max_stars_count": 1, "path": "test/helpers.py" }, { "content": "import unittest\n\nfrom parse_this.exception import ParseThisException\nfrom parse_this.parsers import FunctionParser\nfrom test.helpers import (\n Dummy,\n NeedInitDecorator,\n NeedParseClassDecorator,\n NeedParsing,\n ShowMyDocstring,\n different_delimiter_chars,\n i_am_parseable,\n parse_me_full_docstring,\n)\nfrom test.utils import captured_output\n\n\nclass TestFunctionParser(unittest.TestCase):\n def test_function_return(self):\n parser = FunctionParser()\n actual = parser(parse_me_full_docstring, \"first 2 --three 3\".split())\n expected = parse_me_full_docstring(\"first\", 2, 3)\n self.assertEqual(actual, expected)\n\n def test_function_default(self):\n parser = FunctionParser()\n actual = parser(parse_me_full_docstring, \"first 2\".split())\n expected = parse_me_full_docstring(\"first\", 2)\n self.assertEqual(actual, expected)\n\n def test_function_delimiter_chars(self):\n parser = FunctionParser()\n with captured_output() as (out, _):\n with self.assertRaises(SystemExit):\n parser(\n different_delimiter_chars, \"--help\".split(), delimiter_chars=\"--\"\n )\n help_message = out.getvalue()\n self.assertIn(\"this one is a no brainer even with dashes\", help_message)\n self.assertIn(\n \"noticed you're missing docstring for two and I'm multiline \" \"too!\",\n help_message,\n )\n\n def test_function_description(self):\n parser = FunctionParser()\n parser(different_delimiter_chars, \"1 2 3\".split(), delimiter_chars=\"--\")\n self.assertEqual(\n \"I am a sneaky function.\", 
different_delimiter_chars.parser.description\n )\n\n\nclass TestMethodParser(unittest.TestCase):\n def test_create_parser_on_function(self):\n parser = i_am_parseable.parser\n self.assertEqual(parser.description, \"I too want to be parseable.\")\n self.assertEqual(parser.call(args=\"yes 2 --three 3\".split()), (\"yesyes\", 9))\n\n def test_create_parser_on_method(self):\n parser = Dummy.multiply_all.parser\n self.assertEqual(parser.description, \"Will multiply everything!\")\n self.assertEqual(parser.call(Dummy(12), [\"2\"]), 48)\n\n def test_create_parser_on_classmethod(self):\n parser = Dummy.mult.parser\n self.assertEqual(parser.call(Dummy, \"2 --e 2\".split()), 4)\n\n def test_create_parser_on_init(self):\n parser = NeedParseClassDecorator.__init__.parser\n self.assertRaises(ParseThisException, parser.call, None, [\"2\"])\n\n def test_create_parser_rename(self):\n need_parsing = NeedParsing(12)\n parser = need_parsing.could_you_parse_me.parser\n # at this stage the '_' aren't replaced yet\n self.assertEqual(parser.get_name(), \"could_you_parse_me\")\n\n def test_create_parser_default_name(self):\n need_parsing = NeedParsing(12)\n parser = need_parsing.rename_me_please.parser\n self.assertEqual(parser.get_name(), \"new-name\")\n\n\nclass TestClassParser(unittest.TestCase):\n def test_parse_class_description(self):\n self.assertEqual(NeedParsing.parser.description, \"Hello World\")\n self.assertEqual(\n ShowMyDocstring.parser.description, \"This should be the parser description\"\n )\n\n def test_parse_class_add_parser(self):\n self.assertTrue(hasattr(NeedParsing, \"parser\"))\n self.assertTrue(hasattr(NeedParsing(12), \"parser\"))\n self.assertTrue(hasattr(ShowMyDocstring, \"parser\"))\n\n def test_parse_class_subparsers(self):\n parser = NeedParsing.parser\n self.assertEqual(parser.call(\"12 multiply-self-arg 2\".split()), 24)\n self.assertEqual(\n parser.call(\"12 could-you-parse-me yes 2 --three 4\".split()), (\"yesyes\", 16)\n )\n\n def test_parse_class_expose_private_method(self):\n parser = NeedParsing.parser\n self.assertEqual(parser.call(\"12 private-method 2\".split()), 24)\n\n def test_parse_class_expose_special_method(self):\n parser = NeedParsing.parser\n self.assertEqual(parser.call(\"12 str\".split()), \"12\")\n\n def test_parse_class_do_not_expose_private_methods(self):\n with captured_output():\n with self.assertRaises(SystemExit):\n ShowMyDocstring.parser.parse_args(\"will-not-appear 12\".split())\n with self.assertRaises(SystemExit):\n ShowMyDocstring.parser.parse_args(\"str\".split())\n\n def test_parse_class_method_is_still_parseable(self):\n need_parsing = NeedParsing(12)\n parser = need_parsing.could_you_parse_me.parser\n self.assertEqual(\n parser.call(need_parsing, \"yes 2 --three 3\".split()), (\"yesyes\", 9)\n )\n\n def test_parse_class_init_need_decoration(self):\n with self.assertRaises(ParseThisException):\n NeedInitDecorator.parser.call(\"do-stuff 12\".split())\n\n def test_parse_class_need_init_decorator_with_instance(self):\n instance = NeedInitDecorator(2)\n self.assertEqual(\n NeedInitDecorator.parser.call(\"do-stuff 12\".split(), instance), 12\n )\n self.assertEqual(\n NeedInitDecorator.parser.call(\"do-stuff 12 --div 3\".split(), instance), 8\n )\n\n def test_parse_class_classmethod_are_not_sub_command(self):\n with captured_output():\n with self.assertRaises(SystemExit):\n NeedParsing.parser.call(\"12 parse-me-if-you-can one 2\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "5806455", "language": "Python", "matching_score": 
3.9125988483428955, "max_stars_count": 1, "path": "test/parsers_test.py" }, { "content": "import unittest\n\nfrom parse_this.help.description import _get_default_help_message, prepare_doc\nfrom test.helpers import (\n Parseable,\n ParseableWithPrivateMethod,\n blank_line_in_wrong_place,\n different_delimiter_chars,\n multiline_docstring,\n no_docstring,\n parse_me_full_docstring,\n parse_me_no_docstring,\n with_args,\n)\nfrom test.utils import captured_output\n\n\nclass TestHelp(unittest.TestCase):\n def test_get_default_help_message_no_docstring(self):\n (description, _) = _get_default_help_message(no_docstring, [])\n self.assertIsNotNone(description)\n self.assertIn(no_docstring.__name__, description)\n\n def test_get_default_help_message_add_default_args_help(self):\n (_, args_help) = _get_default_help_message(with_args, [\"a\", \"b\"])\n self.assertListEqual(sorted(list(args_help.keys())), [\"a\", \"b\"])\n (_, args_help) = _get_default_help_message(\n with_args, [\"a\", \"b\"], None, {\"a\": \"I have an help message\"}\n )\n self.assertListEqual(sorted(list(args_help.keys())), [\"a\", \"b\"])\n\n def test_prepare_doc_blank_line_in_wrong_place(self):\n (description, help_msg) = prepare_doc(\n blank_line_in_wrong_place, [\"one\", \"two\"], \":\"\n )\n self.assertEqual(description, \"I put the blank line after arguments ...\")\n self.assertEqual(\n help_msg, {\"one\": \"this help is #1\", \"two\": \"Help message for two\"}\n )\n\n def test_prepare_doc_full_docstring(self):\n (description, help_msg) = prepare_doc(\n parse_me_full_docstring, [\"one\", \"two\", \"three\"], \":\"\n )\n self.assertEqual(description, \"Could use some parsing.\")\n self.assertEqual(\n help_msg,\n {\n \"one\": \"some stuff shouldn't be written down\",\n \"two\": \"I can turn 2 syllables words into 6 syllables words\",\n \"three\": \"I don't like the number three\",\n },\n )\n\n def test_prepare_doc_no_docstring(self):\n (description, help_msg) = prepare_doc(\n parse_me_no_docstring, [\"one\", \"two\", \"three\"], \":\"\n )\n self.assertEqual(description, \"Argument parsing for parse_me_no_docstring\")\n self.assertEqual(\n help_msg,\n {\n \"one\": \"Help message for one\",\n \"two\": \"Help message for two\",\n \"three\": \"Help message for three\",\n },\n )\n\n def test_prepare_doc_will_you_dare(self):\n (description, help_msg) = prepare_doc(\n multiline_docstring, [\"one\", \"two\", \"three\"], \":\"\n )\n self.assertEqual(description, \"I am a sneaky function.\")\n self.assertEqual(\n help_msg,\n {\n \"one\": \"this one is a no brainer\",\n \"two\": \"Help message for two\",\n \"three\": \"noticed you're missing docstring for two and \"\n + \"I'm multiline too!\",\n },\n )\n\n def test_prepare_doc_delimiter_chars(self):\n (description, help_msg) = prepare_doc(\n different_delimiter_chars, [\"one\", \"two\", \"three\"], \"--\"\n )\n self.assertEqual(description, \"I am a sneaky function.\")\n self.assertEqual(\n help_msg,\n {\n \"one\": \"this one is a no brainer even with dashes\",\n \"two\": \"Help message for two\",\n \"three\": \"noticed you're missing docstring for two and \"\n + \"I'm multiline too!\",\n },\n )\n\n\nclass TestFullHelpAction(unittest.TestCase):\n def test_help_is_complete(self):\n with captured_output() as (out, _):\n self.assertRaises(SystemExit, Parseable.parser.parse_args, [\"-h\"])\n help_message = out.getvalue()\n self.assertIn(\"parseable\", help_message)\n # Private methods and classmethods are not exposed by default\n self.assertNotIn(\"private_method\", help_message)\n 
self.assertNotIn(\"cls_method\", help_message)\n\n def test_help_is_complete_with_private_method(self):\n with captured_output() as (out, _):\n self.assertRaises(\n SystemExit, ParseableWithPrivateMethod.parser.parse_args, [\"-h\"]\n )\n help_message = out.getvalue()\n self.assertIn(\"parseable\", help_message)\n self.assertIn(\"private_method\", help_message)\n # Classmethods are not exposed by default\n self.assertNotIn(\"cls_method\", help_message)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "2374716", "language": "Python", "matching_score": 1.3863420486450195, "max_stars_count": 1, "path": "test/help_test.py" }, { "content": "from unittest import TestCase\n\nfrom lru import LeastRecentlyUsedCache, lru\n\n\nclass TestLeastRecentlyUsedCache(TestCase):\n\n def test_default_capacity(self):\n lru = LeastRecentlyUsedCache()\n self.assertIsNotNone(lru.get_capacity())\n\n def test_constructor_capacity(self):\n capacity = 12\n lru = LeastRecentlyUsedCache(capacity)\n self.assertEqual(lru.get_capacity(), capacity)\n\n def test_retrieve_value(self):\n key = \"some-test-value\"\n value = \"some-test-value\"\n lru = LeastRecentlyUsedCache()\n lru.add(key, value)\n self.assertEqual(value, lru.get(key))\n\n def test_add_access_order(self):\n nb_items = 5\n lru = LeastRecentlyUsedCache(nb_items + 1)\n [lru.add(x, x ** 2) for x in range(nb_items)]\n self.assertEqual(nb_items, len(lru._keys))\n self.assertEqual(list(reversed(range(nb_items))), lru._keys)\n\n def test_get_access_order(self):\n nb_items = 5\n lru = LeastRecentlyUsedCache(nb_items + 1)\n [lru.add(x, x ** 2) for x in range(nb_items)]\n [lru.get(x) for x in reversed(range(nb_items))]\n self.assertEqual(nb_items, len(lru._keys))\n self.assertEqual(list(range(nb_items)), lru._keys)\n\n def test_get_unknown_key_preserve_order(self):\n nb_items = 5\n lru = LeastRecentlyUsedCache(nb_items + 1)\n [lru.add(x, x ** 2) for x in range(nb_items)]\n self.assertEqual(nb_items, len(lru._keys))\n self.assertEqual(list(reversed(range(nb_items))), lru._keys)\n lru.get(nb_items + 1)\n self.assertEqual(list(reversed(range(nb_items))), lru._keys)\n\n def test_eviction_on_add_value(self):\n nb_items = 3\n lru = LeastRecentlyUsedCache(nb_items)\n [lru.add(x, x ** 2) for x in range(5)]\n self.assertIsNone(lru.get(1))\n self.assertEqual(list(reversed(range(5)))[:-2], lru._keys)\n self.assertEqual({x: x ** 2 for x in list(reversed(range(5)))[:-2]}, lru._storage)\n\n def test_callback_called_for_non_cached_values(self):\n nb_calls = 0\n\n def square(key):\n nonlocal nb_calls\n nb_calls += 1\n return key ** 2\n\n lru = LeastRecentlyUsedCache(callback=square)\n value = lru.get(2)\n self.assertEqual(value, 4)\n self.assertEqual(nb_calls, 1)\n value = lru.get(2)\n self.assertEqual(value, 4)\n self.assertEqual(nb_calls, 1)\n\n def test_decorator(self):\n nb_calls = 0\n\n @lru(3)\n def square(x):\n nonlocal nb_calls\n nb_calls += 1\n return x ** 2\n\n value = square(3)\n self.assertEqual(value, 9)\n self.assertEqual(nb_calls, 1)\n value = square(3)\n self.assertEqual(value, 9)\n self.assertEqual(nb_calls, 1)\n\n square(4)\n square(5)\n square(6)\n self.assertEqual(nb_calls, 4)\n value = square(3)\n self.assertEqual(value, 9)\n self.assertEqual(nb_calls, 5)\n", "id": "643529", "language": "Python", "matching_score": 2.9846854209899902, "max_stars_count": 0, "path": "caching/test_lru.py" }, { "content": "from typing import Any, Callable\n\n\nclass LeastRecentlyUsedCache:\n \"\"\"\n Offers fix memory caching where least recently used entries are evicted.\n 
\"\"\"\n _DEFAULT_CAPACITY = 10\n\n def __init__(self, capacity: int = _DEFAULT_CAPACITY, callback: Callable = None):\n \"\"\"\n\n :param capacity: the maximum number of key/value pairs that will be held in memory\n :param callback: callback to invoke if the key isn't known\n \"\"\"\n self._storage = {}\n self._callback = callback or self._storage.get\n self._capacity = capacity\n self._keys = []\n\n def get_capacity(self) -> Any:\n \"\"\"\n\n :return: maximum number of key/value pairs the cache can hold before eviction takes place\n \"\"\"\n return self._capacity\n\n def add(self, key: Any, value: Any) -> None:\n \"\"\" Stores the key/value pair.\n\n :param key: key under which the value will be stored\n :param value: value link to the given key\n :return: None\n \"\"\"\n self._update_keys_access_order(key)\n self._evict_if_necessary()\n self._storage[key] = value\n\n def get(self, key: Any) -> Any:\n \"\"\" Return the value stored for the given key or the provided default\n\n :param key: key associated to the value we are trying to retrieve\n :return: value associated with key if any or the given default\n \"\"\"\n if key in self._storage:\n self._update_keys_access_order(key)\n return self._storage.get(key)\n value = self._callback(key)\n if value:\n self.add(key, value)\n return value\n\n def _update_keys_access_order(self, key) -> None:\n \"\"\" Keep the list of keys in accessed order \"\"\"\n try:\n # we remove the key from the \"spot\" it was last access so it can be put\n # at the head of the list\n self._keys.remove(key)\n except ValueError:\n # key is new so their is nothing to do\n pass\n # always add 'key' at the head of the list since it's the last item that was accessed\n self._keys.insert(0, key)\n\n def _evict_if_necessary(self):\n \"\"\" Ensure we do not store more value than 'capacity' \"\"\"\n keys_length = len(self._keys)\n if keys_length > self._capacity:\n evicted = self._keys.pop(keys_length - 1)\n self._storage.pop(evicted)\n\n\ndef lru(size):\n def wrapped(function):\n cache = LeastRecentlyUsedCache(capacity=size, callback=function)\n return cache.get\n return wrapped\n", "id": "4533988", "language": "Python", "matching_score": 0.6156113743782043, "max_stars_count": 0, "path": "caching/lru.py" }, { "content": "#!/usr/bin/env python3\n\nimport sys\nimport os\nimport bs4\nimport json\nimport textrazor\n\nbookmark_file = sys.argv[1]\n\n# extract all href from bookmark file\nwith open(os.path.abspath(os.path.expanduser(bookmark_file))) as f:\n cleaned_up_lines = [line.strip(\"\\n \") for line in f.readlines()]\n soup = bs4.BeautifulSoup(\" \".join(cleaned_up_lines), 'html.parser')\n links = [link.attrs['href'] for link in soup.find_all('a')]\n\nlink_topics = {}\n\napi_client = textrazor.TextRazor(os.environ['TEXTRAZOR_API_KEY'],\n extractors=['topics'])\napi_client.set_cleanup_use_metadata(True)\napi_client.set_cleanup_mode('stripTags')\n\nfor link in links:\n response = api_client.analyze_url(link)\n link_topics[link] = {t.label: t.score for t in\n sorted(response.topics(),\n key=lambda x: x.score,\n reverse=True)[:10]}\n\nwith open(\"output.json\", \"w\") as f:\n json.dump(link_topics, f)\n\n", "id": "2079443", "language": "Python", "matching_score": 2.038409471511841, "max_stars_count": 0, "path": "bookmark_categorization/classify_bookmarks.py" }, { "content": "#!/usr/bin/env python3\nimport json\nimport os\nimport sys\nfrom typing import Dict, List\n\nfrom bs4 import BeautifulSoup\n\nbookmark_file = sys.argv[1]\nLINKS = {}\n\n# extract all href from bookmark 
file\nwith open(os.path.abspath('bookmarks_10_23_21.html')) as f:\n cleaned_up_lines = [line.strip(\"\\n \") for line in f.readlines()]\n soup = BeautifulSoup(\" \".join(cleaned_up_lines), 'html.parser')\n LINKS = {\n link.attrs['href']:\n (\n {\n 'ADD_DATE': link.attrs['add_date'],\n 'ICON': link.attrs.get('icon'),\n 'HREF': link.attrs['href'],\n },\n link.contents[0]\n )\n for link in soup.find_all('a')\n }\n\n\ndef create_bookmark_tag(attributes: Dict, contents: str) -> str:\n return f'<DT><A HREF=\"{attributes[\"HREF\"]}\" ICON=\"{attributes.get(\"ICON\", \"\")}\" ADD_DATE=\"{attributes[\"ADD_DATE\"]}\">{contents}</A>'\n\n\ndef create_folder_tag(folder: Dict) -> List[str]:\n print(f\"Handling folder {folder['name']}\")\n content = [f'<DT><H3 ADD_DATE=\"1554239916\" LAST_MODIFIED=\"1620574779\">{folder[\"name\"]}</H3>',\n '<DL><p>']\n\n for sub_folder in folder['children']:\n content.extend(create_folder_tag(sub_folder))\n\n for link in folder['links']:\n (attributes, contents) = LINKS[link]\n content.append(create_bookmark_tag(attributes, contents))\n content.append('</DL><p>')\n\n return content\n\n\nhtml_content: list[str] = \"\"\"<!DOCTYPE NETSCAPE-Bookmark-file-1>\n<META HTTP-EQUIV=\"Content-Type\" CONTENT=\"text/html; charset=UTF-8\">\n<TITLE>Bookmarks</TITLE>\n<H1>Bookmarks</H1>\n<DL><p>\"\"\".split('\\n')\n\nwith open(os.path.abspath(bookmark_file)) as f:\n html_content.extend(create_folder_tag(json.load(f)))\n\nhtml_content.append(\"</DL><p>\")\n\nprint(len(html_content))\n\nwith open(os.path.expanduser(\"~/Documents/bert.html\"), \"w\") as f:\n f.writelines([l + \"\\n\" for l in html_content])\n", "id": "8399370", "language": "Python", "matching_score": 0.2008337676525116, "max_stars_count": 0, "path": "bookmark_categorization/bookmarks_to_html.py" }, { "content": "from collections import Counter\nfrom itertools import permutations\nfrom hashlib import md5\n\n\nclass PoultryAndAnts:\n \"\"\"\n See http://followthewhiterabbit.trustpilot.com/cs/step3.html\n \"\"\"\n\n def __init__(self, anagram, targets):\n \"\"\"\n :param anagram: anagram that we are looking for\n :param targets: list of md5s of anagrams that would match the given\n anagram\n \"\"\"\n anagram_text = anagram.replace(\" \", \"\")\n self.anagram_counter = Counter(anagram_text)\n self.anagram_length = len(anagram_text)\n self.targets = targets\n\n def _words_match_anagram(self, words):\n for perm in permutations(words):\n # check for md5 of all permutations with white spaces\n sentence = \" \".join(perm)\n digest = md5(sentence).hexdigest()\n if digest in self.targets:\n print \"*\" * 25, \"\\n%s\\n\" % sentence, \"*\" * 25\n return list(perm)\n return []\n\n def find_match(self, node):\n \"\"\" Given a fully built tree return the first anagram found that has\n an md5 found in self.targets. 
Recursive method visiting the given\n node first then its children in order they were added.\n\n :param node: the node being visited\n :return: the first found list of words that have an md5 found in\n self.targets, None if no such list of words can be found\n \"\"\"\n (words, length, counter, children) = node\n # we've reached a leaf node that has the right amount of letters\n if not children and length == self.anagram_length and counter == \\\n self.anagram_counter:\n match = self._words_match_anagram(words)\n if match:\n return match\n for child in children:\n return self.find_match(child)\n\n def nb_leaf_nodes(self, node):\n \"\"\" Informational method to get the number of leaf nodes that can be\n reached from the given node. Recursive method.\n\n :param node: node being visited\n :return: number of leaf nodes that can be reached from the given node\n \"\"\"\n (_, __, ___, children) = node\n if not children:\n return 1\n leaves = 0\n for c in children:\n leaves += self.nb_leaf_nodes(c)\n return leaves\n\n def add_node(self, node, new_word, new_word_counter):\n \"\"\" Add 'new_word' to the given node and to any of its children if\n 'word_contains' returns true.\n\n :param node: current node that should get 'new_word' added to\n :param new_word: the word to add to the current node\n :param new_word_counter: the Counter from 'new_word'\n :return: None\n \"\"\"\n (words, length, counter, children) = node\n for c in children:\n self.add_node(c, new_word, new_word_counter)\n new_word_length = length + len(new_word)\n if new_word_length <= self.anagram_length:\n new_node_counter = new_word_counter + counter\n if self.word_contains(new_node_counter, self.anagram_counter):\n new_node_words = words + [new_word]\n if new_node_counter == self.anagram_counter:\n self._words_match_anagram(new_node_words)\n node = (new_node_words, new_word_length, new_node_counter, [])\n children.append(node)\n\n def word_contains(self, needle, haystack):\n \"\"\" Check whether or not the needle (Counter of a node) can be\n fit in haystack (Counter of anagram). 
A Counter is said to fit within\n another Counter if for common letters the haystack has more occurrences.\n\n :param needle: Counter of a single word\n :param haystack: Counter of the anagram\n :return: true if the needle fit in the haystack\n \"\"\"\n return all(haystack[k] - v >= 0 for k, v in needle.items())\n\n def build_tree(self, words):\n \"\"\" Build a tree of nodes that only contain letter from the anagram\n and each letter will appear no more times that it does in the anagram\n\n Node: ([words], sum length of [words], Counter([words]), [children])\n\n TODO: output should not be mingled with the logic\n\n :param words: the word list to use to find an anagram\n :return: the original node that was passed in\n \"\"\"\n node = ([], 0, Counter(), [])\n eligible_words = []\n for word in set(words):\n # If all letters of the word are in the anagram\n word_counter = Counter(word)\n if self.word_contains(word_counter, self.anagram_counter):\n eligible_words.append((word, len(word), word_counter))\n print 'eligible words: ', len(eligible_words)\n try:\n for (idx, (word, word_len, counter)) in enumerate(eligible_words):\n if not idx % 10:\n print 'done: ', idx, ' - nb leaves: ', self.nb_leaf_nodes(\n node)\n self.add_node(node, word, counter)\n except KeyboardInterrupt:\n pass\n return node\n\n\ndef print_tree(node, indent=0):\n (words, _, counter, children) = node\n print ' ' * indent, words, ' -- ', counter, ': ', len(children)\n for c in children:\n print_tree(c, indent + 1)\n\n\nif __name__ == \"__main__\":\n\n with open(\"./md5_test.txt\", \"r\") as f:\n target_md5 = [l.strip(\"\\n\") for l in f]\n\n with open(\"./words_test.txt\", \"r\") as f:\n wordlist = [line.strip(\"\\n\") for line in f]\n\n anagram = \"bert goes to the circus\"\n\n ants = PoultryAndAnts(anagram, target_md5)\n print \"Looking for '%s': %s\" % (anagram, ants.anagram_counter)\n tree = ants.build_tree(wordlist)\n print_tree(tree)\n print \"total nb-leaves: %d\" % ants.nb_leaf_nodes(tree)\n print \"match: %s\" % ants.find_match(tree)\n", "id": "10315978", "language": "Python", "matching_score": 4.81788444519043, "max_stars_count": 0, "path": "ants/poultry.py" }, { "content": "import unittest\nfrom collections import Counter\nfrom hashlib import md5\n\nfrom poultry import PoultryAndAnts\n\n\nclass PoultryAndAntsTest(unittest.TestCase):\n\n def test_word_contains_with_empty_counters(self):\n self.assertTrue(\n PoultryAndAnts(\"\", []).word_contains(Counter(), Counter()))\n\n def test_word_contains_with_needle_empty_counter(self):\n self.assertTrue(\n PoultryAndAnts(\"\", []).word_contains(Counter(), Counter(\"a\")))\n\n def test_word_contains_with_haystack_empty_counter(self):\n self.assertFalse(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"a\"), Counter()))\n\n def test_word_contains_with_needle_equal_to_haystack(self):\n self.assertTrue(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"a\"), Counter(\"a\")))\n\n def test_word_contains_with_needle_contained_in_haystack(self):\n self.assertTrue(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"test\"),\n Counter(\"this is a test\")))\n\n def test_word_contains_with_needle_not_contained_in_haystack(self):\n self.assertFalse(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"test\"),\n Counter(\"word\")))\n\n def test_word_contains_with_needle_has_one_too_many_letter(self):\n self.assertFalse(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"abc\"),\n Counter(\"ab\")))\n\n def test_word_contains_with_needle_has_one_too_many_letter_occurrence(self):\n 
self.assertFalse(\n PoultryAndAnts(\"\", []).word_contains(Counter(\"aba\"),\n Counter(\"ab\")))\n\n def test_nb_leaf_nodes_empty_tree(self):\n nodes = PoultryAndAnts(\"\", []).nb_leaf_nodes((None, None, None, []))\n self.assertEqual(nodes, 1)\n\n def test_nb_leaf_nodes_with_one_level(self):\n children = [(None, None, None, []) for _ in [1, 2, 3]]\n nodes = PoultryAndAnts(\"\", []).nb_leaf_nodes(\n (None, None, None, children))\n self.assertEqual(nodes, len(children))\n\n def test_nb_leaf_nodes_with_multiple_level(self):\n child_1 = (None, None, None, [(None, None, None, []) for _ in range(3)])\n nodes = PoultryAndAnts(\"\", []).nb_leaf_nodes(\n (None, None, None, [child_1, child_1]))\n self.assertEqual(nodes, 6)\n\n def test_build_tree_structure(self):\n ants = PoultryAndAnts(\"ac\", [])\n tree = ants.build_tree([\"a\", \"b\", \"c\", \"d\"])\n self.assertEqual(ants.nb_leaf_nodes(tree), 2)\n\n ac_node = ([\"a\", \"c\"], 2, Counter(\"ac\"), [])\n a_node = ([\"a\"], 1, Counter(\"a\"), [ac_node])\n c_node = ([\"c\"], 1, Counter(\"c\"), [])\n expected_tree = ([], 0, Counter(), [a_node, c_node])\n self.assertEqual(tree, expected_tree)\n\n def test_add_node_top_node(self):\n ants = PoultryAndAnts(\"abc\", [])\n node = ([], 1, Counter(), [])\n ants.add_node(node, \"a\", Counter(\"a\"))\n nb_nodes = ants.nb_leaf_nodes(node)\n self.assertEqual(nb_nodes, 1)\n\n def test_add_node_not_part_of_anagram(self):\n ants = PoultryAndAnts(\"abc\", [])\n node = ([], 1, Counter(), [])\n ants.add_node(node, \"d\", Counter(\"d\"))\n nb_nodes = ants.nb_leaf_nodes(node)\n self.assertEqual(nb_nodes, 1) # the root node is also a leaf\n\n def test_add_node_all_letters(self):\n ants = PoultryAndAnts(\"abc\", [])\n node = ([], 1, Counter(), [])\n ants.add_node(node, \"a\", Counter(\"a\"))\n ants.add_node(node, \"b\", Counter(\"b\"))\n ants.add_node(node, \"c\", Counter(\"c\"))\n nb_nodes = ants.nb_leaf_nodes(node)\n self.assertEqual(nb_nodes, 4)\n\n def test_build_tree(self):\n ants = PoultryAndAnts(\"abc\", [])\n nodes = ants.build_tree([\"a\", \"b\", \"c\", \"d\"])\n nb_nodes = ants.nb_leaf_nodes(nodes)\n self.assertEqual(nb_nodes, 4)\n\n def test_build_tree_not_part_of_anagram(self):\n ants = PoultryAndAnts(\"abc\", [])\n nodes = ants.build_tree([\"d\", \"e\"])\n nb_nodes = ants.nb_leaf_nodes(nodes)\n self.assertEqual(nb_nodes, 1) # the root node is also a leaf\n\n def test_find_match(self):\n ants = PoultryAndAnts(\"ab\", [md5(\"b a\").hexdigest()])\n nodes = ants.build_tree([\"a\", \"b\", \"c\", \"d\"])\n match = ants.find_match(nodes)\n self.assertEqual(match, [\"b\", \"a\"])\n\n def test_find_match_no_match(self):\n ants = PoultryAndAnts(\"ab\", [md5(\"b a\").hexdigest()])\n nodes = ants.build_tree([\"c\", \"d\"])\n match = ants.find_match(nodes)\n self.assertIsNone(match)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "7476677", "language": "Python", "matching_score": 0.7623335719108582, "max_stars_count": 0, "path": "ants/test_poultry.py" }, { "content": "number_test_cases = int(input(\"Enter the number of test cases: \"))\n\nfor idx in range(number_test_cases):\n test_case = input(\"Test case #{} \".format(idx+1))\n suppression = 0\n for i, c in enumerate(test_case):\n if i < 1:\n continue\n if c == test_case[i-1]:\n suppression += 1\n print(\"{} suppression(s) for '{}'\".format(suppression, test_case))\n", "id": "12007008", "language": "Python", "matching_score": 0.17445886135101318, "max_stars_count": 0, "path": "alternating_characters/alternating.py" }, { "content": "\"\"\"\nImplement string 
reverse without using the slice operator or the reverse method.\nIt should be implemented in place\n\"\"\"\n\ndef reverse_in_place(s):\n if not s:\n return s\n for idx in range(len(s)/2):\n s[idx], s[len(s) - idx - 1] = s[len(s) - idx - 1], s[idx]\n return s\n\n\nassert reverse_in_place(None) == None\nassert reverse_in_place(\"\") == \"\"\nassert reverse_in_place(\"f o o d b a r\".split()) == \"r a b d o o f\".split()\n", "id": "5089024", "language": "Python", "matching_score": 0.2102254033088684, "max_stars_count": 0, "path": "algo/str_reverse.py" }, { "content": "from itertools import permutations, izip\nfrom hashlib import md5\nfrom math import factorial\nfrom multiprocessing import Pool,current_process\n\nwith open(\"./md5.txt\", \"r\") as f:\n target_md5 = [l.strip(\"\\n\") for l in f]\n\nwith open(\"./words.txt\", \"r\") as f:\n wordlist = [line.strip(\"\\n\") for line in f]\n\n\ndef match_one_anagram(a, idx):\n if idx % 1000 == 0:\n print \"'%s' checking '%s' - %d\" % (current_process().name, a, idx)\n if md5(\"\".join(a)).hexdigest() in target_md5:\n return a\n\n\nif __name__ == \"__main__\":\n sentence = \"poultry outwits ants\".replace(\" \", \"\")\n all_anagrams = permutations(sentence)\n total_permutations = factorial(len(sentence))\n nb_processes = 10\n pool = Pool(nb_processes)\n print \"using %d process - %d total perms \" % (nb_processes,\n total_permutations)\n results = pool.imap_unordered(match_one_anagram,\n izip(all_anagrams, xrange(total_permutations)),\n total_permutations / nb_processes * 10)\n print \"closing pool\"\n pool.close()\n pool.join()\n idx = 0\n print \"going through results\"\n for r in results:\n if r:\n print r\n if idx % 100 == 0:\n print \"run \", idx\n idx += 1\n", "id": "8256906", "language": "Python", "matching_score": 1.0752569437026978, "max_stars_count": 0, "path": "ants/anagram.py" }, { "content": "\n\ndef is_permutation(str1, str2):\n return sorted(str1) == sorted(str2)\n\n\nassert is_permutation(\"\", \"foo\") == False\nassert is_permutation(\"Nib\", \"bin\") == False\nassert is_permutation(\"a ct\", \"c at\") == True\n\n\ndef array_permutation(str1, str2):\n if len(str1) != len(str2):\n return False\n total = [0] * 128\n for char in str1:\n total[ord(char)] += 1\n for char in str2:\n total[ord(char)] -= 1\n return min(total) == max(total) == 0\n\nassert array_permutation(\"\", \"foo\") == False\nassert array_permutation(\"Nib\", \"bin\") == False\nassert array_permutation(\"a ct\", \"c at\") == True\n", "id": "986454", "language": "Python", "matching_score": 1.6263831853866577, "max_stars_count": 0, "path": "algo/str_permutation.py" }, { "content": "\"\"\"\nProblem: Determine if a string s1 is a rotation of another string s2, by calling (only once) a function is_substring\n\"\"\"\n\n\ndef is_substring(str1, str2):\n return str1 in str2\n\n\ndef is_rotation(str1, str2):\n if str1 is None or str2 is None:\n return False\n if len(str1) != len(str2):\n return False\n return is_substring(str2, str1 + str1)\n\n\nassert is_rotation('o', 'oo') == False\nassert is_rotation(None, 'foo') == False\nassert is_rotation('', 'foo') == False\nassert is_rotation('', '') == True\nassert is_rotation('foobarbaz', 'barbazfoo') == True\nassert is_rotation('oobarbazf', 'barbazfoo') == True\n", "id": "815369", "language": "Python", "matching_score": 0.13397462666034698, "max_stars_count": 0, "path": "algo/str_rotation.py" }, { "content": "\"\"\"\nProblem: Compress a string such that 'AAABCCDDDD' becomes 'A3B1C2D4'. 
Only compress the string if it saves space.\n\"\"\"\n\n\ndef str_compress(s):\n if not s:\n return s\n if len(s) <= 2 * len(set(s)):\n # No need to compress as there are not enough letters\n return s\n char, cpt = s[0], 1\n result = \"\"\n for current_char in s[1:]:\n if current_char == char:\n cpt += 1\n else:\n result += char + str(cpt)\n cpt = 1\n char = current_char\n return result + char + str(cpt)\n\n\nassert str_compress(None) == None\nassert str_compress(\"\") == \"\"\nassert str_compress(\"AABBCC\") == \"AABBCC\"\nassert str_compress(\"AABCCDDDD\") == \"A2B1C2D4\"\nassert str_compress(\"AAA\") == \"A3\"\n\n# BONUS\ndef str_decompress(s):\n result = \"\"\n for _ in range(len(s)//2):\n a, b, *s = s\n result += a * int(b)\n return result\n\nassert str_decompress(\"A4\") == \"AAAA\"\nassert str_decompress(\"A2B1C2D4\") == \"AABCCDDDD\"\n", "id": "4552219", "language": "Python", "matching_score": 0.06741323322057724, "max_stars_count": 0, "path": "algo/str_compress.py" }, { "content": "from string import ascii_lowercase\n\n# https://www.codeeval.com/public_sc/37/\n\ndef pangram(input_sentence):\n input_sentence = set(input_sentence.lower())\n missing_letters = []\n if input_sentence == set(ascii_lowercase):\n return None\n for c in ascii_lowercase:\n if c not in input_sentence:\n missing_letters.append(c)\n return missing_letters or None\n\n\nassert pangram(ascii_lowercase) is None\nassert pangram(ascii_lowercase.replace(\"z\", \"a\")) == [\"z\"]\nassert pangram(ascii_lowercase.replace(\"z\", \"a\").replace(\"b\", \"a\")) == [\"b\", \"z\"]\n\nto_be_replaced = []\nfor c in ascii_lowercase:\n to_be_replaced.append(c)\n alphabet = ascii_lowercase\n for to_replace in to_be_replaced:\n alphabet.replace(to_replace, \"a\")\n missing_letters_for_pangram = pangram(alphabet)\n assert missing_letters_for_pangram == to_be_replaced or \\\n alphabet == ascii_lowercase and missing_letters_for_pangram is None\n\nprint(\"done!\")\n", "id": "6219211", "language": "Python", "matching_score": 1.2485291957855225, "max_stars_count": 0, "path": "pangrams/pangrams.py" }, { "content": "from collections import deque\nfrom string import ascii_lowercase, maketrans\n\nfrom challenge import open_next_level\n\nto_translate = \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
lmu ynnjw ml rfc spj.\"\n\nrotation = deque(ascii_lowercase)\nrotation.rotate(-2)\ntrans_table = maketrans(ascii_lowercase, \"\".join(rotation))\n\ntranslate = lambda x: x.translate(trans_table)\n\nprint translate(to_translate)\n\nopen_next_level(translate(\"map\"))\n", "id": "2758742", "language": "Python", "matching_score": 0.6683011054992676, "max_stars_count": 0, "path": "py_challenge/level_2.py" }, { "content": "from challenge import open_next_level\nimport requests\nimport zipfile\nimport os\nimport tempfile\n\n\ntmp = tempfile.mkdtemp()\n(_, archive_path) = tempfile.mkstemp(dir=tmp)\n\nwith open(archive_path, \"w\") as channel:\n response = requests.get(\"http://www.pythonchallenge.com/pc/def/channel.zip\")\n response.raise_for_status()\n channel.write(response.content)\n\narchive = zipfile.ZipFile(archive_path)\n\nnothing = 90052\ncomments = []\nwhile nothing:\n current = \"%s.txt\" % nothing\n data = archive.read(current)\n comments.append(archive.getinfo(current).comment)\n nothing = data.split()[-1]\n try:\n int(nothing)\n except ValueError as val_error:\n print \"This might be the one\", val_error\n break\n\nprint \"\".join(comments)\n\nopen_next_level(\"oxygen\")\n", "id": "12759093", "language": "Python", "matching_score": 3.1175076961517334, "max_stars_count": 0, "path": "py_challenge/level_7.py" }, { "content": "from challenge import open_next_level\nimport requests\n\ndef get_next_nothing(content):\n return content.split()[-1]\n\ndef get_page(nothing):\n response = requests.get(\"http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=%s\" % nothing)\n response.raise_for_status()\n return response.content\n\nnothing = 44827\nwhile nothing:\n nothing = get_next_nothing(get_page(nothing))\n print \"Next nothing is '%s'\" % nothing\n try:\n int(nothing)\n except ValueError as val_error:\n print \"This might be the one\", val_error\n open_next_level(nothing)\n", "id": "11423360", "language": "Python", "matching_score": 1.6100271940231323, "max_stars_count": 0, "path": "py_challenge/level_5.py" }, { "content": "import os\nimport webbrowser\n\nNEXT_LEVEL_URL = \"http://www.pythonchallenge.com/pc/def/%s.html\"\n\n\ndef open_next_level(page):\n \"\"\"Open the default webbrowser using the given parameter.\n\n Args:\n page: the name of the next page to open - no extension required.\n \"\"\"\n page = os.path.splitext(page)[0]\n webbrowser.open(NEXT_LEVEL_URL % page)\n", "id": "4681129", "language": "Python", "matching_score": 2.0881900787353516, "max_stars_count": 0, "path": "py_challenge/challenge.py" }, { "content": "from challenge import open_next_level\n\nopen_next_level(2**38)\n", "id": "5689876", "language": "Python", "matching_score": 1.8378239870071411, "max_stars_count": 0, "path": "py_challenge/level_1.py" }, { "content": "from challenge import open_next_level\n\nimport requests\nimport pickle\n\nprint '\\n'.join([''.join([p[0] * p[1] for p in row]) for row in pickle.loads(requests.get(\"http://www.pythonchallenge.com/pc/def/banner.p\").content)])\nopen_next_level(\"channel\")\n", "id": "8167060", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "py_challenge/level_6.py" }, { "content": "import sys\nfrom contextlib import contextmanager\nfrom io import StringIO\n\n\n@contextmanager\ndef captured_output():\n \"\"\"Allows to safely capture stdout and stderr in a context manager.\"\"\"\n new_out, new_err = StringIO(), StringIO()\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, 
sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err\n", "id": "6761718", "language": "Python", "matching_score": 0.06948523223400116, "max_stars_count": 1, "path": "test/utils.py" }, { "content": "import argparse\nfrom typing import Any, Optional, Sequence, Union\n\n\nclass FullHelpAction(argparse._HelpAction):\n \"\"\"Custom HelpAction to display help from all subparsers.\n\n This allows to have the help for all sub-commands when invoking:\n '<script.py> --help' rather than a somewhat incomplete help message only\n describing the name of the sub-commands.\n Note: taken from https://stackoverflow.com/a/24122778/2003420\n \"\"\"\n\n def __call__(\n self,\n parser: argparse.ArgumentParser,\n namespace: argparse.Namespace,\n values: Union[str, Sequence[Any], None],\n option_string: Optional[str] = None,\n ):\n # Print help for the parser this class is linked to\n parser.print_help()\n # Retrieve sub-parsers of the given parser\n subparsers_actions = [\n action\n for action in parser._actions\n if isinstance(action, argparse._SubParsersAction)\n ]\n for subparser_action in subparsers_actions:\n # Get all subparsers and print their help\n for choice, subparser in subparser_action.choices.items():\n print(\"** Command '{}' **\".format(choice))\n print(\"{}\\n\".format(subparser.format_help()))\n parser.exit()\n", "id": "3400860", "language": "Python", "matching_score": 1.232792615890503, "max_stars_count": 1, "path": "parse_this/help/action.py" }, { "content": "import logging\nimport re\nfrom typing import Callable, List, Optional\n\n_LOG = logging.getLogger(__name__)\n\n\ndef _get_default_help_message(\n func: Callable,\n args: List[str],\n description: Optional[str] = None,\n args_help: Optional[dict] = None,\n):\n \"\"\"Create a default description for the parser and help message for the\n arguments if they are missing.\n\n Args:\n func: the method we are creating a parser for\n args: the argument names of the method\n description: a potentially existing description created from the\n function docstring\n args_help: a dict {arg_name: help} with potentially missing arguments\n\n Returns:\n a tuple (arg_parse_description, complete_args_help)\n \"\"\"\n if description is None:\n description = \"Argument parsing for %s\" % func.__name__\n args_help = args_help or {}\n # If an argument is missing a help message we create a simple one\n for argument in [arg_name for arg_name in args if arg_name not in args_help]:\n args_help[argument] = \"Help message for %s\" % argument\n return description, args_help\n\n\ndef prepare_doc(func: Callable, args: List[str], delimiter_chars: str):\n \"\"\"From the function docstring get the arg parse description and arguments\n help message. 
If there is no docstring simple description and help\n message are created.\n\n Args:\n func: the function that needs argument parsing\n args: name of the function arguments\n delimiter_chars: characters used to separate the parameters from their\n help message in the docstring\n\n Returns:\n A tuple containing the description to be used in the argument parser and\n a dict indexed on the callable argument name and their associated help\n message\n \"\"\"\n _LOG.debug(\"Preparing doc for '%s'\", func.__name__)\n if not func.__doc__:\n return _get_default_help_message(func, args)\n description = []\n args_help = {}\n fill_description = True\n arg_name = None\n arg_doc_regex = re.compile(\n \"\\b*(?P<arg_name>\\w+)\\s*%s\\s*(?P<help_msg>.+)\" % delimiter_chars # noqa: W605\n )\n for line in func.__doc__.splitlines():\n line = line.strip()\n if line and fill_description:\n description.append(line)\n elif line:\n arg_match = arg_doc_regex.match(line)\n try:\n arg_name = arg_match.groupdict()[\"arg_name\"].strip()\n args_help[arg_name] = arg_match.groupdict()[\"help_msg\"].strip()\n except AttributeError:\n # The line didn't match the pattern we've hit a\n # multiline argument docstring so we add it to the\n # previous argument help message\n if arg_name is not None:\n args_help[arg_name] = \" \".join([args_help[arg_name], line])\n else:\n # The first empty line we encountered means we are done with\n # the description. The first empty line we encounter after\n # filling the argument help means we are done with argument\n # parsing.\n if not fill_description and args_help:\n break\n fill_description = False\n return _get_default_help_message(func, args, \" \".join(description), args_help)\n", "id": "9529284", "language": "Python", "matching_score": 2.4982330799102783, "max_stars_count": 1, "path": "parse_this/help/description.py" }, { "content": "import logging\nfrom argparse import Namespace\nfrom typing import Callable, List\n\nfrom parse_this.args import _get_args_to_parse\nfrom parse_this.exception import ParseThisException\nfrom parse_this.parsing import _get_args_name_from_parser\n\n_LOG = logging.getLogger(__name__)\n\n\ndef _get_parser_call_method(func: Callable):\n \"\"\"Returns the method that is linked to the 'call' method of the parser\n\n Args:\n func: the decorated function\n\n Raises:\n ParseThisException if the decorated method is __init__, __init__ can\n only be decorated in a class decorated by parse_class\n \"\"\"\n func_name = func.__name__\n parser = func.parser # type: ignore[attr-defined]\n\n def inner_call(instance=None, args=None):\n \"\"\"This is method attached to <parser>.call.\n\n Args:\n instance: the instance of the parser\n args: arguments to be parsed\n \"\"\"\n _LOG.debug(\"Calling %s.parser.call\", func_name)\n # Defer this check in the method call so that __init__ can be\n # decorated in class decorated with parse_class\n if func_name == \"__init__\":\n raise ParseThisException(\n (\n \"To use 'create_parser' on the\"\n \"'__init__' you need to decorate the \"\n \"class with '@parse_class'\"\n )\n )\n namespace = parser.parse_args(_get_args_to_parse(args))\n if instance is None:\n # If instance is None we are probably decorating a function not a\n # method and don't need the instance\n args_name = _get_args_name_from_parser(parser)\n return _call(func, args_name, namespace)\n return _call_method_from_namespace(instance, func_name, namespace)\n\n return inner_call\n\n\ndef _call(callable_obj: Callable, arg_names: List[str], namespace: Namespace):\n 
\"\"\"Actually calls the callable with the namespace parsed from the command\n line.\n\n Args:\n callable_obj: a callable object\n arg_names: name of the function arguments\n namespace: the namespace object parsed from the command line\n \"\"\"\n try:\n logging.basicConfig(level=namespace.log_level)\n except AttributeError:\n pass\n arguments = {arg_name: getattr(namespace, arg_name) for arg_name in arg_names}\n return callable_obj(**arguments)\n\n\ndef _call_method_from_namespace(obj: Callable, method_name: str, namespace: Namespace):\n \"\"\"Call the method, retrieved from obj, with the correct arguments via\n the namespace\n\n Args:\n obj: any kind of object\n method_name: method to be called\n namespace: an argparse.Namespace object containing parsed command\n line arguments\n \"\"\"\n method = getattr(obj, method_name)\n method_parser = method.parser\n arg_names = _get_args_name_from_parser(method_parser)\n if method_name == \"__init__\":\n return _call(obj, arg_names, namespace)\n return _call(method, arg_names, namespace)\n", "id": "3764224", "language": "Python", "matching_score": 2.056411027908325, "max_stars_count": 1, "path": "parse_this/call.py" }, { "content": "import logging\nimport sys\nfrom itertools import zip_longest\nfrom typing import Any, List, Optional, Tuple\n\n_NO_DEFAULT = object()\n\n_LOG = logging.getLogger(__name__)\n\n\ndef _get_args_and_defaults(args: List[str], defaults: Optional[Tuple[Any, ...]] = None):\n \"\"\"Return a list of 2-tuples - the argument name and its default value or\n a special value that indicates there is no default value.\n\n Args:\n args: list of argument name\n defaults: tuple of default values\n \"\"\"\n defaults = defaults or ()\n args_and_defaults = [\n (argument, default)\n for (argument, default) in zip_longest(\n args[::-1], defaults[::-1], fillvalue=_NO_DEFAULT\n )\n ]\n return args_and_defaults[::-1]\n\n\ndef _get_args_to_parse(args: List[str], cli_arguments: Optional[List[str]] = None):\n \"\"\"Return the given arguments if it is not None else sys.argv if it contains\n something, an empty list otherwise.\n\n Args:\n args: argument to be parsed\n cli_arguments: arguments from the command line, defaults to sys.argv; mostly\n for testing purposes\n \"\"\"\n cli_arguments = cli_arguments if cli_arguments is not None else sys.argv\n arguments = args if args is not None else cli_arguments[1:]\n _LOG.debug(\"Parsing arguments: %s\", arguments)\n return arguments\n", "id": "4056796", "language": "Python", "matching_score": 2.9281632900238037, "max_stars_count": 1, "path": "parse_this/args.py" }, { "content": "import unittest\n\nfrom parse_this.args import _NO_DEFAULT, _get_args_and_defaults, _get_args_to_parse\n\n\nclass TestArgs(unittest.TestCase):\n def test_get_args_and_defaults_fill_no_default(self):\n args_and_defaults = _get_args_and_defaults(\n [\"first\", \"second\", \"third\"], (\"default_value\",)\n )\n self.assertListEqual(\n args_and_defaults,\n [\n (\"first\", _NO_DEFAULT),\n (\"second\", _NO_DEFAULT),\n (\"third\", \"default_value\"),\n ],\n )\n\n def test_get_args_and_defaults_no_args(self):\n self.assertListEqual(_get_args_and_defaults([], ()), [])\n\n def test_get_args_and_defaults_no_default(self):\n self.assertListEqual(\n _get_args_and_defaults([\"first\", \"second\"], ()),\n [(\"first\", _NO_DEFAULT), (\"second\", _NO_DEFAULT)],\n )\n\n def test_get_args_to_parse_nothing_to_parse(self):\n self.assertListEqual(_get_args_to_parse(None, []), [])\n\n def test_get_args_to_parse_remove_prog_from_sys_argv(self):\n 
self.assertListEqual(\n _get_args_to_parse(None, [\"prog\", \"arg_1\", \"arg_2\"]), [\"arg_1\", \"arg_2\"]\n )\n\n def test_get_args_to_parse_with_options(self):\n self.assertListEqual(\n _get_args_to_parse(None, [\"prog\", \"arg\", \"--kwargs=12\"]),\n [\"arg\", \"--kwargs=12\"],\n )\n\n def test_get_args_to_parse_used_empty_args_not_sys_argv(self):\n self.assertListEqual(_get_args_to_parse([], [\"prog\", \"arg_1\", \"arg_2\"]), [])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "4800941", "language": "Python", "matching_score": 2.3779876232147217, "max_stars_count": 1, "path": "test/args_test.py" }, { "content": "import unittest\n\nfrom parse_this import parse_this\nfrom test.helpers import (\n parse_me,\n)\n\n\nclass TestParseThis(unittest.TestCase):\n def test_parse_this_return_value(self):\n self.assertEqual(parse_this(parse_me, \"yes 2\".split()), (\"yesyes\", 144))\n self.assertEqual(parse_this(parse_me, \"no 3 --three 2\".split()), (\"nonono\", 4))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "7242857", "language": "Python", "matching_score": 1.2572773694992065, "max_stars_count": 1, "path": "test/parse_this_test.py" }, { "content": "import datetime\n\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom polls.models import Question\n\n\ndef create_question(question_text, days):\n \"\"\"\n Creates a question with the given `question_text` published the given\n number of `days` offset to now (negative for questions published\n in the past, positive for questions that have yet to be published).\n \"\"\"\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text, pub_date=time)\n\n\nclass QuestionIndexDetailTest(TestCase):\n\n def test_detail_view_with_future_question(self):\n future_question = create_question(\"Future question\", 30)\n response = self.client.get(reverse(\"polls:detail\",\n args=(future_question.id,)))\n self.assertEqual(response.status_code, 404)\n\n def test_detail_view_with_past_question(self):\n past_question = create_question(\"Past question\", -12)\n response = self.client.get(reverse(\"polls:detail\",\n args=(past_question.id,)))\n self.assertContains(response, past_question.question_text,\n status_code=200)\n\n\nclass QuestionViewTests(TestCase):\n\n def test_index_view_with_no_questions(self):\n \"\"\"If no questions exsit, an appropriate message should be displayed.\"\"\"\n response = self.client.get(reverse(\"polls:index\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No polls are available\")\n self.assertQuerysetEqual(response.context[\"latest_questions\"], [])\n\n def test_index_view_with_a_past_question(self):\n \"\"\"Questions with a pub_date in the past should be displayed on the\n index page\n \"\"\"\n create_question(question_text=\"Past question\", days=-30)\n response = self.client.get(reverse(\"polls:index\"))\n self.assertQuerysetEqual(\n response.context[\"latest_questions\"], [\"<Question: Past question>\"]\n )\n\n def test_index_view_with_a_future_question(self):\n create_question(\"Future question\", 30)\n response = self.client.get(reverse(\"polls:index\"))\n self.assertContains(response, \"No polls are available\", status_code=200)\n self.assertQuerysetEqual(response.context[\"latest_questions\"], [])\n\n def test_index_view_with_future_and_past_question(self):\n create_question(\"Future question\", 30)\n create_question(\"Past question\", -30)\n 
response = self.client.get(reverse(\"polls:index\"))\n self.assertQuerysetEqual(response.context[\"latest_questions\"],\n [\"<Question: Past question>\"])\n\n def test_index_view_with_two_past_question(self):\n create_question(\"Past question 1\", -30)\n create_question(\"Past question 2\", -12)\n response = self.client.get(reverse(\"polls:index\"))\n self.assertQuerysetEqual(response.context[\"latest_questions\"],\n [\"<Question: Past question 2>\",\n \"<Question: Past question 1>\"])\n\n\n\nclass QuestionMethodTest(TestCase):\n\n def test_was_published_recently_with_future_question(self):\n \"\"\"was_published_recently() should return False for question with a\n pub_date in the future.\"\"\"\n time = timezone.now() + datetime.timedelta(days=30)\n future_question = Question(pub_date=time)\n self.assertEqual(future_question.was_published_recently(), False)\n\n def test_was_published_recently_with_old_question(self):\n time = timezone.now() - datetime.timedelta(days=1)\n old_question = Question(pub_date=time)\n self.assertEqual(old_question.was_published_recently(), False)\n\n def test_was_published_recently_with_recent_question(self):\n time = timezone.now() - datetime.timedelta(minutes=1)\n recent_question = Question(pub_date=time)\n self.assertEqual(recent_question.was_published_recently(), True)\n", "id": "12732541", "language": "Python", "matching_score": 1.3620129823684692, "max_stars_count": 0, "path": "djangoprojects/django-polls/polls/tests.py" }, { "content": "#!/usr/bin/env python3\n\nimport sys\n\nfrom datetime import date, timedelta, datetime\n\ndef _is_friday(current_date):\n return current_date.weekday() == 4\n\ndef _is_13th(current_date):\n return current_date.day == 13\n\ndef friday_the_13th(current_date=None):\n current_date = current_date or date.today()\n max_iteration = 100\n while max_iteration > 0:\n print('testing %s' % current_date.strftime(\"%A %d %B %Y\"))\n if _is_friday(current_date) and _is_13th(current_date):\n return current_date.isoformat()\n if not _is_friday(current_date):\n # adjust date to be a Friday\n delta = timedelta(days=4 - current_date.weekday())\n # for Sat and Sun delta.days is < 0 so applying delta would set us\n # to the previous Friday, instead move to the next Friday\n if delta.days < 0:\n delta += timedelta(days=7)\n current_date += delta\n else:\n # we are already a Friday but not a 13th, move on to the next Fiday\n current_date += timedelta(days=7)\n\n max_iteration -= 1\n return current_date.isoformat()\n\n\nif __name__ == \"__main__\":\n start_date = date.today()\n if len(sys.argv) > 1:\n start_date = datetime.strptime(sys.argv[1], '%Y-%m-%d')\n print(friday_the_13th(start_date))\n", "id": "11794763", "language": "Python", "matching_score": 0.9980839490890503, "max_stars_count": 0, "path": "friday_the_13th/friday.py" }, { "content": "import os\nimport random\n\n\ndef get_movies(top_directory):\n \"\"\"Return the list of all found movies in the given directory.\n\n Args:\n - top_directory: the directory to explore to find movie files\n \"\"\"\n movies = []\n for (dir_path, _, filenames) in os.walk(top_directory):\n for filename in filenames:\n (__, extension) = os.path.splitext(filename)\n if extension in [\".avi\", \".mkv\", \".mpg\", \".AVI\", \".wmv\", \".mp4\"]:\n movies.append(os.path.join(dir_path, filename))\n random.shuffle(movies)\n return movies\n\n\ndef get_start_time(min_start=0, max_start=90):\n \"\"\"Return a time in seconds between 0 and 90 minutes in seconds.\n\n Note that if the movie file is shorter than start time it 
will be skipped.\n\n Args:\n - min_start: the smallest minute at which the movie can start\n - max_start: the biggest minute at which the movie can start\n \"\"\"\n return random.randint(min_start, max_start) * random.randint(10, 60)\n\n\ndef get_run_time(min_run_time=1, max_run_time=10):\n \"\"\"Return the number of seconds the movie file will be played.\n\n Args:\n - min_run_time: the min amount of minutes the video will be played\n - max_run_time: the max amount of minutes the video will be played\n \"\"\"\n return random.randint(min_run_time, max_run_time) * random.randint(10, 60)\n\n\ndef get_playlist_entry(movie_file):\n \"\"\"Return the entry to add to the playlist for the given movie_file.\n\n Args:\n - movie_file: absolute path to the movie file\n \"\"\"\n return \"\"\"#EXTVLCOPT:start-time=%d\n#EXTVLCOPT:run-time=%d\n%s\n\"\"\" % (get_start_time(), get_run_time(), movie_file)\n\n\ndef generate_playlist(top_directory, playlist):\n \"\"\"Generate a playlist file from the given directory.\n\n Args:\n - top_directory: the directory containing movie files\n - playlist: full path to the playlist file\n \"\"\"\n with open(playlist, \"w\") as playlist_file:\n for m in get_movies(top_directory):\n playlist_file.write(get_playlist_entry(m))\n\n\ngenerate_playlist(\"/media/bvidal/Elements/Films\", \"playlist_random.vlc\")\n\n", "id": "4684288", "language": "Python", "matching_score": 1.4368866682052612, "max_stars_count": 0, "path": "random_vlc_playlist/random_vlc_playlist.py" }, { "content": "#!/usr/bin/env python\nimport struct\nimport sys\nimport wave\n\nimport numpy as np\nfrom PIL import Image\n\nwidth = None\nheight = None\nsampleRate = 48000\nduration = 1.5\nfrequency = 440.0\nborder_skip = 10\nmin_max = []\nbw_image = None\n\noriginal_file_name = sys.argv[1]\noriginal_file_format = None\n\n\ndef scale(v, from_min, from_max, to_min, to_max):\n \"\"\"Scaled and clamped value\"\"\"\n scaled_value = int(\n to_min + (((v - from_min) / (from_max - from_min)) * (to_max - to_min)))\n return clamp(scaled_value, to_min, to_max)\n\n\ndef clamp(value, to_min, to_max):\n return max(min(value, to_max - 1), to_min + 1)\n\n\nwith Image.open(original_file_name) as img:\n # convert image to 1-bit B&W image\n bw_image = img.convert(\"L\")\n original_file_format = img.format\n(width, height) = bw_image.size\n\n# TODO(bvidal): Ideas\n# [] input/output folder\n# [] dockerize\n# [] do not collect min/max to get avg rather take pixel with highest intensity\n# [] using sin wave/numpy to link timeseries rather than linear\n\n# Note that (0, 0) is upper left corner as per\n# https://pillow.readthedocs.io/en/4.0.x/handbook/concepts.html#coordinate-system\n# So when we are looking for the highest non black pixel it actually has the lowest h\nfor w in range(width):\n max_h = height\n min_h = 0\n # We skip the very top and bottom where there often is noise\n for h in range(border_skip, height - border_skip):\n # The x-axis is left on the screenshot so we ignore it when collecting\n # min/max\n if h == height/2:\n continue\n pixel_value = bw_image.getpixel((w, h))\n if pixel_value > 50:\n max_h = min(max_h, h)\n min_h = max(min_h, h)\n # that one is weird but again that's because (0, 0) is upper left of image\n if min_h < max_h:\n # w pixel column only has black pixels\n min_h = max_h = 0\n\n min_max.append((min_h, max_h))\n\nviz_prev_avg = 0\nviz_prev_idx = 0\nviz_w, viz_h = (4096, 1440)\nviz_img = Image.new(\"RGB\", (viz_w, viz_h))\nidx_range = len(min_max)\nviz_frame = int(viz_w / idx_range)\n\n# each column of 
the image will be \"stretched\" to this many \"frame\" of the wav file\nwav_length = int(duration * sampleRate)\nwav_frame = int(wav_length / idx_range)\nimage_range = height\n# range of a 16-bit wav\nwav_interval_min = -32768\nwav_interval_max = 32767\nwav_range = wav_interval_max - wav_interval_min\nwav_prev_avg = 0\nwav_values = []\n\nfor idx, (min_val, max_val) in enumerate(min_max):\n original_average = max_val + int((min_val - max_val) / 2)\n viz_avg = scale(original_average, 0, height, 0, viz_h)\n # Add min/max/avg in wav_viz_img for visual debugging!!!\n viz_idx = scale(idx, 0, idx_range, 0, viz_w)\n viz_img.putpixel((viz_idx, scale(min_val, 0, height, 0, viz_h)), (0, 255, 0))\n viz_img.putpixel((viz_idx, scale(max_val, 0, height, 0, viz_h)), (255, 0, 0))\n viz_img.putpixel((viz_idx, viz_avg), (0, 0, 255))\n viz_increment_per_step = (viz_avg - viz_prev_avg) / viz_frame\n for viz_step in range(viz_frame + 1):\n viz_img.putpixel(\n (viz_prev_idx + viz_step,\n viz_prev_avg + int(viz_step * viz_increment_per_step)),\n (255, 255, 0))\n viz_prev_avg = viz_avg\n viz_prev_idx = viz_idx\n # Handle wav's values\n wav_avg = scale(original_average, 0, height, wav_interval_min,\n wav_interval_max)\n wav_increment_per_step = (wav_avg - wav_prev_avg) / wav_frame\n for wav_step in range(wav_frame + 1):\n wav_values.append(wav_prev_avg + int(wav_step * wav_increment_per_step))\n wav_values.append(wav_avg)\n wav_prev_avg = wav_avg\n\nwith open(f\"{original_file_name}-viz-debug.{original_file_format}\", \"wb\") as \\\n viz_dbg_file:\n viz_img.save(viz_dbg_file)\n\nwav_viz_width_ratio = 25\nwav_viz_height_ratio = 25\nwav_viz_width = int(len(wav_values) / wav_viz_width_ratio)\nwav_viz_height = int(\n (wav_interval_max - wav_interval_min) / wav_viz_height_ratio)\nwav_viz_img = Image.new(\"RGB\", (wav_viz_width, wav_viz_height))\n\nfor wav_viz_idx, wave_value in enumerate(wav_values[::wav_viz_width_ratio]):\n wav_viz_value = scale(wave_value, wav_interval_min, wav_interval_max, 0,\n wav_viz_height)\n wav_viz_img.putpixel((min(wav_viz_idx, wav_viz_width - 1), wav_viz_value),\n (0, 255, 255))\n\nwith open(f\"{original_file_name}-wav-debug.{original_file_format}\", \"wb\") as \\\n wav_dbg_file:\n wav_viz_img.save(wav_dbg_file)\n\nwav_np_int_values = np.array(wav_values, dtype=\"int16\")\nwith wave.open(f\"{original_file_name}-output.wav\", 'wb') as wav_file:\n wav_file.setnchannels(1) # mono\n # https://docs.python.org/3/library/wave.html#wave.Wave_write.setsampwidth: \"Set\n # the sample width to n bytes.\" and\n # https://www.metadata2go.com/result/2728f923-05ac-43ed-a413-cf3ff59e6689\n # shows 32 bits per sample => 4 bytes\n wav_file.setsampwidth(4)\n wav_file.setframerate(sampleRate) # obtain from audio software\n for wav_value in wav_np_int_values:\n wav_file.writeframesraw(struct.pack('h', wav_value))\n", "id": "4239091", "language": "Python", "matching_score": 1.2153807878494263, "max_stars_count": 0, "path": "audio_to_img_to_audio/img_to_audio.py" }, { "content": "import sys\nimport unicodedata\n\nwith open(sys.argv[1], \"r\") as text_file:\n text = u\"\".join([unicodedata.normalize(\"NFKD\", line) for line in text_file]).lower()\nprint text\n", "id": "2891414", "language": "Python", "matching_score": 0.597618818283081, "max_stars_count": 0, "path": "sam_et_max/word_count/word_count.py" }, { "content": "import os\nimport sys\nimport time\n\nstart = time.time()\ntext_triangle_path = sys.argv[1] if len(sys.argv) == 2 else \"big_triangle.txt\"\n\nprint \"inline using '%s'\" % 
text_triangle_path\n\nfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n text_triangle_path))\n\n# method to integerify each entry in the file\ndef clean_line(text_line):\n return [int(e.strip()) for e in text_line.split(\" \")]\n\n\ndef compute(top, bottom, counter):\n top.append(0)\n for i, value in enumerate(bottom):\n counter += 1\n bottom[i] = value + max(top[i], top[i - 1])\n return bottom, counter\n\n\nvalues = []\noperation = 0\nwith open(file_path, \"r\") as triangle_file:\n for line in map(clean_line, triangle_file.readlines()):\n values, operation = compute(values, line, operation)\n\nprint \"max %s in %s\" % (max(values), time.time() - start)\nprint \"nb of operations: %d\" % operation\n", "id": "2892682", "language": "Python", "matching_score": 3.4697132110595703, "max_stars_count": 0, "path": "triangle/inline_triangle.py" }, { "content": "import os\nimport sys\nimport time\n\nstart = time.time()\ntext_triangle_path = sys.argv[1] if len(sys.argv) == 2 else \"big_triangle.txt\"\n\nprint \"iterator using '%s'\" % text_triangle_path\n\nfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n text_triangle_path))\n\n\n# method to integerify each entry in the file\ndef clean_line(text_line):\n return [int(e.strip()) for e in text_line.split(\" \")]\n\n\nclass TriangleIterator(object):\n def __init__(self, l):\n self.value = 0\n self.ite = iter(l)\n\n def __iter__(self):\n return self\n\n def next(self):\n left = self.value\n self.value = next(self.ite)\n return left, self.value\n\n\nvalues = []\n\nwith open(file_path, \"r\") as triangle_file:\n for line in map(clean_line, triangle_file.readlines()):\n result = []\n triangle_ite = TriangleIterator(values)\n for value in line:\n try:\n m = max(next(triangle_ite))\n except StopIteration:\n m = 0\n result.append(value + m)\n values = result\n\nprint \"max %s in %s\" % (max(values), time.time() - start)\n", "id": "5678220", "language": "Python", "matching_score": 0.01281508058309555, "max_stars_count": 0, "path": "triangle/iterator_triangle.py" }, { "content": "import sys\n\nsearch = int(sys.argv[1])\nnumbers = range(0, int(sys.argv[2]))\n\ndef bin_search(n, array, op=0):\n middle_index = len(array)/2\n if n < array[middle_index]:\n bin_search(n, array[:middle_index], op+1)\n elif n > array[middle_index]:\n bin_search(n, array[middle_index:], op+1)\n print(n, middle_index, array[middle_index], op)\n\nbin_search(search, numbers)\n\n", "id": "4367165", "language": "Python", "matching_score": 0.5456557273864746, "max_stars_count": 0, "path": "algo/binary_search.py" }, { "content": "import random\n\n\ndef selection_sort(arr):\n if len(arr) <= 1:\n return arr\n for i in range(len(arr)):\n minimum = arr[i]\n min_idx = i\n for j in range(i, len(arr)):\n if arr[j] < minimum:\n minimum = arr[j]\n min_idx = j\n arr[i], arr[min_idx] = arr[min_idx], arr[i]\n return arr\n\n\narray = range(10)\nrandom.shuffle(array)\nassert selection_sort(array) == sorted(range(10))\nassert selection_sort([]) == []\nassert selection_sort([12]) == [12]\n\n\ndef insertion_sort(arr):\n if len(arr) <= 1:\n return arr\n for i in range(len(arr)):\n j = i\n while j > 0 and arr[j-1] > arr[j]:\n arr[j], arr[j-1] = arr[j-1], arr[j]\n j -= 1\n return arr\n\n\n\narray = range(10)\nrandom.shuffle(array)\nassert insertion_sort(array) == sorted(range(10))\nassert insertion_sort([]) == []\nassert insertion_sort([12]) == [12]\n\n\ndef quick_sort(arr):\n if len(arr) <= 1:\n return arr\n pivot = arr.pop(0)\n left = quick_sort([x for x in arr if x < 
pivot])\n right = quick_sort([x for x in arr if x >= pivot])\n return left + [pivot] + right\n\n\narray = range(10)\nrandom.shuffle(array)\nassert quick_sort(array) == sorted(range(10))\nassert quick_sort([]) == []\nassert quick_sort([12]) == [12]\n\n\n\ndef merge(l, r):\n arr = []\n while l and r:\n if l[0] < r[0]:\n arr.append(l.pop(0))\n else:\n arr.append(r.pop(0))\n arr.extend(l)\n arr.extend(r)\n return arr\n\n\ndef merge_sort(arr):\n if len(arr) <= 1:\n return arr\n middle = len(arr) // 2\n left = merge_sort(arr[:middle])\n right = merge_sort(arr[middle:])\n return merge(left, right)\n\n\narray = range(10)\nrandom.shuffle(array)\nassert merge_sort(array) == sorted(range(10))\nassert merge_sort([]) == []\nassert merge_sort([12]) == [12]\n\n\n\n\n", "id": "1792731", "language": "Python", "matching_score": 0.7655194401741028, "max_stars_count": 0, "path": "algo/sort.py" }, { "content": "\n\nclass Node(object):\n\n def __init__(self, value, left, right):\n self.value = value\n self.left = left\n self.right = right\n\n\nright_1_right_2 = Node(6, None, None)\nright_1_left_2 = Node(5, None, None)\nleft_1_right_2 = Node(4, None, None)\nleft_1_left_2 = Node(3, None, None)\nright_1 = Node(2, right_1_left_2, right_1_right_2)\nleft_1 = Node(1, left_1_left_2, left_1_right_2)\nroot = Node(0, left_1, right_1)\n\n\ndef pre_order(node):\n if node is None:\n return\n print(node.value)\n pre_order(node.left)\n pre_order(node.right)\n\n\nprint(\"PreOrder\")\npre_order(root)\n\n\ndef in_order(node):\n if node is None:\n return\n in_order(node.left)\n print(node.value)\n in_order(node.right)\n\n\nprint(\"InOrder\")\nin_order(root)\n\n\ndef post_order(node):\n if node is None:\n return\n post_order(node.left)\n post_order(node.right)\n print(node.value)\n\n\nprint(\"PostOrder\")\npost_order(root)\n\n\ndef breadth_first(node, queue=None):\n queue = queue or []\n if node is None:\n return queue\n if not queue:\n queue.append(node.value)\n if node.left is not None:\n queue.append(node.left.value)\n if node.right is not None:\n queue.append(node.right.value)\n queue = breadth_first(node.left, queue)\n queue = breadth_first(node.right, queue)\n return queue\n\n\nprint(\"BreadthFirst\")\nprint(breadth_first(root))\n\n\ndef level_order(root):\n queue = []\n queue.append(root)\n while queue:\n node = queue.pop(0)\n print(node.value)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n\n\nlevel_order(root)\n\n\n", "id": "10966161", "language": "Python", "matching_score": 0.8472291231155396, "max_stars_count": 0, "path": "algo/tree_traversal.py" }, { "content": "#!/bin/bash env\n# Taken from https://leetcode.com/problems/add-two-numbers/\n\n\nclass Node:\n def __init__(self, value, nextNode = None):\n self.value = value\n self.next = nextNode\n\n def toInt(self, depth = 0):\n subTotal = 0\n if self.next:\n subTotal += self.next.toInt(depth + 1)\n return self.value * (10 ** depth) + subTotal\n\n\nn = Node(0)\nassert n.toInt() == 0\n\nn = Node(1)\nassert n.toInt() == 1\n\nn = Node(1, Node(0, Node(2)))\nassert n.toInt() == 201\n\n\nclass AddNodeNumbers:\n\n def add(self, n1: Node, n2: Node) -> Node:\n return self._add(n1, n2, 0)\n\n def _add(self, n1: Node, n2: Node, carry: int) -> Node:\n carry, value = divmod(n1.value + n2.value + carry, 10)\n n = Node(value)\n if n1.next or n2.next or carry:\n n.next = self._add(\n n1.next or Node(0),\n n2.next or Node(0),\n carry)\n return n\n\n\nn1 = Node(2, Node(4, Node(3)))\nn2 = Node(5, Node(6, Node(4)))\naddition = AddNodeNumbers()\nn3 = 
addition.add(n1, n2)\nassert n3.toInt() == 807, f\"{n3.toInt()} != 807\"\n\nn1 = Node(9, Node(9, Node(9, Node(9, Node(9)))))\nn2 = Node(9, Node(9, Node(9)))\naddition = AddNodeNumbers()\nn3 = addition.add(n1, n2)\nassert n3.toInt() == 99999 + 999, f\"{n3.toInt()} != {99999 + 999}\"\n\n\nprint(\"DONE\")\n\n", "id": "8351358", "language": "Python", "matching_score": 1.1834461688995361, "max_stars_count": 0, "path": "algo/add-two.py" }, { "content": "#!/usr/bin/env python3\nfrom __future__ import annotations\n\nimport json\nimport os\nimport sys\nfrom typing import List, Dict, Set\n\nwith open(os.path.abspath(os.path.expanduser(sys.argv[1]))) as f:\n link_categorization = json.load(f)\n\nbookmarks = None\nbookmark_path = os.path.join(os.path.curdir, 'bookmarks.json')\nif len(sys.argv) == 3:\n bookmark_path = sys.argv[2]\n\nbookmark_path = os.path.abspath(os.path.expanduser(bookmark_path))\n\nif os.path.isfile(bookmark_path):\n with open(bookmark_path) as f:\n bookmarks = json.load(f)\n\nCREATE_NEW_CATEGORY = 'n'\nADD_TO_CATEGORY = 'a'\nBACK_UP = 'b'\nPRINT_BOOKMARKS = 'p'\ncommands = [CREATE_NEW_CATEGORY, ADD_TO_CATEGORY, PRINT_BOOKMARKS, BACK_UP]\nprompt = \"\"\"\nn: create new category\na: add to current category [%s]\nb: go back one level [%s]\np: print categories\n\"\"\"\n\n\nclass BookmarkNode:\n name: str\n parent: BookmarkNode\n links: List[str]\n children: List[BookmarkNode]\n\n def __init__(self, name: str, parent: BookmarkNode = None, links: List[str] = None) -> None:\n self.name = name\n self.parent = parent\n if parent:\n self.parent.children.append(self)\n self.links = links or []\n self.children = []\n\n def add_link(self, link: str) -> BookmarkNode:\n self.links.append(link)\n return self\n\n def to_json(self):\n return {\n 'name': self.name,\n 'links': self.links,\n 'children': [child.to_json() for child in self.children]\n }\n\n @staticmethod\n def from_json(json_payload: dict, parent: BookmarkNode = None) -> BookmarkNode:\n node = BookmarkNode(json_payload['name'], parent, json_payload['links'])\n for children_payload in json_payload['children']:\n BookmarkNode.from_json(children_payload, node)\n return node\n\n\ndef print_bookmarks(node: BookmarkNode, depth: int = 0):\n print((' ' * depth), \"- \", node.name, sep='')\n for link in node.links:\n print((' ' * depth), \" > \", link, sep='')\n for child in node.children:\n print_bookmarks(child, depth + 1)\n\n\ndef print_bread_crumbs(node: BookmarkNode) -> str:\n components = []\n while node is not None:\n components.append(node.name)\n node = node.parent\n return \" > \".join(reversed(components))\n\n\ndef get_already_processed_links(node: BookmarkNode, accumulator: List[str] = None) -> Set[str]:\n accumulator = accumulator or []\n for child in node.children:\n accumulator.extend(get_already_processed_links(child, accumulator))\n accumulator.extend(node.links)\n return set(accumulator)\n\n\ndef categorize_bookmarks(root_node: BookmarkNode, links: Dict[str, Dict]) -> None:\n already_processed_links = get_already_processed_links(root_node)\n for link, topics in links.items():\n if link in already_processed_links:\n continue\n choice = None\n current_node = root_node\n while choice != ADD_TO_CATEGORY:\n while choice not in commands:\n print(print_bread_crumbs(current_node))\n children_names = [(i, c.name) for (i, c) in enumerate(current_node.children)]\n print(f\"{children_names}\")\n print(f\"> {link} \\n{topics}\")\n sub_category_selected = False\n choice = input(prompt % (current_node.name, current_node.parent.name if 
current_node.parent else None))\n\n try:\n if int(choice) <= len(current_node.children):\n current_node = current_node.children[int(choice)]\n sub_category_selected = True\n except ValueError:\n pass\n\n if choice == CREATE_NEW_CATEGORY:\n new_category_name = input(\"new category: \")\n new_category = BookmarkNode(new_category_name, current_node)\n current_node = new_category\n elif choice == BACK_UP:\n current_node = current_node.parent or current_node\n elif choice == PRINT_BOOKMARKS:\n print_bookmarks(root_node)\n\n if choice in [CREATE_NEW_CATEGORY, ADD_TO_CATEGORY, BACK_UP] or sub_category_selected:\n print(\"\\033c\") # clear screen\n if choice != ADD_TO_CATEGORY:\n choice = None\n if choice == ADD_TO_CATEGORY:\n current_node.add_link(link)\n\n\nroot = BookmarkNode(\"root\", None)\n\nif bookmarks:\n root = BookmarkNode.from_json(bookmarks)\n\n\ndef save_bookmarks(file_path: str, node: BookmarkNode):\n with open(file_path, 'w') as bookmark_file:\n json.dump(node.to_json(), bookmark_file)\n\n\ntry:\n categorize_bookmarks(root, link_categorization)\n save_bookmarks(bookmark_path, root)\nexcept KeyboardInterrupt:\n save_bookmarks(bookmark_path, root)\n", "id": "8500989", "language": "Python", "matching_score": 1.4473971128463745, "max_stars_count": 0, "path": "bookmark_categorization/manual_classification.py" }, { "content": "import os\nimport sys\nimport time\n\nstart = time.time()\ntriangle_file = sys.argv[1] if len(sys.argv) == 2 else \"big_triangle.txt\"\n\nprint \"node using '%s'\" % triangle_file\n\nfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n triangle_file))\n\n\n# method to integerify each entry in the file\ndef clean_line(line, transformation=int):\n return [transformation(e.strip()) for e in line.split(\" \")]\n\n\nvalues = []\n\n\nclass Node(object):\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n self.best_max = None\n self.nb_visit = 0\n\n def __str__(self):\n return \"[%s/%s] l: %s - r: %s\" % (self.value, self.best_max,\n self.left.value if self.left is not\n None else \"*\",\n self.right.value if self.right is\n not None else \"*\")\n\n\ndef get_max(current_node):\n if current_node.best_max is not None:\n return current_node.best_max\n current_node.nb_visit += 1\n left = get_max(current_node.left) if current_node.left is not None else 0\n right = get_max(current_node.right) if current_node.right is not None else 0\n current_node.best_max = current_node.value + max(left, right)\n return current_node.best_max\n\n\n# triangle is a list of list containing the numbers\nwith open(file_path, \"r\") as triangle_file:\n for line in triangle_file.readlines():\n for nb in clean_line(line, float):\n n = Node(nb)\n values.append(n)\n\nmax_len = len(values)\nleft_index = 1\ncurrent = 0\nnb_node = 1\nnode_limit = 1\n\nwhile left_index + 1 < max_len:\n node = values[current]\n node.left = values[left_index]\n node.right = values[left_index + 1]\n left_index += 1\n current += 1\n nb_node += 1\n if nb_node >= node_limit:\n nb_node = 0\n node_limit += 1\n left_index += 1\n\nprint \"max %s in %s\" % (get_max(values[0]), time.time() - start)\nprint \"average nb visit per node %s\" % (sum(x.nb_visit for x in values))\n\n", "id": "235700", "language": "Python", "matching_score": 2.395674228668213, "max_stars_count": 0, "path": "triangle/node_triangle.py" }, { "content": "import os\nimport sys\nimport time\n\n\nstart = time.time()\ntriangle_file = sys.argv[1] if len(sys.argv) == 2 else \"big_triangle.txt\"\n\nprint \"normal 
using '%s'\" % triangle_file\n\nfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n triangle_file))\n\n\n# method to integerify each entry in the file\ndef clean_line(line):\n return [int(e.strip()) for e in line.split(\" \")]\n\n\n# triangle is a list of list containing the numbers\nwith open(file_path, \"r\") as triangle_file:\n triangle = map(clean_line, triangle_file.readlines())\n\n# base maintains the current 'total'\nbase = triangle.pop(0)\n\n# Each line of the triangle is modified inline which forces us to copy\n# the line back into base\nfor line in triangle:\n # We add this extra entry so we can use the trick where list[-1] is\n # the last element of the list\n base.append(min(0, min(base)))\n for index, entry in enumerate(line):\n # The current entry is added to the max between the number directly\n # above and the number above and one to the left - potentially the\n # last number of the list when index - 1 = -1\n line[index] = entry + max(base[index], base[index - 1])\n base = line[:]\n\nprint \"max %s in %s\" % (max(base), time.time() - start)\n", "id": "957805", "language": "Python", "matching_score": 1.4632797241210938, "max_stars_count": 0, "path": "triangle/triangle.py" }, { "content": "#!/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nREADME_PATH = os.path.join(os.path.dirname(__file__), \"README.md\")\nwith open(README_PATH, \"r\") as README_FILE:\n README = README_FILE.read()\n\nsetup(\n name=\"parse_this\",\n version=\"3.0.0\",\n description=(\n \"Makes it easy to create a command line interface for any \"\n \"function, method or classmethod..\"\n ),\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=[\"parse_this\", \"test\"],\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n download_url=\"https://pypi.python.org/pypi/parse_this\",\n url=\"https://github.com/bertrandvidal/parse_this\",\n license=\"License :: MIT\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "id": "6414575", "language": "Python", "matching_score": 1.3664450645446777, "max_stars_count": 1, "path": "setup.py" }, { "content": "import os\nfrom uuid import uuid4\nfrom zipfile import ZipFile\nimport urllib2\nimport crypt\nimport spwd\nimport sys\n\nZIP_URL = \"http://xato.net/files/10k%20most%20common.zip\"\nPASSWORDS_FILE = \"10k most common.txt\"\nZIP_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), str(uuid4()))\n\nif not os.path.exists(PASSWORDS_FILE):\n # Need this header otherwise get a 403\n req = urllib2.Request(ZIP_URL, headers={\"User-Agent\": \"popo\"})\n try:\n # Dump zip from URL\n with open(ZIP_FILE, \"wb\") as temp_zip:\n temp_zip.write(urllib2.urlopen(req).read())\n # Extract the zip file in the current dir\n with ZipFile(ZIP_FILE) as zip_file:\n zip_file.extractall()\n finally:\n # Remove the zip file\n if os.path.exists(ZIP_FILE):\n os.unlink(ZIP_FILE)\n\n# Read the password from the downloaded file\nwith open(PASSWORDS_FILE, \"r\") as password_file:\n passwords = map(lambda x: x.strip(), password_file.readlines())\n\n# Get entries from shadow password database\nshadow_entries = [entry[:2] for entry in spwd.getspall()]\n\n# We probably need the root access\nif not shadow_entries:\n print \"Can't access 
shadow password database - probably need root access\"\n sys.exit(1)\n\nfor name, encrypted in shadow_entries:\n print \"Processing password for user '%s':\" % name,\n # This indicate that the user has no password\n if encrypted in [\"*\", \"!\"]:\n print \"no hash to process\"\n continue\n for password in passwords:\n # We use the encrypted version of the password as salt as adviced by\n # the crypt module's doc\n if crypt.crypt(password, encrypted) == encrypted:\n print \"password is '%s'\" % password\n break\n else:\n # The little magic of for/else loops\n print \"failed to break password\"\n\n", "id": "42380", "language": "Python", "matching_score": 0.9831872582435608, "max_stars_count": 0, "path": "sam_et_max/shadow/shadow.py" }, { "content": "API_URL = \"http://q.daskeyboard.com\"\n\nheaders = {'Content-Type': 'application/json'}\ndata = {\n \"clientId\": \"\",\n \"clientSecret\": \"\",\n \"grantType\": \"client_credentials\"\n}\n\n# result = requests.post(API_URL + \"/oauth/1.3/token\", json=data, headers=headers)\n# access_token = result.json()[\"access_token\"]\n\nfrom pprint import pprint\nfrom random import choice\nfrom universalclient import Client\n\nq_api = Client(API_URL, headers=headers)\n\nresponse = q_api.oauth._(\"1.3\").token.post(json=data)\njson_data = response.json()\npprint(json_data)\naccess_token = json_data[\"access_token\"]\n\nzones = q_api.api._(\"1.0\").DK5QPID.zones.get(headers={\"Authorization\": \"Bearer \" + access_token})\n\nsignals = q_api.api._(\"1.0\").signals._setArgs(headers={\"Authorization\": \"Bearer \" + access_token})\n\n\ndef gen_hex_colour_code():\n return ''.join([choice('0123456789ABCDEF') for _ in xrange(6)])\n\n\nfor k in zones.json():\n signal_data = {\n \"name\": \"all of them at once\",\n \"pid\": \"DK5QPID\",\n \"effect\": \"SET_COLOR\",\n \"zoneId\": k[\"id\"],\n \"color\": \"#FFF\"\n }\n print \"setting '%s'\" % k[\"name\"]\n response = signals.post(json=signal_data)\n if not response.ok:\n pprint(response.json())\n", "id": "4993326", "language": "Python", "matching_score": 0.8677440881729126, "max_stars_count": 0, "path": "5q/py5q.py" }, { "content": "from django.conf.urls import url, include\nfrom snippets import views\nfrom rest_framework.routers import DefaultRouter\n\n# Create a router and register our viewsets with it.\nrouter = DefaultRouter()\nrouter.register(r'snippets', views.SnippetViewSet)\nrouter.register(r'users', views.UserViewSet)\n\n# The API URLs are now determined automatically by the router.\n# Additionally, we include the login URLs for the browsable API.\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n", "id": "257945", "language": "Python", "matching_score": 1.0158647298812866, "max_stars_count": 0, "path": "djangoprojects/django_rest_framework/tutorial/snippets/urls.py" }, { "content": "from jira import JIRA\nimport sys\nimport os\n\n(_, group) = sys.argv\n\nwith open(os.path.expanduser(\"~/.jira\"), \"r\") as f:\n (u, p) = f.readline().strip(\"\\n\").split(\" \")\n\napi = JIRA(\"https://tools.crowdtwist.com/issues\", basic_auth=(u, p))\n\nusers = api.group_members(group)\nusers = [k for k,v in users.iteritems() if v['active']]\n\nwith open(\"%s\" % os.path.join(os.getcwd(), group), \"w\") as f:\n f.writelines(\"\\n\".join(users))\n\nprint \"group: '%s' - %s users written in %s/jira-users.txt\" % (group,\n len(users),\n os.getcwd())\n", "id": "110313", "language": "Python", "matching_score": 2.099909782409668, 
"max_stars_count": 0, "path": "jira/get_group_users.py" }, { "content": "from jira import JIRA\nimport sys\nimport os\nfrom pprint import pprint\n\n(_, assignee) = sys.argv\n\nwith open(os.path.expanduser(\"~/.jira\"), \"r\") as f:\n (u, p) = f.readline().strip(\"\\n\").split(\" \")\n\napi = JIRA(\"https://tools.crowdtwist.com/issues\", basic_auth=(u, p))\n\n\nresult = api.search_issues(\"assignee=%s and status = closed and \\\"Story Points (0,1,2,3,5,8,13,21)\\\" is not EMPTY\" % assignee, maxResults=999)\nstory_points_field = \"customfield_10002\"\n\n\nprint \"%s, %s, %d\" % (assignee, result.total,\n sum(getattr(t.fields, story_points_field) for t in result))\n\n", "id": "5895741", "language": "Python", "matching_score": 3.244650363922119, "max_stars_count": 0, "path": "jira/closed_points.py" }, { "content": "from jira import JIRA\nimport os\nimport sys\n\n(_, assignee, jql) = sys.argv\n\nwith open(os.path.expanduser(\"~/.jira\"), \"r\") as f:\n (u, p) = f.readline().strip(\"\\n\").split(\" \")\n\napi = JIRA(\"https://tools.crowdtwist.com/issues\", basic_auth=(u, p))\n\nresult = api.search_issues(\"assignee=%s AND %s\" % (assignee, jql), maxResults=1)\n\nprint \"%s, %s\" % (assignee, result.total)\n\n\n", "id": "1917989", "language": "Python", "matching_score": 3.090592861175537, "max_stars_count": 0, "path": "jira/jql_count_issues.py" } ]
1.408566
ouupjj5
[ { "content": "num=10\n\nnum=29\n\nnum=30\n\nnum=40\n", "id": "5289561", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "login.py" } ]
0
gavinju
[ { "content": "import os\n\nfrom sanic import Blueprint\nfrom sanic.log import logger\n\n\nfile_svc = Blueprint('file_svc')\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nstatic_path = os.path.join(dir_path, '..', 'static')\nstatic_file = os.path.join(static_path, 'index.html')\nlogger.info(f'using static path {static_path}')\nfile_svc.static('/ui', static_path)\nfile_svc.static('/ui', static_file)\n\n# TODO: check if following extension is required or not\n'''\nextension_files = ['service-worker.js']\nfor f in extension_files:\n if os.path.isfile(os.path.join(static_path, f)):\n file_svc.static(f'/{f}', os.path.join(static_path, f))\n'''\n", "id": "517572", "language": "Python", "matching_score": 1.375286340713501, "max_stars_count": 1, "path": "server/dataplay/filesvc/__init__.py" }, { "content": "from sanic import Sanic, response\nfrom sanic_openapi import swagger_blueprint\nfrom .datasvc.service import dataset_svc\nfrom .datasvc.registry import DatasetTypeRegistry\nfrom .usersvc.service import user_svc\nfrom .notificationsvc.service import notification_svc\nfrom .dashboardsvc.service import dashboard_svc\nfrom .mlsvc.service import ml_svc\nfrom .confsvc.service import conf_svc\nfrom .filesvc import file_svc\n\nfrom .confsvc.manager import ConfigurationManager\n\nPREFIX = '/api'\n\napp = Sanic(__name__)\n# app.blueprint(openapi_blueprint)\napp.blueprint(swagger_blueprint)\n\napp.config.API_VERSION = '1.0.0'\napp.config.API_TITLE = 'Dataplay API'\napp.config.API_DESCRIPTION = 'Dataplay API'\napp.config.API_CONTACT_EMAIL = '<EMAIL>'\napp.config.API_PRODUCES_CONTENT_TYPES = ['application/json']\n\nserver_config = ConfigurationManager.get_confs('server')\napp.config.HOST = server_config.get('server', 'host')\napp.config.PORT = server_config.getint('server', 'port')\napp.config.DEBUG = server_config.getboolean('server', 'debug')\napp.config.WORKERS = server_config.getint('server', 'workers')\n\ndataset_type_config = ConfigurationManager.get_confs('dataset_type')\ndataset_registry = DatasetTypeRegistry()\nfor section in dataset_type_config.sections():\n module_name = dataset_type_config.get(section, 'module')\n class_name = dataset_type_config.get(section, 'class')\n dataset_registry.register(section, class_name, module_name)\n\napp.blueprint(file_svc)\napp.blueprint(dataset_svc, url_prefix=PREFIX)\napp.blueprint(user_svc, url_prefix=PREFIX)\napp.blueprint(notification_svc, url_prefix=PREFIX)\napp.blueprint(dashboard_svc, url_prefix=PREFIX)\napp.blueprint(ml_svc, url_prefix=PREFIX)\napp.blueprint(conf_svc, url_prefix=PREFIX)\n\n\n@app.route('/')\ndef handle_request(request):\n return response.redirect('/ui')\n\n\nif __name__ == '__main__':\n app.run(\n host=app.config.HOST,\n port=app.config.PORT,\n debug=app.config.DEBUG,\n workers=app.config.WORKERS,\n )\n", "id": "809032", "language": "Python", "matching_score": 1.5906919240951538, "max_stars_count": 1, "path": "server/dataplay/server.py" }, { "content": "#!/usr/bin/env python\n\"\"\" dodo.py is the task definitions for use with the doit package.\n\nLike a makefile, it defines tasks that can be executed with their dependencies.\n\nTasks can be added by specifying a function that begins with task_ - please see\nthe existing tasks for examples. You can also read the doit documentation here:\n - http://pydoit.org/tasks.html\n\nBefore running tasks, doit will _execute_ all the functions in this file (and its\nimported functions) - meaning - if you define a funtion that has a side effect,\nit *will* be called multiple times. 
Conversely, if you define a task that emits a\nstring as the action - the method will be executed but the action will not be taken.\n\nYou can think of everything here as an autouse fixture - be careful!\n\n\"\"\"\nimport os\nimport subprocess\nimport sys\nfrom re import findall\nfrom typing import MutableMapping, Any, List, Optional\n\nfrom doit import get_var\n\nfrom common.util import get_local_dependency_paths\nfrom doit import get_var\n\nDoitReturn = MutableMapping[str, Any]\n\nPYTHON_FOLDER = os.path.dirname(os.path.abspath(__file__))\n\n# TESTING_IMAGE = \"naughtytao/python-builder:0.1\"\n\nDOIT_CONFIG: MutableMapping[str, List[str]] = {\"default_tasks\": [\"formatcheck\", \"lint\"]}\n\nPROJECT = \"dataplay\"\n\n\ndef make_config_path(filename: str) -> str:\n \"\"\" Utility for making a config file path \"\"\"\n return os.path.join(PYTHON_FOLDER, \"common\", filename)\n\n\ndef task_lint() -> DoitReturn:\n \"\"\" Runs the flake8 linter \"\"\"\n config = make_config_path(\".flake8\")\n linter = f\"flake8 {PROJECT} tests --config={config}\"\n return {\"actions\": [linter], \"verbosity\": 2}\n\n\ndef task_format() -> DoitReturn:\n \"\"\" Runs the black code formatter, changing files inplace \"\"\"\n config = make_config_path(\"pyproject.toml\")\n formatter = f\"black --config {config} {PROJECT} tests *.py\"\n return {\"actions\": [formatter], \"verbosity\": 2}\n\n\ndef task_formatcheck() -> DoitReturn:\n \"\"\" Runs the black code formatter, checking for invalid code \"\"\"\n config = make_config_path(\"pyproject.toml\")\n formatter = f\"black --config {config} --check {PROJECT} tests *.py\"\n return {\"actions\": [formatter], \"verbosity\": 2}\n\n\ndef task_pytest() -> DoitReturn:\n \"\"\" Runs pytest with coverage\"\"\"\n coverage_config = make_config_path(\".coveragerc\")\n tester = f\"pytest -v --cov=./ --cov-config {coverage_config}\"\n return {\"actions\": [tester], \"verbosity\": 2}\n\n\ndef task_update_dependencies() -> DoitReturn:\n \"\"\" Updates the requirements-{mac,linux}.txt files for packages\"\"\"\n commands = []\n\n pinner_template = \"pip-compile requirements.in --no-index --output-file {} \"\n pinner = pinner_template.format(\"requirements.txt\")\n '''\n linux_pinner = pinner_template.format(\"requirements-linux.txt\")\n\n linux_command = f\"'cd /python && pip install pip-tools && {linux_pinner}'\"\n docker_command = (\n f\"docker run -it --rm -v $PWD:/python {TESTING_IMAGE} bash -c {linux_command}\"\n )\n '''\n commands.append(pinner)\n # commands.append(docker_command)\n\n return {\"actions\": commands, \"verbosity\": 2}\n\n\ndef task_install_dep():\n \"\"\" install server dependencies \"\"\"\n return {\n \"actions\": [\n \"curl https://raw.githubusercontent.com/automl/auto-sklearn/master/requirements.txt | xargs -n 1 -L 1 pip3 install\",\n \"pip install -r requirements.txt . 
--no-deps\",\n \"pip3 install numpy==1.16.0 holidays==0.9.8\",\n ],\n \"verbosity\": 2,\n }\n\n\ndef task_tox() -> DoitReturn:\n \"\"\" Runs tox \"\"\"\n return {\"actions\": [\"tox\"], \"verbosity\": 2}\n\n\ndef task_build() -> DoitReturn:\n \"\"\" Runs python setup.py sdist on packages \"\"\"\n return {\"actions\": [\"python setup.py sdist\"], \"verbosity\": 2}\n\n\ndef task_link_client() -> DoitReturn:\n \"\"\" Create a symbol link of static to the client side build outout \"\"\"\n return {\"actions\": [\"ln -s ../client/dist static\"], \"verbosity\": 2}\n\n\ndef task_server() -> DoitReturn:\n \"\"\" Runs dataplay server \"\"\"\n return {\n \"actions\": [\n \"gunicorn dataplay.server:app —bind=0.0.0.0:8000 --worker-class=sanic.worker.GunicornWorker --workers=5\"\n ],\n \"verbosity\": 2,\n }\n", "id": "6569546", "language": "Python", "matching_score": 0.017125951126217842, "max_stars_count": 1, "path": "server/dodo.py" }, { "content": "import json\n\nfrom sanic import Blueprint\nfrom sanic import response\nfrom sanic.log import logger\nfrom sanic_openapi import doc\n\nfrom .manager import DatasetManager\n\ndataset_svc = Blueprint('dataset_svc')\n\n\n@dataset_svc.get('/datasets', strict_slashes=True)\n@doc.summary('list all datasets')\n@doc.produces(\n [{\"name\": str, \"id\": str, \"type\": str, \"description\": str}], content_type=\"application/json\"\n)\nasync def list_datasets(request):\n try:\n datasets = DatasetManager.list_datasets()\n return response.json(datasets, status=200)\n except Exception:\n logger.exception('faile to list dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.post('/datasets', strict_slashes=True)\n@doc.summary('creat a dataset')\n@doc.produces({}, content_type=\"application/json\")\n@doc.consumes(\n doc.JsonBody({\"name\": str, \"id\": str, \"type\": str, \"file\": str, \"description\": str}),\n content_type=\"application/json\",\n location=\"body\",\n)\nasync def create_datasets(request):\n logger.debug(f'create dataset with payload={request.body}')\n try:\n request_body = json.loads(request.body)\n DatasetManager.add_dataset(request_body)\n return response.json({}, status=201)\n except Exception:\n logger.exception('faile to create dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.get('/datasets/<id>', strict_slashes=True)\n@doc.summary('get one dataset')\n@doc.produces(\n {\"name\": str, \"id\": str, \"cols\": [str], \"rows\": [[object]]}, content_type=\"application/json\"\n)\nasync def get_dataset(request, id):\n try:\n dataset = DatasetManager.get_dataset(id)\n payload = dataset.get_payload()\n return response.json(payload, status=200)\n except Exception:\n logger.exception('faile to get dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.delete('/datasets/<id>', strict_slashes=True)\n@doc.summary('delete one dataset')\nasync def delete_dataset(request, id):\n try:\n DatasetManager.delete_dataset(id)\n return response.json({}, status=204)\n except Exception:\n logger.exception('faile to delete dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.post('/datasets/<id>/query', strict_slashes=True)\n@doc.summary('run a dataset query')\n@doc.produces({\"cols\": [str], \"rows\": [[object]]}, content_type=\"application/json\")\n@doc.consumes(\n doc.JsonBody({\"type\": str, \"query\": str}), content_type=\"application/json\", location=\"body\"\n)\nasync def query_dataset(request, id):\n logger.debug(f'query dataset query payload={request.body} on {id}')\n try:\n dataset = 
DatasetManager.get_dataset(id)\n request_body = json.loads(request.body)\n query_result = dataset.query(request_body['query'], request_body['type'], True)\n return response.json(query_result, status=200)\n except Exception:\n logger.exception('faile to query dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.post('/dataset_upload', strict_slashes=True)\n@doc.summary('upload a dataset file')\nasync def upload_dataset(request):\n name = request.files[\"file\"][0].name\n try:\n DatasetManager.upload_dataset(name, request.files[\"file\"][0].body)\n return response.json({}, status=200)\n except Exception:\n logger.exception('faile to query dataset')\n return response.json({}, status=500)\n\n\n@dataset_svc.post('/query2dataset', strict_slashes=True)\n@doc.summary('export a query result as dataset')\n@doc.produces({}, content_type=\"application/json\")\n@doc.consumes(\n doc.JsonBody(\n {\n \"source_dataset_id\": str,\n \"query_type\": str,\n \"query\": str,\n \"dataset_id\": str,\n \"dataset_name\": str,\n \"dataset_description\": str,\n }\n ),\n content_type=\"application/json\",\n location=\"body\",\n)\nasync def query2dataset(request):\n try:\n request_body = json.loads(request.body)\n DatasetManager.query2dataset(**request_body)\n return response.json({}, status=200)\n except Exception:\n logger.exception('faile to query dataset')\n return response.json({}, status=500)\n", "id": "10375106", "language": "Python", "matching_score": 2.4075682163238525, "max_stars_count": 1, "path": "server/dataplay/datasvc/service.py" }, { "content": "import os\nfrom io import StringIO\nfrom multiprocessing import Process\nimport _thread\n\nimport pandas as pd\nfrom sanic.log import logger\n\nfrom ..confsvc.manager import ConfigurationManager\nfrom ..datasvc.manager import DatasetManager\nfrom ..datasvc.utils import df_to_cols_rows\n\nfrom .job import MLJob\nfrom .automl import AutoClassificationJob, AutoRegressionJob\nfrom .time_serials import TimeSerialsForecastsJob\n\n\nclass MLJobManager:\n @staticmethod\n def list_jobs():\n job_base_dir = ConfigurationManager.get_confs('mljob').get('job', 'dir')\n\n try:\n job_ids = [\n file\n for file in os.listdir(job_base_dir)\n if os.path.isdir(os.path.join(job_base_dir, file))\n ]\n results = []\n for job_id in job_ids:\n try:\n logger.debug(f'find one job with id={job_id}')\n item = {}\n item['id'] = job_id\n status = MLJob.get_status_by_id(job_id)\n item['status'] = status.name\n meta = MLJob.get_meta(job_id)\n for key in ['type', 'name']:\n item[key] = meta[key]\n results.append(item)\n except Exception:\n logger.exception(f'failed to retrieve job id={job_id}')\n return results\n except Exception:\n logger.exception('failed to list job')\n return []\n\n @staticmethod\n def get_job(job_id):\n result = {}\n meta = MLJob.get_meta(job_id)\n for key in meta:\n result[key] = meta[key]\n status = MLJob.get_status_by_id(job_id)\n result['status'] = status.name\n result['id'] = job_id\n return result\n\n @staticmethod\n def delete_job(job_id):\n MLJob.delete_job_by_id(job_id)\n\n @staticmethod\n def create_job(job_payload):\n job_type = job_payload['type']\n job_option = {}\n job_option_attrs = [\n 'name',\n 'dataset',\n 'features',\n 'targets',\n 'job_option',\n 'validation_option',\n ]\n\n for key in job_option_attrs:\n if key not in job_payload:\n job_payload[key] = {}\n job_option[key] = job_payload[key]\n\n if job_type == 'AutoClassificationJob':\n job = AutoClassificationJob(**job_option)\n elif job_type == 'AutoRegressionJob':\n job = 
AutoRegressionJob(**job_option)\n elif job_type == 'TimeSerialsForecastsJob':\n job = TimeSerialsForecastsJob(**job_option)\n else:\n raise RuntimeError(f'job type={job_type} not supported!')\n\n is_multi_prorcess = ConfigurationManager.get_confs('mljob').getboolean(\n 'job', 'multi_processes'\n )\n if is_multi_prorcess:\n # run train in a new process\n try:\n logger.debug(f'start new process to train ml job={job.id}')\n p = Process(target=job.train)\n p.start()\n # p.join()\n # TODO: update training status using web sock\n except:\n logger.exception(f'failed to run ml job process for job={job.id}')\n else:\n try:\n logger.debug(f'start new thread to train ml job {job.id}')\n _thread.start_new_thread(job.train, ())\n # TODO: update training status using web sock\n except:\n logger.exception(f'failed to run ml job thread for job={job.id}')\n\n return job\n\n @staticmethod\n def predict(job_id, payload):\n data = payload['data']\n input_type = payload['input_type']\n try:\n model = MLJob.get_model(job_id)\n if input_type == 'csv':\n csv_data = StringIO(data)\n df = pd.read_csv(csv_data, sep=\",\")\n df_prediction = model.predict(df)\n output_data = df_prediction.to_csv()\n return output_data\n elif input_type == 'dataset':\n dataset = DatasetManager.get_dataset(data)\n df = dataset.get_df()\n df_prediction = model.predict(df)\n payload = {}\n payload[\"cols\"], payload[\"rows\"] = df_to_cols_rows(df_prediction)\n return payload\n else:\n message = f'input type {input_type} is not supported for prediction'\n logger.error(message)\n raise RuntimeError(message)\n except Exception as e:\n logger.exception(f'failed to do prediction for data={data} id={job_id} error={e}')\n raise e\n", "id": "5099083", "language": "Python", "matching_score": 1.5159010887145996, "max_stars_count": 1, "path": "server/dataplay/mlsvc/manager.py" } ]
1.515901
mister-bailey
[ { "content": "from setuptools import setup, Extension\nimport imp\nimport os.path\n\ninstall_requires=[\n 'numpy',\n 'e3nn',\n ]\n \np = imp.find_module('numpy')[1]\ninclude_dir = os.path.join(p,'core','include','numpy')\next_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'molecule_pipeline')\n\next = Extension('molecule_pipeline_ext',['molecule_pipeline/molecule_pipeline_ext.cpp','molecule_pipeline/molecule_pipeline_imp.cpp',],\n include_dirs = [include_dir,ext_dir], extra_compile_args=['-std=c++11'])\n\n \nsetup(name='MagNET', install_requires=install_requires,\n py_modules=['molecule_pipeline'],\n ext_modules=[ext])\n\n\n", "id": "5217110", "language": "Python", "matching_score": 4.4933061599731445, "max_stars_count": 0, "path": "bad_setup.py" }, { "content": "from setuptools import setup, Extension\nimport imp\nimport os.path\n\np = imp.find_module('numpy')[1]\ninclude_dir = os.path.join(p,'core','include','numpy')\nsetup_dir = os.path.dirname(os.path.realpath(__file__))\n\next = Extension('molecule_pipeline_ext',['molecule_pipeline_ext.cpp','molecule_pipeline_imp.cpp',],\n include_dirs = [include_dir,setup_dir], extra_compile_args=['-std=c++11'])\n\ninstall_requires=['numpy',]\n\nsetup(name='MagNET', py_modules=['molecule_pipeline'], install_requires=['numpy'], ext_modules=[ext])", "id": "9399543", "language": "Python", "matching_score": 0.2025851011276245, "max_stars_count": 0, "path": "molecule_pipeline/setup.py" }, { "content": "import os\nfrom glob import glob\nfrom configparser import ConfigParser\nfrom collections.abc import Sequence\n\ndef save_dir(save_prefix):\n return str(os.path.split(save_prefix)[0])\n\ndef is_seq(x):\n return isinstance(x, Sequence) and not isinstance(x, str)\n\ndef glob_most_recent(path):\n paths = glob(path)\n if len(paths) > 0:\n return max(paths, key=os.path.getmtime)\n else:\n return None\n \ndef ini_file(dir):\n if os.path.isfile(os.path.join(dir, \"training.ini\")):\n ##print(fff\"ini_file({dir}): unique .ini file\", flush=True)\n return os.path.join(dir, \"training.ini\")\n ##print(fff\"ini_file({dir}): non-unique .ini file\", flush=True)\n return glob_most_recent(os.path.join(dir, \"*.ini\"))\n\ndef history_file_from_ini(path):\n parser = ConfigParser(allow_no_value=True)\n parser.read([path])\n if \"training\" in parser and \"save_prefix\" in parser[\"training\"]:\n ##print(fff\"history_file_from_ini({path}): parsed\", flush=True)\n return parser[\"training\"][\"save_prefix\"] + \"-history.torch\"\n else:\n ##print(fff\"history_file_from_ini({path}): cannot parse\", flush=True)\n return None\n\ndef history_file(path):\n if os.path.isdir(path):\n file = ini_file(path)\n if file is not None:\n file = history_file_from_ini(file)\n if file is None:\n file = glob_most_recent(os.path.join(path, \"*history*.torch\"))\n if file is None:\n file = glob_most_recent(os.path.join(path, \"*.torch\"))\n if file is None:\n file = glob_most_recent(os.path.join(path, \"*history*\"))\n ##print(fff\"history_file({path}): from dir\", flush=True)\n return file\n elif os.path.isfile(path):\n if path.endswith(\".ini\"):\n ##print(fff\"history_file({path}): .ini\", flush=True)\n return history_file_from_ini(path)\n else:\n ##print(fff\"history_file_from_path({path}): direct\", flush=True)\n return path\n else:\n raise ValueError(f\"Can't find history file at {path}\")\n \ndef get_history(run):\n if run is None:\n ##print(fff\"get_history({run}): None\", flush=True)\n return None\n if 'TrainTestHistory' in str(type(run)):\n 
##print(fff\"get_history({run}): it's a history\", flush=True)\n return run\n elif isinstance(run, str):\n from history import TrainTestHistory\n ##print(fff\"get_history({run}): get History from file\", flush=True)\n return TrainTestHistory(file=history_file(run))\n else:\n ##print(fff\"get_history({run}): {run}.history!\", flush=True)\n return run.history\n \ndef sorted_runs(runs):\n \"\"\" Sort runs by creation date\"\"\"\n return sorted(runs, key = lambda r : r.config.exploration.creation_time, reverse=True)\n\ndef get_histories(runs, active_only=False, sort=True):\n if is_seq(runs): \n ##print(fff\"get_histories({runs}): sequence of histories\", flush=True)\n # If you give a sequence, you can't ask to sort\n return [get_history(run) for run in runs]\n else:\n if 'EnsembleOfRuns' in str(type(runs)):\n ##print(fff\"get_historeis({runs}): Ensemble\", flush=True)\n ens = runs\n runs = ens.active_runs.values() if active_only else ens.runs.values()\n if sort: runs = sorted_runs(runs)\n return [run.history for run in runs]\n elif isinstance(runs, str):\n if os.path.isdir(runs):\n from exploration import EnsembleOfRuns\n ##print(fff\"get_historeis({runs}): collection from subdir\", flush=True)\n return get_histories(EnsembleOfRuns(parent_dir=runs, use_existing=True, start_exploring=False))\n ##print(fff\"get_historeis({runs}): [] ???\", flush=True)\n return []\n else:\n raise TypeError(f\"'runs' is of type {type(runs)}\")\n \ndef remove_history(histories, history):\n if history is None:\n return histories\n if history in histories:\n histories = histories.copy()\n histories.remove(history)\n return histories\n for h in histories:\n if h.name == history.name:\n histories = histories.copy()\n histories.remove(h)\n return histories\n \nif \"__name__\" == \"__main__\":\n pdir = argv[1] if len(argv) > 0 else \"fluidstack_test\"\n ids = get_histories(argv)\n", "id": "1746531", "language": "Python", "matching_score": 4.477446556091309, "max_stars_count": 0, "path": "train/files.py" }, { "content": "import os\nfrom glob import glob\nfrom configparser import ConfigParser\nfrom collections.abc import Sequence\n\ndef save_dir(save_prefix):\n return str(os.path.split(save_prefix)[0])\n\ndef is_seq(x):\n return isinstance(x, Sequence) and not isinstance(x, str)\n\ndef glob_most_recent(path, exclude=[]):\n paths = [f for f in glob(path) if all(ex not in f for ex in exclude)]\n if len(paths) > 0:\n return max(paths, key=os.path.getmtime)\n else:\n return None\n \ndef ini_file(dir):\n if os.path.isfile(os.path.join(dir, \"training.ini\")):\n ##print(fff\"ini_file({dir}): unique .ini file\", flush=True)\n return os.path.join(dir, \"training.ini\")\n ##print(fff\"ini_file({dir}): non-unique .ini file\", flush=True)\n return glob_most_recent(os.path.join(dir, \"*.ini\"), exclude=[\"wandb.ini\"])\n\ndef history_file_from_ini(path):\n parser = ConfigParser(allow_no_value=True)\n parser.read([path])\n if \"training\" in parser and \"save_prefix\" in parser[\"training\"]:\n ##print(fff\"history_file_from_ini({path}): parsed\", flush=True)\n return parser[\"training\"][\"save_prefix\"] + \"-history.torch\"\n else:\n ##print(fff\"history_file_from_ini({path}): cannot parse\", flush=True)\n return None\n\ndef history_file(path):\n if os.path.isdir(path):\n file = ini_file(path)\n if file is not None:\n file = history_file_from_ini(file)\n if file is None:\n file = glob_most_recent(os.path.join(path, \"*history*.torch\"))\n if file is None:\n file = glob_most_recent(os.path.join(path, \"*.torch\"))\n if file is None:\n 
file = glob_most_recent(os.path.join(path, \"*history*\"), exclude=[\"wandb.ini\"])\n ##print(fff\"history_file({path}): from dir\", flush=True)\n return file\n elif os.path.isfile(path):\n if path.endswith(\".ini\"):\n ##print(fff\"history_file({path}): .ini\", flush=True)\n return history_file_from_ini(path)\n else:\n ##print(fff\"history_file_from_path({path}): direct\", flush=True)\n return path\n else:\n raise ValueError(f\"Can't find history file at {path}\")\n \ndef get_history(run):\n if run is None:\n ##print(fff\"get_history({run}): None\", flush=True)\n return None\n if 'TrainTestHistory' in str(type(run)):\n ##print(fff\"get_history({run}): it's a history\", flush=True)\n return run\n elif isinstance(run, str):\n from history import TrainTestHistory\n ##print(fff\"get_history({run}): get History from file\", flush=True)\n return TrainTestHistory(file=history_file(run))\n else:\n ##print(fff\"get_history({run}): {run}.history!\", flush=True)\n return run.history\n \ndef sorted_runs(runs):\n \"\"\" Sort runs by creation date\"\"\"\n return sorted(runs, key = lambda r : r.config.exploration.creation_time, reverse=True)\n\ndef get_histories(runs, active_only=False, sort=True):\n if is_seq(runs): \n ##print(fff\"get_histories({runs}): sequence of histories\", flush=True)\n # If you give a sequence, you can't ask to sort\n return [get_history(run) for run in runs]\n else:\n if 'EnsembleOfRuns' in str(type(runs)):\n ##print(fff\"get_historeis({runs}): Ensemble\", flush=True)\n ens = runs\n ens.refresh_runs()\n #runs = list(ens.runnable_runs.values()) \n runs = list(set(ens.runs.values()) - set(ens.unreadable_runs.values()))\n print(f\"Found {len(ens.runs)} runs,\\n{len(runs)} usable runs.\")\n if sort: runs = sorted_runs(runs)\n histories = [run.history_or_backup for run in runs]\n return [h for h in histories if len(h.test.loss) > 0]\n elif isinstance(runs, str):\n from exploration import EnsembleOfRuns\n if os.path.isdir(runs):\n return get_histories(EnsembleOfRuns(configs=[ini_file(runs)], start_exploring=False))\n elif os.path.isfile(runs):\n return get_histories(EnsembleOfRuns(configs=[runs], start_exploring=False))\n else:\n raise ValueError(f\"'{runs}' doesn't correspond to an .ini file or directory\")\n else:\n raise TypeError(f\"'runs' is of type {type(runs)}\")\n \ndef remove_history(histories, history):\n if history is None:\n return histories\n if history in histories:\n histories = histories.copy()\n histories.remove(history)\n return histories\n for h in histories:\n if h.name == history.name:\n histories = histories.copy()\n histories.remove(h)\n return histories\n \nif \"__name__\" == \"__main__\":\n pdir = argv[1] if len(argv) > 0 else \"fluidstack_test\"\n ids = get_histories(argv)\n", "id": "5632050", "language": "Python", "matching_score": 1.3144766092300415, "max_stars_count": 0, "path": "train/files.py" }, { "content": "import matplotlib.pyplot as plt\nfrom history import TrainTestHistory, TestingHistory, unsort, log_predict_from_farther_predictions, sort_by_last_coord, find_terminal_clusters, predictions_with_errors\nfrom files import get_history, get_histories, remove_history\nfrom itertools import cycle\nimport numpy as np\nimport os\nfrom colorsys import hsv_to_rgb, hls_to_rgb\n\ndef colorwheel(steps=20, h0=0, h1=1, l=.5, s=.8):\n dh = (h1 - h0) / (steps + 1)\n h = h0\n while True: \n yield hls_to_rgb(h % 1, l, s)\n h += dh\n \n \ndef colorgrad(steps, rgb0=(1.,.0,.2), rgb1=(.2,.0,1.)):\n rgb_d = tuple((c1-c0) / steps for c0, c1 in zip(rgb0, rgb1))\n rgb = 
rgb0\n for s in range(steps+1):\n yield rgb\n rgb = tuple(c + dc for c, dc in zip(rgb, rgb_d))\n \n \n \ndef rep_rgb(rgb):\n r, g, b = rgb\n return f\"({r:1.1f}, {g:1.1f}, {b:1.1f})\"\n \n \n \n\ndef plot_runs(runs, run=None, active_only=False, sort=True, **kwargs):\n return plot_histories(get_histories(runs, active_only=active_only, sort=sort), get_history(run), **kwargs) \n \ndef plot_histories(histories, history=None, axes=None, block=True, sort=True, pause=1.0, figsize=(12,8), colors=\"bgrcmyk\", color=\"k\", fade_rate= .99, error_bars=True, show_pred=False, coord='example', **kwargs): \n if axes is None:\n plt.figure(figsize=figsize)\n axes = plt.gca() \n else:\n plt.cla()\n\n if history is not None:\n histories = remove_history(histories, history)\n alpha = .6\n colors = remove(colors, color)\n else:\n alpha = .5\n \n if histories:\n \n sort_indices = np.empty(len(histories), dtype=int)\n if error_bars:\n pred, errors = predictions_with_errors([h.test for h in histories], coord=coord, sort_indices=sort_indices)\n pred = np.exp(pred)\n elif show_pred:\n pred = np.exp(log_predict_from_farther_predictions([h.test for h in histories], coord=coord, sort_indices=sort_indices))\n \n #colors = cycle(colors)\n colors = colorwheel(len(histories), h0 = 0, h1 = 5)\n #colors = colorgrad(len(histories))\n \n sort_indices = np.empty(len(histories), dtype=int)\n \n #(sort_indices)print\n if show_pred:\n h0 = histories[sort_indices[0]]\n far_x, far_y = h0.test.coord(coord)[-1], h0.test.loss[-1]\n x_axes=[]\n y_axes=[]\n for i, h in enumerate(histories):\n if h.test.coord(coord)[-1] == far_x:\n x_axes.append(h.test.coord(coord))\n y_axes.append(h.test.loss)\n else:\n x_axes.append(np.append(h.test.coord(coord),far_x))\n y_axes.append(np.append(h.test.loss,pred[i]*far_y))\n else:\n x_axes = [h.test.coord(coord) for h in histories]\n y_axes = [h.test.loss for h in histories]\n \n \n c_array = np.ndarray((len(histories),3), dtype=float)\n for i in range(len(c_array)):\n c_array[i] = next(colors)\n \n a_array = alpha * np.power(fade_rate, np.arange(len(histories)+1))\n\n axes.set_ylim([0, 80])\n #axes.set_xlim([75000,100000])\n error=0\n for i, h in (enumerate(histories)):\n if error_bars: error = errors[i] \n plot(h, color=c_array[i], alpha = a_array[i], error=error, **kwargs)\n #plt.plot(np.array(x_axes[i]), np.array(y_axes[i]), f'-', color=c_array[i], alpha = a_array[i], **kwargs)\n \n #x_axis = h.test.coord(x_axis) label = h.name, \n #y_axis = h.test.loss\n #plt.plot(np.array(x_axis), np.array(y_axis), f'-', show=False, color=c_array[i], label=h.name, **kwargs)\n \n \n #for h in histories:\n # c = next(colors)\n # h.test.plot(axes, show=False, color=c, alpha = alpha, label = h.name, **kwargs)\n # alpha *= fade_factor\n \n history = histories[sort_indices[0]]\n \n if history is not None:\n #axes.set_ylim([0, 80])\n #axes.set_xlim([0,20000])\n plot(history, color='k', alpha = 1, label = history.name, coord=coord, **kwargs)\n #spectrum = history.test.get_spectral().spectrum\n #axes2 = plt.gca().twinx()\n #axes2.set_xlim([0,100])\n #axes2.plot(range(len(spectrum)), spectrum, color='g', alpha = 1, label = history.name, **kwargs)\n \n plt.legend(loc=\"best\")\n plt.show(block=block)\n if block==False:\n plt.pause(pause)\n return axes\n\n### Transplant from history.py. 
Trying to rationalize things \ndef plot(history, coord='example', \n color='b', error=None, **kwargs): # x_axis = 'time' is also allowed\n x_axis = history.test.coord(coord)\n y_axis = history.test.loss\n plt.plot(np.array(x_axis), np.array(y_axis), f'-', color=color, **kwargs)\n if error:\n plt.errorbar(x_axis[-1],y_axis[-1],yerr=error,xerr=error/2)\n\n###-------------------------\n\n\ndef plot_run(run, **kwargs):\n return plot_runs([], run, **kwargs)\n\ndef plot_history(history, **kwargs):\n return plot_histories([], history, **kwargs)\n\n\n# INCOMPLETE\ndef plot_extrapolation(runs, run, x=None, active_only=True, coord='example', axes=None, block=True, pause=1.0, figsize=(12,8), colors=\"bgcmyk\", color=\"r\", sort=True, **kwargs):\n \"IMPLEMENTATION UNFINISHED!\"\n #assert runs, f\"'runs' must be a non-empty iterable, but instead runs = {runs}\"\n histories = get_histories(runs, active_only=active_only, sort=sort)\n history = get_history(run)\n histories = remove_history(histories, history)\n if axes is None:\n plt.figure(figsize=figsize)\n axes = plt.gca()\n else:\n plt.cla()\n \n others = [h.test for h in histories]\n colors = cycle(colors)\n for h in histories:\n c = next(colors)\n h.test.plot(axes, show=False, color=c, alpha = .5, label = h.name, **kwargs)\n \n test = history.test\n x_axis = test.coord(coord)\n interval = x_axis[-1] - x_axis[-2]\n new_x = np.arange(x_axis[-1] + interval, x, interval)\n new_loss = np.zeros_like(new_x).astype(float)\n \n log_avg, other_avgs = None, None\n for i in range(len(new_x)):\n log_ext, log_avg, other_avgs, others = test.log_loss_extrapolate(new_x[i], others, coord, log_avg=log_avg, other_avgs=other_avgs) \n new_loss[i] = np.exp(log_ext)\n \n test.plot(axes, show=False, color=color, alpha = 1, label = history.name)\n plt.plot(new_x, new_loss, color + \":\", label='extrapolate')\n \n plt.legend(loc=\"best\")\n plt.show()\n\n\ndef remove(s, x):\n if hasattr(s, \"remove\"):\n s = s.copy()\n s.remove(x)\n return s\n elif hasattr(s, \"replace\"):\n return s.replace(x, \"\")\n else:\n return (y for y in s if y != x)\n \nif __name__ == '__main__':\n from sys import argv\n argv = argv[1:]\n \n active_only = False\n if \"-a\" in argv:\n active_only = True\n argv.remove(\"-a\")\n if \"-f\" in argv:\n fade = True\n argv.remove(\"-f\")\n \n if len(argv) > 0:\n path = argv.pop(0)\n else: path = \"\"\n if len(argv) > 0:\n history = get_history(os.path.join(path, argv.pop()))\n else: history = None\n histories = get_histories(path, active_only=active_only)\n if not histories:\n plot_history(history)\n else: \n plot_histories(histories, history, block=True, sort=True, coord='example')\n\n ", "id": "2792359", "language": "Python", "matching_score": 6.346131801605225, "max_stars_count": 0, "path": "train/plot.py" }, { "content": "import matplotlib.pyplot as plt\nfrom history import TrainTestHistory, TestingHistory, predictions_with_errors\nfrom files import get_history, get_histories, remove_history\nfrom itertools import cycle\nimport numpy as np\nimport os\nfrom colorsys import hsv_to_rgb, hls_to_rgb\n\ndef colorwheel(steps=20, h0=0, h1=1, l=.5, s=.8):\n dh = (h1 - h0) / (steps + 1)\n h = h0\n while True: \n yield hls_to_rgb(h % 1, l, s)\n h += dh\n \n \ndef colorgrad(steps, rgb0=(1.,.0,.2), rgb1=(.2,.0,1.)):\n rgb_d = tuple((c1-c0) / steps for c0, c1 in zip(rgb0, rgb1))\n rgb = rgb0\n for s in range(steps+1):\n yield rgb\n rgb = tuple(c + dc for c, dc in zip(rgb, rgb_d))\n \n \n \ndef rep_rgb(rgb):\n r, g, b = rgb\n return f\"({r:1.1f}, {g:1.1f}, 
{b:1.1f})\"\n \n \n \n\ndef plot_runs(runs, run=None, active_only=False, sort=True, **kwargs):\n return plot_histories(get_histories(runs, active_only=active_only, sort=sort), get_history(run), **kwargs) \n \ndef plot_histories(histories, history=None, axes=None, block=True, sort=True, pause=1.0, figsize=(12,8), colors=\"bgrcmyk\", color=\"k\", fade_rate= .99, error_bars=False, show_pred=False, coord='example', file_lock=None, **kwargs): \n if axes is None:\n plt.figure(figsize=figsize)\n axes = plt.gca() \n else:\n plt.cla()\n\n if history:\n histories = [history] + remove_history(histories, history)\n alpha = .6\n colors = remove(colors, color)\n else:\n alpha = .5\n \n\n colors = colorwheel(len(histories), h0 = 0, h1 = min(max(len(histories) / 5, 1), 5))\n c_array = [[0.,0.,0.]] * len(histories)\n for i in range(len(c_array)):\n c_array[i] = next(colors)\n \n a_array = alpha * np.power(fade_rate, np.arange(len(histories)+1))\n \n if history:\n c_array[0] = color\n a_array[0] = np.sqrt(a_array[0])\n \n if file_lock is None:\n file_lock = memoryview(b'') # No-op context manager\n\n with file_lock: \n \n if show_pred or error_bars:\n p_with_e = predictions_with_errors([h.test for h in histories], coord=coord, errors=True)\n pred = p_with_e[:,0]\n errors = p_with_e[:,1]\n \n far_i = max(range(len(histories)), key = lambda i : histories[i].test.coord(coord)[-1])\n far_x, far_y = histories[far_i].test.coord(coord)[-1], histories[far_i].test.loss[-1]\n far_pred = pred[far_i]\n pred = far_y * np.exp(pred - far_pred)\n \n x_axes=[]\n y_axes=[]\n for i, h in enumerate(histories):\n if h.test.coord(coord)[-1] == far_x:\n x_axes.append(h.test.coord(coord))\n y_axes.append(h.test.loss)\n else:\n x_axes.append(np.append(h.test.coord(coord),far_x))\n y_axes.append(np.append(h.test.loss, pred[i]))\n else:\n x_axes = [h.test.coord(coord) for h in histories]\n y_axes = [h.test.loss for h in histories]\n \n axes.set_ylim([0, 110])\n #axes.set_xlim([0,100000])\n for i, h in reversed(list(enumerate(histories))):\n #print(f\"Plotting {h.name}, max x = {x_axes[i][-1]}\")\n plt.plot(np.array(x_axes[i]), np.array(y_axes[i]), f'-', color=c_array[i], alpha = a_array[i], label=h.name, **kwargs)\n if error_bars:\n error = errors[i] * y_axes[i][-1] \n plt.errorbar(x_axes[i][-2],y_axes[i][-2],yerr=error,xerr=0, color=c_array[i], alpha = a_array[i]) # xerr=max(error/4, x_axes[i][-2]/10)\n \n plt.legend(loc=\"best\")\n plt.show(block=block)\n if block==False:\n plt.pause(pause)\n return axes\n\n###-------------------------\n\n\ndef plot_run(run, **kwargs):\n return plot_runs([], run, **kwargs)\n\ndef plot_history(history, **kwargs):\n return plot_histories([], history, **kwargs)\n\n\n# INCOMPLETE\ndef plot_extrapolation(runs, run, x=None, active_only=True, coord='example', axes=None, block=True, pause=1.0, figsize=(12,8), colors=\"bgcmyk\", color=\"r\", sort=True, **kwargs):\n \"IMPLEMENTATION UNFINISHED!\"\n #assert runs, f\"'runs' must be a non-empty iterable, but instead runs = {runs}\"\n histories = get_histories(runs, active_only=active_only, sort=sort)\n history = get_history(run)\n histories = remove_history(histories, history)\n if axes is None:\n plt.figure(figsize=figsize)\n axes = plt.gca()\n else:\n plt.cla()\n \n others = [h.test for h in histories]\n colors = cycle(colors)\n for h in histories:\n c = next(colors)\n h.test.plot(axes, show=False, color=c, alpha = .5, label = h.name, **kwargs)\n \n test = history.test\n x_axis = test.coord(coord)\n interval = x_axis[-1] - x_axis[-2]\n new_x = 
np.arange(x_axis[-1] + interval, x, interval)\n new_loss = np.zeros_like(new_x).astype(float)\n \n log_avg, other_avgs = None, None\n for i in range(len(new_x)):\n log_ext, log_avg, other_avgs, others = test.log_loss_extrapolate(new_x[i], others, coord, log_avg=log_avg, other_avgs=other_avgs) \n new_loss[i] = np.exp(log_ext)\n \n test.plot(axes, show=False, color=color, alpha = 1, label = history.name)\n plt.plot(new_x, new_loss, color + \":\", label='extrapolate')\n \n plt.legend(loc=\"best\")\n plt.show()\n\n\ndef remove(s, x):\n if hasattr(s, \"remove\"):\n s = s.copy()\n s.remove(x)\n return s\n elif hasattr(s, \"replace\"):\n return s.replace(x, \"\")\n else:\n return (y for y in s if y != x)\n \nif __name__ == '__main__':\n from sys import argv\n argv = argv[1:]\n \n active_only = False\n if \"-a\" in argv:\n active_only = True\n argv.remove(\"-a\")\n if \"-f\" in argv:\n fade = True\n argv.remove(\"-f\")\n \n if len(argv) > 0:\n path = argv.pop(0)\n else: path = \"\"\n if len(argv) > 0:\n history = get_history(os.path.join(path, argv.pop()))\n else: history = None\n histories = get_histories(path, active_only=active_only)\n if not histories:\n plot_history(history)\n else: \n plot_histories(histories, history, block=True, show_pred=True, error_bars=True, coord='example')\n\n ", "id": "4131241", "language": "Python", "matching_score": 1.4698278903961182, "max_stars_count": 0, "path": "train/plot.py" }, { "content": "from collections import deque\nimport numpy as np\nimport torch\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport math\nimport time\nimport bisect\nfrom datetime import timedelta\nfrom training_utils import loss_function, loss_from_residuals, dynamic_loss_from_residuals\n#import training_config as config\nfrom pipeline import Molecule\nfrom resizable import H5Array as Array\nimport os\nfrom shutil import copyfile\nimport h5py\nimport statistics\nimport traceback\nfrom collections.abc import Sequence\n\nn_to_s = {\n1:'H',\n6:'C',\n7:'N',\n8:'O',\n16:'S',\n9:'F',\n17:'Cl'\n}\n\n\nclass TrainTestHistory:\n def __init__(self, testing_batches=[], device='cuda', examples_per_epoch=None, relevant_elements=None, \n run_name=None, number_to_symbol=None, smoothing_window=10,\n train_dynamic=None, store_residuals=False, wandb_log=None, wandb_interval=1,\n file=None, save_prefix=None, hdf5=True, use_backup=True, load=True):\n assert hdf5, \"Non-hdf5 histories not currently working.\"\n self.hdf5 = hdf5\n self.use_backup = use_backup\n\n self.file = file\n if file is None and save_prefix is not None:\n self.file = f\"{save_prefix}-history.torch\"\n if isinstance(self.file, str) and not os.path.isfile(self.file) and not os.path.isfile(self.file + '.bak'):\n load = False\n\n if not load:\n # create new history file\n if not isinstance(self.file, h5py.File):\n self.file = h5py.File(self.file, 'w')\n backup = self.file.filename + '.bak'\n if os.path.isfile(backup):\n os.remove(backup)\n \n self.train = TrainingHistory(\n examples_per_epoch, smoothing_window, bool(train_dynamic),\n wandb_log=wandb_log, wandb_interval=wandb_interval,\n file=self.file.create_group('train'), hdf5=hdf5, load=False)\n self.test = TestingHistory(\n examples_per_epoch, testing_batches, relevant_elements, train_dynamic, device,\n number_to_symbol=number_to_symbol, store_residuals=store_residuals, wandb_log=wandb_log,\n file=self.file.create_group('test'), hdf5=True, load=False)\n self.name = run_name\n self.file.attrs['name'] = run_name\n self.file.attrs['examples_per_epoch'] = examples_per_epoch\n 
self.file.attrs['train_dynamic'] = bool(train_dynamic)\n\n else:\n # load history from file\n if not isinstance(self.file, h5py.File):\n self.file = h5py.File(self.file, 'a')\n filename = self.file.filename\n if os.path.isfile(filename + '.bak'):\n print(\"History file failed to close properly last time!\")\n if input(\"Restore from backup? (y/n) \").strip().lower() == 'y':\n self.file.close()\n os.remove(filename)\n os.rename(filename + '.bak', filename)\n self.file = h5py.File(filename, 'a')\n self.save(verbose=False)\n \n self.name = self.file.attrs['name']\n if 'examples_per_epoch' in self.file.attrs and examples_per_epoch is None:\n examples_per_epoch = self.file.attrs['examples_per_epoch']\n else:\n self.file.attrs['examples_per_epoch'] = examples_per_epoch\n if train_dynamic is None and 'train_dynamic' in self.file.attrs:\n train_dynamic = self.file.attrs['train_dynamic']\n else:\n train_dynamic = bool(train_dynamic)\n self.file.attrs['train_dynamic'] = train_dynamic\n self.train = TrainingHistory(\n examples_per_epoch,\n train_dynamic=train_dynamic,\n wandb_log=wandb_log, wandb_interval=wandb_interval, \n file=self.file['train'], hdf5=True, load=True)\n self.test = TestingHistory(\n examples_per_epoch, testing_batches=testing_batches,\n relevant_elements=relevant_elements, test_dynamic=train_dynamic,\n device=device, wandb_log=wandb_log,\n file=self.file['test'], hdf5=True, load=True)\n \n if os.path.isfile(filename + '.bak'):\n os.remove(filename + '.bak')\n \n def save(self, verbose = True):\n assert self.hdf5, \"Non-hdf5 histories not currently working.\"\n if verbose: print(\"Saving \" + (\"and backing up \" if self.use_backup else \"\") + \"history...\")\n self.file.flush()\n if self.use_backup:\n copyfile(self.file.filename, self.file.filename + \".bak\")\n \n def close(self, verbose = True):\n if self.file is None:\n return\n self.save()\n filename = self.file.filename\n self.file.close()\n if self.use_backup:\n backup = filename + '.bak'\n os.remove(backup)\n self.file = None\n \n \n\n def plot(self, figure=None, x_axis='batch_number', y_axis='smoothed_loss', show=True):\n # x_axis = 'time' is also allowed\n if figure is None:\n plt.figure(figsize=(12,8))\n self.train.plot(plt.gcf(), x_axis, y_axis)\n self.test.plot(plt.gcf(), x_axis)\n if figure is None:\n plt.legend(loc=\"best\")\n if show:\n plt.show()\n\n def log_batch(self, *args, **kwargs):\n self.train.log_batch(*args, **kwargs)\n\n def elapsed_time(self):\n return self.train.elapsed_time[-1]\n\n def run_test(self, model, batch_number=None, epoch=None, batch_in_epoch=None, example_in_epoch=None, example=None, elapsed_time=None, *args, **kwargs):\n if batch_number is None: batch_number = len(self.train.loss)\n if epoch is None: epoch = self.train.epoch[-1]\n if batch_in_epoch is None: batch_in_epoch = self.train.batch_in_epoch[-1]\n if example_in_epoch is None: example_in_epoch = self.train.example_in_epoch[-1]\n if example is None: example = self.train.example[-1]\n if elapsed_time is None: elapsed_time = self.train.elapsed_time[-1]\n return self.test.run_test(model, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time, *args, **kwargs)\n\nclass BaseHistory:\n\n # methods for finding comparison points\n\n def max_epoch_batch(self):\n return self.epoch[-1], self.batch[-1]\n\n def max_batch(self):\n raise Exception(\"This should be overloaded!\")\n\n def max_epoch_example(self):\n return self.epoch[-1], self.example_in_epoch[-1]\n \n\nclass TrainingHistory(BaseHistory):\n def __init__(self, 
examples_per_epoch, smoothing_window=10, train_dynamic=False,\n wandb_log=None, wandb_interval=1, file=None, hdf5=True, load=True):\n self.examples_per_epoch = examples_per_epoch\n self.wandb_log = wandb_log\n self.wandb_interval = wandb_interval\n self.file = file\n \n assert hdf5, \"Non-hdf5 histories not implemented.\"\n if not load:\n self.smoothing_window = smoothing_window\n file.attrs['smoothing_window'] = smoothing_window\n \n self.last_wandb = 0\n file.attrs['last_wandb'] = 0\n\n # initialize the lists we will be accumulating\n # these lists correspond to each other\n # one entry per batch\n # batch 0 is a dummy batch\n self.epoch=Array(file, 'epoch', [0])\n\n # batch n covers example[example_number[n-1]:example_number[n]]\n self.example_in_epoch=Array(file, 'example_in_epoch', [examples_per_epoch])\n self.example=Array(file, 'example', [0])\n \n self.examples_in_batch=Array(file, 'examples_in_batch', [0])\n\n self.batch_in_epoch=Array(file, 'batch_in_epoch', [0])\n self.elapsed_time=Array(file, 'elapsed_time', [0.0])\n\n self.atoms_in_batch=Array(file, 'atoms_in_batch', [0])\n\n self.loss=Array(file, 'loss', [float(\"inf\")])\n self.smoothed_loss=Array(file, 'smoothed_loss', [float(\"inf\")])\n \n if train_dynamic:\n self.dynamic_loss=Array(file, 'dynamic_loss', [float(\"inf\")])\n self.smoothed_dynamic_loss=Array(file, 'smoothed_dynamic_loss', [float(\"inf\")])\n\n # this list has one entry per epoch\n # epoch e starts at index epoch_start[e]\n self.epoch_start=Array(file, 'epoch_start', [0]) # includes a dummy epoch 0 starting at batch 0\n else:\n self.smoothing_window = file.attrs['smoothing_window']\n self.last_wandb = file.attrs['last_wandb']\n \n self.epoch=Array(file, 'epoch')\n self.example_in_epoch=Array(file, 'example_in_epoch')\n self.example=Array(file, 'example') \n self.examples_in_batch=Array(file, 'examples_in_batch')\n self.batch_in_epoch=Array(file, 'batch_in_epoch')\n self.elapsed_time=Array(file, 'elapsed_time')\n self.atoms_in_batch=Array(file, 'atoms_in_batch')\n self.loss=Array(file, 'loss')\n self.smoothed_loss=Array(file, 'smoothed_loss') \n if train_dynamic:\n self.dynamic_loss=Array(file, 'dynamic_loss', self.loss.shape, dtype=float)\n self.smoothed_dynamic_loss=Array(file, 'smoothed_dynamic_loss', self.smoothed_loss.shape, dtype=float)\n self.epoch_start=Array(file, 'epoch_start') \n \n def log_batch(\n self, batch_time, wait_time, examples_in_batch, atoms_in_batch, elapsed_time, example_in_epoch,\n example, loss, dynamic_loss=None, epoch=None, batch_in_epoch=None, verbose=True):\n self.elapsed_time.append(elapsed_time)\n\n if example is None:\n example = (epoch-1) * self.examples_per_epoch + example_in_epoch\n elif example_in_epoch is None:\n example_in_epoch = (example - 1) % self.examples_per_epoch + 1\n\n epoch = self.epoch[-1] if epoch is None else epoch\n if example_in_epoch > self.examples_per_epoch:\n epoch += 1\n if epoch > self.epoch[-1]:\n self.epoch_start.append(len(self.example))\n self.epoch.append(epoch)\n\n self.examples_in_batch.append(examples_in_batch)\n self.example_in_epoch.append(example_in_epoch)\n self.example.append(example)\n self.atoms_in_batch.append(atoms_in_batch)\n batch_in_epoch = math.ceil(example_in_epoch / self.examples_per_batch)\n self.batch_in_epoch.append(batch_in_epoch)\n \n self.loss.append(loss)\n window = min(len(self.loss)-1, self.smoothing_window)\n self.smoothed_loss.append(sum(self.loss[-window:]) / window)\n \n if dynamic_loss is not None:\n self.dynamic_loss.append(dynamic_loss)\n window = 
min(len(self.dynamic_loss)-1, self.smoothing_window)\n self.smoothed_dynamic_loss.append(sum(self.dynamic_loss[-window:]) / window) \n \n if self.wandb_log is not None and len(self.loss) - 1 - self.last_wandb >= self.wandb_interval:\n self.last_wandb = len(self.loss) - 1\n self.file.attrs['last_wandb'] = self.last_wandb\n log_dict = {\n 'elapsed_time':self.elapsed_time[-1],\n 'epoch':epoch,\n 'batch_in_epoch':batch_in_epoch,\n 'example_in_epoch':example_in_epoch,\n 'example':example,\n 'examples_in_batch':examples_in_batch,\n 'atoms_in_batch':atoms_in_batch,\n 'loss':loss,\n 'smoothed_loss':self.smoothed_loss[-1]\n }\n if dynamic_loss is not None:\n log_dict['dynamic_loss'] = dynamic_loss\n log_dict['smoothed_dynamic_loss'] = self.smoothed_dynamic_loss[-1]\n self.wandb_log(log_dict)\n\n if verbose:\n print(f\"{self.epoch[-1]} : {self.batch_in_epoch[-1]} / {self.batch_in_epoch[-1] + self.batches_remaining_in_epoch()} \" +\n (\"loss =\" if dynamic_loss is None else \"abs_loss =\") + f\"{self.smoothed_loss[-1]:8.3f} \" +\n (\"\" if dynamic_loss is None else f\"dyn_loss ={self.smoothed_dynamic_loss[-1]:8.3f}\") +\n f\" t_train = {batch_time:.2f} s \" +\n (f\"t_wait = {wait_time:.2f} s \" if dynamic_loss is None else \"\") +\n f\"t = {str(timedelta(seconds=self.elapsed_time[-1]))[:-5]} \",\n end=\"\\r\", flush=True)\n\n def num_batches(self):\n return len(self.epoch) - 1\n\n # number of epochs we have seen\n def num_epochs(self):\n return len(self.epoch_start) - 1\n\n def current_epoch(self, batch=-1):\n \"\"\"\n epoch of the next incoming batch\n \"\"\"\n return self.epoch[batch] + self.example_in_epoch[batch] // self.examples_per_epoch\n \n def next_example_in_epoch(self, batch=-1):\n \"\"\"\n example in epoch that would start next batch\n \"\"\"\n return self.example_in_epoch[-1] % self.examples_per_epoch\n\n def next_batch_in_epoch(self, batch=-1):\n \"\"\"\n batch number that would follow given batch\n wraps around to 1 if epoch ends\n \"\"\"\n return 1 if self.example_in_epoch[batch] >= self.examples_per_epoch else self.batch_in_epoch[batch] + 1\n \n @property\n def examples_per_batch(self):\n return max(self.examples_in_batch[-2:])\n\n def batches_remaining_in_epoch(self, batch=-1):\n return math.ceil((self.examples_per_epoch - self.example_in_epoch[batch]) / self.examples_per_batch)\n\n def max_batch(self):\n return len(self.loss) - 1\n \n # x_axis = 'time' and y_axis = 'loss' are also allowed\n def plot(self, figure=None, x_axis='example', y_axis='smoothed_loss', color=\"r\", show=True):\n x_axis = self.example[1:] if x_axis=='example' else self.elapsed_time[1:]\n y_axis = self.smoothed_loss[1:] if y_axis=='smoothed_loss' else self.loss[1:]\n \n if figure is None:\n plt.figure(figsize=(12,8))\n plt.plot(np.array(x_axis), np.array(y_axis), \"o-\", color=color, label=\"train\")\n if figure is None:\n plt.legend(loc=\"best\")\n if show:\n plt.show()\n \n def __getstate__(self):\n d = self.__dict__.copy()\n del d['wandb_log']\n return d\n \nclass TestingHistory(BaseHistory):\n\n def __init__(self, examples_per_epoch, testing_batches, relevant_elements=None, test_dynamic=False, device='cuda',\n number_to_symbol=None, store_residuals=False, wandb_log=None,\n file=None, hdf5=True, load=True):\n self.examples_per_epoch = examples_per_epoch\n self.testing_batches = testing_batches\n self.test_dynamic = test_dynamic\n self.device = device\n self.number_to_symbol = number_to_symbol if number_to_symbol else n_to_s\n self.wandb_log = wandb_log\n\n assert hdf5, \"Non-hdf5 histories are 
deprecated.\"\n if not load:\n self.relevant_elements = relevant_elements\n file.attrs['relevant_elements'] = relevant_elements\n self.store_residuals = store_residuals\n file.attrs['store_residuals'] = store_residuals\n\n # initialize the lists we will be accumulating\n self.batch_number = Array(file, 'batch_number', dtype=int)\n self.epoch = Array(file, 'epoch', dtype=int)\n self.batch_in_epoch = Array(file, 'batch_in_epoch', dtype=int)\n self.example_in_epoch = Array(file, 'example_in_epoch', dtype=int)\n self.example = Array(file, 'example', dtype=int)\n self.elapsed_time = Array(file, 'elapsed_time', dtype=float)\n self.loss = Array(file, 'loss', dtype=float)\n self.mean_error_by_element = Array(file, 'mean_error_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n if test_dynamic:\n self.dynamic_loss = Array(file, 'dynamic_loss', dtype=float)\n self.dynamic_mean_error_by_element = Array(file, 'dynamic_mean_error_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n self.dynamic_RMSE_by_element = Array(file, 'dynamic_RMSE_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True) \n else:\n self.relevant_elements = file.attrs['relevant_elements']\n self.store_residuals = file.attrs['store_residuals']\n\n self.batch_number = Array(file, 'batch_number')\n self.epoch = Array(file, 'epoch')\n self.batch_in_epoch = Array(file, 'batch_in_epoch')\n self.example_in_epoch = Array(file, 'example_in_epoch')\n self.example = Array(file, 'example')\n self.elapsed_time = Array(file, 'elapsed_time')\n self.loss = Array(file, 'loss')\n self.mean_error_by_element = Array(file, 'mean_error_by_element', resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', resizable_cross=True)\n if test_dynamic:\n self.dynamic_loss = Array(file, 'dynamic_loss', self.loss.shape, dtype=float)\n self.dynamic_mean_error_by_element = Array(file, 'dynamic_mean_error_by_element', self.mean_error_by_element.shape, dtype=float, resizable_cross=True)\n self.dynamic_RMSE_by_element = Array(file, 'dynamic_RMSE_by_element', self.RMSE_by_element.shape, dtype=float, resizable_cross=True)\n\n # legacy code, shouldn't be used in newer history files!\n if (self.mean_error_by_element.maxshape[1] is not None) or (self.RMSE_by_element.maxshape[1] is not None):\n mean_error_by_element = np.array(self.mean_error_by_element).copy()\n RMSE_by_element = np.array(self.RMSE_by_element).copy()\n del file['mean_error_by_element']\n del file['RMSE_by_element']\n self.mean_error_by_element = Array(file, 'mean_error_by_element', mean_error_by_element.shape, data=mean_error_by_element, dtype=float, resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', RMSE_by_element.shape, data=RMSE_by_element, dtype=float, resizable_cross=True)\n #self.mean_error_by_element[:] = mean_error_by_element\n #self.RMSE_by_element[:] = RMSE_by_element\n\n # allows the resizing of by-element data arrays if relevant elements change\n if relevant_elements is not None and len(self.relevant_elements) != len(relevant_elements):\n print(\"relevant_elements has changed!\")\n if len(self.relevant_elements) < len(relevant_elements):\n self.mean_error_by_element.resize_cross(len(relevant_elements))\n self.RMSE_by_element.resize_cross(len(relevant_elements))\n if test_dynamic:\n self.dynamic_mean_error_by_element.resize_cross(len(relevant_elements))\n 
self.dynamic_RMSE_by_element.resize_cross(len(relevant_elements))\n self.relevant_elements = relevant_elements\n\n # if we have no testing batches, we won't be running tests, so no need to prep\n if not testing_batches:\n return\n\n # for each relevant element, gives you a list of atom indices in the testing set\n atom_indices = {e:[] for e in self.relevant_elements}\n atom_index = 0\n for batch in testing_batches:\n if test_dynamic: # batches come in pairs. inspect first of pair.\n batch = batch[0]\n atomic_numbers = Molecule.get_atomic_numbers(batch.x)\n for e in atomic_numbers:\n e = e.item()\n if e in self.relevant_elements:\n atom_indices[e].append(atom_index)\n atom_index += 1\n self.atom_indices = {e:np.array(ai) for e,ai in atom_indices.items()}\n\n # precompute weight per testing batch and total testing weight\n self.batch_weights = torch.tensor([torch.sum(batch.weights) for batch in testing_batches])\n self.total_weight = sum(self.batch_weights)\n\n \n def run_test(self, model, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time, verbose=True, log=True):\n if verbose: print(\"\")\n\n time0 = time.time()\n losses = []\n residual_chunks = []\n if self.test_dynamic:\n dynamic_losses = []\n dynamic_residual_chunks = []\n model.eval() # don't compute running means\n with torch.no_grad(): # don't compute gradients\n for i, batch in enumerate(self.testing_batches):\n if verbose and i % 10 == 0: print(f\"Testing batches... {i:3} / {len(self.testing_batches)} \", end=\"\\r\", flush=True)\n if self.test_dynamic:\n weights = batch[0].weights.to(self.device)\n residuals = [model(b.to(self.device).x, b.edge_index, b.edge_attr) - b.y for b in batch]\n \n loss = sum(loss_from_residuals(r, weights) for r in residuals) / len(residuals)\n dynamic_residuals = residuals[1] - residuals[0]\n dynamic_loss = loss_from_residuals(dynamic_residuals, weights)\n \n residuals = residuals[0]\n \n dynamic_losses.append(dynamic_loss.detach())\n dynamic_residual_chunks.append(dynamic_residuals.detach())\n else:\n batch.to(self.device)\n loss, residuals = loss_function(model(batch.x, batch.edge_index, batch.edge_attr), batch)\n #if use_tensor_constraint:\n # chunk = chunk[...,0] # Keep only scalar part of residuals\n losses.append(loss.detach())\n residual_chunks.append(residuals)\n\n if verbose: print(\"Collating batch results...\", end=\"\\r\", flush=True)\n # Using average loss over batches, rather than weighted average, to better mirror\n # running average of testing loss:\n #loss = (torch.dot(torch.tensor(losses), self.batch_weights) / self.total_weight).sqrt()\n loss = torch.tensor(losses).sqrt().mean()\n residuals = torch.cat(residual_chunks)\n residuals_by_element = {e:residuals[self.atom_indices[e]] for e in self.relevant_elements}\n\n # compute mean errors and RMSEs\n mean_error_by_element = torch.tensor([residuals_by_element[e].mean() for e in self.relevant_elements])\n RMSE_by_element = torch.tensor([residuals_by_element[e].square().mean().sqrt() for e in self.relevant_elements])\n\n if self.test_dynamic:\n dynamic_loss = torch.tensor(dynamic_losses).sqrt().mean()\n dynamic_residuals = torch.cat(dynamic_residual_chunks)\n dynamic_residuals_by_element = {e:dynamic_residuals[self.atom_indices[e]] for e in self.relevant_elements}\n\n # compute mean errors and RMSEs\n dynamic_mean_error_by_element = torch.tensor([dynamic_residuals_by_element[e].mean() for e in self.relevant_elements])\n dynamic_RMSE_by_element = torch.tensor([dynamic_residuals_by_element[e].square().mean().sqrt() 
for e in self.relevant_elements])\n else:\n dynamic_loss=None\n dynamic_mean_error_by_element=None\n dynamic_RMSE_by_element=None\n dynamic_residuals_by_element=None\n\n time1 = time.time()\n test_time = time1 - time0\n\n if verbose:\n if self.test_dynamic:\n print(f\" Test loss = {loss:6.3f} Dynamic loss = {dynamic_loss:6.3f} Test time = {test_time:.2f}\")\n print(f\" Element Mean Error RMSE Dyn.Mean Dyn.RMSE\")\n #print(f\"<4> Ee <7> 012.345 <5> 012.345\")\n for i, e in enumerate(self.relevant_elements):\n print(f\" {self.number_to_symbol[e].rjust(2)} {mean_error_by_element[i]:3.3f} {RMSE_by_element[i]:3.3f} {dynamic_mean_error_by_element[i]:3.3f} {dynamic_RMSE_by_element[i]:3.3f}\")\n else:\n print(f\" Test loss = {loss:6.3f} Test time = {test_time:.2f}\")\n print(f\" Element Mean Error RMSE\")\n #print(f\"<4> Ee <7> 012.345 <5> 012.345\")\n for i, e in enumerate(self.relevant_elements):\n print(f\" {self.number_to_symbol[e].rjust(2)} {mean_error_by_element[i]:3.3f} {RMSE_by_element[i]:3.3f}\")\n\n if log:\n if example is None:\n example = (epoch-1) * self.examples_per_epoch + example_in_epoch\n elif example_in_epoch is None:\n example_in_epoch = example % self.examples_per_epoch\n self.log_test(batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time,\n loss, mean_error_by_element, RMSE_by_element, residuals_by_element,\n dynamic_loss, dynamic_mean_error_by_element,\n dynamic_RMSE_by_element, dynamic_residuals_by_element) \n return loss\n\n def log_test(self, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time,\n loss, mean_error_by_element, RMSE_by_element, residuals_by_element=None,\n dynamic_loss=None, dynamic_mean_error_by_element=None,\n dynamic_RMSE_by_element=None, dynamic_residuals_by_element=None):\n self.batch_number.append(batch_number)\n self.epoch.append(epoch)\n self.batch_in_epoch.append(batch_in_epoch)\n self.example_in_epoch.append(example_in_epoch)\n self.example.append(example)\n self.elapsed_time.append(elapsed_time)\n self.loss.append(loss)\n self.mean_error_by_element.append(mean_error_by_element)\n self.RMSE_by_element.append(RMSE_by_element)\n if self.test_dynamic:\n self.dynamic_loss.append(dynamic_loss)\n self.dynamic_mean_error_by_element.append(dynamic_mean_error_by_element)\n self.dynamic_RMSE_by_element.append(dynamic_RMSE_by_element) \n \n if self.store_residuals:\n self.residuals_by_element = residuals_by_element\n if self.test_dynamic:\n self.dynamic_residuals_by_element = dynamic_residuals_by_element\n \n if self.wandb_log is not None:\n wandb_dict = {\n 'batch_number':batch_number,\n 'epoch':epoch,\n 'batch_in_epoch':batch_in_epoch,\n 'example_in_epoch':example_in_epoch,\n 'example':example,\n 'elapsed_time':elapsed_time,\n 'test_loss':loss,\n 'mean_error_by_element':{\n int(e):mean_error_by_element[i].item() for i, e in enumerate(self.relevant_elements)},\n 'RMSE_by_element':{\n int(e):RMSE_by_element[i].item() for i, e in enumerate(self.relevant_elements)}}\n if self.test_dynamic:\n wandb_dict = {**wandb_dict,\n 'dynamic_test_loss':dynamic_loss,\n 'dynamic_mean_error_by_element':{\n int(e):dynamic_mean_error_by_element[i].item() for i, e in enumerate(self.relevant_elements)},\n 'dynamic_RMSE_by_element':{\n int(e):dynamic_RMSE_by_element[i].item() for i, e in enumerate(self.relevant_elements)}, \n }\n self.wandb_log(wandb_dict)\n \n def smoothed_loss(self, i = -1, window=5):\n if i < 0:\n j = len(self.loss) - i + 1\n assert j > 0, \"index out of range\"\n else:\n j = i + 1\n assert j <= len(self.loss), 
\"index out of range\"\n window = min(window, j)\n return sum(self.loss[j-window:j]) / window\n \n def coord(self, coord):\n if coord == 'time':\n return self.elapsed_time\n elif coord == 'example':\n return self.example\n else:\n raise ValueError(\"Argument 'coord' should be 'time' or 'example'\")\n \n # WARNING: don't share between processes if you use this \n x_coord = 'example'\n \n @property\n def x(self):\n return getattr(self, TestingHistory.x_coord) \n \n # boring. Due for sad fade \n def log_loss_interpolate(self, x, coord='example', window=0):\n x_array = self.coord(coord)\n if x > x_array[-1]:\n raise IndexError(f\"Requested {coord} {x} > {coord}[-1]\")\n if x < x_array[0]:\n return self.loss[0] * 2 \n \n i = np.searchsorted(x_array, x, side='left')\n window = min(i, window)\n i0 = i - window \n \n if x_array[i] == x:\n if window <= 0:\n return np.log(self.loss[i])\n else:\n return np.mean(np.log(self.loss[i-window:i+1]))\n \n x1 = x_array[i-1]\n x2 = x_array[i]\n s = (x2 - x) / (x2 - x1)\n if window == 0:\n return s * np.log(self.loss[i-1]) + (1-s) * np.log(self.loss[i])\n else:\n if i0 > 0:\n loss = np.log(self.loss[i-window-1:i+1])\n loss[0] *= s\n loss[-1] *= (1-s)\n return np.sum(loss) / window\n else:\n loss = np.log(self.loss[i0:i+1])\n loss[-1] *= 1-s\n return np.sum(loss) / (window - s)\n\n # This function may go extinct (or get radically remodeled)\n def log_loss_extrapolate(self, x, histories, coord='example', decay=4, x0=None, x1=None, log_avg=None, other_avgs=None, window=0):\n if len(self.loss) == 0:\n return float(\"nan\"), None, None, histories\n others = [h for h in histories if h.coord(coord)[-1] >= x]\n if others != histories:\n log_avg=None\n other_avgs=None\n if x0 is None:\n try:\n x0 = max(self.coord(coord)[0], *(h.coord(coord)[0] for h in others))\n except TypeError as e:\n print(\"\\n*** We have the mystery error ***\")\n print(e)\n print(f\"type(others) = {type(others)}\")\n print(f\"type((h.coord(coord)[0] for h in others)) = {type((h.coord(coord)[0] for h in others))}\")\n print(f\"type(others def'n) = {type([h for h in histories if h.coord(coord)[-1] >= x])}\")\n print(\"\\nTraceback:\")\n traceback.print_tb(e.__traceback__)\n os._exit(1)\n\n \n if x1 is None:\n x1 = self.coord(coord)[-1]\n if x1 <= x0:\n return np.log(self.loss[-1]), None, None, others\n assert x >= x1, f\"Tried to extrapolate, but given {coord} is within bounds.\"\n if log_avg is None:\n log_avg = self.log_loss_average(x0,x1,coord,decay)\n if other_avgs is None:\n other_avgs = [h.log_average_loss(x0, x1, coord, decay) for h in others]\n \n log_ext = log_avg - statistics.mean(other_avgs) + statistics.mean(h.log_loss_interpolate(x, coord, window=window) for h in others)\n return log_ext, log_avg, other_avgs, others \n \n def log_loss_average(self, x0, x1, coord=None, decay=0, d=0):\n \"\"\"\n This is the jewel of the \"old functions\"\n \n arg 'd' is a weird one. d=0 will calculate the log_loss_average\n d=1 will take differences, i.e. 
calculate log slope\n \"\"\"\n set_x(coord)\n x = self.x\n assert x1 <= x[-1] and x0 >= x[0], \"Can't average outside of range\"\n if x0==x[0]: # prevent out-of-range errors\n x0 += .001 * (x[1] - x[0])\n \n # find the indices which lie just outside our range\n i0 = np.searchsorted(x, x0, side='left') - 1 # x[i0] < x0 <= x[i0+1]\n i1 = np.searchsorted(x, x1, side='left') # x[i1-1] < x1 <= x[i1]\n #print(f\"i0 = {i0}, i1 = {i1}, len = {len(x)}\")\n #print(f\"-- x0 = {x0:.1f}, x1 = {x1:.1f}\")\n di = i1 - i0\n \n # take a segment of log-losses, interpolating to get the end values\n log_loss = np.log(self.loss[i0:i1+1]) # len di + 1\n log_loss[0] = (log_loss[0] * (x[i0+1] - x0) + \\\n log_loss[1] * (x0 - x[i0])) \\\n / (x[i0+1] - x[i0])\n log_loss[di] = (log_loss[di] * (x1 - x[i1-1]) + log_loss[di-1] * (x[i1] - x1)) / (x[i1] - x[i1-1])\n \n # copy a segment of our coords, making x0 and x1 the end values\n xx = x[i0:i1+1] # len di + 1\n xx[0] = 2 * x0\n xx[di] = 2 * x1\n d = np.diff(xx) # len di\n if decay != 0:\n d *= np.exp((x1 - xx[1:]) * (decay / (x1 - x0)))\n \n avg_log = .5 * np.sum(d * (log_loss[1:] + log_loss[:di])) / np.sum(d)\n return avg_log # len = di len = di\n\n def log_loss_poly_fit(self, x0, x1, coord=None, decay=0, deg=1):\n \"\"\"\n This is the weird one\n \"\"\"\n set_x(coord)\n x = self.x\n assert x1 <= x[-1] and x0 >= x[0], \"Can't average outside of range\"\n if x0==x[0]: # prevent out-of-range errors\n x0 += .001 * (x[1] - x[0])\n \n # find the indices which lie just outside our range\n i0 = np.searchsorted(x, x0, side='left') - 1 # x[i0] < x0 <= x[i0+1]\n i1 = np.searchsorted(x, x1, side='left') # x[i1-1] < x1 <= x[i1]\n #print(f\"i0 = {i0}, i1 = {i1}, len = {len(x)}\")\n #print(f\"-- x0 = {x0:.1f}, x1 = {x1:.1f}\")\n di = i1 - i0\n \n # take a segment of log-losses, interpolating to get the end values\n log_loss = np.log(self.loss[i0:i1+1]) # len di + 1\n log_loss[0] = (log_loss[0] * (x[i0+1] - x0) + \\\n log_loss[1] * (x0 - x[i0])) \\\n / (x[i0+1] - x[i0])\n log_loss[di] = (log_loss[di] * (x1 - x[i1-1]) + log_loss[di-1] * (x[i1] - x1)) / (x[i1] - x[i1-1])\n \n # copy a segment of our coords, making x0 and x1 the end values\n xx = x[i0:i1+1] # len di + 1\n xx[0] = x0\n xx[di] = x1\n \n # Now we just fit a polynomial:\n coeff = np.polyfit(xx, log_loss, deg)\n return coeff\n \n def log_loss_average_with_slope(self, chunk=.1, coord=None, decay=0):\n pass\n \n def get_spectral(self, smooth=.1, coord='example', log_loss=True, decay=0):\n return Spectral(self, log_loss=True, smooth=smooth, coord=coord, decay=decay)\n \n def __getstate__(self):\n d = self.__dict__.copy()\n del d['testing_batches']\n del d['wandb_log']\n return d\n\nclass Spectral:\n \n def __init__(self, h, log_loss=True, smooth=.1, coord='example', decay=0):\n self.h = h\n self.log_loss = log_loss\n self.smooth = smooth\n self.coord = coord\n self.decay = decay\n self.compute()\n \n def compute(self):\n set_x(self.coord)\n x0 = self.h.x[0]\n x1 = self.h.x[1]\n chunk_size = round(len(self.h.x) * self.smooth)\n chunk_num = len(self.h.x) + 1 - chunk_size\n loss = self.h.loss if not self.log_loss else np.log(self.h.loss)\n cumsum = np.cumsum(loss)\n #print(f\"cumsum = {cumsum}\")\n #print(f\"chunk_size = {chunk_size}\")\n #print(f\"cumsum[chunk_size:] = {cumsum[chunk_size:]}\")\n #print(f\"cumsum[:chunk_size] = {cumsum[:chunk_size]}\")\n self.chunk_val = (cumsum[chunk_size:] - cumsum[:-chunk_size]) / chunk_num\n #self.chunk_edges = np.arange(chunk_size-1, chunk_size)\n #assert len(self.chunk_edges) == 
len(self.chunks)\n #self.x = self.h.x[self.chunk_edges]\n self.spectrum = np.fft.fft(self.chunk_val)\n \n \n def recompute(self):\n self.compute()\n \n \n\n\n \n# Static methods for comparing runs of different lengths,\n# computing uncertainty in long-runs predicions from short data\n# and other things?\ndef set_x(coord):\n TestingHistory.x_coord = 'example' if coord=='example' else 'elapsed_time'\n\ndef sort_by_last_coord(histories, coord=None):\n set_x(coord)\n indices, h_sorted = zip(*sorted(enumerate(histories), key = lambda th : th[1].x[-1], reverse=True))\n return np.array(indices), h_sorted\n\n# Handy unsorting function \n\ndef unsort(indices, data):\n #if data is None:\n # indices = [a[0] for a in indices]\n # data = [a[1] for a in indices]\n indices = np.array(indices)\n if isinstance(data, np.ndarray):\n fixed = np.empty_like(data)\n np.put(fixed, indices, data)\n elif isinstance(data, tuple) and len(data) != len(indices):\n fixed = tuple(unsort(indices, d) for d in data)\n elif isinstance(data, Sequence) and len(data) == len(indices):\n fixed = [None] * len(indices)\n for t, s in enumerate(indices):\n fixed[t] = data[s]\n else:\n fixed = data\n return fixed\n \ndef needs_sorted_histories(f, assume=False, h_arg=0):\n \"\"\"\n Decorator for functions that need histories sorted from longest to shortest\n \"\"\"\n def new_f(*args, sorted=assume, sort_indices=None, **kwargs):\n if not sorted:\n if sort_indices is None:\n sort_indices = np.empty(len(args[h_arg]), dtype=int)\n args=list(args)\n sort_indices[:], args[h_arg] = sort_by_last_coord(args[h_arg], kwargs['coord']) \n value = f(*args, **kwargs)\n return unsort(sort_indices, value)\n else:\n return f(*args, **kwargs)\n #new_f.__name__ = f.__name__\n return new_f\n\n\n# Seems best for now\n@needs_sorted_histories\ndef log_predict_from_farther_predictions(histories, coord=None, window=.1, limit=None):\n \"\"\"\n Seems best for now\n Explain what this does\n \"\"\"\n set_x(coord)\n min_x = max(h.x[0] for h in histories)\n N = len(histories)\n \n pred = np.zeros(len(histories), dtype=float)\n for k, h in enumerate(histories):\n if k == 0:\n continue\n \n x1 = h.x[-1]\n x0 = max((1-window)*x1, min_x)\n hx = np.fromiter((histories[i].log_loss_average(x0, x1, coord)\n for i in range(k+1)), float, count=k+1)\n pred[k] = hx[k] + np.average(pred[:k] - hx[:k])\n \n return pred\n \n@needs_sorted_histories\ndef predictions_with_errors(histories, coord=None, window=.1):\n set_x(coord)\n min_x = max(h.x[0] for h in histories)\n N = len(histories)\n \n pred = np.zeros(N, dtype=float)\n errs = np.zeros(N, dtype=float)\n local = np.empty(N, dtype=float)\n for k, h in enumerate(histories):\n if k < 2:\n continue\n \n x1 = h.x[-1]\n x0 = max((1-window)*x1, min_x)\n hx = np.fromiter((histories[i].log_loss_average(x0, x1, coord)\n for i in range(k+1)), float, count=k+1)\n other = np.average(pred[:k] - hx[:k])\n pred[k] = hx[k] + other\n local[1:k] = hx[1:k] + k * (other - (pred[1:k] - hx[1:k])) / (k-1) - pred[1:k]\n errs[k] = np.std(local[1:k], ddof=1)\n \n \n return pred, errs\n \n \n \n# Not quite right\n# Comparing to average is wrong idea \n@needs_sorted_histories \ndef log_avg_diff_predict(histories, coord=None, window=.1, offset=0):\n \"\"\"\n ????\n calculates a score for each history h, giving\n h's log-difference from the log-average of the others,\n near h's farthest extent (going back 'window' of that extent)\n \n setting offset > 0 creates the predictors at a disadvantage,\n using comparisons from earlier in the runs. 
Thus, by comparing\n strong predictors vs weak predictors (using two different\n values of 'offset', we can estimate the noise in the predictors)\n \"\"\"\n \n set_x(coord)\n min_x = max(h.x[0] for h in histories)\n N = len(histories)\n\n cls_ind, cls_x = find_terminal_clusters(histories, coord=coord, sorted=True)\n C = len(cls_ind) - 1\n \n diff_avg = np.zeros(N, dtype=float)\n hts = histories\n c = 0\n while c + offset < C:\n if c == 0 and cls_ind[1] == 1:\n c = 1\n continue\n\n c0 = c # far out near the limits, take averages from end\n chunk = slice(cls_ind[c0],cls_ind[c0+1])\n \n c1 = c + offset # deeper in, testing a selection of long\n # runs using predictors from earlier on\n\n n = cls_ind[c0+1] # n : total number of runs we're investigating\n x1 = cls_x[c1] # if offset > 0, generating predictors at smaller time/example\n x0 = max((1 - window) * x1, min_x) # lower bound of critical window\n #print(f\"#### (1-window) = {(1-window):.2f}, (1-window) * x1 = {((1-window) * x1):.2f}\")\n #print(f\"#### x0 = {x0:.1f}, x1 = {x1:.1f}, min_x = {min_x}\")\n \n # each_avg = each run's (log) average over smoothing window:\n each_avg = np.array(list([\n h.log_average_loss(x0, x1, coord, decay=0) # scalar\n # compute for up to + incl. current cluster:\n for h in hts[0:n]\n ]))\n \n sum_average = np.sum(each_avg) # dim = 0\n #print(f\"sum average\\n{sum_average}\\n\")\n #print(f\"diff_avg\\n{diff_avg}\\n\")\n #print(f\"each_avg\\n{each_avg}\\n\", flush=True)\n diff_avg[chunk] = each_avg[chunk] * (1 + 1 / (n - 1)) - sum_average / (n - 1) \n \n c += 1\n \n return diff_avg\n \n#-------------------------- \n\n# =====================================\n# =====================================\n# =====================================\n\n \ndef log_diff_avg(histories, coord=None, window=.1, offset=-1, sorted=False):\n \"\"\"\n More stumbling\n \"\"\"\n if not sorted:\n raise NotImplementedError(\"Histories must be presorted by longest run, descending\")\n\n set_x(coord)\n min_x = min(*histories, key = lambda h : h.x[0]).x[0]\n N = len(histories)\n \n \ndef find_terminal_clusters(histories, coord=None, tolerance=.1, sorted=False):\n \"\"\"\n returns a partition index of the histories grouped by their\n clustering at the same terminus, from latest to earliest.\n 'tolerance' picks how close their termina should be.\n \n Assumes histories sorted from longest to shortest (by 'coord')\n \n returns a pair:\n 1. an array: indices, such that\n histories[range(indices[i],indices[i+1])]\n gives the corresponding group of clustered termina\n 2. 
an array: last_x, giving the largest coordinate in the\n cluster\n \"\"\"\n assert sorted, NotImplementedError(\"Histories must be presorted by longest run, descending\")\n \n set_x(coord)\n \n indices = [0]\n common_x = [histories[0].x[-1]]\n last_x = common_x[0] \n \n for i, h in enumerate(histories):\n if h.x[-1] < last_x * (1 - tolerance):\n indices.append(i)\n common_x.append(h.x[-1])\n last_x = h.x[-1]\n else:\n common_x[-1] = h.x[-1]\n \n return np.array(indices), np.array(common_x)\n \n \n \n \ndef log_inferred_loss(histories, x, coord='example', window=.1, presorted=False):\n \"\"\"\n calculates a score for each history, at coordinate x,\n predictive of long-term relative performance.\n \"\"\"\n raise NotImplementedError(\"not implemented yet!\")\n \nTH = TestingHistory \n \n \n \n\n#def exp_fn(x, a, speed, limit):\n# return a * np.exp(speed * x) + limit\ndef exp_fn(x, a, speed, limit):\n return np.exp(a * np.exp(speed * x) + limit)\n#def exp_fn(x, a, speed, limit, k, w):\n# return a * np.exp(speed * x) + limit + k / (x + w)\n#def exp_fn(x, k, w, limit):\n# return limit + k / (x + w)\n \n\n# if function is called\nif __name__ == '__main__':\n import sys\n if len(sys.argv) < 2:\n exit()\n input(\"Exploring the evolution of exp fit over time...\")\n history = TrainTestHistory(file=sys.argv[1])\n history.test.show_fit_evolution()\n input(\"Press Enter to continue...\")\n\n\n# this function is deprecated. I include it only for reference.\n\ndef compute_testing_loss(model, testing_batches, device, relevant_elements, training_history,\n molecules_dict, epoch, minibatches_seen):\n print(\"\\ntesting...\", end=\"\\r\", flush=True)\n\n # set model to testing mode (for batchnorm)\n model.eval()\n time1 = time.time()\n n_minibatches = len(testing_batches)\n testing_loss = 0.0\n n_testing_examples_seen = 0\n residuals_by_molecule = {}\n residuals_by_site_label = {}\n stats_by_element = {}\n for minibatch_index, minibatch in enumerate(testing_batches):\n minibatch.to(device)\n\n with torch.no_grad():\n # run model\n output = model(minibatch.x, minibatch.edge_index, minibatch.edge_attr)\n\n # compute MSE\n loss, residuals = loss_function(output, minibatch)\n minibatch_loss = np.sqrt(loss.item())\n testing_loss = testing_loss * n_testing_examples_seen + \\\n minibatch_loss * minibatch.n_examples\n n_testing_examples_seen += minibatch.n_examples\n testing_loss = testing_loss / n_testing_examples_seen\n\n # store residuals\n residuals = residuals.squeeze(-1).cpu().numpy()\n atom_tally = 0\n for example in minibatch.example_list:\n molecule = molecules_dict[example.ID]\n n_atoms = molecule.n_atoms\n if example.ID not in residuals_by_molecule:\n residuals_by_molecule[example.ID] = []\n subset = residuals[atom_tally:atom_tally+n_atoms]\n residuals_by_molecule[example.ID].append(subset)\n atom_tally += n_atoms\n assert atom_tally == residuals.shape[0], \"Testing atom count mismatch!\"\n\n # interim status update\n print(f\"testing {minibatch_index+1:5d} / {n_minibatches:5d} minibatch_test_loss = {minibatch_loss:<10.3f} overall_test_loss = {testing_loss:<10.3f}\", end=\"\\r\", flush=True)\n #testing_loss /= n_minibatches # ???????????????????\n\n # reshape residual data\n all_residuals = { element : [] for element in relevant_elements } # element -> [residuals]\n for ID, results in residuals_by_molecule.items():\n results = np.array(results).T\n molecule = molecules_dict[ID]\n for atomic_index, this_result in enumerate(results):\n element = molecule.atomic_numbers[atomic_index]\n if element not in 
relevant_elements:\n continue\n site_label = \"f{ID}_{number_to_symbol[element]}{atomic_index+1}\"\n residuals_by_site_label[site_label] = this_result\n all_residuals[element].extend(this_result)\n\n # compute mean errors and RMSEs\n for element, residuals in all_residuals.items():\n residuals = np.array(residuals)\n mean_error = np.mean(residuals)\n RMSE = np.sqrt(np.mean(np.square(residuals)))\n stats_by_element[element] = (mean_error,RMSE)\n\n # log results\n training_history.log_testing_loss(epoch, minibatches_seen, testing_loss,\n residuals_by_molecule, residuals_by_site_label,\n stats_by_element)\n\n # print update\n elapsed = time.time() - time1\n print(f\" testing_loss = {testing_loss:10.3f} t_test = {elapsed:.2f} s \")\n print(\" means / RMSEs = \", end=\"\")\n for element, (mean_error,RMSE) in stats_by_element.items():\n print(f\"{number_to_symbol[element]} : {mean_error:.3f} / {RMSE:.3f} \", end=\"\")\n print(flush=True)\n\n", "id": "475533", "language": "Python", "matching_score": 8.53846263885498, "max_stars_count": 0, "path": "train/history.py" }, { "content": "from collections import deque\nfrom email.parser import BytesParser\nimport numpy as np\n#import sortednp as snp\nimport torch\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport math\nimport time\nimport bisect\nfrom datetime import timedelta\nfrom training_utils import loss_function, loss_from_residuals, dynamic_loss_from_residuals\n#import training_config as config\nfrom pipeline import Molecule\nfrom resizable import H5Array as Array\nimport os\nfrom shutil import copyfile\nimport h5py\nimport statistics\nimport traceback\nfrom collections.abc import Sequence\nimport time\nfrom filelocks import ExistLock, Timeout\n\nn_to_s = {\n1:'H',\n6:'C',\n7:'N',\n8:'O',\n16:'S',\n9:'F',\n17:'Cl'\n}\n\n\nclass TrainTestHistory:\n\n @classmethod\n def get_lock(cls, filename=None, save_prefix=None, load=True, writeable=False, block=True, hard_lock=True, timeout=None, polling_interval=.05):\n if not block:\n timeout = 0.0\n start_time = time.time()\n \n if filename is None:\n filename = save_prefix + \"-history.torch\"\n \n if hard_lock:\n #print(f\"Acquiring ExistLock('{filename + '.lock'}')... \", end='')\n with ExistLock(filename + '.lock'):\n #print(\"Done.\")\n pass\n\n mode = {\n (True, True) : 'r+',\n (True, False) : 'r',\n (False, True) : 'w'\n }[load, writeable]\n file = None\n #print(f\"Opening hdf5 file {filename} ... \", flush=True)\n while file is None:\n try:\n file = h5py.File(filename, mode)\n except BlockingIOError:\n #print(\"Except BlockingIOError.\", flush=True)\n if timeout is None or time.time() - start_time < timeout:\n #print(f\"Timeout ({timeout:.2f} s) not reached\", flush=True)\n time.sleep(polling_interval)\n else:\n #print(f\"Timeout reached. 
Raising Timeout exception.\", flush=True)\n raise Timeout(f\"Failed to open {filename}\")\n #print(\"Done opening hdf5.\")\n\n if os.path.isfile(filename + \".lock\"):\n os.remove(filename + \".lock\")\n\n return file\n \n def __init__(self, testing_batches=[], device='cpu', examples_per_epoch=None, relevant_elements=None, \n run_name=None, number_to_symbol=None, writeable=False, smoothing_window=10,\n train_dynamic=None, store_residuals=False, wandb_log=None, wandb_interval=1,\n file=None, save_prefix=None, use_backup=True, load=None,\n block=True, hard_lock=True, timeout=None, polling_interval=0.05):\n\n if file is not None:\n self.filename = file\n elif save_prefix is not None:\n self.filename = save_prefix + \"-history.torch\"\n else:\n raise ValueError(\"Must provide either 'file' or 'save_prefix'.\")\n\n self.device = device\n self.use_backup = use_backup\n \n if not os.path.isfile(self.filename):\n if load:\n if self.use_backup and os.path.isfile(self.filename + '.bak'):\n print(f\"Restoring {self.filename} from backup.\")\n copyfile(self.filename + \".bak\", self.filename)\n else:\n raise ValueError(f\"Can't find history file '{self.filename}'\")\n elif not os.path.isfile(self.filename + '.bak'):\n load = False\n elif load is None:\n load = True\n if self.use_backup and os.path.isfile(self.filename + '.bak'):\n print(\"History file failed to close properly last time.\")\n if os.path.getsize(self.filename) < os.path.getsize(self.filename + '.bak'):\n print(f\"Restoring {self.filename} from backup.\")\n copyfile(self.filename + \".bak\", self.filename)\n \n writeable = writeable or not load\n self.writeable = writeable\n \n self.file = TrainTestHistory.get_lock(\n self.filename,\n load=load,\n writeable=writeable,\n block=block,\n hard_lock=hard_lock,\n timeout=timeout,\n polling_interval=polling_interval)\n \n if not load:\n # create new history file\n self.train = TrainingHistory(\n examples_per_epoch, smoothing_window, bool(train_dynamic),\n wandb_log=wandb_log, wandb_interval=wandb_interval,\n file=self.file.create_group('train'), load=False, writeable=writeable)\n self.test = TestingHistory(\n examples_per_epoch, testing_batches, relevant_elements, train_dynamic, device,\n number_to_symbol=number_to_symbol, store_residuals=store_residuals, wandb_log=wandb_log,\n file=self.file.create_group('test'), load=False, writeable=writeable, name=run_name)\n self.name = run_name\n self.file.attrs['name'] = run_name\n self.file.attrs['examples_per_epoch'] = examples_per_epoch\n self.file.attrs['train_dynamic'] = bool(train_dynamic)\n\n else: \n try: # in case the file is corrupted\n self.name = self.file.attrs['name']\n if 'examples_per_epoch' in self.file.attrs and examples_per_epoch is None:\n examples_per_epoch = self.file.attrs['examples_per_epoch']\n else:\n self.file.attrs['examples_per_epoch'] = examples_per_epoch\n if train_dynamic is None and 'train_dynamic' in self.file.attrs:\n train_dynamic = self.file.attrs['train_dynamic']\n else:\n train_dynamic = bool(train_dynamic)\n self.file.attrs['train_dynamic'] = train_dynamic\n self.train = TrainingHistory(\n examples_per_epoch,\n train_dynamic=train_dynamic,\n wandb_log=wandb_log, wandb_interval=wandb_interval, \n file=self.file['train'], load=True, writeable=writeable)\n self.test = TestingHistory(\n examples_per_epoch, testing_batches=testing_batches,\n relevant_elements=relevant_elements, test_dynamic=train_dynamic,\n device=device, wandb_log=wandb_log,\n file=self.file['test'], load=True, writeable=writeable, name=run_name)\n 
except: # file is corrupted\n self.close(save=False)\n raise\n \n #if os.path.isfile(self.file.filename + '.bak'):\n # os.remove(self.file.filename + '.bak')\n \n \n def save(self, verbose = True):\n self.file.flush()\n if not self.writeable:\n raise RuntimeError(f\"Tried to save history, but {self.filename} in non-saving mode.\")\n if verbose: print(f\"{self.filename} in non-saving mode.\")\n return\n if verbose: print(\"Saving \" + (\"and backing up \" if self.use_backup else \"\") + \"history...\")\n if self.use_backup:\n copyfile(self.filename, self.filename + \".bak\")\n \n def close(self, save=True, verbose = True):\n if self.file:\n if save and self.writeable:\n self.save()\n if (os.path.isfile(self.filename + '.bak') and\n os.path.getsize(self.filename) >= os.path.getsize(self.filename + '.bak')):\n os.remove(self.filename + '.bak')\n self.file.close()\n self.file = None \n \n\n def plot(self, figure=None, x_axis='batch_number', y_axis='smoothed_loss', show=True):\n # x_axis = 'time' is also allowed\n if figure is None:\n plt.figure(figsize=(12,8))\n self.train.plot(plt.gcf(), x_axis, y_axis)\n self.test.plot(plt.gcf(), x_axis)\n if figure is None:\n plt.legend(loc=\"best\")\n if show:\n plt.show()\n\n def log_batch(self, *args, **kwargs):\n self.train.log_batch(*args, **kwargs)\n\n def elapsed_time(self):\n return self.train.elapsed_time[-1]\n\n def run_test(self, model, batch_number=None, epoch=None, batch_in_epoch=None, example_in_epoch=None, example=None, elapsed_time=None, *args, **kwargs):\n if batch_number is None: batch_number = len(self.train.loss)\n if epoch is None: epoch = self.train.epoch[-1]\n if batch_in_epoch is None: batch_in_epoch = self.train.batch_in_epoch[-1]\n if example_in_epoch is None: example_in_epoch = self.train.example_in_epoch[-1]\n if example is None: example = self.train.example[-1]\n if elapsed_time is None: elapsed_time = self.train.elapsed_time[-1]\n return self.test.run_test(model, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time, *args, **kwargs)\n\nclass BaseHistory:\n\n # methods for finding comparison points\n\n def max_epoch_batch(self):\n return self.epoch[-1], self.batch[-1]\n\n def max_batch(self):\n raise Exception(\"This should be overloaded!\")\n\n def max_epoch_example(self):\n return self.epoch[-1], self.example_in_epoch[-1]\n \n\nclass TrainingHistory(BaseHistory):\n def __init__(self, examples_per_epoch, smoothing_window=10, train_dynamic=False,\n wandb_log=None, wandb_interval=1, file=None, load=True, writeable=None):\n self.examples_per_epoch = examples_per_epoch\n self.wandb_log = wandb_log\n self.wandb_interval = wandb_interval\n self.file = file\n self.writeable = writeable\n \n if not load:\n self.smoothing_window = smoothing_window\n file.attrs['smoothing_window'] = smoothing_window\n \n self.last_wandb = 0\n file.attrs['last_wandb'] = 0\n\n # initialize the lists we will be accumulating\n # these lists correspond to each other\n # one entry per batch\n # batch 0 is a dummy batch\n self.epoch=Array(file, 'epoch', [0])\n\n # batch n covers example[example_number[n-1]:example_number[n]]\n self.example_in_epoch=Array(file, 'example_in_epoch', [examples_per_epoch])\n self.example=Array(file, 'example', [0])\n \n self.examples_in_batch=Array(file, 'examples_in_batch', [0])\n\n self.batch_in_epoch=Array(file, 'batch_in_epoch', [0])\n self.elapsed_time=Array(file, 'elapsed_time', [0.0])\n\n self.atoms_in_batch=Array(file, 'atoms_in_batch', [0])\n\n self.loss=Array(file, 'loss', [float(\"inf\")])\n 
self.smoothed_loss=Array(file, 'smoothed_loss', [float(\"inf\")])\n \n if train_dynamic:\n self.dynamic_loss=Array(file, 'dynamic_loss', [float(\"inf\")])\n self.smoothed_dynamic_loss=Array(file, 'smoothed_dynamic_loss', [float(\"inf\")])\n\n # this list has one entry per epoch\n # epoch e starts at index epoch_start[e]\n self.epoch_start=Array(file, 'epoch_start', [0]) # includes a dummy epoch 0 starting at batch 0\n else:\n self.smoothing_window = file.attrs['smoothing_window']\n self.last_wandb = file.attrs['last_wandb']\n \n self.epoch=Array(file, 'epoch')\n self.example_in_epoch=Array(file, 'example_in_epoch')\n self.example=Array(file, 'example') \n self.examples_in_batch=Array(file, 'examples_in_batch')\n self.batch_in_epoch=Array(file, 'batch_in_epoch')\n self.elapsed_time=Array(file, 'elapsed_time')\n self.atoms_in_batch=Array(file, 'atoms_in_batch')\n self.loss=Array(file, 'loss')\n self.smoothed_loss=Array(file, 'smoothed_loss') \n if train_dynamic:\n self.dynamic_loss=Array(file, 'dynamic_loss', self.loss.shape, dtype=float)\n self.smoothed_dynamic_loss=Array(file, 'smoothed_dynamic_loss', self.smoothed_loss.shape, dtype=float)\n self.epoch_start=Array(file, 'epoch_start') \n \n def log_batch(\n self, batch_time, wait_time, examples_in_batch, atoms_in_batch, elapsed_time, example_in_epoch,\n example, loss, dynamic_loss=None, epoch=None, batch_in_epoch=None, verbose=True):\n self.elapsed_time.append(elapsed_time)\n\n if example is None:\n example = (epoch-1) * self.examples_per_epoch + example_in_epoch\n elif example_in_epoch is None:\n example_in_epoch = (example - 1) % self.examples_per_epoch + 1\n\n epoch = self.epoch[-1] if epoch is None else epoch\n if example_in_epoch > self.examples_per_epoch:\n epoch += 1\n if epoch > self.epoch[-1]:\n self.epoch_start.append(len(self.example))\n self.epoch.append(epoch)\n\n self.examples_in_batch.append(examples_in_batch)\n self.example_in_epoch.append(example_in_epoch)\n self.example.append(example)\n self.atoms_in_batch.append(atoms_in_batch)\n batch_in_epoch = math.ceil(example_in_epoch / self.examples_per_batch)\n self.batch_in_epoch.append(batch_in_epoch)\n \n self.loss.append(loss)\n window = min(len(self.loss)-1, self.smoothing_window)\n self.smoothed_loss.append(sum(self.loss[-window:]) / window)\n \n if dynamic_loss is not None:\n self.dynamic_loss.append(dynamic_loss)\n window = min(len(self.dynamic_loss)-1, self.smoothing_window)\n self.smoothed_dynamic_loss.append(sum(self.dynamic_loss[-window:]) / window) \n \n if self.wandb_log is not None and len(self.loss) - 1 - self.last_wandb >= self.wandb_interval:\n self.last_wandb = len(self.loss) - 1\n self.file.attrs['last_wandb'] = self.last_wandb\n log_dict = {\n 'elapsed_time':self.elapsed_time[-1],\n 'epoch':epoch,\n 'batch_in_epoch':batch_in_epoch,\n 'example_in_epoch':example_in_epoch,\n 'example':example,\n 'examples_in_batch':examples_in_batch,\n 'atoms_in_batch':atoms_in_batch,\n 'loss':loss,\n 'smoothed_loss':self.smoothed_loss[-1]\n }\n if dynamic_loss is not None:\n log_dict['dynamic_loss'] = dynamic_loss\n log_dict['smoothed_dynamic_loss'] = self.smoothed_dynamic_loss[-1]\n self.wandb_log(log_dict)\n\n if verbose:\n print(f\"{self.epoch[-1]} : {self.batch_in_epoch[-1]} / {self.batch_in_epoch[-1] + self.batches_remaining_in_epoch()} \" +\n (\"loss =\" if dynamic_loss is None else \"abs_loss =\") + f\"{self.smoothed_loss[-1]:8.3f} \" +\n (\"\" if dynamic_loss is None else f\"dyn_loss ={self.smoothed_dynamic_loss[-1]:8.3f}\") +\n f\" t_train = {batch_time:.2f} s \" +\n 
(f\"t_wait = {wait_time:.2f} s \" if dynamic_loss is None else \"\") +\n f\"t = {str(timedelta(seconds=self.elapsed_time[-1]))[:-5]} \",\n end=\"\\r\", flush=True)\n\n def num_batches(self):\n return len(self.epoch) - 1\n\n # number of epochs we have seen\n def num_epochs(self):\n return len(self.epoch_start) - 1\n\n def current_epoch(self, batch=-1):\n \"\"\"\n epoch of the next incoming batch\n \"\"\"\n return self.epoch[batch] + self.example_in_epoch[batch] // self.examples_per_epoch\n \n def next_example_in_epoch(self, batch=-1):\n \"\"\"\n example in epoch that would start next batch\n \"\"\"\n return self.example_in_epoch[-1] % self.examples_per_epoch\n\n def next_batch_in_epoch(self, batch=-1):\n \"\"\"\n batch number that would follow given batch\n wraps around to 1 if epoch ends\n \"\"\"\n return 1 if self.example_in_epoch[batch] >= self.examples_per_epoch else self.batch_in_epoch[batch] + 1\n \n @property\n def examples_per_batch(self):\n return max(self.examples_in_batch[-2:])\n\n def batches_remaining_in_epoch(self, batch=-1):\n return math.ceil((self.examples_per_epoch - self.example_in_epoch[batch]) / self.examples_per_batch)\n\n def max_batch(self):\n return len(self.loss) - 1\n \n # x_axis = 'time' and y_axis = 'loss' are also allowed\n def plot(self, figure=None, x_axis='example', y_axis='smoothed_loss', color=\"r\", show=True):\n x_axis = self.example[1:] if x_axis=='example' else self.elapsed_time[1:]\n y_axis = self.smoothed_loss[1:] if y_axis=='smoothed_loss' else self.loss[1:]\n \n if figure is None:\n plt.figure(figsize=(12,8))\n plt.plot(np.array(x_axis), np.array(y_axis), \"o-\", color=color, label=\"train\")\n if figure is None:\n plt.legend(loc=\"best\")\n if show:\n plt.show()\n \n def __getstate__(self):\n d = self.__dict__.copy()\n del d['wandb_log']\n return d\n \nclass TestingHistory(BaseHistory):\n\n def __init__(self, examples_per_epoch, testing_batches, relevant_elements=None, test_dynamic=False, device='cuda',\n number_to_symbol=None, store_residuals=False, wandb_log=None,\n file=None, load=True, writeable=None, name=None):\n self.examples_per_epoch = examples_per_epoch\n self.testing_batches = testing_batches\n self.test_dynamic = test_dynamic\n self.device = device\n self.number_to_symbol = number_to_symbol if number_to_symbol else n_to_s\n self.wandb_log = wandb_log\n self.name = name\n self.writeable=writeable\n\n if not load:\n self.relevant_elements = relevant_elements\n file.attrs['relevant_elements'] = relevant_elements\n self.store_residuals = store_residuals\n file.attrs['store_residuals'] = store_residuals\n\n # initialize the lists we will be accumulating\n self.batch_number = Array(file, 'batch_number', dtype=int)\n self.epoch = Array(file, 'epoch', dtype=int)\n self.batch_in_epoch = Array(file, 'batch_in_epoch', dtype=int)\n self.example_in_epoch = Array(file, 'example_in_epoch', dtype=int)\n self.example = Array(file, 'example', dtype=int)\n self.elapsed_time = Array(file, 'elapsed_time', dtype=float)\n self.loss = Array(file, 'loss', dtype=float)\n self.mean_error_by_element = Array(file, 'mean_error_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n if test_dynamic:\n self.dynamic_loss = Array(file, 'dynamic_loss', dtype=float)\n self.dynamic_mean_error_by_element = Array(file, 'dynamic_mean_error_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True)\n 
self.dynamic_RMSE_by_element = Array(file, 'dynamic_RMSE_by_element', (0,len(relevant_elements)), dtype=float, resizable_cross=True) \n else:\n self.relevant_elements = file.attrs['relevant_elements']\n self.store_residuals = file.attrs['store_residuals']\n\n self.batch_number = Array(file, 'batch_number')\n self.epoch = Array(file, 'epoch')\n self.batch_in_epoch = Array(file, 'batch_in_epoch')\n self.example_in_epoch = Array(file, 'example_in_epoch')\n self.example = Array(file, 'example')\n self.elapsed_time = Array(file, 'elapsed_time')\n self.loss = Array(file, 'loss')\n self.mean_error_by_element = Array(file, 'mean_error_by_element', resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', resizable_cross=True)\n if test_dynamic:\n self.dynamic_loss = Array(file, 'dynamic_loss', self.loss.shape, dtype=float)\n self.dynamic_mean_error_by_element = Array(file, 'dynamic_mean_error_by_element', self.mean_error_by_element.shape, dtype=float, resizable_cross=True)\n self.dynamic_RMSE_by_element = Array(file, 'dynamic_RMSE_by_element', self.RMSE_by_element.shape, dtype=float, resizable_cross=True)\n\n # legacy code, shouldn't be used in newer history files!\n if (self.mean_error_by_element.maxshape[1] is not None) or (self.RMSE_by_element.maxshape[1] is not None):\n mean_error_by_element = np.array(self.mean_error_by_element).copy()\n RMSE_by_element = np.array(self.RMSE_by_element).copy()\n del file['mean_error_by_element']\n del file['RMSE_by_element']\n self.mean_error_by_element = Array(file, 'mean_error_by_element', mean_error_by_element.shape, data=mean_error_by_element, dtype=float, resizable_cross=True)\n self.RMSE_by_element = Array(file, 'RMSE_by_element', RMSE_by_element.shape, data=RMSE_by_element, dtype=float, resizable_cross=True)\n #self.mean_error_by_element[:] = mean_error_by_element\n #self.RMSE_by_element[:] = RMSE_by_element\n\n # allows the resizing of by-element data arrays if relevant elements change\n if relevant_elements is not None and len(self.relevant_elements) != len(relevant_elements):\n print(\"relevant_elements has changed!\")\n if len(self.relevant_elements) < len(relevant_elements):\n self.mean_error_by_element.resize_cross(len(relevant_elements))\n self.RMSE_by_element.resize_cross(len(relevant_elements))\n if test_dynamic:\n self.dynamic_mean_error_by_element.resize_cross(len(relevant_elements))\n self.dynamic_RMSE_by_element.resize_cross(len(relevant_elements))\n self.relevant_elements = relevant_elements\n\n # if we have no testing batches, we won't be running tests, so no need to prep\n if not testing_batches:\n return\n\n # for each relevant element, gives you a list of atom indices in the testing set\n atom_indices = {e:[] for e in self.relevant_elements}\n atom_index = 0\n for batch in testing_batches:\n if test_dynamic: # batches come in pairs. 
inspect first of pair.\n batch = batch[0]\n atomic_numbers = Molecule.get_atomic_numbers(batch.x)\n for e in atomic_numbers:\n e = e.item()\n if e in self.relevant_elements:\n atom_indices[e].append(atom_index)\n atom_index += 1\n self.atom_indices = {e:np.array(ai) for e,ai in atom_indices.items()}\n\n # precompute weight per testing batch and total testing weight\n self.batch_weights = torch.tensor([torch.sum(batch.weights) for batch in testing_batches])\n self.total_weight = sum(self.batch_weights)\n\n \n def run_test(self, model, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time, verbose=True, log=True):\n if verbose: print(\"\")\n\n time0 = time.time()\n losses = []\n residual_chunks = []\n if self.test_dynamic:\n dynamic_losses = []\n dynamic_residual_chunks = []\n model.eval() # don't compute running means\n with torch.no_grad(): # don't compute gradients\n for i, batch in enumerate(self.testing_batches):\n if verbose and i % 10 == 0: print(f\"Testing batches... {i:3} / {len(self.testing_batches)} \", end=\"\\r\", flush=True)\n if self.test_dynamic:\n weights = batch[0].weights.to(self.device)\n residuals = [model(b.to(self.device).x, b.edge_index, b.edge_attr) - b.y for b in batch]\n \n loss = sum(loss_from_residuals(r, weights) for r in residuals) / len(residuals)\n dynamic_residuals = residuals[1] - residuals[0]\n dynamic_loss = loss_from_residuals(dynamic_residuals, weights)\n \n residuals = residuals[0]\n \n dynamic_losses.append(dynamic_loss.detach())\n dynamic_residual_chunks.append(dynamic_residuals.detach())\n else:\n batch.to(self.device)\n loss, residuals = loss_function(model(batch.x, batch.edge_index, batch.edge_attr), batch)\n #if use_tensor_constraint:\n # chunk = chunk[...,0] # Keep only scalar part of residuals\n losses.append(loss.detach())\n residual_chunks.append(residuals)\n\n if verbose: print(\"Collating batch results...\", end=\"\\r\", flush=True)\n # Using average loss over batches, rather than weighted average, to better mirror\n # running average of testing loss:\n #loss = (torch.dot(torch.tensor(losses), self.batch_weights) / self.total_weight).sqrt()\n loss = torch.tensor(losses).sqrt().mean()\n residuals = torch.cat(residual_chunks)\n residuals_by_element = {e:residuals[self.atom_indices[e]] for e in self.relevant_elements}\n\n # compute mean errors and RMSEs\n mean_error_by_element = torch.tensor([residuals_by_element[e].mean() for e in self.relevant_elements])\n RMSE_by_element = torch.tensor([residuals_by_element[e].square().mean().sqrt() for e in self.relevant_elements])\n\n if self.test_dynamic:\n dynamic_loss = torch.tensor(dynamic_losses).sqrt().mean()\n dynamic_residuals = torch.cat(dynamic_residual_chunks)\n dynamic_residuals_by_element = {e:dynamic_residuals[self.atom_indices[e]] for e in self.relevant_elements}\n\n # compute mean errors and RMSEs\n dynamic_mean_error_by_element = torch.tensor([dynamic_residuals_by_element[e].mean() for e in self.relevant_elements])\n dynamic_RMSE_by_element = torch.tensor([dynamic_residuals_by_element[e].square().mean().sqrt() for e in self.relevant_elements])\n else:\n dynamic_loss=None\n dynamic_mean_error_by_element=None\n dynamic_RMSE_by_element=None\n dynamic_residuals_by_element=None\n\n time1 = time.time()\n test_time = time1 - time0\n\n if verbose:\n if self.test_dynamic:\n print(f\" Test loss = {loss:6.3f} Dynamic loss = {dynamic_loss:6.3f} Test time = {test_time:.2f}\")\n print(f\" Element Mean Error RMSE Dyn.Mean Dyn.RMSE\")\n #print(f\"<4> Ee <7> 012.345 <5> 012.345\")\n 
for i, e in enumerate(self.relevant_elements):\n print(f\" {self.number_to_symbol[e].rjust(2)} {mean_error_by_element[i]:3.3f} {RMSE_by_element[i]:3.3f} {dynamic_mean_error_by_element[i]:3.3f} {dynamic_RMSE_by_element[i]:3.3f}\")\n else:\n print(f\" Test loss = {loss:6.3f} Test time = {test_time:.2f}\")\n print(f\" Element Mean Error RMSE\")\n #print(f\"<4> Ee <7> 012.345 <5> 012.345\")\n for i, e in enumerate(self.relevant_elements):\n print(f\" {self.number_to_symbol[e].rjust(2)} {mean_error_by_element[i]:3.3f} {RMSE_by_element[i]:3.3f}\")\n\n if log:\n if example is None:\n example = (epoch-1) * self.examples_per_epoch + example_in_epoch\n elif example_in_epoch is None:\n example_in_epoch = example % self.examples_per_epoch\n self.log_test(batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time,\n loss, mean_error_by_element, RMSE_by_element, residuals_by_element,\n dynamic_loss, dynamic_mean_error_by_element,\n dynamic_RMSE_by_element, dynamic_residuals_by_element) \n return loss\n\n def log_test(self, batch_number, epoch, batch_in_epoch, example_in_epoch, example, elapsed_time,\n loss, mean_error_by_element, RMSE_by_element, residuals_by_element=None,\n dynamic_loss=None, dynamic_mean_error_by_element=None,\n dynamic_RMSE_by_element=None, dynamic_residuals_by_element=None):\n self.batch_number.append(batch_number)\n self.epoch.append(epoch)\n self.batch_in_epoch.append(batch_in_epoch)\n self.example_in_epoch.append(example_in_epoch)\n self.example.append(example)\n self.elapsed_time.append(elapsed_time)\n self.loss.append(loss)\n self.mean_error_by_element.append(mean_error_by_element)\n self.RMSE_by_element.append(RMSE_by_element)\n if self.test_dynamic:\n self.dynamic_loss.append(dynamic_loss)\n self.dynamic_mean_error_by_element.append(dynamic_mean_error_by_element)\n self.dynamic_RMSE_by_element.append(dynamic_RMSE_by_element) \n \n if self.store_residuals:\n self.residuals_by_element = residuals_by_element\n if self.test_dynamic:\n self.dynamic_residuals_by_element = dynamic_residuals_by_element\n \n if self.wandb_log is not None:\n wandb_dict = {\n 'batch_number':batch_number,\n 'epoch':epoch,\n 'batch_in_epoch':batch_in_epoch,\n 'example_in_epoch':example_in_epoch,\n 'example':example,\n 'elapsed_time':elapsed_time,\n 'test_loss':loss,\n 'mean_error_by_element':{\n int(e):mean_error_by_element[i].item() for i, e in enumerate(self.relevant_elements)},\n 'RMSE_by_element':{\n int(e):RMSE_by_element[i].item() for i, e in enumerate(self.relevant_elements)}}\n if self.test_dynamic:\n wandb_dict = {**wandb_dict,\n 'dynamic_test_loss':dynamic_loss,\n 'dynamic_mean_error_by_element':{\n int(e):dynamic_mean_error_by_element[i].item() for i, e in enumerate(self.relevant_elements)},\n 'dynamic_RMSE_by_element':{\n int(e):dynamic_RMSE_by_element[i].item() for i, e in enumerate(self.relevant_elements)}, \n }\n self.wandb_log(wandb_dict)\n \n def smoothed_loss(self, i = -1, window=5):\n if i < 0:\n j = len(self.loss) - i + 1\n assert j > 0, \"index out of range\"\n else:\n j = i + 1\n assert j <= len(self.loss), \"index out of range\"\n window = min(window, j)\n return sum(self.loss[j-window:j]) / window\n \n def coord(self, coord):\n if coord == 'time':\n return self.elapsed_time\n elif coord == 'example':\n return self.example\n else:\n raise ValueError(\"Argument 'coord' should be 'time' or 'example'\")\n \n # WARNING: don't share between processes if you use this \n x_coord = 'example'\n \n @property\n def x(self):\n return getattr(self, TestingHistory.x_coord) \n \n 
def get_spectral(self, smooth=.1, coord='example', log_loss=True, decay=0):\n return Spectral(self, log_loss=True, smooth=smooth, coord=coord, decay=decay)\n \n def __getstate__(self):\n d = self.__dict__.copy()\n del d['testing_batches']\n del d['wandb_log']\n return d\n\nclass Spectral:\n \n def __init__(self, h, log_loss=True, smooth=.1, coord='example', decay=0):\n self.h = h\n self.log_loss = log_loss\n self.smooth = smooth\n self.coord = coord\n self.decay = decay\n self.compute()\n \n def compute(self):\n set_x(self.coord)\n x0 = self.h.x[0]\n x1 = self.h.x[1]\n chunk_size = round(len(self.h.x) * self.smooth)\n chunk_num = len(self.h.x) + 1 - chunk_size\n loss = self.h.loss if not self.log_loss else np.log(self.h.loss)\n cumsum = np.cumsum(loss)\n #print(f\"cumsum = {cumsum}\")\n #print(f\"chunk_size = {chunk_size}\")\n #print(f\"cumsum[chunk_size:] = {cumsum[chunk_size:]}\")\n #print(f\"cumsum[:chunk_size] = {cumsum[:chunk_size]}\")\n self.chunk_val = (cumsum[chunk_size:] - cumsum[:-chunk_size]) / chunk_num\n #self.chunk_edges = np.arange(chunk_size-1, chunk_size)\n #assert len(self.chunk_edges) == len(self.chunks)\n #self.x = self.h.x[self.chunk_edges]\n self.spectrum = np.fft.fft(self.chunk_val)\n \n \n def recompute(self):\n self.compute()\n \n \n\n\n \n# Static methods for comparing runs of different lengths,\n# computing uncertainty in long-runs predicions from short data\n# and other things?\ndef set_x(coord):\n TestingHistory.x_coord = 'example' if coord=='example' else 'elapsed_time'\n\ndef sort_by_last_coord(histories, coord=None):\n set_x(coord)\n indices, h_sorted = zip(*sorted(enumerate(histories), key = lambda th : th[1].x[-1], reverse=True))\n return np.array(indices), h_sorted\n\n# Handy unsorting function \n\ndef unsort(indices, data):\n indices = np.array(indices)\n if isinstance(data, np.ndarray):\n fixed = np.empty_like(data)\n np.put(fixed, indices, data)\n elif isinstance(data, tuple) and len(data) != len(indices):\n fixed = tuple(unsort(indices, d) for d in data)\n elif isinstance(data, Sequence) and len(data) == len(indices):\n fixed = [None] * len(indices)\n for t, s in enumerate(indices):\n fixed[t] = data[s]\n else:\n fixed = data\n return fixed\n \nclass SList(list):\n pass\n \nclass seg_pass:\n def __init__(self, seg):\n self.seg = seg\nclass start_seg:\n def __init__(self, seg):\n self.seg = seg\n \n \n \ndef needs_sorted_histories(f, assume=False, h_arg=0):\n \"\"\"\n Decorator for functions that need histories sorted from longest to shortest\n \"\"\"\n def new_f(*args, sorted=assume, sort_indices=None, **kwargs):\n if sorted or isinstance(args[h_arg], SList):\n return f(*args, **kwargs)\n else:\n if sort_indices is None:\n sort_indices = np.empty(len(args[h_arg]), dtype=int)\n args=list(args)\n sort_indices[:], args[h_arg] = sort_by_last_coord(args[h_arg], kwargs['coord'])\n args[h_arg] = SList(args[h_arg])\n value = f(*args, **kwargs)\n return unsort(sort_indices, value)\n #new_f.__name__ = f.__name__\n return new_f\n\n \ndef decay_weight(a0, a1, R, end):\n \"\"\"\n Tells you the integrated weight of [a0,a1], given that\n infinitestimal weights are w(x) = exp(R * (end - x))\n\n a0 < a1 are the endpoints of the segment.\n end is the coord at which the decay is 1.0.\n R is the rate of (leftwards) decay.\n \"\"\"\n return (np.exp(R * (end - a0)) - np.exp(R * (end - a1))) / R\n \ndef predictions_with_errors(histories, coord, errors=True, e_lives=4, verbose=0):\n \"\"\"\n Doing pairwise comparisons.\n Weighted by shared length, and distance from lead 
time\n \"\"\" \n set_x(coord)\n histories = list(histories)\n N = len(histories)\n\n log_loss = [np.log(h.loss) for h in histories]\n\n pred = np.zeros((N,), dtype=float)\n rel_avg = np.zeros((N,N), dtype=float)\n if errors:\n err = np.zeros((N,), dtype=float)\n rel_var = np.zeros((N,N), dtype=float)\n rel_weight = np.zeros((N,N), dtype=float)\n \n for k, h in enumerate(histories):\n end = h.x[-1]\n d = end - h.x[0]\n #R = e_lives / d\n \n for j, g in enumerate(histories):\n \n if j == k:\n continue\n #if len(log_loss[k]) < 4 or not errors:\n # continue\n #\n #diff = log_loss[k][:-2] - log_loss[k][2:]\n #weights = np.exp(-R * h.x[1:-1])\n #avg_diff = np.average(diff, weights=weights)\n #rel_var[k,k] = np.average((diff - avg_diff)**2, weights=weights)\n #rel_weight[k,k] = decay_weight(h.x[1], h.x[-2], R, end)\n \n #else:\n b0 = max(h.x[0], g.x[0])\n b1 = min(h.x[-1], g.x[-1])\n if b1 <= b0:\n continue\n \n X, Y = interleave((h.x, g.x), (log_loss[k], log_loss[j]), (b0, b1))\n \n weights = np.exp(-e_lives * X / b1)\n diff = Y[0] - Y[1]\n rel_avg[k,j] = np.average(diff, weights=weights)\n \n if rel_avg[k,j] < -1000:\n rel_avg[k,j] = 0\n continue\n \n if errors:\n rel_var[k,j] = np.average((diff-rel_avg[k,j])**2, weights=weights)\n rel_weight[k,j] = decay_weight(b0, b1, e_lives / d, end)\n \n if rel_weight[k,j] == 0:\n print(f\" rel_weight[{k},{j}] == 0\")\n \n weight = np.sum(rel_weight[k])\n if weight == 0:\n print(f\" weight[{k}] == 0\")\n pred[k] = np.nan\n if errors:\n err[k] = np.inf\n else:\n pred[k] = np.average(rel_avg[k]) #, weights=rel_weight[k])\n if pred[k] < -1000:\n print(f\" pred[{k}] == {pred[k]}\")\n if errors:\n err[k] = np.sqrt(np.average(rel_var[k])) #, weights=rel_weight[k]))\n if errors:\n return np.stack((pred, err), axis=1)\n else:\n return pred\n\ndef log_inferred_loss(histories, x, coord='example', window=.1):\n \"\"\"\n calculates a score for each history, at coordinate x,\n predictive of long-term relative performance.\n \"\"\"\n raise NotImplementedError(\"not implemented yet!\")\n \n \ndef interleave(X, Y=None, bounds=None):\n \"\"\"\n Each X[i] is an increasing series.\n Interleave them into one series, truncate to bounds=[b0,b1].\n This is W.\n \n If Y is not None, it should have a value for each X\n Z is a collection of interpolations of each Y to the values in W.\n \n Return W, Z (or just W if Y is None)\n \"\"\"\n if bounds=='inner':\n b0 = max(x[0] for x in X)\n b1 = min(x[-1] for x in X)\n elif bounds=='outer':\n # same as if no bounds specified\n bounds = None \n elif bounds:\n b0, b1 = bounds\n \n W = np.array(X[0])\n for x in X[1:]:\n b_indices = np.searchsorted(W, x)\n np.insert(W, b_indices, x)\n if bounds:\n i0 = np.searchsorted(W, b0)\n i1 = np.searchsorted(W, b1)\n W = np.insert(W, (i0,i1), (b0,b1))[i0:i1+2] \n W = np.unique(W)\n \n if Y:\n return W, [np.interp(W, x, y) for (x,y) in zip(X,Y)]\n else:\n return W \n\n\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n r\"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techniques.\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. 
Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less then `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or it's n-th derivative).\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over a odd-sized window centered at\n the point.\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n References\n ----------\n .. [1] <NAME>, <NAME>, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n <NAME>, <NAME>, <NAME>, <NAME>\n Cambridge University Press ISBN-13: 9780521880688\n \"\"\"\n from math import factorial\n \n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError as msg:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n # precompute coefficients\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m[::-1], y, mode='valid')\n \n \nTH = TestingHistory \n \n \n \n\n#def exp_fn(x, a, speed, limit):\n# return a * np.exp(speed * x) + limit\ndef exp_fn(x, a, speed, limit):\n return np.exp(a * np.exp(speed * x) + limit)\n#def exp_fn(x, a, speed, limit, k, w):\n# return a * np.exp(speed * x) + limit + k / (x + w)\n#def exp_fn(x, k, w, limit):\n# return limit + k / (x + w)\n \n\n# if function is called\nif __name__ == '__main__':\n import sys\n if len(sys.argv) < 2:\n exit()\n input(\"Exploring the evolution of exp fit over time...\")\n history = TrainTestHistory(file=sys.argv[1])\n history.test.show_fit_evolution()\n history.close()\n input(\"Press Enter to continue...\")\n\n\n\n", "id": "8563422", "language": "Python", "matching_score": 4.263708591461182, "max_stars_count": 0, "path": "train/history.py" }, { "content": "from training_config import Config, secs_to_str, str_to_secs, update#, immut\nfrom configparser import ConfigParser\nimport os\nimport shutil\nimport sys\nimport subprocess\nimport math\nimport random\nfrom history import TrainTestHistory, predictions_with_errors\nfrom model_utils import newest_checkpoint\nimport bayes_search as bayes\nimport exploration_policy\nimport torch\nfrom exploration_policy import 
random_parameters_and_seed, generate_parameters, next_training_limit\nfrom sample_hyperparameters import TrainableSampleGenerator\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import sleep\nfrom files import ini_file, save_dir\nfrom itertools import cycle\nfrom plot import plot_runs, plot_extrapolation\nimport exit_codes\nfrom filelocks import HistoriesLock, SamplesLock, FileLock, ExistLock, Timeout\n\nclass HistoryLocked(Exception):\n pass\n\nclass TrainingRun:\n def __init__(self, parent_dir=\"runs\", identifier=None, run_dir=None, config_file=None, settings={},\n create_file=True, create_identifier=True, parent_configs=['exploration.ini'], save_prefix=None):\n settings = settings.copy() # To avoid accumulating same settings from one run to another!\n\n if identifier is None:\n if create_identifier:\n identifier = \"run-\" + hex(random.randrange(16**8))[-8:]\n if os.path.isdir(os.path.join(parent_dir, identifier)):\n raise ValueError(f\"Hash collision with existing training run {identifier}\")\n else:\n raise ValueError(\"No training run identifier provided!\")\n self.identifier = identifier\n if run_dir is None:\n run_dir = os.path.join(parent_dir, identifier)\n self.run_dir = run_dir\n os.makedirs(self.run_dir, exist_ok=True)\n \n if config_file is None:\n config_file = ini_file(run_dir)\n if config_file is None:\n config_file = os.path.join(run_dir, \"training.ini\")\n self.config_file = config_file\n \n if os.path.isfile(config_file):\n parser = ConfigParser(allow_no_value=True)\n parser.read([config_file])\n parser.read_dict(settings)\n if \"training\" in parser:\n #if \"run_name\" in parser[\"training\"]:\n # identifier = parser[\"training\"][\"run_name\"]\n if \"save_prefix\" in parser[\"training\"]:\n save_prefix = parser[\"training\"][\"save_prefix\"]\n del parser\n self.parent_configs = parent_configs\n #self.settings = {k:str(v) for k,v in settings.items()} # may need to allow alternate string conversions\n\n if save_prefix is None:\n save_prefix = os.path.join(self.run_dir, identifier[:7])\n self.save_prefix = save_prefix\n\n settings = update(settings,\n {'training':\n {'save_prefix':self.save_prefix, 'run_name':self.identifier, 'resume':True},\n 'exploration':\n {'creation_time':os.path.getctime(self.run_dir)}})\n\n self.set_config(settings)\n self.__history = None\n \n @property\n def history(self):\n \"\"\"\n Returns a TrainTestHistory from the history file, in writeable mode\n \"\"\"\n if self.__history is None:\n if os.path.isfile(self.save_prefix + \"-history.torch\"):\n try:\n self.__history = TrainTestHistory(save_prefix = self.save_prefix, load=True, use_backup=False, writeable=False, block=False)\n except Timeout:\n raise HistoryLocked(self.save_prefix + \"-history.torch not available.\")\n #except:\n # raise\n if self.__history is None:\n \n self.__history = TrainTestHistory(\n examples_per_epoch=self.config.data.train_size,\n relevant_elements=self.config.relevant_elements, run_name=self.identifier,\n save_prefix=self.config.training.save_prefix, load=False, block=False)\n return self.__history\n \n @property \n def history_or_backup(self):\n \"\"\"\n Returns a TrainTestHistory from the history file or its backup,\n in read-only mode.\n \"\"\"\n if self.__history is not None:\n return self.__history\n try:\n h = self.history\n except HistoryLocked:\n # try loading from backup\n filename = self.config.training.save_prefix + \"-history.torch.bak\"\n if os.path.isfile(filename):\n shutil.copyfile(filename, filename + \".bak\")\n self.__history = 
TrainTestHistory(file=filename + \".bak\", load=True, writeable=False, hard_lock=False, use_backup=False, block=False)\n else:\n raise HistoryLocked(\"Neither \" + self.save_prefix + \"-history.torch nor its backup are available.\")\n return self.__history\n \n def close_history(self, save=False):\n if self.__history is not None:\n self.__history.close(save=False)\n if self.__history.filename.endswith(\".bak\"):\n os.remove(self.__history.filename)\n self.__history = None\n \n #@property\n #def is_available(self):\n # try:\n # file = TrainTestHistory.get_lock(self.save_prefix + \"-history.torch\", block=False)\n # file.close()\n # return True\n # except:\n # return False\n \n @property\n def readable(self):\n if self.__history is not None:\n return True\n if self.runnable:\n return True\n try:\n file = TrainTestHistory.get_lock(self.save_prefix + \"-history.torch\", writeable=False, hard_lock=False, block=False)\n file.close()\n if os.path.isfile(self.save_prefix + \"-history.torch.bak\"): return True\n else: return False\n except:\n return False\n \n @property\n def runnable(self):\n if self.__history is not None and not self.__history.filename.endswith('.bak'):\n return True\n try:\n file = TrainTestHistory.get_lock(self.save_prefix + \"-history.torch\", writeable=True, hard_lock=True, block=False)\n file.close()\n return True\n except:\n return False\n\n @property\n def config(self):\n if self.__config is None:\n self.__config = Config(*self.parent_configs, self.config_file,\n track_sources=True, use_command_args=False)\n return self.__config\n \n def set_config(self, settings):\n parser = ConfigParser(allow_no_value=True)\n parser.read([self.config_file])\n parser.read_dict(settings)\n with open(self.config_file, 'w') as f:\n parser.write(f)\n self.__config = None\n \n @property\n def epochs(self):\n return self.history.train.current_epoch() - 1\n \n @property\n def time(self):\n return self.history.train.elapsed_time[-1]\n \n @property\n def examples(self):\n return self.history.train.example[-1]\n \n def delete(self):\n self.close_history()\n if os.path.isdir(self.run_dir):\n shutil.rmtree(self.run_dir)\n \n def execute_run(self, configs=[], stub=False, end_process=False):\n self.close_history()\n print(\"\\n==============================================\")\n print(f\"Run {self.identifier}:\")\n print(f\" run dir: {self.run_dir}\")\n print(f\" save_prefix: {self.save_prefix}\")\n print(f\" config file: {self.config_file}\")\n print(\"----------------------------------------------\\n\")\n if stub:\n self.execute_stub()\n return exit_codes.SUCCESS\n configs = self.parent_configs + [self.config_file] + configs\n self.last_execution = subprocess.run([\"python\", \"training.py\", *configs],\n input=b\"y\\ny\\ny\\ny\\n\")\n return self.last_execution\n \n def generate_model(self, save=True):\n from variable_networks import VariableParityNetwork\n model = VariableParityNetwork(**self.config.model.kwargs)\n #torch.save(model, f\"{self.save_prefix}-initial-checkpoint.torch\")\n torch.save(model, os.path.join(self.run_dir, \"model.pt\"))\n \n \n def execute_stub(self):\n # generates histories based on a simulated loss curve\n # logs simulated batches of 1000 examples\n # batch time is l^2 * muls summed over layers\n # loss is an exponential with decay speed l^2 * muls for max layer\n # and asymptote l * muls summed over layers\n \n ls = [list(range(lmax + 1)) for lmax in self.config.model.lmaxes]\n muls = self.config.model.muls\n lmuls = [list(zip(li, mi)) for li,mi in list(zip(ls,muls))]\n 
print(lmuls)\n \n batch_time = sum(sum((l+1)**2 * m for l,m in layer) for layer in lmuls) / 10\n asymptote = 10 / sum(sum((l+1) * m for l,m in layer) for layer in lmuls)\n decay = max(sum((l+1)**2 * m for l,m in layer) for layer in lmuls) / 1000000\n print(f\"Batch time = {batch_time:.2f}, asymptote = {asymptote:.3f}, decay = {decay}\")\n \n if self.history is None:\n self.__history = TrainTestHistory(\n examples_per_epoch=self.config.data.train_size,\n relevant_elements=self.config.relevant_elements,\n save_prefix = self.save_prefix, run_name=self.identifier,\n load=False, use_backup=False)\n \n epoch = self.epochs\n time = self.time\n example = self.examples\n while True:\n example += 100\n time += batch_time\n epoch = example // self.config.data.train_size + 1\n \n test_loss = math.exp(-example * decay) * (.9 + .2 * random.random()) + asymptote\n train_loss = test_loss * (.9 + .2 * random.random())\n RMSE = np.array([1.5 * test_loss, .5 * test_loss])\n mean_error = RMSE * (np.random.random(2) - .5)\n \n self.history.train.log_batch(\n batch_time, 0, 1000, 18000,\n time, example % self.config.data.train_size, example,\n train_loss, epoch=epoch, verbose=False \n )\n \n self.history.test.log_test(\n example // 1000, epoch,\n (example % self.config.data.train_size) // 1000,\n example % self.config.data.train_size,\n example, time, test_loss,\n mean_error, RMSE \n )\n \n if example >= self.config.training.example_limit:\n print(f\"Finished training after {example} examples\")\n break\n if time >= self.config.training.time_limit:\n print(f\"Finished training after {secs_to_str(time)}\")\n break\n if epoch > self.config.training.epoch_limit:\n print(f\"Finished training after {epoch-1} epochs\")\n \n #self.history.save() \n #print(\"Displaying graph...\")\n #self.history.test.plot()\n self.close_history()\n \n \n \n #def __del__(self):\n # self.close_history()\n\n \nclass EnsembleOfRuns:\n\n def __init__(self, configs=[], include_basic_config=True, start_exploring=False, stub=False, backup_samples=True, verbose=False, plot=None):\n if include_basic_config and 'exploration.ini' not in configs:\n configs.insert(0, 'exploration.ini')\n self.config_files = configs\n self.__config = None\n self.parent_dir = self.config.exploration.run_dir\n \n self.histories_lock = HistoriesLock(self.parent_dir, ensemble=self)\n self.samples_lock = SamplesLock(self.parent_dir, ensemble=self)\n self._test_samples = None\n \n self.runs = {}\n with self.histories_lock:\n self.refresh_runs(delete_empty=True)\n #self.active_runs = {name:run for name,run in self.runs.items() if not run.config.exploration.inactive}\n \n self.stub = stub or self.config.exploration.stub\n self.seed_length = len(random_parameters_and_seed()[1])\n if plot is None:\n plot = self.config.exploration.plot\n \n self.plot = plot\n if self.config.exploration.group_name is None:\n self.set_config({'exploration':{'group_name':configs[-1]+\"_\"+hex(random.randrange(8**4))}})\n if start_exploring:\n self.fill_test_samples()\n self.exploration_loop(verbose=verbose)\n \n @property\n def config(self):\n if self.__config is None:\n self.__config = Config(*self.config_files, track_sources=True, use_command_args=False)\n return self.__config\n \n def activate(self, run, active=True):\n if active:\n run.set_config({'exploration':{'inactive':False}})\n self.active_runs[run.identifier] = run\n else:\n self.inactivate(run)\n \n def inactivate(self, run):\n run.set_config({'exploration':{'inactive':True}})\n self.active_runs.pop(run.identifier, None)\n \n def 
__delete_run(self, run):\n if self.config.exploration.check_delete and input(f\"Deleting run {run.identifier}... Are you sure? (y/n) \").strip().lower() != 'y':\n return False\n run.delete()\n #self.active_runs.pop(run.identifier, None)\n self.runs.pop(run.identifier, None)\n self.available_runs.pop(run.identifier, None)\n return True\n \n def generate_run(self, losses=None):\n seeds = self.seeds\n if losses is None:\n losses = self.mixed_log_losses(errors=False)\n names = list(losses.keys())\n if len(names) > 0:\n sample_X = [seeds[n] for n in names]\n sample_y = [losses[n] for n in names]\n else:\n sample_X = np.zeros((0,self.seed_length))\n sample_y = np.zeros(0)\n \n current_X = [seeds[name] for name in self.unreadable_runs if name not in names]\n if len(current_X) > 0:\n current_X = np.array(current_X)\n else:\n current_X = None\n \n with self.samples_lock as samples:\n test_X = np.copy(samples.passes)\n\n seed = bayes.next_sample(\n sample_X = sample_X,\n sample_y = sample_y,\n test_X = test_X,\n current_X = current_X\n )[0]\n #print(f\"seed = {seed}\")\n \n samples.set_used(seed=seed)\n \n exploration_policy.mul_coeff = self.config.exploration.mul_coeff\n settings = generate_parameters(seed)\n settings = update(settings, {'exploration':{'seed':list(seed)}}) \n run = TrainingRun(parent_dir=self.parent_dir, settings=settings, parent_configs=self.config_files)\n self.runs[run.identifier] = run\n self.runnable_runs[run.identifier] = run\n #if active: self.active_runs[run.identifier] = run\n return run \n \n @property\n def seeds(self):\n return {name:run.config.exploration.seed for name,run in self.runs.items()\n if run.config.exploration.seed is not None}\n \n @property\n def test_samples(self):\n if self._test_samples is None:\n raise ValueError(\"Process-safe samples access requires a lock.\")\n self._test_samples = TrainableSampleGenerator(self.config.exploration.sample_file, configs=self.config_files, stub=self.stub)\n return self._test_samples\n \n def fill_test_samples(self):\n print(\"checking test samples...\")\n with self.samples_lock as samples:\n if samples.num_passes < .9 * self.config.exploration.random_samples:\n print(\"Filling test samples...\")\n samples.sample(num_passes=self.config.exploration.random_samples, verbose=True, verbose_test=3)\n \n def refresh_runs(self, delete_empty=True):\n with self.histories_lock:\n #print(\"refresh_runs...\")\n fresh_names = [d.name for d in os.scandir(self.parent_dir) if d.is_dir() and ini_file(d.path) is not None]\n #print(\"dir scan.\")\n for name in list(self.runs.keys()):\n self.runs[name].close_history()\n if name in fresh_names:\n fresh_names.remove(name)\n else:\n del self.runs[name]\n #print(\"building run objects... 
\", end='')\n for name in fresh_names:\n self.runs[name] = TrainingRun(parent_dir=self.parent_dir, identifier=name, parent_configs=self.config_files)\n #print(\"done.\")\n self.runnable_runs = {}\n self.unreadable_runs = {}\n for name, run in list(self.runs.items()):\n #print(f\"-- {name} --\")\n #print(\"Checking if readable...\")\n if run.readable:\n #print(f\"-- {name} is readable.\")\n #print(f\"Checking if runnable...\")\n if delete_empty and run.examples == 0: # and self.delete_run(run):\n del self.runs[name]\n pass\n elif run.runnable:\n #print(f\"-- {name} is runnable.\")\n self.runnable_runs[name] = run\n else:\n self.unreadable_runs[name] = run\n \n @property\n def failures(self):\n return {name:bool(run.config.exploration.failed) for name,run in self.runs.items()}\n\n def last_x(self, coord='time'):\n if len(self.runs) == 0:\n return None\n return max(max_value(run.history.test.coord(coord)) for run in self.runs.values())\n \n def last_x_pair(self, coord='time'):\n return sorted(max_value(run.history.test.coord(coord)) for run in self.runs.values())[-2]\n \n def close_histories(self):\n for run in self.runs.values():\n run.close_history()\n \n def clear_memory(self):\n for run in self.runs.values():\n run.close_history()\n run.__config = None\n \n def train_run(self, run, examples=None, time=None, tries=2):\n \"\"\"\n Train a run up to 'examples' or 'time' limit.\n Not process-safe.\n \"\"\"\n if examples is None: examples = self.config.exploration.max_example\n if time is None: time = self.config.exploration.max_time\n run.set_config({'training':{\n 'example_limit':examples,\n 'time_limit':secs_to_str(time)\n }})\n \n if run.examples >= examples or run.time >= time:\n return True\n if self.stub:\n run.execute_stub()\n return True\n \n self.clear_memory() # also closes histories\n\n for i in range(tries):\n run.close_history()\n exit_code = run.execute_run()\n if exit_code == exit_codes.ABORT:\n raise AbortException()\n if run.examples >= examples or run.time >= time:\n run.close_history()\n break\n else:\n #self.delete_run(run)\n return False\n \n if self.plot:\n self.plot_runs(run, file_lock=self.histories_lock)\n \n return True\n \n def set_config(self, settings):\n parser = ConfigParser()\n parser.read([self.config_files[-1]])\n parser.read_dict(settings)\n with open(self.config_files[-1], 'w') as f:\n parser.write(f)\n self.__config = None\n \n new_limit = next_training_limit\n \n def training_increment(self, verbose=True):\n \"\"\"\n Final attempt at rewriting the 'training increment'.\n We'll see how it goes, and I'll add documentation.\n \"\"\"\n \n print(\"Training...\")\n \n # For the moment, we're working in uniform computation\n # chunks before moving to another run\n \n from scipy.special import softmax\n import scipy.stats\n\n self.refresh_runs()\n n_runs = int(round(math.sqrt(len(self.runs))))\n if not hasattr(self, 'new_run_position'):\n self.new_run_position = random.randint(0, n_runs)\n \n # do as many updates as there are runs,\n # and generate one new run at a random position\n for i in range(n_runs + 1):\n with self.histories_lock:\n self.refresh_runs()\n losses_and_errors = self.mixed_log_losses(errors=True)\n names = list(set(losses_and_errors.keys()) & set(self.runnable_runs.keys()))\n \n if i == self.new_run_position or len(names) == 0:\n # start a fresh run\n with self.samples_lock:\n self.fill_test_samples()\n run = self.generate_run()\n\n elif len(names) == 1:\n run = self.runs[names[0]]\n \n else:\n # choose an old run to continue\n losses_and_errors 
= np.array([losses_and_errors[n] for n in names])\n print(\"losses_and_errors:\")\n print(losses_and_errors)\n losses = losses_and_errors[:,0]\n errors = losses_and_errors[:,1]\n e_lengths = np.array([self.runs[n].examples for n in names])\n \n # special case: the best run should always\n # catch up to the others\n i_min = np.argmin(losses)\n #if e_lengths[i_min] < e_lengths.max():\n # run = self.runs[names[i_min]]\n \n #else:\n # # move best run to start of list\n # names = names[i_min:] + names[:i_min]\n # e_lengths = np.delete(e_lengths, i_min)\n # np.roll(losses, -i_min)\n # np.roll(errors, -i_min)\n # np.roll(e_lengths, -i_min)\n \n # compute the expected improvement of each run\n joint_errors = (errors**2 + errors[0]**2) / 2\n diff = losses - losses[0]\n norm_diff = diff / joint_errors\n expected_improvement = diff * scipy.stats.norm.cdf(-norm_diff) + joint_errors * scipy.stats.norm.pdf(norm_diff)\n \n norm_ei = expected_improvement / np.std(expected_improvement)\n log_lengths = np.log(e_lengths)\n norm_lengths = log_lengths / np.std(log_lengths)\n \n c = np.argmax(norm_ei - norm_lengths)\n run = self.runs[names[c]]\n \n # short-term file-lock for this run\n ExistLock(f\"{run.save_prefix}-history.torch.lock\").acquire()\n\n try:\n self.train_run(run, examples = run.examples + self.config.exploration.example_increment,\n time = run.time + self.config.exploration.time_increment)\n except:\n pass\n \n\n def exploration_loop(self, max_step=100000, verbose=False):\n step = self.config.exploration.step\n if verbose:\n self.axes = None\n while step in range(max_step+1):\n try:\n self.training_increment(verbose=verbose)\n except AbortException:\n os._exit(exit_codes.ABORT)\n \n def train_to(self, runs=None, names=None, examples=0, time=np.Inf):\n with self.histories_lock:\n self.refresh_runs()\n if runs is None:\n runs = [self.runs[n] for n in names]\n \n while True:\n with self.histories_lock:\n for run in runs:\n if run.runnable and run.examples < examples and run.time < time:\n break\n else:\n break\n ExistLock(f\"{run.save_prefix}-history.torch.lock\").acquire()\n self.train_run(run, examples=examples, time=time)\n \n def plot_runs(self, run=None, **kwargs):\n return plot_runs(self, run=run, **kwargs)\n \n def plot_extrapolation(self, run, x, coord='example', **kwargs):\n pass\n #return plot_extrapolation(self, run=run, x=x, coord=coord, **kwargs)\n \n def extrapolated_log_losses(self, coord='time', errors=True, min_value=1, filter_infinite_values=True, use_backup=True, **kwargs):\n names = []\n histories = []\n losses = {}\n \n with self.histories_lock:\n for name, run in self.runs.items():\n if not run.readable:\n continue\n h = run.history_or_backup\n if len(h.test.coord(coord)) < 4 or h.test.coord(coord)[-1] < min_value:\n continue\n names.append(name)\n histories.append(h.test)\n log_pred = predictions_with_errors(histories, coord=coord, errors=errors, **kwargs)\n losses = {names[i] : log_pred[i] for i in range(len(names)) if not (filter_infinite_values and np.isinf(log_pred[i][0]))}\n return losses\n\n def mixed_log_losses(self, t_coeff=0.25, errors=True, use_backup=True, min_examples=1, min_time=1, **kwargs):\n t_losses = self.extrapolated_log_losses(coord='time', errors=errors, use_backup=use_backup, min_value=min_time, **kwargs)\n e_losses = self.extrapolated_log_losses(coord='example', errors=errors, use_backup=use_backup, min_value=min_examples, **kwargs)\n with self.histories_lock:\n if errors:\n return {name: (t_coeff * t_losses[name][0] + (1-t_coeff) * e_losses[name][0],\n 
np.sqrt((t_coeff * t_losses[name][1])**2 + ((1-t_coeff) * e_losses[name][1])**2)) for name in t_losses}\n else:\n return {name:t_coeff * t_losses[name] + (1-t_coeff) * e_losses[name] for name in t_losses} \n\n\nclass AbortException(RuntimeError):\n def __str__(self):\n return \"AbortException\"\n \n\n \n\ndef max_value(seq, min=0):\n if len(seq):\n return seq[-1]\n else:\n return min\n\ndef merge_dicts(*dicts):\n return {key:tuple(d[key] for d in dicts) for key in set(dicts[0]).intersection(*dicts[1:])}\n \n \n\n\nif __name__ == '__main__':\n configs = [x for x in sys.argv[1:] if not x.startswith('-')]\n if len(configs) > 0:\n plot = True if '-p' in sys.argv or '--plot' in sys.argv else None\n ensemble = EnsembleOfRuns(configs=configs, start_exploring=True, verbose=True, plot=plot) \n else:\n settings, seed = random_parameters_and_seed()\n print(\"Simulating random run:\")\n print(settings)\n settings = update(settings, {'exploration':{'seed':list(seed)}}) \n run = TrainingRun(settings=settings)\n run.execute_stub()\n\n\n \n\n \n\n", "id": "11832300", "language": "Python", "matching_score": 9.38719367980957, "max_stars_count": 0, "path": "train/exploration.py" }, { "content": "from training_config import Config, secs_to_str, update#, immut\nfrom configparser import ConfigParser\nimport os\nimport shutil\nimport sys\nimport subprocess\nimport math\nimport random\nfrom history import TrainTestHistory\nfrom model_utils import newest_checkpoint\nimport bayes_search as bayes\nimport exploration_policy\nimport torch\nfrom exploration_policy import random_parameters_and_seed, generate_parameters, proceed_with_training, next_training_limit, order_runs as order_runs_policy\nfrom sample_hyperparameters import TrainableSampleGenerator\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import sleep\nfrom files import ini_file, save_dir\nfrom itertools import cycle\nfrom plot import plot_runs, plot_extrapolation\nimport exit_codes\n\nclass TrainingRun:\n def __init__(self, parent_dir=\"runs\", identifier=None, run_dir=None, config_file=None, settings={},\n create_file=True, create_identifier=True, parent_configs=['exploration.ini'], save_prefix=None):\n settings = settings.copy() # To avoid accumulating same settings from one run to another!\n\n if identifier is None:\n if create_identifier:\n identifier = \"run-\" + hex(random.randrange(16**8))[-8:]\n if os.path.isdir(os.path.join(parent_dir, identifier)):\n raise Exception(f\"Hash collision with existing training run {identifier}\")\n else:\n raise Exception(\"No training run identifier provided!\")\n self.identifier = identifier\n if run_dir is None:\n run_dir = os.path.join(parent_dir, identifier)\n self.run_dir = run_dir\n os.makedirs(self.run_dir, exist_ok=True)\n \n if config_file is None:\n config_file = ini_file(run_dir)\n if config_file is None:\n config_file = os.path.join(run_dir, \"training.ini\")\n self.config_file = config_file\n \n if os.path.isfile(config_file):\n parser = ConfigParser(allow_no_value=True)\n parser.read([config_file])\n parser.read_dict(settings)\n if \"training\" in parser:\n #if \"run_name\" in parser[\"training\"]:\n # identifier = parser[\"training\"][\"run_name\"]\n if \"save_prefix\" in parser[\"training\"]:\n save_prefix = parser[\"training\"][\"save_prefix\"]\n del parser\n self.parent_configs = parent_configs\n #self.settings = {k:str(v) for k,v in settings.items()} # may need to allow alternate string conversions\n\n if save_prefix is None:\n save_prefix = os.path.join(self.run_dir, 
identifier[:7])\n self.save_prefix = save_prefix\n\n settings = update(settings,\n {'training':\n {'save_prefix':self.save_prefix, 'run_name':self.identifier, 'resume':True},\n 'exploration':\n {'creation_time':os.path.getctime(save_dir(save_prefix))}})\n\n self.set_config(settings)\n self.__history = None\n \n @property\n def history(self):\n if self.__history is None:\n if os.path.isfile(self.save_prefix + \"-history.torch\"):\n try:\n self.__history = TrainTestHistory(save_prefix = self.save_prefix, load=True, use_backup=False)\n except:\n pass\n if self.__history is None:\n self.__history = TrainTestHistory(\n device=self.config.device, examples_per_epoch=self.config.data.train_size,\n relevant_elements=self.config.relevant_elements, run_name=self.identifier,\n save_prefix=self.config.training.save_prefix, hdf5=True, load=False)\n return self.__history\n \n def close_history(self):\n if self.__history is not None:\n self.__history.close()\n self.__history = None\n\n @property\n def config(self):\n if self.__config is None:\n self.__config = Config(*self.parent_configs, self.config_file,\n track_sources=True, use_command_args=False)\n return self.__config\n \n def set_config(self, settings):\n parser = ConfigParser(allow_no_value=True)\n parser.read([self.config_file])\n parser.read_dict(settings)\n with open(self.config_file, 'w') as f:\n parser.write(f)\n self.__config = None\n \n @property\n def epochs(self):\n return self.history.train.current_epoch() - 1\n \n @property\n def time(self):\n return self.history.train.elapsed_time[-1]\n \n @property\n def examples(self):\n return self.history.train.example[-1]\n \n def delete(self):\n self.close_history()\n if os.path.isdir(self.run_dir):\n shutil.rmtree(self.run_dir)\n \n def execute_run(self, configs=[], stub=False, end_process=False):\n self.close_history()\n print(\"\\n==============================================\")\n print(f\"Run {self.identifier}:\")\n print(f\" run dir: {self.run_dir}\")\n print(f\" save_prefix: {self.save_prefix}\")\n print(f\" config file: {self.config_file}\")\n print(\"----------------------------------------------\\n\")\n if stub:\n self.execute_stub()\n return exit_codes.SUCCESS\n configs = self.parent_configs + [self.config_file] + configs\n self.last_execution = subprocess.run([\"python\", \"training.py\", *configs],\n input=b\"y\\ny\\ny\\ny\\n\")\n return self.last_execution\n \n def generate_model(self, save=True):\n from variable_networks import VariableParityNetwork\n model = VariableParityNetwork(**self.config.model.kwargs)\n #torch.save(model, f\"{self.save_prefix}-initial-checkpoint.torch\")\n torch.save(model, os.path.join(self.run_dir, \"model.pt\"))\n \n \n def execute_stub(self):\n # generates histories based on a simulated loss curve\n # logs simulated batches of 1000 examples\n # batch time is l^2 * muls summed over layers\n # loss is an exponential with decay speed l^2 * muls for max layer\n # and asymptote l * muls summed over layers\n \n ls = [list(range(lmax + 1)) for lmax in self.config.model.lmaxes]\n muls = self.config.model.muls\n lmuls = [list(zip(li, mi)) for li,mi in list(zip(ls,muls))]\n print(lmuls)\n \n batch_time = sum(sum((l+1)**2 * m for l,m in layer) for layer in lmuls) / 10\n asymptote = 10 / sum(sum((l+1) * m for l,m in layer) for layer in lmuls)\n decay = max(sum((l+1)**2 * m for l,m in layer) for layer in lmuls) / 1000000\n print(f\"Batch time = {batch_time:.2f}, asymptote = {asymptote:.3f}, decay = {decay}\")\n \n if self.history is None:\n self.__history = 
TrainTestHistory(\n examples_per_epoch=self.config.data.train_size,\n relevant_elements=self.config.relevant_elements,\n save_prefix = self.save_prefix, run_name=self.identifier,\n load=False, use_backup=False)\n \n epoch = self.epochs\n time = self.time\n example = self.examples\n while True:\n example += 100\n time += batch_time\n epoch = example // self.config.data.train_size + 1\n \n test_loss = math.exp(-example * decay) * (.9 + .2 * random.random()) + asymptote\n train_loss = test_loss * (.9 + .2 * random.random())\n RMSE = np.array([1.5 * test_loss, .5 * test_loss])\n mean_error = RMSE * (np.random.random(2) - .5)\n \n self.history.train.log_batch(\n batch_time, 0, 1000, 18000,\n time, example % self.config.data.train_size, example,\n train_loss, epoch=epoch, verbose=False \n )\n \n self.history.test.log_test(\n example // 1000, epoch,\n (example % self.config.data.train_size) // 1000,\n example % self.config.data.train_size,\n example, time, test_loss,\n mean_error, RMSE \n )\n \n if example >= self.config.training.example_limit:\n print(f\"Finished training after {example} examples\")\n break\n if time >= self.config.training.time_limit:\n print(f\"Finished training after {secs_to_str(time)}\")\n break\n if epoch > self.config.training.epoch_limit:\n print(f\"Finished training after {epoch-1} epochs\")\n \n #self.history.save() \n #print(\"Displaying graph...\")\n #self.history.test.plot()\n self.close_history()\n \n \n \n #def __del__(self):\n # self.close_history()\n\n \nclass EnsembleOfRuns:\n\n def __init__(self, parent_dir=\"runs\", use_existing=True, configs=['exploration.ini'], start_exploring=False, stub=False, backup_samples=True, verbose=False, plot=False):\n self.parent_dir = parent_dir\n self.config_files = configs\n self.__config = None\n self.__test_samples = None\n self._abort = False\n if use_existing:\n self.runs = {d.name: TrainingRun(parent_dir=parent_dir, identifier=d.name, parent_configs=configs)\n for d in os.scandir(parent_dir) if d.is_dir() and ini_file(d.path) is not None}\n self.active_runs = {name:run for name,run in self.runs.items() if not run.config.exploration.inactive}\n self.delete_empty_runs()\n else:\n self.runs = {}\n self.stub = stub or self.config.exploration.stub\n self.seed_length = len(random_parameters_and_seed()[1])\n if self.config.exploration.group_name is None:\n self.set_config({'exploration':{'group_name':configs[-1]+\"_\"+hex(random.randrange(8**4))}})\n if start_exploring:\n self.fill_test_samples()\n self.exploration_loop(verbose=verbose, plot=plot)\n \n @property\n def config(self):\n if self.__config is None:\n self.__config = Config(*self.config_files, track_sources=True, use_command_args=False)\n return self.__config\n \n def activate(self, run, active=True):\n if active:\n run.set_config({'exploration':{'inactive':False}})\n self.active_runs[run.identifier] = run\n else:\n self.inactivate(run)\n \n def inactivate(self, run):\n run.set_config({'exploration':{'inactive':True}})\n self.active_runs.pop(run.identifier, None)\n \n def delete_run(self, run):\n run.delete()\n self.active_runs.pop(run.identifier, None)\n self.runs.pop(run.identifier, None)\n \n def delete_empty_runs(self):\n for run in list(self.runs.values()):\n if run.examples == 0:\n self.delete_run(run) \n \n def generate_run(self, failure_cost=0, active=True):\n seeds = self.seeds\n names = list(seeds)\n losses = self.mixed_log_losses()\n failures = self.failures\n if len(seeds) > 0:\n sample_X = [seeds[n] for n in names]\n sample_y = [losses[n] for n in names]\n 
else:\n sample_X = np.zeros((0,self.seed_length))\n sample_y = np.zeros(0)\n \n failures = [failures[n] for n in names]\n if self.test_samples is not None:\n test_X = self.test_samples.passes\n #print(test_X)\n #print(np.array(self.test_samples.seeds))\n X_bounds = None\n else:\n X_bounds = [(0,1)] * self.seed_length\n test_X = None\n\n seed = bayes.next_sample(\n sample_X = sample_X,\n sample_y = sample_y,\n X_bounds = X_bounds,\n test_X = test_X,\n failures = failures,\n failure_cost = failure_cost\n )[0]\n #print(f\"seed = {seed}\")\n if self.test_samples is not None:\n self.test_samples.set_used(seed=seed)\n \n exploration_policy.mul_coeff = self.config.exploration.mul_coeff\n settings = generate_parameters(seed)\n settings = update(settings, {'exploration':{'seed':list(seed), 'inactive':not active}}) \n run = TrainingRun(parent_dir=self.parent_dir, settings=settings, parent_configs=self.config_files)\n self.runs[run.identifier] = run\n if active: self.active_runs[run.identifier] = run\n return run \n \n @property\n def seeds(self):\n return {name:run.config.exploration.seed for name,run in self.runs.items()\n if run.config.exploration.seed is not None}\n \n @property\n def test_samples(self):\n if self.__test_samples is None:\n self.__test_samples = TrainableSampleGenerator(self.config.exploration.sample_file, configs=self.config_files, stub=self.stub)\n return self.__test_samples\n \n def fill_test_samples(self):\n if self.test_samples.num_passes < .9 * self.config.exploration.random_samples:\n print(\"Filling test samples...\")\n self.test_samples.sample(num_passes=self.config.exploration.random_samples, verbose=True, verbose_test=3)\n \n @property\n def failures(self):\n #print(\"We're going a weird failure here...\")\n return {name:bool(run.config.exploration.failed) for name,run in self.runs.items()}\n\n def last_x(self, coord='time'):\n if len(self.runs) == 0:\n return None\n return max(max_value(run.history.test.coord(coord)) for run in self.runs.values())\n \n def last_x_pair(self, coord='time'):\n #print(sorted(max_value(run.history.test.coord(coord)) for run in self.runs.values()))\n #for run in self.runs.values():\n # print(np.array(run.history.test.coord(coord)))\n return sorted(max_value(run.history.test.coord(coord)) for run in self.runs.values())[-2]\n \n def clear_memory(self):\n for run in self.runs.values():\n run.close_history()\n run.__config = None\n \n # train a run up to 'examples' or 'time' limit\n def train_run(self, run, examples=None, time=None, retries=1):\n if examples is None: examples = self.config.exploration.max_example\n if time is None: time = self.config.exploration.max_time\n run.set_config({'training':{\n 'example_limit':examples,\n 'time_limit':secs_to_str(time)\n }})\n if run.examples < examples and run.time < time:\n if self.stub:\n run.execute_stub()\n return True\n else:\n self.clear_memory()\n exit_code = run.execute_run()\n if exit_code == exit_codes.ABORT:\n self._abort = True\n print(\"ABORT received 1.1\")\n return True\n for i in range(retries+1):\n if i == retries:\n self.delete_run(run)\n return False\n if run.examples < examples and run.time < time:\n exit_code = run.execute_run()\n if exit_code == exit_codes.ABORT:\n self._abort = True\n print(\"ABORT received 1.1\")\n return True\n else:\n return True \n \n def set_config(self, settings):\n parser = ConfigParser()\n parser.read([self.config_files[-1]])\n parser.read_dict(settings)\n with open(self.config_files[-1], 'w') as f:\n parser.write(f)\n self.__config = None\n \n new_limit 
= next_training_limit\n \n # increases the current training limit, and brings all active runs\n # up to that level (or inactivates them if they suck) \n def training_increment_0(self, step=None, runs=None, max_time=None, max_example=None, verbose=True, plot=False):\n new_time, new_example = self.new_limit(step)\n config = self.config.exploration\n if step is None:\n step = config.step\n else:\n self.set_config({'exploration':{\n 'step':step\n }})\n\n if max_time is None: max_time = new_time\n if max_example is None: max_example = new_example\n self.set_config({'exploration':{\n 'max_time':secs_to_str(max_time),\n 'max_example':max_example\n }})\n\n if verbose: print(f\"=== Exploration round {step} ===\")\n if verbose: print(f\"max_time = {secs_to_str(max_time)}, max_example = {max_example}\")\n \n old_to_bring_up = config.active_schedule[step] if step < len(config.active_schedule) else config.active_schedule[-1]\n self.activate_best(n=old_to_bring_up)\n\n runs_to_up = self.active_runs.values() \n if verbose: print(f\"Bringing up {len(runs_to_up)} old runs...\")\n brought_up = 0\n for run in list(runs_to_up):\n bring = self.bring_up_run(run)\n if self._abort:\n print(\"ABORT received 3.1\")\n return\n elif bring:\n brought_up += 1\n #else:\n # raise RuntimeError(\"We have a bad run to dispose of\")\n if plot and run.identifier in self.runs:\n self.axes = self.plot_runs(run, axes=self.axes, block=False)\n\n new_tries = config.try_schedule[step] if step < len(config.try_schedule) else config.try_schedule[-1]\n new_tries += (old_to_bring_up - brought_up) \n if verbose: print(f\"Generating {new_tries} new runs...\")\n brought_up = 0\n while brought_up < new_tries:\n run = self.generate_run()\n bring = self.bring_up_run(run)\n if self._abort:\n print(\"ABORT received 3.2\")\n return\n elif bring:\n brought_up += 1\n else: # Maybe we bailed on a bad run\n pass\n if plot and run.identifier in self.runs:\n self.axes = self.plot_runs(run, axes=self.axes, block=False)\n\n # increases the current training limit, and brings all active runs\n # up to that level (or inactivates them if they suck) \n def training_increment_1(self, step=None, runs=None, max_time=None, max_example=None, verbose=True, plot=False):\n new_time, new_example = self.new_limit(step)\n config = self.config.exploration\n if step is None:\n step = config.step\n else:\n self.set_config({'exploration':{\n 'step':step\n }})\n\n if max_time is None: max_time = new_time\n if max_example is None: max_example = new_example\n self.set_config({'exploration':{\n 'max_time':secs_to_str(max_time),\n 'max_example':max_example\n }})\n\n if verbose: print(f\"=== Exploration round {step} ===\")\n if verbose: print(f\"max_time = {secs_to_str(max_time)}, max_example = {max_example}\")\n \n old_to_bring_up = config.active_schedule[step] if step < len(config.active_schedule) else config.active_schedule[-1]\n self.activate_best(n=old_to_bring_up)\n\n runs_to_up = self.active_runs.values() \n if verbose: print(f\"Bringing up {len(runs_to_up)} old runs...\")\n brought_up = 0\n for run in list(runs_to_up):\n bring = self.bring_up_run(run)\n if self._abort:\n print(\"ABORT received 3.1\")\n return\n elif bring:\n brought_up += 1\n #else:\n # raise RuntimeError(\"We have a bad run to dispose of\")\n if plot and run.identifier in self.runs:\n self.axes = self.plot_runs(run, axes=self.axes, block=False)\n\n new_tries = config.try_schedule[step] if step < len(config.try_schedule) else config.try_schedule[-1]\n new_tries += (old_to_bring_up - brought_up) \n if 
verbose: print(f\"Generating {new_tries} new runs...\")\n brought_up = 0\n while brought_up < new_tries:\n run = self.generate_run()\n bring = self.bring_up_run(run)\n if self._abort:\n print(\"ABORT received 3.2\")\n return\n elif bring:\n brought_up += 1\n else: # Maybe we bailed on a bad run\n pass\n if plot and run.identifier in self.runs:\n self.axes = self.plot_runs(run, axes=self.axes, block=False)\n \n\n def exploration_loop(self, max_step=100000, verbose=False, plot=False):\n step = self.config.exploration.step\n if verbose:\n self.axes = None\n while step <= max_step:\n self.training_increment(step=step, verbose=verbose, plot=plot)\n if self._abort:\n os._exit(exit_codes.ABORT)\n step += 1 \n \n \n proceed = proceed_with_training\n \n # trains a run in steps until it reaches the current\n # maximum time/example, or until it fails a preliminary\n # judgment\n def bring_up_run(self, run):\n if run not in self.active_runs:\n self.activate(run)\n config = self.config.exploration\n \n # create steps at 1/8th, 1/4th, etc of max extent\n #steps = np.array([.125, .25, .5, 1])\n steps = np.array([.5, 1])\n example_steps = (config.max_example * steps).astype(int)\n time_steps = (config.max_time * steps).astype(int)\n \n # Step through time/example steps until current run has\n # not surpassed a step. This is the step we will start\n # training toward.\n i = 0\n while i < len(steps):\n if run.time >= time_steps[i] or time_steps[i] < config.time_increment \\\n or run.examples >= example_steps[i] or example_steps[i] < config.example_increment:\n i += 1\n else:\n break\n \n for i in range(i, len(steps)):\n success = self.train_run(run, time=time_steps[i], examples=example_steps[i])\n if not success or self._abort or (i < len(steps) - 1 and not self.proceed(run)):\n if self._abort:\n print(\"ABORT received 2\")\n return False\n break \n return True\n \n order_runs = order_runs_policy\n \n # activates the best n runs and inactivates the others\n def activate_best(self, n=4, runs=None):\n if runs is None:\n runs = self.runs.values()\n runs = self.order_runs(runs)\n \n for run in runs[:n]:\n if run not in self.active_runs.values():\n self.activate(run)\n \n for run in runs[n:]:\n if run in self.active_runs.values():\n self.inactivate(run)\n \n def plot_runs(self, run=None, active_only=True, **kwargs):\n pass\n #return plot_runs(self, run=run, active_only=active_only, **kwargs)\n \n def plot_extrapolation(self, run, x, coord='example', **kwargs):\n pass\n #return plot_extrapolation(self, run=run, x=x, coord=coord, **kwargs)\n \n def extrapolated_log_losses(self, coord='time', smoothing=5, active_only=False, value=None):\n runs = self.active_runs if active_only else self.runs\n \n #print(f\"value: {coord} to extrapolate to.\")\n if value is None:\n if len(runs) > 1:\n value = self.last_x_pair(coord)\n #print(f\" value = last_x_pair(\\\"{coord}\\\") = {value}\")\n else:\n value = self.last_x(coord)\n #print(f\" value = {value}\")\n #else: \n # print(F\" value = {value}\")\n \n long_runs = {}\n short_runs = {}\n long_histories = set()\n losses = {}\n for name, run in runs.items():\n if run.examples == 0:\n pass\n elif max_value(run.history.test.coord(coord)) >= value:\n #print(f\"max {coord} = {max_value(run.history.test.coord(coord))}\")\n long_runs[name] = run\n losses[name] = run.history.test.log_loss_interpolate(value, coord=coord, window=smoothing)\n long_histories.add(run.history.test)\n else:\n short_runs[name] = run\n for name, run in short_runs.items():\n losses[name], _, _, _ = 
run.history.test.log_loss_extrapolate(value, long_histories, coord=coord, window=smoothing)\n return losses\n\n def mixed_log_losses(self, t_coeff=0.25, smoothing=5, active_only=False, t_value=None, e_value=None):\n t_losses = self.extrapolated_log_losses(coord='time', smoothing=smoothing, active_only=active_only, value=t_value)\n e_losses = self.extrapolated_log_losses(coord='example', smoothing=smoothing, active_only=active_only, value=e_value)\n return {name:t_coeff * t_losses[name] + (1-t_coeff) * e_losses[name] for name in t_losses} \n \n \n \n\n \n\ndef max_value(seq, min=0):\n if len(seq):\n return seq[-1]\n else:\n return min\n\ndef merge_dicts(*dicts):\n return {key:tuple(d[key] for d in dicts) for key in set(dicts[0]).intersection(*dicts[1:])}\n \n \n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n config = sys.argv[1]\n parent_dir = os.path.dirname(config)\n ensemble = EnsembleOfRuns(parent_dir=parent_dir, configs=[config], start_exploring=True, verbose=True, plot=True) \n else:\n settings, seed = random_parameters_and_seed()\n print(\"Simulating random run:\")\n print(settings)\n settings = update(settings, {'exploration':{'seed':list(seed)}}) \n run = TrainingRun(settings=settings)\n run.execute_stub()\n\n\n \n\n \n\n", "id": "6436501", "language": "Python", "matching_score": 4.671090126037598, "max_stars_count": 0, "path": "train/exploration.py" }, { "content": "# Treat this much like a config file, editing the functions to set exploration policy\n#import exploration\nimport statistics as stat\nimport math\nfrom collections.abc import Sequence\n\nmul_coeff = 1.0\nlmax_max = 9\n\n# The following is deprecated. Look at the \"return True\" on the first line\n# This mostly handled by the \"active list\" which is called by timilar criteria\n# Answers whether or not it is worth continuing to train 'run'\ndef proceed_with_training(ensemble, run):\n \"\"\"back in action\"\"\"\n return True\n # Some handy values accumulated here \n asymptotes_dict = ensemble.asymptotes(active_only = True)\n asymptote = asymptotes_dict[run.identifier]\n del asymptotes_dict[run.identifier]\n other_asymptotes = asymptotes_dict.values()\n other_worst = max(other_asymptotes, key=lambda x: x[0])\n \n progress = max(run.time / ensemble.config.exploration.max_time,\n run.example / ensemble.config.exploration.max_example)\n \n #### Decision code goes here: ####\n \n if asymptote[0] - 4 * asymptote[1] < other_worst[0] + 4 * other_worst[1]:\n return True\n else:\n return False\n\n ##################################\n \ndef XX__proceed_with_training(ensemble, run, verbose=True):\n \"\"\"Deprecated\"\"\"\n \n losses = ensemble.mixed_log_losses(active_only=True)\n mean = stat.mean(losses.values())\n stdev = stat.stdev(losses.values())\n loss = losses.pop(run.identifier, None)\n\n progress = max(run.time / ensemble.config.exploration.max_time,\n run.examples / ensemble.config.exploration.max_example)\n \n print(f\"Log losses:\")\n print(f\"Current run: {loss}\")\n print(f\"Active runs: mean = {mean:2.2f}, stdev = {stdev:2.2f}\")\n \n inv_prog = math.sqrt(1/progress)\n cutoff = mean + 3 * inv_prog * stdev\n if loss < cutoff:\n print(f\"Current run < mean + 3 * {inv_prog:2.1f} * stdev = {cutoff:2.2f}\")\n print(f\"Not promising... 
Shunting run for possible continuations later...\")\n return True\n else:\n print(f\"Current run > mean + 3 * {inv_prog:2.1f} * stdev = {cutoff:2.2f}\")\n print(f\"Continuing training of current run...\")\n return False \n \n \n \n \n\n# Returns new values (max_time, max_example)\ndef next_training_limit(ensemble, step):\n config = ensemble.config.exploration\n if step == config.step:\n return config.max_time, config.max_example\n elif step < config.step:\n raise ValueError(\"Can't backtrack in exploration rounds.\")\n\n new_example = schedule_then_geo(config.example_schedule, step, base=config.example_increment)\n new_time = schedule_then_geo(config.time_schedule, step, base=config.time_increment)\n return new_time, new_example\n \ndef schedule_then_geo(arg=None, step=0, ratio=2, base=None):\n if isinstance(arg, Sequence) and len(arg) > 0:\n if step < len(arg):\n return arg[step]\n else:\n return arg[-1] * (ratio ** (step + 1 - len(arg)))\n else:\n if base is None: base = arg\n return base * (ratio ** (step + 1))\n \n\n# puts runs in order of how well they're performing; 0th is best\n#def order_runs(ensemble, runs):\n# return sorted(runs, key=lambda r: r.history.test.asymptote[0])\n\ndef order_runs(ensemble, runs):\n losses = ensemble.mixed_log_losses()\n return sorted(runs, key = lambda r : losses[r.identifier])\n \n\nimport random\n\ndef random_parameters_and_seed(distribution=random.random):\n gen = TrackingGenerator(distribution)\n settings = generate_parameters(gen)\n return settings, gen.seed\n\ndef generate_parameters(var):\n \"\"\"\n Defines a distribution of parameters\n Returns a settings dictionary\n var is an iterable of variables in the range [0,1) which\n we can make use of.\n \"\"\"\n var = iter(var)\n model={}\n training={}\n settings = {'model':model, 'training':training}\n \n # max_radius is exponential from 3 to 8\n model['max_radius'] = 1.5 * 2 ** (1 + 1.4 * next(var))\n\n # number of radial basis funcs is exponential from 8 to 31\n model['number_of_basis'] = int( 2 ** (3 + 2 * next(var)) )\n\n # radial_layers is from 1 to 15\n model['radial_layers'] = int(2 ** (4 * next(var)) )\n\n # radial_h from 10 to 79\n model['radial_h'] = int( 5 * 2 ** (1 + 3 * next(var)) )\n \n # numlayers from exp from 2 to 12\n numlayers = int( 2 ** (1 + 2.584963 * next(var))) \n \n # lmax is a polynomial on [0,1), of x = layer/numlayers\n # lmax = l0 + l1 x + l2 x^2\n # where l0 is whatever gives this min of lmin on [0,1)\n l2 = 6 * next(var) - 3 # l2 in [-3, 3]\n l1 = 6 * next(var) - 3 # l1 in [-3, 3]\n lmin = int(6 * (next(var) ** 2)) # lmin integer in [0,5] inclusive\n \n ns = [l / numlayers for l in range(numlayers)]\n lmaxes = [min(lmax_max, int(round(l1 * n + l2 * n**2))) for n in ns]\n bump = -min(lmaxes)\n\n lmaxes = [l + bump + lmin for l in lmaxes]\n model['lmaxes'] = lmaxes\n \n global mul_coeff\n print(f\"Using mul_coeff = {mul_coeff}.\")\n # multiplicities are a fn of both n = layer/numlayers and x = 10/(2l+1)\n # m = m0 + m01 x + m10 n + m11 xn\n # where m0 is whatever gives this min of mmin\n m01 = mul_coeff * (40 * next(var) - 10) # m01 in [-10, 30]\n m11 = mul_coeff * (40 * next(var) - 10) # m11 in [-10, 30]\n m10 = mul_coeff * (80 * next(var) - 40) # m10 in [-40, 40]\n #mmin = int(16 * (next(var) ** 2)) # mmin integer in [1,16] incl.\n mmin = int(mul_coeff * 2 ** (next(var) * 6)) + 1 # mmin integer in [2,64] incl.\n \n xs = [[10 / (2*l + 1) for l in range(lmaxes[n]+1)] for n in range(numlayers)]\n muls = [[int(m01 * x + m10 * n + m11 * x * n) for x in xl] for n,xl in 
zip(ns,xs)]\n bump = -min([min(lmul) for lmul in muls])\n muls = [[m + bump + mmin for m in lmul] for lmul in muls]\n model['muls'] = muls\n\n return settings\n\n\nclass TrackingGenerator:\n def __init__(self, dist):\n assert dist == random.random, \"Non-uniform distributions not yet supported!\"\n self.dist = dist\n self.seed = []\n \n def __iter__(self):\n return self\n \n def __next__(self):\n var = self.dist()\n self.seed += [var]\n return var\n\n \ndef show_model(settings):\n lmaxes = settings[\"model\"][\"lmaxes\"]\n muls = settings[\"model\"][\"muls\"]\n\n N = len(lmaxes)\n max_l = max(lmaxes)\n max_w = max(max(len(str(m)) for m in mcol) for mcol in muls)\n\n for l in range(max_l,-1,-1):\n for n in range(N):\n if lmaxes[n] < l:\n print((max_w + 1) * \" \", end='')\n else:\n print(f\" {(muls[n][l]):>{max_w}}\", end='')\n print()\n \n \n import random\n\nif __name__ == '__main__':\n while True:\n gen = TrackingGenerator(random.random)\n settings = generate_parameters(gen)\n print(\"--------------------------------------\")\n show_model(settings)\n \n #print(settings)\n input()\n", "id": "5871644", "language": "Python", "matching_score": 3.0531277656555176, "max_stars_count": 0, "path": "train/exploration_policy.py" }, { "content": "from exploration import EnsembleOfRuns\nimport numpy as np\nimport sys\n\nvariables = [\n 'max radius',\n 'number of basis',\n 'radial layers',\n 'radial h',\n 'num layers',\n 'l2',\n 'l1',\n 'lmin',\n 'm01',\n 'm11',\n 'm10',\n 'mmin',\n]\n\ndef rjust(strings):\n maxlen = max(len(n) for n in strings)\n return [' ' * (maxlen - len(n)) + n for n in strings]\n\ndef sorted_runs(configs=None, ensemble=None):\n if configs:\n ensemble = EnsembleOfRuns(configs=configs, start_exploring=False, verbose=False)\n \n losses = ensemble.mixed_log_losses()\n losses = [(name, loss[0], loss[1]) for name, loss in losses.items()]\n losses.sort(key = lambda l : l[1])\n \n return losses\n\nif __name__ == '__main__':\n configs = [x for x in sys.argv[1:] if not x.startswith('-')]\n if len(configs) == 0:\n raise ValueError('Must specify a config file for hyperparameter exploration.')\n\n ensemble = EnsembleOfRuns(configs=configs, start_exploring=False, verbose=False)\n losses = sorted_runs(ensemble=ensemble)\n \n print(\"Log losses compared by example:\")\n \n for name, loss, error in losses:\n print(f\"{name} : {loss:4.2} ± {error:2.2}\")\n \n X = np.array([ensemble.runs[name].config.exploration.seed for name, _, _ in losses])\n y = np.array([loss for _, loss, _ in losses])\n \n Xy = np.concatenate((X,y.reshape(-1,1)), axis=1)\n corr = np.corrcoef(Xy, rowvar=False)[-1,:-1]\n \n print()\n print(\"Correlation of log loss with each seed variable:\")\n \n for var, c in zip(rjust(variables), list(corr)):\n print(f\"{var} : {c:1.3f}\")\n \n \n \n ", "id": "5457399", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "train/hyper_analyze.py" }, { "content": "code = {\r\n 0 : 'SUCCESS',\r\n 130 : 'ABORT'\r\n}\r\n\r\nfor c, n in code.items():\r\n globals()[n] = c\r\n\r\ndef success(code):\r\n global SUCCESS\r\n return code == SUCCESS", "id": "11367717", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "train/exit_codes.py" }, { "content": "import numpy as np\nimport h5py\nfrom collections.abc import Sequence\n\n# Resisable arrays, which save to disk\n# kinda quick. 
The original\n# 'NpArray' implementation in the top half\n# isn't nearly as good as the\n# 'H5Array' in the bottom half.\n# I recommend using:\n# 'from resizable import H5Array as Array'\n\n# I may just drop one of them and alias\n# 'H5Array' as 'Array' in here.\n\n\ndef mul_greater(c, x):\n return (x // c + 1) * c\n\nclass NpArray:\n \"\"\"\n Expandable array, based on an underlying numpy array\n Append increases length by 1.\n If check_bounds == False, indexing beyond the end (either\n setting or getting) also increases length, filling with\n zeros.\n \"\"\"\n default_chunk_size = 1000\n \n def __init__(self, array=None, shape=(0,), chunk_size=None, **kwargs):\n if array is not None:\n array = np.array(array)\n shape = array.shape\n self.cross_shape = shape[1:]\n self.length = shape[0]\n if chunk_size is None:\n chunk_size = Array.default_chunk_size\n self.chunk_size = chunk_size\n if array is None:\n self.data = np.zeros((mul_greater(chunk_size, self.length), *self.cross_shape), **kwargs)\n else:\n self.data = np.resize(array, (mul_greater(chunk_size, self.length), *self.cross_shape))\n \n def __repr__(self):\n return repr(self.array)\n \n def __len__(self):\n return self.length\n \n @property\n def array(self):\n return self.data[:self.length]\n \n @array.setter\n def array(self, value):\n self.data[:self.length] = value\n \n @property\n def capacity(self):\n return len(self.data)\n \n @property\n def shape(self):\n return (self.length, *self.cross_shape)\n \n def __getitem__(self, key):\n return (self.array)[key]\n \n def __setitem__(self, key, value):\n self.array[key] = value\n \n def __iter__(self):\n return iter(self.array)\n \n def resize(self, new_length, shrink_data=False):\n if new_length < self.length:\n if shrink_data:\n self.data = np.resize(self.data, (mul_greater(self.chunk_size, new_length), *self.cross_shape))\n self.data[new_length:min(self.length, len(self.data))] = 0\n else:\n self.data[new_length:self.length] = 0\n elif new_length > len(self.data):\n self.data = np.resize(self.data, (mul_greater(self.chunk_size, new_length), *self.cross_shape))\n self.length = new_length\n \n def append(self, value):\n self.resize(self.length + 1)\n self.data[self.length - 1] = value \n \n def __add__(self, other): return self.array + other\n def __sub__(self, other): return self.array - other\n def __mul__(self, other): return self.array * other\n def __matmul__(self, other): return self.array @ other\n def __truediv__(self, other): return self.array / other\n def __floordiv__(self, other): return self.array // other\n def __mod__(self, other): return self.array % other\n def __divmod__(self, other): return divmod(self.array, other)\n def __pow__(self, other): return self.array ** other\n def __lshift__(self, other): return self.array << other\n def __rshift__(self, other): return self.array >> other\n def __and__(self, other): return self.array & other\n def __xor__(self, other): return self.array ^ other\n def __or__(self, other): return self.array | other\n\n def __radd__(self, other): return other + self.array\n def __rsub__(self, other): return other - self.array\n def __rmul__(self, other): return other * self.array\n def __rmatmul__(self, other): return other @ self.array\n def __rtruediv__(self, other): return other / self.array\n def __rfloordiv__(self, other): return other // self.array\n def __rmod__(self, other): return other % self.array\n def __rdivmod__(self, other): return divmod(other, self.array)\n def __rpow__(self, other): return other**self.array\n def 
__rlshift__(self, other): return other << self.array\n def __rrshift__(self, other): return other >> self.array\n def __rand__(self, other): return other & self.array\n def __rxor__(self, other): return other ^ self.array\n def __ror__(self, other): return other | self.array\n \n def __iadd__(self, other):\n self.array += other\n return self\n def __isub__(self, other):\n self.array -= other\n return self\n def __imul__(self, other): \n self.array *= other\n return self\n def __imatmul__(self, other): \n self.array @= other\n return self\n def __itruediv__(self, other): \n self.array /= other\n return self\n def __ifloordiv__(self, other): \n self.array //= other\n return self\n def __imod__(self, other): \n self.array %= other\n return self\n def __ipow__(self, other): \n self.array ** other\n return self\n def __ilshift__(self, other): \n self.array << other\n return self\n def __irshift__(self, other): \n self.array >> other\n return self\n def __iand__(self, other): \n self.array & other\n return self\n def __ixor__(self, other): \n self.array ^ other\n return self\n def __ior__(self, other): \n self.array | other\n return self\n \n\nclass H5Array(h5py.Dataset):\n default_chunk_size = 256\n default_compression = 'gzip'\n \n def __init__(self, h5, name, arg1=None, chunk_size=None, resizable_cross=None, **kwargs):\n data = None\n if isinstance(arg1, tuple):\n shape = arg1\n elif isinstance(arg1, int):\n shape = (arg1,)\n elif arg1 is not None:\n data = np.array(arg1)\n shape = data.shape\n elif 'data' in kwargs:\n data = np.array(kwargs['data'])\n shape = data.shape\n elif 'shape' in kwargs:\n shape = kwargs['shape']\n elif h5 is not None and name in h5:\n shape = h5[name].shape\n chunk_size = h5[name].chunks[0]\n else:\n shape = (0,)\n \n if chunk_size is None:\n chunk_size = H5Array.default_chunk_size\n self.cross_shape = shape[1:]\n kwargs['shape'] = shape\n if resizable_cross is None and name in h5 and len(h5[name].maxshape) > 1 and h5[name].maxshape[1] is None:\n resizable_cross = True\n kwargs['maxshape'] = tuple(None for _ in shape) if resizable_cross else (None, *self.cross_shape)\n kwargs['chunks'] = (chunk_size, *self.cross_shape)\n if 'compression' not in kwargs:\n kwargs['compression'] = H5Array.default_compression\n \n if data is None:\n if 'dtype' not in kwargs:\n kwargs['dtype'] = h5[name].dtype if name in h5 else int\n dset = h5.require_dataset(name, **kwargs)\n else:\n if 'dtype' not in kwargs:\n kwargs['dtype'] = data.dtype\n dset = h5.create_dataset(name, **kwargs)\n \n super().__init__(dset.id)\n \n def append(self, value):\n self.resize(len(self)+1)\n self[-1] = value\n \n def resize(self, size, axis=0):\n super().resize(size, axis)\n \n @property\n def resizable_cross(self):\n return len(self.maxshape)>1 and self.maxshape[1] is None\n \n def resize_cross(self, size, axis=None):\n assert self.resizable_cross, \"This H5PY Array can't resize its cross-section!\"\n if axis is not None:\n super().resize(size, axis+1)\n elif isinstance(size, Sequence):\n assert len(self.size) == len(size) + 1, \"New cross section is wrong number of dimensions.\"\n super().resize((self.size[0], *size))\n else:\n super().resize(size, axis=1)\n\n \n \n", "id": "11030541", "language": "Python", "matching_score": 0.3006073236465454, "max_stars_count": 0, "path": "train/resizable.py" }, { "content": "from typing import Dict, Union\n\nimport torch\nfrom e3nn import o3\nfrom e3nn.math import soft_one_hot_linspace\nfrom torch_cluster import radius_graph\nfrom torch_geometric.data import Data\nfrom 
torch_scatter import scatter\n\nfrom e3nn.nn.models.v2106.gate_points_message_passing import MessagePassing\n\n\nclass VariableParityNetwork(torch.nn.Module):\n def __init__(\n self,\n irreps_in,\n irreps_out,\n lmaxes,\n muls,\n max_radius = 1.0,\n num_neighbors = 1,\n #num_nodes = 1,\n #irreps_node_attr=\"0e\",\n irreps_edge_attr=[],\n edge_lmax=None,\n number_of_basis=3,\n radial_layers=10,\n radial_h=100,\n **kwargs,\n #pool_nodes=True,\n ) -> None:\n super().__init__()\n\n assert all(isinstance(l, int) for l in lmaxes), f\"lmaxes should be a list of integers, but\\nlmaxes = {lmaxes}\"\n if isinstance(muls, int):\n muls = [muls] * len(lmaxes)\n else:\n assert len(muls) == len(lmaxes), \"Multiplicities and lmaxes have different numbers of layers!\"\n\n \n lmax_c = 0\n for i in range(len(lmaxes)-1):\n lmax_c = max(lmax_c, lmaxes[i] + lmaxes[i+1])\n if edge_lmax is None:\n self.edge_lmax = min(lmax_c, 11)\n else:\n self.edge_lmax = min(edge_lmax, lmax_c)\n \n self.max_radius = max_radius\n self.number_of_basis = number_of_basis\n self.radial_layers = radial_layers\n self.radial_h = radial_h\n self.irreps_edge_attr = o3.Irreps(irreps_edge_attr) + o3.Irreps.spherical_harmonics(self.edge_lmax)\n #print(self.irreps_edge_attr)\n #self.pool_nodes = pool_nodes\n \n irreps_node_sequence = [o3.Irreps(irreps_in)]\n for i, (mul, lmax) in enumerate(zip(muls, lmaxes)):\n if isinstance(mul, int):\n mul = [mul] * (lmax + 1)\n else:\n assert all(isinstance(m, int) for m in mul), f\"muls[{i}] should be an intaeger or list of integers, but\\nmuls[{i}] = {mul}\"\n assert len(mul) == lmax + 1, f\"On layer {i}, len(muls[{i}] != lmax + 1 == {lmax} + 1\"\n #print(irreps_node_sequence[-1])\n has_paths = irrep_product(irreps_node_sequence[-1], self.irreps_edge_attr)\n irreps_node_sequence.append(o3.Irreps([\n (m,(l,p)) for (l,m) in enumerate (mul) for p in [-1,1]\n if (l,p) in has_paths]))\n irreps_node_sequence.append(o3.Irreps(irreps_out))\n\n self.mp = MessagePassing(\n irreps_node_sequence=irreps_node_sequence,\n irreps_node_attr=\"0e\",\n irreps_edge_attr=self.irreps_edge_attr,\n fc_neurons=[self.number_of_basis] + radial_layers * [radial_h],\n num_neighbors=num_neighbors,\n )\n\n self.irreps_in = self.mp.irreps_node_input\n #self.irreps_node_attr = self.mp.irreps_node_attr\n self.irreps_out = self.mp.irreps_node_output\n\n def forward(self, x, edge_index, edge_vec, edge_attr=None) -> torch.Tensor:\n\n # Edge attributes\n edge_sh = o3.spherical_harmonics(range(self.edge_lmax + 1), edge_vec, True, normalization='component')\n edge_attr = edge_sh if edge_attr is None else torch.cat([edge_attr, edge_sh], dim=1)\n\n # Edge length embedding\n edge_length = edge_vec.norm(dim=1)\n edge_length_embedding = soft_one_hot_linspace(\n edge_length,\n 0.0,\n self.max_radius,\n self.number_of_basis,\n basis='smooth_finite', # the smooth_finite basis with cutoff = True goes to zero at max_radius\n cutoff=True, # no need for an additional smooth cutoff\n ).mul(self.number_of_basis**0.5)\n \n node_attr = x.new_ones(x.shape[0], 1)\n\n node_outputs = self.mp(x, node_attr, edge_index[0], edge_index[1], edge_sh, edge_length_embedding)\n\n return node_outputs\n \ndef irrep_product(irreps1, irreps2):\n \"\"\"\n Returns an o3.Irreps containing all possible products\n \"\"\"\n out = []\n for i1 in irreps1:\n for i2 in irreps2:\n #print(f\" {type(i1)} * {type(i2)} : {i1} * {i2}\")\n out.extend(i1.ir * i2.ir)\n return o3.Irreps(out)\n", "id": "2790924", "language": "Python", "matching_score": 1.4663318395614624, "max_stars_count": 0, 
"path": "train/variable_networks.py" }, { "content": "import matplotlib.pyplot as plt\nimport torch\nimport numpy as np\nimport sys\n\ndef show_radial_parameters(model, max_radius=None):\n radial_parameters={}\n\n for name, param in model.named_parameters():\n if name.endswith(\"kernel.R.f.weights.0\"):\n radial_parameters[name] = param\n\n radial_data = torch.cat([p.data for p in radial_parameters.values()], dim=0).cpu()\n radial_square_density = radial_data.pow(2).mean(dim=0)\n radial_rms = radial_square_density.pow(.5)\n\n plt.figure()\n if max_radius is not None:\n x_coords = range(len(radial_rms))\n else:\n x_coords = [max_radius * x / len(radial_rms) for x in range(len(radial_rms))]\n plt.bar(x_coords,radial_rms)\n plt.ylabel(\"RMS\")\n plt.show()\n\ndef print_parameter_size(model):\n for name, param in model.named_parameters():\n print(name + ': ' + str(list(param.shape)))\n\ndef count_parameters(model):\n model_count_dict = {} # layer # -> n_params\n n_total_parameters = 0\n for name, param in model.named_parameters():\n n_params = np.prod(param.shape)\n n_total_parameters += n_params\n fields = name.split(\".\")\n layer = fields[1]\n if layer not in model_count_dict:\n model_count_dict[layer]=0\n model_count_dict[layer] += n_params\n for layer,n_params in model_count_dict.items():\n print(f\"Layer {layer}: {n_params}\")\n print(f\"Total parameters: {n_total_parameters}\")\n\n# goshippo.com/blog/measure-real-size-any-python-object\ndef get_object_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_object_size(v, seen) for v in obj.values()])\n size += sum([get_object_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_object_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_object_size(i, seen) for i in obj])\n return size\n", "id": "834945", "language": "Python", "matching_score": 1.0385456085205078, "max_stars_count": 0, "path": "train/diagnostics.py" }, { "content": "import numpy as np\nimport pandas as pd\nimport sqlalchemy as sqa\nfrom sqlalchemy.pool import NullPool\nfrom sqlalchemy.sql.expression import func\nfrom sqlalchemy.schema import MetaData\nfrom io import BytesIO\nimport urllib\n\n# mysql://ekwan16:h9#Li48Z#hY$b@J8@SG-nmrdatabase-2962-master.servers.mongodirector.com/pbe0\n\n# status code meanings:\n# 0 - \"not started\" meaning only one jiggle is present\n# 1 - \"complete\" meaning 0th entry in data column is stationary, 1st entry is jiggle\n# 2 - \"pending\" meaning the stationary is currently being computed\n# 3 - \"error\" meaning something went wrong and this row is considered dead\n\ndef connect(func):\n def wrapped(self, *args, **kwargs):\n connect = True if self.connection is None else False\n if connect: self.__enter__()\n r = func(self, *args, **kwargs)\n if connect: self.__exit__(None, None, None)\n return r\n return wrapped\n \n# converts a byte representation of a numpy array to an actual numpy array\ndef unpack_bytes(arr_bytes):\n load_bytes = BytesIO(arr_bytes)\n loaded_np = np.load(load_bytes, allow_pickle=True)\n return loaded_np\n\nclass Database:\n \n def __init__(self, host, user, passwd=None, db=\"pbe0\", table=\"data_new\", status=1):\n 
self.host = urllib.parse.quote(host)\n self.user = urllib.parse.quote(user, safe=\"\")\n self.passwd = \"\" if passwd is None else \":\" + urllib.parse.quote(passwd, safe=\"\")\n self.db = db\n self.dialect = \"mysql+pymysql\"\n self.status = status \n self.metadata = MetaData()\n \n self.connection = None\n self.engine = None\n \n self.__enter__()\n self.status_table = \\\n sqa.Table('status_new', self.metadata, autoload=True, autoload_with=self.engine) \\\n if table==\"data_new\" else None\n self.data_table = sqa.Table(table, self.metadata, autoload=True, autoload_with=self.engine)\n self.__exit__(None, None, None)\n \n \n \n \n def __enter__(self):\n if self.connection is not None: return\n self.engine = sqa.create_engine(f\"{self.dialect}://{self.user}{self.passwd}@{self.host}/{self.db}\",\n connect_args={'ssl':{'ssl': {}}}, poolclass=NullPool)\n self.connection = self.engine.connect()\n return self\n \n def __exit__(self, exc_type, exc_value, traceback):\n if self.connection is None: return\n self.connection.close()\n self.connection = None\n self.engine.dispose()\n self.engine = None\n \n @connect \n def fetch_ids(self, number=None, requested_status=None, increment=10000, verbose=False):\n if self.status_table is not None:\n if requested_status is None:\n requested_status = self.status\n query = sqa.select([self.status_table.columns.id]).where(self.status_table.columns.status == requested_status)\n else:\n query = sqa.select([self.data_table.columns.id])\n \n if number:\n query = query.limit(number)\n num = f\"{number:,d}\"\n else:\n num = \"ALL\"\n if verbose: print(f\"Fetching {num} IDs\" + (\"\" if requested_status is None else f\" with status {requested_status}\") + \".\", flush=True)\n result = self.connection.execution_options(stream_results=True).execute(query)\n \n rows = []\n while True:\n batch = result.fetchmany(increment)\n if not batch: break\n rows += batch\n if verbose: print(\".\", end=\"\", flush=True)\n if verbose: print(\" Done.\")\n \n return np.array([int(r[0]) for r in rows])\n \n \n @connect\n def read_rows(self, ids, columns=['id', 'atomic_numbers', 'geometries_and_shieldings', 'compound_type', 'weights'], randomize=False):\n query = sqa.select((getattr(self.data_table.columns, c) for c in columns))\n if hasattr(ids, 'tolist'):\n ids = ids.tolist()\n query = query.where(self.data_table.columns.id.in_(ids))\n if randomize:\n query = query.order_by(func.rand())\n \n query_df = pd.read_sql_query(query, self.engine, index_col=\"id\")\n self.__exit__(None, None, None)\n \n # convert back to the array types\n query_df.atomic_numbers = query_df.atomic_numbers.apply(unpack_bytes)\n query_df.geometries_and_shieldings = query_df.geometries_and_shieldings.apply(unpack_bytes)\n #query_df.symmetric_atoms = query_df.symmetric_atoms.apply(ast.literal_eval)\n query_df.weights = query_df.weights.apply(unpack_bytes)\n \n return query_df\n \n @connect\n def set_status(self, id, status):\n update = sqa.update(self.status_table).where(self.status_table.columns.id == ID).values(status=0)\n self.connection.execute(update)\n\n\nif __name__ == '__main__':\n from configparser import ConfigParser\n parser = ConfigParser()\n parser.read('connect_params.ini')\n connect_params = parser['connect_params']\n db = Database(**connect_params)\n \n ids = db.fetch_ids(10, requested_status=1)\n print('\\n', list(ids))\n \n print(\"\\nRetrieving IDs as rows...\\n\")\n data = db.read_rows(ids)\n \n print(data)\n \n for id, row in data.iterrows():\n print(id, row)\n\n \n\n\n \n \n", "id": "12683782", 
"language": "Python", "matching_score": 1.4526370763778687, "max_stars_count": 0, "path": "train/sql_df.py" }, { "content": "from glob import glob\nfrom configparser import ConfigParser\nimport sys\nimport os\nimport time\nfrom collections import defaultdict\nfrom collections.abc import Mapping\nfrom numbers import Number, Integral\nimport numpy as np\n\n# import code for evaluating radial models\n#from e3nn.radial import *\n#from laurent import *\nfrom functools import partial\n\n\n# parses a delimited string\ndef parse_list(s, separator=\",\", func=lambda x : x):\n s = s.strip(\" []\")\n return [ func(i.strip()) for i in s.split(separator) ]\n\ntitle_case = lambda s : s.title()\n\ndef eval_int(s):\n \"Evalutes decimal integer literal, allowing leading zeros.\"\n if len(s) == 0:\n return int(s)\n s = s.lstrip(' 0')\n if len(s) == 0:\n return 0\n return int(s)\n\ndef str_to_secs(s):\n t_strings = s.split(\":\")\n assert len(t_strings) <= 3, f\"Time string '{s}' has too many terms!\"\n return sum(eval_int(n) * 60 ** i for i,n in enumerate(reversed(t_strings)))\n \ndef secs_to_str(t, decimals=0):\n h = int(t // (60 * 60))\n t -= h * 60 * 60\n m = int(t // 60)\n t -= m * 60\n s = int(round(t))\n chars = 3 + decimals if decimals > 0 else 2\n return f\"{h}:{m:02d}:{s:0{chars}.{decimals}f}\"\n \ndef get_any(s):\n for e in s:\n break\n return e\n\n# recursively updates possibly-nested dictionary\ndef update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n \ndef dict_update(pairs):\n d = dict()\n for k, v in pairs:\n if k in d and isinstance(d[k], Mapping):\n update(d[k], v)\n else:\n d[k] = v\n return d\n \ndef immut(x):\n if isinstance(x, Mapping):\n return tuple((k,immut(v)) for k,v in x.items())\n elif isinstance(x, list):\n return tuple(immut(y) for y in x)\n else:\n return x\n\ndef invert_dict(d, one_to_many=True, recursive=True):\n if recursive and isinstance(next(iter(d.values())), Mapping):\n return dict_update(((k0, {k1 : v1}) for k1, d1 in d.items() for k0, v1 in invert_dict(d1).items()))\n if one_to_many:\n return {k : set(v for v in d if d[v] == k) for k in set(d.values())}\n else:\n return {k : v for v, k in d.items()}\n\nclass NO_STORE:\n def __init__(self):\n return\n\n# dummy section:\nclass SECTION:\n def __init__(self):\n self._mapping = {}\n\nclass ConfigSection:\n def __init__(self, mapping, load_all=True, eval_func=lambda x : x, eval_funcs={}, eval_error=True,\n key_func=lambda x : x, include_keys=None, exclude_keys={}, default=None, defaults={}):\n self._mapping = dict(mapping)\n self._eval_func = eval_func\n self._eval_funcs = eval_funcs\n self._eval_error = eval_error\n self._key_func = key_func\n self._include_keys = include_keys\n self._exclude_keys = set(exclude_keys)\n self._default = default\n self._defaults = defaults\n self._sub_ini = []\n\n if load_all:\n self.load_all()\n\n def load_all(self):\n if self._include_keys is None:\n keys = set(self._mapping.keys())\n else:\n keys = set(self._include_keys)\n keys.update(self._defaults.keys())\n for key in keys:\n if key not in self._exclude_keys:\n self.load(key)\n\n def load(self, key, eval_func=None, key_func=None, eval_error=None, **kwargs):\n if not eval_func:\n if key in self._eval_funcs:\n eval_func = self._eval_funcs[key]\n else:\n eval_func = self._eval_func\n if not key_func:\n key_func = self._key_func\n if eval_error is None:\n eval_error = self._eval_error\n \n if key not in self._mapping:\n if 'default' in kwargs:\n default = 
kwargs['default']\n elif key in self._defaults:\n default = self._defaults[key]\n else:\n default = self._default\n if default == NO_STORE:\n return\n value = default\n else:\n try:\n value = eval_func(self._mapping[key])\n except Exception:\n if eval_error:\n raise\n value = self._mapping[key]\n self._mapping[key] = value\n\n key = key_func(key).replace(\" \", \"_\")\n self.__dict__[key] = value\n return value\n\n # For pickling:\n def __getstate__(self):\n self_dict = self.__dict__.copy()\n for key in self.__dict__.keys():\n if key.startswith('_'):\n del self_dict[key]\n return self_dict\n\n def items(self):\n return ((key, value) for (key, value) in self.__dict__.items()\n if (not key.startswith('_')) and key not in {'load_all', 'load', 'items'})\n \n def keys(self):\n return (key for key in self.__dict__.keys()\n if (not key.startswith('_')) and key not in {'load_all', 'load', 'items'})\n \n\nclass Config:\n def atomic_number(self, e):\n if isinstance(e, str):\n if e.isnumeric(): return int(e)\n return self.symbol_to_number[e]\n else:\n return e\n\n def load_section(self, name, store_as=None, **kwargs):\n mapping = self._parser[name] if name in self._parser else {}\n section = ConfigSection(mapping, **kwargs)\n if not store_as:\n store_as = name\n store_as = store_as.replace(\" \", \"_\")\n self.__dict__[store_as] = section\n return section\n\n def load_section_into_base(self, name, **kwargs):\n mapping = self._parser[name] if name in self._parser else {}\n section = ConfigSection(mapping, **kwargs)\n self.__dict__.update(section.items())\n self.base_level.add_keys(section.keys())\n return section.items()\n\n # For pickling:\n def __getstate__(self):\n self_dict = self.__dict__.copy()\n for key in self.__dict__.keys():\n if key.startswith('_'):\n del self_dict[key]\n return self_dict\n\n def base_items(self):\n return self.base_level.items()\n #return ((key, value) for (key, value) in self.__dict__.items()\n # if (not key.startswith('_')) and key not in\n # {'atomic_number', 'load_section', 'load_section_into_base', 'items'})\n\n def get_sub_configs(self):\n sub_configs = []\n for _, section in self._parser.items():\n for key, value in section.items():\n if value is None and key.endswith('.ini'):\n del section[key]\n sub_configs.append(key)\n return sub_configs\n \n def file_settings(self, filename):\n fileset = self.by_source[filename]\n settings = {}\n for sec_name, file_sec in fileset.items():\n section = self.__dict__[sec_name]\n settings[sec_name] = {key:section.__dict__[key] for key in file_sec}\n return settings\n\n # parses config files\n # later filenames overwrite values from earlier filenames\n # (so you can have mini configs that change just a few settings)\n # automatically includes \"training.ini\" as first file, unless arg specifies otherwise\n # adds command line arguments to filenames, unless arg specifies otherwise\n def __init__(self, *filenames, settings=None, use_training_ini=True, use_command_args=True, track_sources=True, _set_names=False, verbose=False):\n if _set_names: # Only here to satisfy pylint and the code highlighter\n self._set_names()\n self._parser = ConfigParser(allow_no_value=True)\n\n filenames = list(filenames)\n if use_training_ini and \"training.ini\" not in filenames:\n filenames = [\"training.ini\"] + filenames\n if use_command_args and len(sys.argv) > 1:\n filenames += sys.argv[1:]\n\n #print(f\"Loading config from files {filenames}...\")\n \n files_worked = self._parser.read(filenames)\n if verbose: print(f\"Loaded config from files 
{files_worked}.\")\n \n # load any sub-configs specified in the file:\n sub_files = self._parser.read(self.get_sub_configs())\n if verbose: print(f\"Loaded sub-config from files {sub_files}\") \n\n # load in extra caller-provided settings:\n if isinstance(settings, Mapping):\n self._parser.read_dict(settings)\n \n # remember which items are global settings\n self.base_level = SubDict(self.__dict__)\n \n # Probably delete this next bite.\n # I don't think I do, or ever will, use it\n if track_sources:\n self.source = defaultdict(dict)\n for f in files_worked + sub_files:\n p = ConfigParser(allow_no_value=True)\n p.read(f)\n for sec_name, section in p.items():\n if sec_name != \"DEFAULT\":\n for prop_name, val in section.items():\n if val is not None:\n self.source[sec_name][prop_name] = f\n del p\n self.by_source = invert_dict(self.source)\n \n # The next batch of settings might be regarded as \"settings\",\n # which help determine behaviours of Config (to the extent it's\n # just a big dictionary/namespace with a fancy constructor).\n # For example, how to render the config as a string for printing\n # Eg., at the start of a training run.\n self.load_section(\"_config_config\",defaults={\n 'max_width':75,\n 'include':None,\n 'exclude':[],\n 'sub_include':{},\n 'sub_exclude':{},\n 'lr_just':'r',\n 'buffer':1,\n 'indent':2,\n 'equiv':' : '\n }, eval_func=eval)\n \n \n\n\n # dictionaries between symbols and numbers\n self.load_section('symbols_numbers_dict', key_func=title_case, eval_func=int)\n self.symbol_to_number = {s:n for s,n in self.symbols_numbers_dict.items()}\n self.number_to_symbol = {n:s for s,n in self.symbols_numbers_dict.items()}\n global number_to_symbol\n number_to_symbol = self.number_to_symbol\n #print(self.symbol_to_number)\n \n # device, all_elements, relevant_elements\n efunc = partial(parse_list, func=self.atomic_number)\n self.load_section_into_base('general', eval_funcs={\n 'all_elements':efunc, 'relevant_elements':efunc,\n 'gpus':eval, 'parallel':eval,\n 'verbose':eval},\n defaults={'device':'cuda', 'gpus':None, 'parallel':True, 'verbose':True})\n\n # in spite of configur's best intentions, we need\n # to see if it's even possible to run mult-gpus\n if self.device == 'cpu':\n self.parallel = False\n if self.parallel:\n ####### Perhaps moot with PyT 1.X's extenion of distributed to all systems #########\n import torch.distributed as dist\n if not dist.is_available():\n self.parallel = None\n self.gpus = 1\n ##########################################################\n else:\n import torch.cuda\n if self.gpus is None:\n self.gpus = torch.cuda.device_count()\n elif 0 < self.gpus < 1:\n self.gpus = round(self.gpus * torch.cuda.device_count())\n else:\n self.gpus = min(self.gpus, torch.cuda.device_count())\n self.parallel = (self.gpus > 1)\n else:\n self.gpus = 1\n\n # all elements\n assert len(self.all_elements) == len(set(self.all_elements)), \"duplicate element\"\n self.n_elements = len(self.all_elements)\n\n # elements to predict NMR shieldings for\n for e in self.relevant_elements:\n assert e in self.all_elements, f\"relevant element {e} not found in all_elements\"\n assert len(self.relevant_elements) == len(set(self.relevant_elements)), \"duplicate element\"\n\n # affine correction to database\n if \"affine_correction\" in self._parser:\n self.load_section('affine_correction', eval_func=eval)\n if self.affine_correction.correct:\n self.affine_correction = {self.atomic_number(title_case(e)) : v\n for e,v in self.affine_correction.items() if e != 'correct'}\n #for e in 
self.relevant_elements:\n # if e not in self.affine_correction:\n # self.affine_correction[e] = (1., 0.)\n else:\n self.affine_correction = None\n else:\n self.affine_correction = None\n \n # see reference training.ini for all the parameters in 'data'\n self.load_section('data', eval_func=eval, eval_funcs={'source':str, 'hdf5_filenames':glob},\n defaults={'randomize':True, 'get_from_start':False,\n 'multi_jiggle_data':True, 'data_status':1, #'jiggles_per_molecule':1,\n 'test_train_shuffle':None, 'batch_preload':0,\n 'data_size':None,\n 'train_size':None,\n 'test_size':None,\n 'held_back':0\n \n }, eval_error=False)\n #print(\"************ Data Size ***************\")\n #print(\" Before calculation\")\n #print(f\" data_size = {self.data.data_size}\")\n #print(f\" train_size = {self.data.train_size}\")\n #print(f\" test_size = {self.data.test_size}\")\n #print(f\" held_back = {self.data.held_back}\\n\")\n\n # how the raw data are stored\n if self.data.source.startswith(\"SQL\"):\n self.data.connect_params = self.load_section('connect_params')._mapping\n self.data.SQL_fetch_size = self.data.sql_fetch_size\n if self.data.source == \"SQL_0\":\n self.data.SQL_version = 0\n else:\n self.data.SQL_version = 1\n self.data.source = \"SQL\"\n \n if self.data.train_size is None:\n self.data.train_size = self.data.data_size - self.data.test_size - self.data.held_back\n else:\n self.data.data_size = self.data.train_size + self.data.test_size + self.data.held_back\n #print(\" After calculation\")\n #print(f\" data_size = {self.data.data_size}\")\n #print(f\" train_size = {self.data.train_size}\")\n #print(f\" test_size = {self.data.test_size}\")\n #print(f\" held_back = {self.data.held_back}\\n\")\n\n \n\n # training parameters\n self.load_section('training', eval_func=eval,\n eval_funcs={'save_prefix':str, 'time_limit':str_to_secs, 'run_name':str},\n defaults={'save_prefix':None, 'resume':False, 'epoch_limit':1000000000,\n 'example_limit':float('inf'), 'time_limit':float('inf'),\n 'run_name':None, 'use_wandb':False, 'use_tensor_constraint':False,\n 'train_dynamic':False, 'dynamic_ratio':1.,\n 'recent_checkpoints':1, 'best_checkpoints':2,\n 'jit_recompile':False})\n self.training.save_dir = str(os.path.split(self.training.save_prefix)[0])\n if self.training.run_name is None:\n self.training.run_name = str(os.path.split(self.training.save_prefix)[1])\n self.training.gpu_batch_size = max(round(self.training.batch_size / self.gpus), 1)\n self.training.batch_size = self.training.gpu_batch_size * self.gpus\n # For equivalent convergence, learning rate should scale with\n # effective batch size:\n self.training.learning_rate *= self.training.batch_size\n \n \n # wandb authentication\n self.load_section('wandb', eval_funcs={'interval':eval}, defaults={'interval':1})\n\n # model parameters\n self.load_section('model', eval_func=eval, eval_error=False)\n self.model.kwargs = self.model._mapping\n self.model.irreps_in = [ (self.n_elements, (0, 1)) ] # n_features, rank 0 tensor, even parity\n if self.training.use_tensor_constraint:\n import tensor_constraint\n self.model.irreps_out = tensor_constraint.Rs_out\n else:\n self.model.irreps_out = [ (1,(0,1)) ] # one output per atom, rank 0 tensor, even parity\n self.model.kwargs['irreps_in'] = self.model.irreps_in\n self.model.kwargs['irreps_out'] = self.model.irreps_out\n if 'num_neighbors' not in self.model.kwargs and 'n_norm' in self.model.kwargs:\n self.model.num_neighbors = self.model.n_norm\n self.model.kwargs['num_neighbors'] = self.model.n_norm\n self.max_radius = 
self.model.max_radius\n\n # exploration parameters\n self.load_section('exploration', eval_func=eval,\n eval_funcs={'seed':(lambda x : np.array(eval(x))),\n 'time_increment':str_to_secs,\n 'max_time':str_to_secs,\n 'run_dir':str, 'sample_file':str,\n 'time_schedule':lambda x : parse_list(x, func=str_to_secs),\n 'group_name':str},\n defaults={'seed':None, 'mode':1, 'creation_time':None, 'failed':0, 'max_epoch':None, 'max_example':None, 'max_time':None,\n 'time_increment':None, 'example_increment':None, 'epoch_increment':None,\n 'inactive':None, 'run_dir':None, 'sample_file':\"samples.torch\", 'stub':False,\n 'step':0, 'time_schedule':None, 'example_schedule':None, 'group_name':None,\n 'mul_coeff':1.0, 'sample_too_small':1.0, 'early_test_often':False, 'plot':False,\n 'check_delete':False})\n if self.exploration.creation_time is None:\n if os.path.isdir(self.training.save_dir):\n self.exploration.creation_time = os.path.getctime(self.training.save_dir)\n else:\n self.exploration.creation_time = time.time()\n if self.exploration.max_example is None and self.exploration.max_epoch is not None:\n self.exploration.max_example = int(self.exploration.max_epoch * self.data.training_size)\n if self.exploration.example_increment is None and self.exploration.epoch_increment is not None:\n self.exploration.example_increment = int(self.exploration.epoch_increment * self.data.training_size)\n if self.exploration.sample_file is not None and self.exploration.run_dir is not None:\n self.exploration.sample_file = os.path.join(self.exploration.run_dir, self.exploration.sample_file)\n if self.exploration.early_test_often:\n basic_i = 4\n ratio_i = self.training.testing_interval // basic_i\n powers = ratio_i.bit_length() - 1\n self.training.initial_testing_schedule = [basic_i * (2**p) for p in range(powers)]\n else:\n self.training.initial_testing_schedule = []\n\n\n \n \n def __str__(self, **kwargs): # include, exclude, sub_include, sub_exclude, align_all_sections, max_width):\n \"\"\"\n returns a mult-line string representing the configuration.\n \"\"\"\n\n # Separate out settings as func args, vs. 
settings as config (A fall-back)\n from types import SimpleNamespace\n X = SimpleNamespace(**self._config_config._mapping, **kwargs)\n \n # Figure out which sections and settings should be (in/ex)cluded\n if X.include is None:\n base = {k:v for k,v in self.base_level.items() if k not in X.exclude and k[0] != \"_\"}\n sections = {n:dict(s._mapping) for n,s in self.__dict__.items() if isinstance(s, ConfigSection) and n[0] != \"_\" and n not in X.exclude}\n else:\n base = {k:v for k,v in self.base_level.items() if k in X.include}\n sections = {n:dict(s._mapping) for n,s in self.__dict__.items() if isinstance(s, ConfigSection) and n in X.include}\n \n for n,sec in sections.items():\n if n in X.sub_exclude:\n sections[n] = {k:v for k,v in sec.items() if k not in X.sub_exclude[n] and k[0] != \"_\"}\n if n in X.sub_include:\n sections[n] = {k:v for k,v in sec.items() if k in X.sub_include[n]}\n \n # figure out width of printout\n wide_value = round(X.max_width/3)\n sec_key_width = {n:str_width(max(sec.keys(), key = len, default=0)) + 2 * X.indent for n,sec in sections.items()}\n sec_val_width = {n:min(str_width(max(sec.values(), key = lambda x : min(str_width(x), wide_value), default=0)), wide_value) + 2 * X.indent for n,sec in sections.items()}\n sec_key_width['base'] = str_width(max(base.keys(), key= len, default=0)) + X.indent\n sec_val_width['base'] = min(str_width(max(base.values(), key = lambda x : min(str_width(x), wide_value), default=0)), wide_value) + X.indent\n sec_total_width = {n:sec_key_width[n] + len(X.equiv) + sec_val_width[n] for n in sec_key_width}\n if X.align_all_sections:\n max_key_width = max(*sec_key_width.values())\n max_val_width = max(*sec_val_width.values())\n total_width = max_key_width + len(X.equiv) + max_val_width\n else:\n total_width = max(sec_total_width['base'], max(sec_total_width.values()))\n \n def pad_print(text,indent=0,buffer=1,filler=None,width=None,skew=.5):\n indent *= X.indent\n if width is None: width = total_width - 2 * indent\n if filler is None: filler = X.filler\n text = (buffer * ' ' + text + buffer * ' ')\n pad = (width - len(text)) // (2 * len(X.filler))\n pad = pad, width - len(text) - pad\n return indent * ' ' + pad[0] * filler + text + pad[1] * filler + \"\\n\"\n \n #def pad_print_squeeze(name,indent=0,buffer=1,filler=None,width=None,ratio=.5):\n # if width is None: width = round(ratio * sec_total_width[name] + (1-ratio) * total_width + indent * X.indent)\n # return pad_print(name,indent=indent,buffer=buffer,filler=filler,width=width)\n \n def print_section(name, indent=0, width=None, align=False, ratio=.5):\n if width is None:\n if align:\n width = total_width - indent * X.indent\n else:\n width = round((ratio * sec_total_width[name] + (1 - ratio) * total_width + indent))\n section = self.base_level if name=='base' else sections[name]\n indent *= X.indent\n if align:\n key_width = max_key_width - indent\n val_width = max_key_width - indent\n else: \n key_width = sec_key_width[name]\n val_width = sec_val_width[name]\n #assert sec_total_width[name] <= width - indent, \\\n # f\"Config representation is too wide ({sec_total_width[name]}) in section {name} with total width ({total_width}) and mixed width ({width}) and indent ({indent}).\"\n out = \"\"\n for key, value in section.items():\n out += indent * ' '\n out += (f\"{key:<{key_width}}\" if X.lr_just=='l' \\\n else f\"{key:>{key_width}}\") + X.equiv\n # break up long values into multi-lines\n value = to_str(value)\n place = max_key_width + len(X.equiv)\n while len(value) > wide_value:\n out 
+= value[:wide_value] + \"\\n\" + place * \" \"\n value = value[wide_value:]\n out += value + \"\\n\"\n return out\n \n out = pad_print(X.title, 0, width = total_width)\n out += \"\\n\"\n out += print_section('base', 1, align=X.align_all_sections)\n \n for secname in sections:\n out += \"\\n\"\n out += pad_print(secname, 1, filler=X.sub_filler)\n out += \"\\n\"\n out += print_section(secname, 2, align=X.align_all_sections)\n \n return \"\\n\" + out\n \nclass SubDict:\n def __init__(self, sup_dict, sub_keys=set()):\n self.dict = sup_dict\n self.keys = set(sub_keys)\n \n def add_key(self, key):\n self.keys.add(key)\n \n def __len__(self):\n return len(self.keys)\n \n def __getitem__(self, key):\n if key not in self.keys:\n raise KeyError(f\"{key} does not exist in this SubDict.\")\n elif key not in self.dict:\n self.keys.remove(key)\n return self.dict[key]\n \n def __setitem__(self, key, value):\n self.keys.add(key)\n self.dict[key] = value\n \n def __delitem__(self, key):\n self.keys.remove(key)\n \n def __iter__(self):\n return iter(self.keys)\n \n def __contains__(self, key):\n return key in self.keys\n \n def update(self, items):\n if isinstance(items, dict):\n items = items.items()\n for key, value in items:\n self[key] = value\n \n def add_keys(self, keys):\n self.keys.update(keys)\n \n def items(self):\n def itr():\n keys = self.keys\n d = self.dict\n for k in keys:\n yield k, d[k]\n return itr()\n \n \n \n \n\n\ndef to_str(x):\n if isinstance(x, Number) and not isinstance(x, Integral):\n return str(f\"{x:.2f}\")\n else:\n return str(x)\n \ndef str_width(x):\n return len(to_str(x)) \n \nif __name__ == '__main__':\n config = Config()\n print(str(config)) \n \n \n \n \n \n \n \n \n\n\n # The only purpose of this section is to get rid of the red squiggly lines from\n # not \"defining\" parameters explicitly in this file. 
This function is not actually called.\n def _set_names(self):\n self.project = \"Solvent\"\n self.device = \"\"\n self.gpus = 1\n self.parallel = True\n\n self.all_elements = []\n self.relevant_elements = []\n\n self.symbols_numbers_dict = {}\n\n self.data = SECTION()\n self.data.source = \"\"\n self.data.hdf5_filenames = []\n \n self.data.table = \"\"\n self.data.data_status = 0\n self.data.structure = 0\n \n self.data.sql_fetch_size = 0\n self.data.SQL_fetch_size = 0\n self.data.SQL_version = 0\n\n self.data.test_size = 0\n self.data.train_size = 0\n self.data.data_size = 0\n self.data.test_train_shuffle = \"\"\n self.data.randomize = True\n self.data.get_from_start = False\n\n self.data.multi_jiggle_data = False\n self.data.jiggles_per_molecule = 1\n\n self.data.n_molecule_processors = 0\n self.data.molecule_queue_cap = 0\n self.data.example_queue_cap = 0\n self.data.batch_queue_cap = 0\n self.data.batch_preload = 0\n\n self.data.connect_params = {}\n\n self.connect_params = SECTION()\n self.connect_params.host = \"\"\n self.connect_params.user = \"\"\n self.connect_params.passwd = \"\"\n self.connect_params.db = \"\"\n \n self.wandb = SECTION()\n self.wandb.user = ''\n self.wandb.pswd = ''\n self.wandb.interval = 1\n\n self.model = SECTION()\n self.model.kwargs = {}\n self.model.max_radius = 0\n self.model.Rs_in = [ (0,0,0) ] # n_features, rank 0 tensor, even parity\n self.model.Rs_out = [ (0,0,0) ] # one output per atom, rank 0 tensor, even parity\n self.model.lmaxes = []\n self.model.muls = []\n\n self.training = SECTION()\n self.training.example_limit = None\n self.training.epoch_limit = None # number of epochs\n self.training.time_limit = None\n self.training.batch_size = 0 # minibatch sizes\n self.training.testing_interval = 0 # compute testing loss every n minibatches\n self.training.save_interval = 0 # save model every n minibatches\n self.training.save_prefix = \"\" # save checkpoints to files starting with this\n self.training.run_name = None\n self.training.learning_rate = 0 # learning rate\n self.training.resume = False\n self.training.recent_checkpoints = 1\n self.training.best_checkpoints = 1\n self.training.use_wandb = False\n self.training.use_tensor_constraint = False\n self.training.train_dynamic = False\n self.training.dynamic_ratio = 1.\n \n self.exploration = SECTION()\n self.exploration.seed = None\n self.exploration.max_epoch = None\n self.exploration.max_example = None\n self.exploration.max_time = None\n self.exploration.epoch_increment = None\n self.exploration.example_increment = None\n self.exploration.time_increment = None\n self.exploration.run_dir = None\n self.exploration.sample_file = None\n self.exploration.random_samples = 0\n self.exploration.try_schedule = []\n self.exploration.active_schedule = []\n self.exploration.time_schedule = []\n self.exploration.example_schedule = []\n self.exploration.step = 0\n self.exploration.stub = False\n self.exploration.group_name = None\n self.exploration.mul_coeff = 1\n \n self.active_runs = SECTION()\n self.abandoned_runs = SECTION()\n\n self.affine_correction = {}\n\n\n\n\n", "id": "7667242", "language": "Python", "matching_score": 6.215542793273926, "max_stars_count": 0, "path": "train/training_config.py" }, { "content": "if __name__ == '__main__': print(\"Loading numpy...\")\nimport numpy as np\nif __name__ == '__main__': print(\"Loading torch...\")\nimport torch\ntorch.set_default_dtype(torch.float64)\nif __name__ == '__main__': print(\"Loading torch.multiprocessing...\")\nimport torch.multiprocessing as 
mp\nif __name__ == '__main__': print(\"Loading torch.distributed...\")\nimport torch.distributed as dist\nif __name__ == '__main__': print(\"Loading DistributedDataParallel...\")\nfrom torch.nn.parallel import DistributedDataParallel\nif __name__ == '__main__': print(\"Loading e3nn...\")\nimport e3nn\nimport torch_geometric as tg\nif __name__ == '__main__': print(\"Loading time...\")\nimport time\nfrom collections.abc import Mapping\n#if __name__ == '__main__': print(\"Loading laurent...\")\n#from laurent import LaurentPolynomial\nif __name__ == '__main__': print(\"Loading variable_networks...\")\nfrom variable_networks import VariableParityNetwork\nif __name__ == '__main__': print(\"Loading diagnostics...\")\nfrom diagnostics import print_parameter_size, count_parameters, get_object_size\nif __name__ == '__main__': print(\"Loading history...\")\nfrom history import TrainTestHistory\nif __name__ == '__main__': print(\"Loading collections...\")\nfrom collections import deque\nif __name__ == '__main__': print(\"Loading copy...\")\nfrom copy import copy\nif __name__ == '__main__': print(\"Loading datetime...\")\nfrom datetime import timedelta\nif __name__ == '__main__': print(\"Loading re...\")\nimport re\nif __name__ == '__main__': print(\"Loading sys...\")\nimport sys\nif __name__ == '__main__': print(\"Loading os...\")\nimport os\nif __name__ == '__main__': print(\"Loading math...\")\nimport math\nif __name__ == '__main__': print(\"Loading traceback...\")\nimport traceback\nif __name__ == '__main__': print(\"Loading training_config...\")\nfrom training_config import Config\nif __name__ == '__main__': print(\"Loading training_utils...\")\nfrom training_utils import train_batch, train_dynamic_batch, batch_examples\nif __name__ == '__main__': print(\"Loading pipeline...\", flush=True)\nfrom pipeline import Pipeline, Molecule, test_data_neighbors, generate_multi_jiggles_set\n\nfrom model_utils import get_latest_checkpoint, newest_checkpoints, put_model_to_device, get_optimizer, save_checkpoint, cull_checkpoints\nfrom shuffle import Shuffle\nimport exit_codes\n\nif __name__ == '__main__':\n print(\"done loading modules.\", flush=True)\n\nif os.name == 'posix' and __name__ == '__main__':\n print(\"Setting up multiprocess resources...\", flush=True)\n mp.set_start_method('spawn')\n print(\"Loading resource\", flush=True)\n import resource\n print(\"Getting RLIMIT\", flush=True)\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n print(\n f\"\\nPreviously: maximum # of open file descriptors: {rlimit[0]} (soft limit) / {rlimit[1]} (hard limit)\")\n resource.setrlimit(resource.RLIMIT_NOFILE, (100000, rlimit[1]))\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n print(\n f\"\\nNow: maximum # of open file descriptors: {rlimit[0]} (soft limit) / {rlimit[1]} (hard limit)\")\n \n\nclass Trainer:\n \n def __init__(self, config_files=[], use_training_ini=True, start=True, verbose=None):\n \n # load config\n self.config = Config(*config_files, settings=None, use_training_ini=use_training_ini)\n verbose = self.config.verbose if verbose is None else verbose\n \n self.check_cuda(verbose=verbose)\n \n print(self.config)\n #if not os.path.isdir(self.config.training.save_dir):\n os.makedirs(self.config.training.save_dir, exist_ok=True)\n \n self.model = VariableParityNetwork(**self.config.model.kwargs) #get_untrained_model(self.config.model.kwargs, self.config.training.save_prefix)\n self.model.to(self.config.device)\n\n\n # determines whether we should, or can, resume a run\n self.resume = 
self.config.training.resume and self.config.training.save_prefix\n\n # Now that we know all_elements, and their cannonical order\n # (on which saved checkpoints are the authority) we can create\n # the one_hot_table for the Molecule class\n Molecule.initialize_one_hot_table(self.config.all_elements)\n \n # Try loading the test train shuffle indices from a file:\n self.shuffle = Shuffle.get_shuffle(self.config, verbose=verbose)\n \n # Find some way of alerting user to the possibility that a shuffle\n # will be overwritten\n \n ### set up molecule pipeline ###\n print(\"\\n=== Starting molecule pipeline ===\\n\")\n print(\"Working...\", end='\\r', flush=True)\n self.pipeline = Pipeline(self.config)\n \n # load test set before starting training\n self.read_test_set(verbose=verbose)\n \n if self.config.training.use_wandb:\n self.init_wandb(verbose=verbose)\n else:\n self.wandb_log = None\n \n self.init_history(verbose=verbose)\n\n if self.resume:\n self.optimizer_state = self.load_model(verbose=verbose)\n else:\n self.optimizer_state = None\n\n if self.config.parallel:\n self.spawn_gpu_processes(verbose=verbose)\n \n put_model_to_device(self.model, self.config.device)\n \n self.optimizer = get_optimizer(self.model, self.config.training.learning_rate, self.optimizer_state)\n \n if os.name == 'nt':\n self.set_abort_listener()\n \n if start: \n self.train(verbose=verbose) \n \n \n def check_cuda(self, verbose = True):\n \"\"\"\n Runs through some basic CUDA functions.\n Crucially, determines based on available GPUs (and\n user preference) whether we will run parallel.\n \"\"\"\n if self.config.device == \"cuda\":\n self.config.device = \"cuda:0\"\n if verbose:\n print(\"\\n=== GPU settings: ===\\n\")\n print(f\"current cuda device: {torch.cuda.current_device()}\")\n print(f\"cuda device count: {torch.cuda.device_count()}\")\n print(f\"cuda device name: {torch.cuda.get_device_name(0)}\")\n print(f\"is cuda available? {torch.cuda.is_available()}\")\n print(f\"cuda version: {torch.version.cuda}\")\n print(f\"device: {self.config.device}\") \n\n def load_model(self, verbose=True):\n \"\"\"\n Loads a model from checkpoint if available (and if self.resume==True),\n storing model state in self.model.\n Initializes self.all_elements in canonical order.\n Returns an ADAM optimizer state if available from checkpoint, else None\n \"\"\"\n \n try:\n model_files = newest_checkpoints(self.config.training.save_prefix,1)\n if len(model_files) == 0:\n #print(f\"No checkpoints matching '{save_prefix + '-*-checkpoint.torch'}'!\")\n #if input(\"Restart training, OVERWRITING any previous training history? (y/n) \").lower().strip() != 'y':\n # os._exit()\n self.resume = False\n return None\n else:\n model_filename = model_files[0]\n if verbose: print(f\"Loading model state from {model_filename}... \")\n model_file = torch.load(model_filename)\n\n assert self.config.model.kwargs == model_file[\"model_kwargs\"], \\\n \"Loaded model hyperparameters don't match config file.\" \n assert set(self.config.all_elements) == set(model_file['all_elements']), \"Loaded model elements and config elements don't match!\"\n \n self.model.load_state_dict(model_file[\"state_dict\"])\n self.config.all_elements = model_file['all_elements']\n return model_file[\"optimizer_state_dict\"]\n \n except RuntimeError:\n print(\"***** Failed to load model sucessfully *****\")\n #if input(\"Restart training, OVERWRITING any previous training history? 
(y/n) \").lower().strip() != 'y':\n # os._exit() \n raise\n \n def read_test_set(self, verbose=True):\n print(\"\\n=== Loading Test Set ===\\n\")\n time1 = time.time()\n self.pipeline.set_indices(self.shuffle.test_set)\n self.pipeline.start_reading(self.config.data.test_size, batch_size=self.config.training.gpu_batch_size)\n\n # read in and process testing data directly to memory\n self.test_batches = []\n\n if verbose: print(\"Loading test batches...\")\n while self.pipeline.any_coming():\n try:\n batch = self.pipeline.get_batch(20)\n except Exception as e:\n print(f\"Failed to get batch! {len(self.test_batches)}\")\n print(e)\n os._exit()\n self.test_batches.append(batch)\n \n time2 = time.time()\n if verbose: print(f\"Done loading test set. That took {time2-time1:.3f} s.\\n\")\n\n def spawn_gpu_processes(self, verbose=True):\n \"\"\"\n This will spawn (#gpus - 1) extra processes, on the\n aux_train\n \n \n \"\"\"\n \n self.gpu_kill_switch = mp.Lock()\n self.gpu_kill_switch.acquire()\n self.main_kill_switch = mp.Lock()\n \n if verbose: print(f\"Spawning {self.config.gpus-1} extra processes...\", flush=True)\n self.worker_pool = mp.spawn(aux_train, \n (self.config.gpus, self.pipeline,\n self.main_kill_switch,\n self.gpu_kill_switch,\n self.config.training.learning_rate,\n self.model,\n self.optimizer_state,\n self.config.training.train_dynamic, self.config.training.dynamic_ratio,\n self.config.data.batch_preload,\n self.config.training.jit_recompile), self.config.gpus-1, join=False)\n \n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355' \n if verbose: print(\"Initializing process group...\")\n dist.init_process_group(backend=\"nccl\", rank=0, world_size=self.config.gpus) \n if verbose: print(\"Building DistributedDataParallel model... 
\", flush=True)\n torch.cuda.set_device(0)\n self.model = DistributedDataParallel(self.model, device_ids=[0], output_device=0)\n dist.barrier()\n \n def init_wandb(self, verbose=True):\n \"\"\"\n Initializes WandB (weights-and-biases) logging\n \"\"\"\n import wandb\n wandb.login(key=self.config.wandb.pswd)\n \n from configparser import ConfigParser\n wb_cfg = ConfigParser()\n wb_cfg_filename = os.path.join(self.config.training.save_dir, \"wandb.ini\")\n wb_cfg.read(wb_cfg_filename)\n if 'wandb' not in wb_cfg:\n wb_cfg.add_section('wandb')\n if not self.resume or 'id' not in wb_cfg['wandb']:\n wb_cfg['wandb']['id'] = wandb.util.generate_id()\n with open(wb_cfg_filename, 'w') as wb_configfile:\n wb_cfg.write(wb_configfile)\n \n self.wandb_run = wandb.init(name=self.config.training.run_name, group=self.config.exploration.group_name, project=self.config.project, dir=self.config.training.save_dir, resume='allow', id=wb_cfg['wandb']['id'])\n\n wandb.config.batch_size = self.config.training.batch_size\n wandb.config.learning_rate = self.config.training.learning_rate\n wandb.config.muls = self.config.model.muls\n wandb.config.lmaxes = self.config.model.lmaxes\n wandb.config.max_radius = self.config.model.max_radius\n wandb.config.number_of_basis = self.config.model.number_of_basis\n wandb.config.radial_h = self.config.model.radial_h\n wandb.config.radial_layers = self.config.model.radial_layers\n wandb.config.n_norm = self.config.model.n_norm\n wandb.config.batch_norm = self.config.model.batch_norm\n wandb.config.batch_norm_momentum = self.config.model.batch_norm_momentum\n wandb.config.use_tensor_constraint = self.config.training.use_tensor_constraint\n wandb.config.train_dynamic = self.config.training.train_dynamic\n wandb.config.dynamic_ratio = self.config.training.dynamic_ratio\n\n self.wandb_log = wandb.log\n\n def init_history(self, verbose=True):\n \"\"\"\n Either creates or loads a TrainTestHistory object\n \"\"\"\n resume_history = self.resume\n if resume_history:\n try:\n self.history = TrainTestHistory(\n self.test_batches, examples_per_epoch=self.config.data.train_size,\n device=self.config.device, save_prefix=self.config.training.save_prefix, wandb_log=self.wandb_log,\n wandb_interval=self.config.wandb.interval, hdf5=True, load=True,\n relevant_elements=self.config.relevant_elements, train_dynamic=self.config.training.train_dynamic)\n except Exception as e:\n print(\n f\"Failed to load history from {self.config.training.save_prefix + '-history.torch'}\")\n print(e)\n traceback.print_tb(e.__traceback__)\n if input(\"Continue training with old model but new training history? 
(y/n) \").lower().strip() != 'y':\n print(\"Exiting...\")\n self.pipeline.close()\n os._exit()\n resume_history = False\n if not resume_history:\n self.history = TrainTestHistory(\n self.test_batches, device=self.config.device, examples_per_epoch=self.config.data.train_size,\n relevant_elements=self.config.relevant_elements, run_name=self.config.training.run_name,\n train_dynamic=self.config.training.train_dynamic,\n wandb_log=self.wandb_log, wandb_interval=self.config.wandb.interval,\n save_prefix=self.config.training.save_prefix, hdf5=True, load=False)\n self.epoch = self.history.train.current_epoch()\n self.example_in_epoch = self.history.train.next_example_in_epoch()\n self.example = self.history.train.example[-1]\n self.elapsed_time = self.history.train.elapsed_time[-1]\n self.batch_in_epoch = self.history.train.next_batch_in_epoch()\n if verbose and resume_history:\n print(\"Resuming from prior training...\")\n print(f\" epoch = {self.epoch}\")\n print(f\" example_in_epoch = {self.example_in_epoch}\")\n print(f\" batch_in_epoch = {self.batch_in_epoch}\")\n\n\n def set_abort_listener(self):\n # keyboard abort code\n # press q to abort after current training iteration\n from pynput import keyboard\n from threading import Semaphore\n self.abort_lock = Semaphore(0)\n def invoke_abort():\n self.abort_lock.release()\n hotkey = keyboard.HotKey(keyboard.HotKey.parse('q'), invoke_abort)\n self.listener = keyboard.Listener(on_press=hotkey.press, on_release=hotkey.release)\n self.listener.start()\n \n def train(self, verbose=True):\n self.pipeline.set_indices(self.shuffle.train_set)\n self.pipeline.set_shuffle(True)\n\n start_elapsed = self.elapsed_time\n\n data_queue = deque(maxlen=self.config.data.batch_preload)\n \n def make_test_schedule(interval, schedule=[], example=0):\n last = 1\n if len(schedule) and example < schedule[-1]:\n last = 1\n for e in schedule:\n last = e\n if example < e:\n yield e\n last += (example // interval) * interval\n while True:\n last += interval\n yield last\n\n # ramp up testing intervals at beginning?\n test_schedule = make_test_schedule(\n self.config.training.testing_interval,\n self.config.training.initial_testing_schedule,\n self.example\n )\n \n next_test_example = next(test_schedule)\n \n finish_training = False\n exit_message = \"No exit message provided.\"\n exit_code = 0\n \n kill_file = os.path.join(self.config.training.save_dir, \"kill.file\")\n\n # Avoid protracted JIT-ing to no eventual benefit\n if self.config.training.jit_recompile < float('inf'):\n torch._C._jit_set_bailout_depth(self.config.training.jit_recompile)\n\n\n # catching runtime errors in auxiliary threads and reraising them:\n try:\n # looping over epochs\n while True:\n print(\"\\n\" + (\"Resuming\" if self.example_in_epoch > 0 else \"Starting\") +\n f\" epoch {self.epoch}...\", end=\"\\r\", flush=True)\n\n # though we may start partway through an epoch, subsequent epochs start at example 0 and batch 1\n\n example_of_last_test = (self.example // self.config.training.testing_interval) * self.config.training.testing_interval\n example_of_last_save = (self.example // self.config.training.save_interval) * self.config.training.save_interval\n\n # start reading at example_in_epoch\n self.pipeline.scan_to(self.example_in_epoch)\n self.pipeline.start_reading(self.config.data.train_size - self.example_in_epoch, batch_size=self.config.training.gpu_batch_size)\n\n # loop while any batches are still to come in this epoch\n while self.pipeline.any_coming() or len(data_queue) > 0:\n\n time0 = 
time.time()\n while len(data_queue) < self.config.data.batch_preload and self.pipeline.any_coming():\n data = self.pipeline.get_batch().to(self.config.device)\n data_queue.appendleft(data)\n time1 = time.time()\n t_wait = time1-time0\n \n try:\n if self.config.training.train_dynamic:\n loss, dynamic_loss = train_dynamic_batch(\n data_queue, self.model, self.optimizer, self.config.training.dynamic_ratio)\n else:\n loss = train_batch(data_queue, self.model, self.optimizer)\n dynamic_loss = None\n except RuntimeError as e:\n print(e)\n if 'CUDA' in e.args[0]:\n self.finish(3)\n else:\n raise\n time2 = time.time()\n train_time = time2 - time1\n self.elapsed_time += train_time\n\n n_examples = min(self.config.training.batch_size, self.config.data.train_size - self.example_in_epoch)\n self.example_in_epoch += n_examples\n self.example += n_examples\n self.batch_in_epoch += 1\n \n self.history.log_batch(\n train_time, t_wait, n_examples, len(data.x) * self.config.gpus, self.elapsed_time,\n self.example_in_epoch, self.example, loss, dynamic_loss, epoch=self.epoch)\n \n test = False\n\n if self.elapsed_time >= self.config.training.time_limit:\n finish_training = True\n test = True\n exit_message = f\"Finished after {str(timedelta(seconds=self.elapsed_time - start_elapsed))[:-5]} elapsed training time.\"\n \n if self.example >= self.config.training.example_limit:\n finish_training = True \n test = True\n exit_message = f\"Finished after training {self.example} examples, or {(self.example/self.config.data.train_size):.2f} epochs.\"\n \n if test or self.example >= next_test_example or not self.pipeline.any_coming():\n test_loss = self.history.run_test(self.model)\n next_test_example = next(test_schedule)\n else:\n test_loss = None\n\n if hasattr(self, 'abort_lock') and self.abort_lock.acquire(blocking=False):\n flush_input()\n if input(\"\\nAbort? 
(y/n) \").lower().strip() == 'y':\n finish_training = True\n exit_code = exit_codes.ABORT # Abort exploration etc.\n exit_message = \"Aborting training...\"\n if os.path.isfile(kill_file):\n os.remove(kill_file)\n finish_training = True\n exit_code = exit_codes.ABORT # Abort exploration etc.\n exit_message = \"Aborting training...\"\n\n if self.example - example_of_last_save >= self.config.training.save_interval \\\n or not self.pipeline.any_coming() or finish_training \\\n or test_loss is not None:\n example_of_last_save = self.example\n save_checkpoint(self.config.training.save_prefix,\n self.epoch, self.batch_in_epoch,\n self.model, self.config.model.kwargs,\n self.optimizer,\n self.config.all_elements, test_loss)\n self.history.save()\n if self.config.training.recent_checkpoints:\n cull_checkpoints(self.config.training.save_prefix,\n self.config.training.recent_checkpoints,\n self.config.training.best_checkpoints)\n\n #if self.config.parallel\n # if not self.main_kill_switch.acquire(block=False):\n # self.main_kill_switch.release()\n # exit_message = \"One of the auxiliary threads crashed\"\n # self.finish(exit_code=exit_code, verbose=verbose)\n # self.main_kill_switch.release()\n \n if finish_training:\n print(\"\\n\" + exit_message)\n self.finish(exit_code=exit_code, verbose=verbose)\n \n t_other = time.time() - time2\n #print(f\"oth={t_other:.4f} \", end=\"\")\n \n self.epoch += 1\n self.example_in_epoch = 0\n self.batch_in_epoch = 1\n \n if self.epoch > self.config.training.epoch_limit:\n print(f\"\\nFinished training {self.config.training.epoch_limit} epochs.\")\n self.finish() \n \n except Exception as e:\n print(\"Main process crashed with error:\")\n print(f\" {e}\")\n print(\"Traceback:\")\n traceback.print_tb(e.__traceback__)\n print(\"Aborting training...\")\n self.finish(3)\n \n\n def finish(self, exit_code=0, verbose=True):\n if verbose: print(\"Cleaning up...\")\n if self.config.parallel:\n self.gpu_kill_switch.release()\n self.history.close()\n if hasattr(self, 'listener'):\n self.listener.stop()\n if verbose: print(\"Closing pipeline...\")\n self.pipeline.close()\n if self.config.training.use_wandb:\n if verbose: print(\"Finishing WandB...\")\n self.wandb_run.finish()\n if verbose: print(f\"Exiting with exit code {exit_code}.\")\n os._exit(exit_code) \n \n\n# auxiliary training processes (not the main one)\ndef aux_train(rank, world_size, pipeline, main_kill_switch, gpu_kill_switch, learning_rate,\n model, #model_kwargs, model_state_dict=None,\n optimizer_state_dict=None,\n train_dynamic=False, dynamic_ratio=1.,\n preload=1, jit_recompile=2):\n try:\n rank += 1 # We already have a \"main\" process which should take rank 0\n print(f\"GPU rank {rank} reporting for duty.\")\n\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n dist.init_process_group(backend=\"nccl\", rank=rank, world_size=world_size)#, group_name=\"train\")\n\n device = f\"cuda:{rank}\"\n torch.cuda.set_device(rank)\n \n #use_tensor_constraint = False if model_kwargs['Rs_out'] == [(1,0,1)] else True\n\n #model = VariableParityNetwork(**model_kwargs)\n #if model_state_dict is not None:\n # model.load_state_dict(model_state_dict)\n model.to(device)\n model = DistributedDataParallel(model, device_ids=[rank], output_device=rank)\n\n dist.barrier()\n \n if jit_recompile < float('inf'):\n torch._C._jit_set_bailout_depth(jit_recompile)\n\n optimizer = torch.optim.Adam(model.parameters(), learning_rate)\n if optimizer_state_dict is not None:\n 
optimizer.load_state_dict(optimizer_state_dict)\n\n verbose = False #True if rank==1 else False\n \n if verbose: print(f\"[{rank}]: pipeline.batch_queue = {pipeline.batch_queue}\", flush=True)\n\n data_queue = deque(maxlen=preload)\n while True:\n time1 = time.time()\n while (len(data_queue) < preload and pipeline.any_coming()) or len(data_queue)==0:\n data = pipeline.get_batch().to(device)\n data_queue.appendleft(data)\n wait_time = time.time() - time1\n\n if train_dynamic:\n _, _ = train_dynamic_batch(\n data_queue, model, optimizer, dynamic_ratio)\n else:\n _ = train_batch(data_queue, model, optimizer)\n \n if gpu_kill_switch.acquire(False):\n return\n \n except Exception as e:\n print(f\"Auxiliary process {rank} crashed with error:\")\n print(f\" {e}\")\n print(f\"Parent process: {mp.parent_process()}\")\n print(\"Traceback:\")\n traceback.print_tb(e.__traceback__)\n print(f\"Exiting with exit code {3 + rank}\")\n main_kill_switch.acquire()\n #pipeline.close()\n #os.kill(mp.parent_process(), 3 + rank)\n #os._exit(3 + rank)\n\ndef flush_input():\n try:\n import msvcrt\n while msvcrt.kbhit():\n msvcrt.getch()\n except ImportError:\n pass\n #import sys, termios #for linux/unix\n #termios.tcflush(sys.stdin, termios.TCIOFLUSH) \n\n\nif __name__ == '__main__':\n mp.freeze_support()\n trainer = Trainer()\n", "id": "10622560", "language": "Python", "matching_score": 5.915186405181885, "max_stars_count": 0, "path": "train/training.py" }, { "content": "\nfrom glob import glob\nimport os\nimport torch\nfrom variable_networks import VariableParityNetwork\n\ndef get_latest_checkpoint(save_prefix, model=None, all_elements=None, model_kwargs=None, verbose=True):\n \"\"\"\n Gets the latest checkpoint file, and returns it as a model, if possible.\n Args:\n save_prefix: valid checkpoints should have names\n save_prefix + \"-*-checkpoint.torch\"\n model: model to fill with checkpoint. If None,\n will create one with get_untrained_model(...)\n all_elements: sequence of elements from config file, to check\n against loaded checkpoint. If contents are the same,\n but order differs, uses order from checkpoint, for\n training consistency\n model_kwargs: model args from the config file, to check against\n loaded checkpoint\n \"\"\" \n files = newest_checkpoints(save_prefix)\n if len(files) == 0:\n #print(f\"No checkpoints matching '{save_prefix + '-*-checkpoint.torch'}'!\")\n #if input(\"Restart training, OVERWRITING any previous training history? (y/n) \").lower().strip() != 'y':\n # os._exit()\n return None\n model_filename = files[0]\n if verbose: print(f\"Loading model from {model_filename}... 
\")\n model_dict = torch.load(model_filename)\n \n assert model_kwargs and model_kwargs == model_dict[\"model_kwargs\"], \\\n \"Loaded model hyperparameters don't match config file.\"\n if all_elements:\n assert set(all_elements) == set(model_dict['all_elements']), \"Loaded model elements and config elements don't match!\"\n if model is None:\n model = get_untrained_model(model_kwargs, save_prefix)\n model.all_elements = model_dict['all_elements'] \n model.load_state_dict(model_dict[\"state_dict\"])\n return model\n \ndef get_untrained_model(model_kwargs=None, save_prefix=None, save=False):\n \"\"\"\n Loads or creates an untrained model.\n Must provide either a save-prefix to a valid model file,\n or some model kwargs.\n Preferentially loads an existing untrained model, over building one.\n \"\"\"\n if save_prefix is not None:\n save_file = os.path.join(os.path.dirname(save_prefix), \"model.pt\")\n if save_prefix and os.path.isfile(save_file):\n model = torch.load(save_file)\n elif model_kwargs:\n model = VariableParityNetwork(**model_kwargs)\n else:\n raise ValueError(\"You need to give either model_kwargs, or a save-prefix to a valid untrained model.\") \n if save:\n torch.save(model, save_file)\n return model\n\ndef put_model_to_device(model, device='gpu', kill=True, error_code=3):\n \"\"\"\n Tries moving model to device\n Args:\n model: model to put to GPU (other other device)\n device: send model to this device, defult \"gpu\"\n kill: whether to exit program on CUDA exception,\n or just return error code.\n error_code: to return if CUDA exception\n \"\"\"\n try:\n model.to(device)\n except RuntimeError as e:\n if 'CUDA' in e.args[0]:\n print(f\"Model couldn't fit in CUDA memory!\")\n print(e.args[0])\n traceback.print_tb(e.__traceback__)\n if kill:\n os._exit(error_code)\n else:\n return error_code\n else:\n raise\n return None\n \ndef get_optimizer(model, learning_rate, optimizer_state_dict=None, verbose=False):\n \"\"\"\n Returns an ADAM optimizer attached to the given model\n (and stored on the same device).\n If optimizer_state_dict is not None, it will fill in the optimizer state.\n However, learning_rate will override and saved optimizer state (to allow\n you to adjust during training.)\n \n \"\"\"\n if verbose: print(\"Building optimizer... \", end='')\n optimizer = torch.optim.Adam(model.parameters(), learning_rate)\n if optimizer_state_dict:\n optimizer.load_state_dict(optimizer_state_dict)\n # allow for change in learning rate:\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n if verbose: print(\"Done.\")\n return optimizer\n\n \n# in python 3.9+ this is a built-in function, but python 3.9 is rare in the wild right now:\ndef remove_prefix(s, prefix):\n if s.startswith(prefix):\n return s[len(prefix):]\n return s\n\ndef save_checkpoint(save_prefix, epoch, batch, model, model_kwargs, optimizer, all_elements, test_loss, verbose=True):\n \"saves a model and optimizer to disk\"\n filename = save_prefix\n if test_loss is not None:\n filename += f\"-a{test_loss:.3f}\"\n filename += f\"-e{epoch:03d}-b{batch:05}-checkpoint.torch\"\n model_dict = {\n 'state_dict' : {remove_prefix(key, \"module.\"):value for key,value in model.state_dict().items()},\n 'model_kwargs' : model_kwargs,\n 'optimizer_state_dict' : optimizer.state_dict(),\n 'all_elements' : all_elements\n }\n if verbose: print(f\"\\nSaving model to {filename}... 
\", end='')\n torch.save(model_dict, filename)\n file_size = os.path.getsize(filename) / 1E6\n if verbose: print(f\"occupies {file_size:.2f} MB.\")\n \ndef checkpoint_test_loss(filename):\n \"\"\"\n Reads the checkpoint test loss off of the filename\n and returns it. Returns float('inf') if not possible\n \"\"\"\n par = filename.split('-')\n if len(par) < 5:\n return float('inf')\n try:\n loss = par[-4][1:]\n loss = float(loss)\n except:\n return float('inf')\n return loss\n\ndef best_checkpoints(save_prefix, number):\n \"\"\"\n Returns a sorted list of the best\n checkpoints, ascending on test_loss\n\n Args:\n save_prefix: valid checkpoints should have names\n save_prefix + \"-*-checkpoint.torch\"\n number: return at most this many filenames\n \"\"\"\n return sorted(glob(save_prefix + \"-*-checkpoint.torch\"), key=checkpoint_test_loss)[:number]\n \ndef newest_checkpoints(save_prefix, number=1, exclude=set()):\n \"\"\"\n Returns a sorted list of the {number} newest\n checkpoints, or fewer if fewer exist.\n Newest first.\n \n Args:\n save_prefix: valid checkpoints should have names\n save_prefix + \"-*-checkpoint.torch\"\n number: return at most this many filenames\n \"\"\"\n newest = sorted(glob(save_prefix + \"-*-checkpoint.torch\"), key = os.path.getmtime, reverse=True)\n return [f for f in newest if f not in exclude][:number]\n \n \ndef newest_checkpoint(save_prefix):\n l = newest_checkpoints(save_prefix, number=1)\n if len(l) == 0: return None\n else: return l[0]\n\ndef cull_checkpoints(save_prefix, newest, best):\n \"\"\"\n Deletes those checkpoint files of the form\n {save_prefix + \"-*-checkpoint.torch\"}\n which are neither among the {newest} newest\n nor the {best} best.\n \"\"\"\n best = best_checkpoints(save_prefix, best)\n newest = newest_checkpoints(save_prefix, newest, exclude=best)\n \n for f in glob(save_prefix + \"-*-checkpoint.torch\"):\n if (f not in newest) and (f not in best):\n os.remove(f)\n \n \n\n\n", "id": "2896720", "language": "Python", "matching_score": 1.1718003749847412, "max_stars_count": 0, "path": "train/model_utils.py" }, { "content": "from filelock import FileLock, Timeout\r\nimport torch\r\n\r\nclass CUDAallocator:\r\n \r\n def __init__(self, num_gpus=1, min_gpus=None):\r\n if not torch.cuda.is_available():\r\n raise RuntimeError(\"CUDA is not available on this machine.\")\r\n self.count = torch.cuda.device_count()\r\n print(f\"CUDA device count: {self.count}\")\r\n self.num_gpus = num_gpus\r\n self.min_gpus = min_gpus\r\n self.locks = [None] * self.count\r\n self.gpus = []\r\n \r\n \r\n def acquire_gpu(self):\r\n for i in range(self.count):\r\n try:\r\n self.locks[i] = FileLock(f\"cuda{i}.lock\", timeout=0)\r\n self.locks[i].acquire()\r\n self.gpus.append(i)\r\n self.gpus.sort()\r\n return i\r\n except Timeout:\r\n continue\r\n else:\r\n raise ValueError(\"No free CUDA devices!\")\r\n \r\n def release_gpu(self, i):\r\n self.locks[i].release()\r\n self.locks[i] = None\r\n self.gpus.remove(i)\r\n \r\n def get_gpus(self, num_gpus=None, min_gpus=None):\r\n if num_gpus is None:\r\n num_gpus = self.num_gpus\r\n if min_gpus is None:\r\n min_gpus = self.min_gpus\r\n if min_gpus is None:\r\n min_gpus = num_gpus\r\n \r\n gpus=[]\r\n for j in range(self.num_gpus):\r\n try:\r\n i = self.acquire_gpu()\r\n gpus.append(i)\r\n except:\r\n if len(gpus) >= min_gpus:\r\n break\r\n else:\r\n raise\r\n \r\n return gpus\r\n \r\n def clear_gpus(self):\r\n for i in list(self.gpus):\r\n self.release_gpu(i)\r\n\r\n\r\n", "id": "11125483", "language": "Python", "matching_score": 
1.4785128831863403, "max_stars_count": 0, "path": "train/cuda_allocator.py" }, { "content": "from filelock import FileLock, Timeout\r\nimport os\r\nimport time\r\n\r\nclass ProcessFileLock(FileLock):\r\n \"\"\"\r\n FileLock that is unique per path in each process (for, eg., reentrance)\r\n \"\"\"\r\n locks = {}\r\n def __new__(cls, path, *args, **kwargs):\r\n if path in ProcessFileLock.locks:\r\n return ProcessFileLock.locks[path]\r\n else:\r\n lock = super().__new__(cls, path, *args, **kwargs)\r\n lock.__new_init__(path, *args, **kwargs)\r\n ProcessFileLock.locks[path] = lock\r\n return lock\r\n \r\n def __new_init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n \r\n def __init__(self, *args, **kwargs):\r\n pass\r\n\r\nclass ExplosiveFileLock(ProcessFileLock):\r\n def acquire(self, *args, **kwargs):\r\n r = super().acquire(*args, **kwargs)\r\n if self._lock_counter > 1:\r\n raise BlockingIOError(f\"Process attempted to reacquire lock for {self._lock_file}\")\r\n return r\r\n \r\nclass HistoriesLock(FileLock):\r\n def __init__(self, dir, ensemble=None):\r\n super().__init__(os.path.join(dir, \"histories.lock\"))\r\n self.ensemble = ensemble\r\n\r\n def release(self, **kwargs):\r\n super().release()\r\n if self.ensemble and self._lock_counter == 0:\r\n self.ensemble.close_histories()\r\n\r\nclass SamplesLock(FileLock): \r\n def __init__(self, dir, ensemble=None):\r\n super().__init__(os.path.join(dir, \"samples.lock\"))\r\n self.ensemble = ensemble\r\n\r\n def release(self, **kwargs):\r\n if self.ensemble and self._lock_counter == 1:\r\n self.ensemble._test_samples.close()\r\n self.ensemble._test_samples = None\r\n super().release()\r\n \r\n def __enter__(self):\r\n print(\"Acquiring samples lock... \", end='')\r\n super().__enter__()\r\n if self.ensemble._test_samples is None:\r\n from sample_hyperparameters import TrainableSampleGenerator\r\n self.ensemble._test_samples = TrainableSampleGenerator(self.ensemble.config.exploration.sample_file, configs=self.ensemble.config_files, stub=self.ensemble.stub)\r\n print(\"Done.\")\r\n return self.ensemble._test_samples\r\n\r\n \r\nclass ExistLock:\r\n \"\"\"\r\n Locks on the existence of the given file.\r\n No guarantees of atomicity!\r\n Unique per process, for reentry\r\n \"\"\"\r\n locks={}\r\n def __new__(cls, path, *args, **kwargs):\r\n if path in ExistLock.locks:\r\n lock = ExistLock.locks[path]\r\n #print(f\"Reloading ExistLock('{path}')\")\r\n #print(f\" Lock counter = {lock._lock_counter}\")\r\n return lock\r\n else:\r\n #print(f\"Creating new ExistLock('{path}')\")\r\n lock = super().__new__(cls)\r\n lock.__new_init__(path, *args, **kwargs)\r\n ExistLock.locks[path] = lock\r\n return lock\r\n \r\n def __new_init__(self, path, block=True, timeout=None, polling_interval=.05):\r\n self.path = path\r\n if not block:\r\n timeout == 0.0\r\n else:\r\n self.timeout=timeout\r\n self.polling_interval=polling_interval\r\n self._lock_counter = 0\r\n \r\n def acquire(self, block=None, timeout=None):\r\n \"\"\"\r\n Not atomic. 
Should probably happen within the context of an\r\n atomic lock.\r\n \"\"\"\r\n if block == False:\r\n timeout = 0.0\r\n if timeout is None:\r\n timeout = self.timeout\r\n \r\n #print(f\"Trying to acquire ExistLock('{self.path}')...\")\r\n #print(f\" Lock counter = {self._lock_counter}\")\r\n\r\n start_time = time.time()\r\n while os.path.isfile(self.path):\r\n if self._lock_counter > 0:\r\n self._lock_counter += 1\r\n #print(f\"Acquired, lock counter = {self._lock_counter}\")\r\n return True\r\n if timeout is None or time.time() - start_time < timeout:\r\n time.sleep(self.polling_interval)\r\n else:\r\n return False\r\n \r\n with open(self.path, 'w'):\r\n self._lock_counter = 1\r\n #print(f\"Acquired, lock counter = {self._lock_counter}\")\r\n return True\r\n \r\n def release(self):\r\n self._lock_counter = min(0, self._lock_counter - 1)\r\n if self._lock_counter == 0 and os.path.isfile(self.path):\r\n os.remove(self.path)\r\n \r\n def __enter__(self):\r\n if self.acquire():\r\n return self\r\n else:\r\n raise Timeout(f\"Failed to acquire ExistLock for file {self.path}\")\r\n \r\n def __exit__(self, type, value, traceback):\r\n self.release()\r\n\r\n", "id": "9455", "language": "Python", "matching_score": 0.7080676555633545, "max_stars_count": 0, "path": "train/filelocks.py" }, { "content": "\"\"\"\nMostly copied from wandb client code\nModified \"next_sample\" code to do the following:\n-accepts a 'failure_cost' argument\n-if failure cost 'c' is nonzero, modifies expected improvement of each\n sample according to:\n e' = p e / (p (1-c) + c)\n where 'p' is probability of success and 'e' is unmodified expected improvement\n-returns expected improvements for whole sample\n\nBayesian Search\nCheck out https://arxiv.org/pdf/1206.2944.pdf\n for explanation of bayesian optimization\nWe do bayesian optimization and handle the cases where some X values are integers\nas well as the case where X is very large.\n\"\"\"\n\nimport numpy as np\n#from sklearn.gaussian_process import GaussianProcessRegressor\n#from sklearn.gaussian_process.kernels import Matern\n#import scipy.stats as stats\nimport math\n#from wandb.util import get_module\n#from wandb.sweeps.base import Search\n#from wandb.sweeps.params import HyperParameter, HyperParameterSet\n\n#sklearn.gaussian = get_module('sklearn.gaussian_process')\n#sklearn.linear = get_module('sklearn.linear_model')\n#sklearn.svm = get_module('sklearn.svm')\n#sklearn.discriminant = get_module('sklearn.discriminant_analysis')\n#scipy.stats = get_module('scipy.stats')\n\nimport sklearn.gaussian_process as gaussian\nimport sklearn.linear_model as linear_model\nimport sklearn.svm as svm\nimport sklearn.discriminant_analysis as discriminant\nimport scipy.stats\n\n\ndef fit_normalized_gaussian_process(X, y, nu=1.5):\n \"\"\"\n We fit a gaussian process but first subtract the mean and divide by stddev.\n To undo at prediction tim, call y_pred = gp.predict(X) * y_stddev + y_mean\n \"\"\"\n gp = gaussian.GaussianProcessRegressor(\n kernel=gaussian.kernels.Matern(nu=nu), n_restarts_optimizer=2, alpha=0.0000001, random_state=2\n )\n if len(y) == 1:\n y = np.array(y)\n y_mean = y[0]\n y_stddev = 1\n else:\n y_mean = np.mean(y)\n y_stddev = np.std(y) + 0.0001\n y_norm = (y - y_mean) / y_stddev\n gp.fit(X, y_norm)\n return gp, y_mean, y_stddev\n \ndef train_logistic_regression(X, y):\n lr = linear.LogisticRegression()\n lr.fit(X, y.astype(int))\n return lambda X : lr.predict_proba(X)[...,1], 0, 1\n \ndef train_rbf_svm(X, y):\n svc = svm.SVC(probability=True)\n svc.fit(X, 
y.astype(int))\n return lambda X : svc.predict_proba(X)[...,1], 0, 1\n \ndef train_qda(X,y):\n qda = discriminant.QuadraticDiscriminantAnalysis()\n qda.fit(X, y.astype(int))\n return lambda X : qda.predict_proba(X)[...,1], 0, 1\n \n\n\ndef sigmoid(x):\n return np.exp(-np.logaddexp(0, -x))\n\n\ndef random_sample(X_bounds, num_test_samples):\n num_hyperparameters = len(X_bounds)\n test_X = np.empty((num_test_samples, num_hyperparameters))\n for ii in range(num_test_samples):\n for jj in range(num_hyperparameters):\n if type(X_bounds[jj][0]) == int:\n assert (type(X_bounds[jj][1]) == int)\n test_X[ii, jj] = np.random.randint(\n X_bounds[jj][0], X_bounds[jj][1])\n else:\n test_X[ii, jj] = np.random.uniform() * (\n X_bounds[jj][1] - X_bounds[jj][0]\n ) + X_bounds[\n jj\n ][\n 0\n ]\n return test_X\n\n\ndef predict(X, y, test_X, nu=1.5):\n gp, norm_mean, norm_stddev = fit_normalized_gaussian_process(X, y, nu=nu)\n y_pred, y_std = gp.predict([test_X], return_std=True)\n y_std_norm = y_std * norm_stddev\n y_pred_norm = (y_pred * norm_stddev) + norm_mean\n return y_pred_norm[0], y_std_norm[0]\n\n\ndef train_runtime_model(sample_X, runtimes, X_bounds, nu=1.5, model='gaussian'):\n if sample_X.shape[0] != runtimes.shape[0]:\n raise ValueError(\"Sample X and runtimes must be the same length\")\n\n if model=='gaussian':\n return train_gaussian_process(sample_X, runtimes, X_bounds, nu=nu)\n elif model=='logistic' and runtimes.any() and not runtimes.all():\n return train_logistic_regression(sample_X, runtimes)\n elif model=='rbf_svm' and runtimes.any() and not runtimes.all():\n return train_rbf_svm(sample_X, runtimes)\n elif model=='qda' and runtimes.sum() > 1 and runtimes.sum() < len(runtimes) - 1:\n return train_qda(sample_X, runtimes)\n else:\n return None, 0, 1\n\n\n#def train_failure_model(sample_X, failures, X_bounds):\n# if sample_X.shape[0] != failures.shape[0]:\n# raise ValueError(\"Sample X and runtimes must be the same length\")\n#\n# return train_gaussian_process(sample_X, runtimes, X_bounds)\n\n\ndef train_gaussian_process(\n sample_X, sample_y, X_bounds, current_X=None, nu=1.5, max_samples=100\n):\n \"\"\"\n Trains a Gaussian Process function from sample_X, sample_y data\n Handles the case where there are other training runs in flight (current_X)\n Arguments:\n sample_X - vector of already evaluated sets of hyperparameters\n sample_y - vector of already evaluated loss function values\n X_bounds - minimum and maximum values for every dimension of X\n current_X - hyperparameters currently being explored\n nu - input to the Matern function, higher numbers make it smoother 0.5, 1.5, 2.5 are good values\n see http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html\n Returns:\n gp - the gaussian process function\n y_mean - mean\n y_stddev - stddev\n To make a prediction with gp on real world data X, need to call:\n (gp.predict(X) * y_stddev) + y_mean\n \"\"\"\n if current_X is not None:\n current_X = np.array(current_X)\n if len(current_X.shape) != 2:\n raise ValueError(\"Current X must be a 2 dimensional array\")\n\n # we can't let the current samples be bigger than max samples\n # because we need to use some real samples to build the curve\n if current_X.shape[0] > max_samples - 5:\n print(\n \"current_X is bigger than max samples - 5 so dropping some currently running parameters\"\n )\n current_X = current_X[:(max_samples - 5), :]\n if len(sample_y.shape) != 1:\n raise ValueError(\"Sample y must be a 1 dimensional array\")\n\n if sample_X.shape[0] != 
sample_y.shape[0]:\n raise ValueError(\n \"Sample X and sample y must be the same size {} {}\".format(\n sample_X.shape[0], sample_y.shape[0]\n )\n )\n\n if X_bounds is not None and sample_X.shape[1] != len(X_bounds):\n raise ValueError(\n \"Bounds must be the same length as Sample X's second dimension\"\n )\n\n # gaussian process takes a long time to train, so if there's more than max_samples\n # we need to sample from it\n if sample_X.shape[0] > max_samples:\n sample_indices = np.random.randint(sample_X.shape[0], size=max_samples)\n X = sample_X[sample_indices]\n y = sample_y[sample_indices]\n else:\n X = sample_X\n y = sample_y\n gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)\n if current_X is not None:\n # if we have some hyperparameters running, we pretend that they return\n # the prediction of the function we've fit\n X = np.append(X, current_X, axis=0)\n current_y_fantasy = (gp.predict(current_X) * y_stddev) + y_mean\n y = np.append(y, current_y_fantasy)\n gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=nu)\n return gp.predict, y_mean, y_stddev\n\n\ndef filter_weird_values(sample_X, sample_y):\n is_row_finite = ~(np.isnan(sample_X).any(axis=1) | np.isnan(sample_y))\n sample_X = sample_X[is_row_finite, :]\n sample_y = sample_y[is_row_finite]\n return sample_X, sample_y\n\n\ndef next_sample(\n sample_X,\n sample_y,\n X_bounds=None,\n runtimes=None,\n failures=None,\n current_X=None,\n nu=1.5,\n max_samples_for_gp=100,\n improvement=0.01,\n num_points_to_try=1000,\n opt_func=\"expected_improvement\",\n failure_cost=0,\n test_X=None,\n):\n \"\"\"\n Calculates the best next sample to look at via bayesian optimization.\n Check out https://arxiv.org/pdf/1206.2944.pdf\n for explanation of bayesian optimization\n Arguments:\n sample_X - 2d array of already evaluated sets of hyperparameters\n sample_y - 1d array of already evaluated loss function values\n X_bounds - 2d array minimum and maximum values for every dimension of X\n runtimes - vector of length sample_y - should be the time taken to train each model in sample X\n failures - vector of length sample_y - should be True for models where training failed and False where\n training succeeded. This model will throw out NaNs and Infs so if you want it to avaoid\n failure values for X, use this failure vector.\n current_X - hyperparameters currently being explored\n nu - input to the Matern function, higher numbers make it smoother 0.5, 1.5, 2.5 are good values\n see http://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html\n max_samples_for_gp - maximum samples to consider (since algo is O(n^3)) for performance, but also adds some randomness\n improvement - amount of improvement to optimize for -- higher means take more exploratory risks\n num_points_to_try - number of X values to try when looking for value with highest\n expected probability of improvement\n opt_func - one of {\"expected_improvement\", \"prob_of_improvement\"} - whether to optimize expected\n improvement of probability of improvement. Expected improvement is generally better - may want\n to remove probability of improvement at some point. 
(But I think prboability of improvement\n is a little easier to calculate)\n test_X - X values to test when looking for the best values to try\n Returns:\n suggested_X - X vector to try running next\n suggested_X_prob_of_improvement - probability of the X vector beating the current best\n suggested_X_predicted_y - predicted output of the X vector\n test_X - 2d array of length num_points_to_try by num features: tested X values\n y_pred - 1d array of length num_points_to_try: predicted values for test_X\n y_pred_std - 1d array of length num_points_to_try: predicted std deviation for test_X\n e_i - expected improvement\n prob_of_improve 1d array of lenth num_points_to_try: predicted porbability of improvement\n prob_of_failure 1d array of predicted probabilites of failure\n suggested_X_prob_of_failure\n expected_runtime 1d array of expected runtimes\n \"\"\"\n # Sanity check the data\n sample_X = np.array(sample_X)\n sample_y = np.array(sample_y)\n failures = np.array(failures)\n if test_X is not None:\n test_X = np.array(test_X)\n if len(sample_X.shape) != 2:\n raise ValueError(\"Sample X must be a 2 dimensional array\")\n\n if len(sample_y.shape) != 1:\n raise ValueError(\"Sample y must be a 1 dimensional array\")\n\n if sample_X.shape[0] != sample_y.shape[0]:\n raise ValueError(\"Sample X and y must be same length\")\n\n if test_X is not None:\n # if test_X is set, usually this is for simulation/testing\n if X_bounds is not None:\n raise ValueError(\"Can't set test_X and X_bounds\")\n\n else:\n # normal case where we randomly sample our test_X\n if X_bounds is None:\n raise ValueError(\"Must pass in test_X or X_bounds\")\n\n filtered_X, filtered_y = filter_weird_values(sample_X, sample_y)\n # We train our runtime prediction model on *filtered_X* throwing out the sample points with\n # NaN values because they might break our runtime predictor\n runtime_model = None\n if runtimes is not None:\n runtime_filtered_X, runtime_filtered_runtimes = filter_weird_values(\n sample_X, runtimes\n )\n if runtime_filtered_X.shape[0] >= 2:\n runtime_model, runtime_model_mean, runtime_model_stddev = train_runtime_model(\n runtime_filtered_X, runtime_filtered_runtimes, X_bounds\n )\n # We train our failure model on *sample_X*, all the data including NaNs\n # This is *different* than the runtime model.\n failure_model = None\n if failures is not None and sample_X.shape[0] >= 2:\n failure_filtered_X, failure_filtered_y = filter_weird_values(\n sample_X, failures\n )\n if failure_filtered_X.shape[0] >= 2:\n failure_model, failure_model_mean, failure_model_stddev = train_runtime_model(\n failure_filtered_X, failure_filtered_y, X_bounds, model='rbf_svm'#'logistic'\n )\n # we can't run this algothim with less than two sample points, so we'll\n # just return a random point\n if filtered_X.shape[0] < 2:\n if test_X is not None:\n # pick a random row from test_X\n row = np.random.choice(test_X.shape[0])\n X = test_X[row, :]\n else:\n X = random_sample(X_bounds, 1)[0]\n if filtered_X.shape[0] < 1:\n prediction = 0.0\n else:\n prediction = filtered_y[0]\n return X, 1.0, prediction, None, None, None, None, None, None, None\n\n # build the acquisition function\n gp, y_mean, y_stddev, = train_gaussian_process(\n filtered_X, filtered_y, X_bounds, current_X, nu, max_samples_for_gp\n )\n # Look for the minimum value of our fitted-target-function + (kappa * fitted-target-std_dev)\n if test_X is None: # this is the usual case\n test_X = random_sample(X_bounds, num_points_to_try)\n y_pred, y_pred_std = gp(test_X, 
return_std=True)\n if failure_model is None:\n prob_of_failure = np.zeros(len(test_X))\n else:\n prob_of_failure = failure_model(\n test_X\n ) * failure_model_stddev + failure_model_mean\n #print(f\"prob_of_failure: {prob_of_failure}\")\n k = 2\n a = 2\n prob_of_failure = a * prob_of_failure**k / (a * prob_of_failure**k + (1 - prob_of_failure)**k)\n if runtime_model is None:\n expected_runtime = [0.0] * len(test_X)\n else:\n expected_runtime = runtime_model(\n test_X\n ) * runtime_model_stddev + runtime_model_mean\n # best value of y we've seen so far. i.e. y*\n min_unnorm_y = np.min(filtered_y)\n # hack for dealing with predicted std of 0\n epsilon = 0.00000001\n if opt_func == \"probability_of_improvement\":\n # might remove the norm_improvement at some point\n # find best chance of an improvement by \"at least norm improvement\"\n # so if norm_improvement is zero, we are looking for best chance of any\n # improvment over the best result observerd so far.\n #norm_improvement = improvement / y_stddev\n min_norm_y = (min_unnorm_y - y_mean) / y_stddev - improvement\n distance = (y_pred - min_norm_y)\n std_dev_distance = (y_pred - min_norm_y) / (y_pred_std + epsilon)\n prob_of_improve = sigmoid(-std_dev_distance)\n if failure_cost > 0:\n prob_of_success = 1 - prob_of_failure\n prob_of_improve *= prob_of_success\n best_test_X_index = np.argmax(prob_of_improve)\n e_i = np.zeros_like(prob_of_improve)\n elif opt_func == \"expected_improvement\":\n min_norm_y = (min_unnorm_y - y_mean) / y_stddev\n Z = -(y_pred - min_norm_y) / (y_pred_std + epsilon)\n prob_of_improve = scipy.stats.norm.cdf(Z)\n e_i = -(y_pred - min_norm_y) * scipy.stats.norm.cdf(Z) + y_pred_std * scipy.stats.norm.pdf(\n Z\n )\n if failure_cost != 0:\n prob_of_success = 1 - prob_of_failure\n e_i = e_i * prob_of_success / (prob_of_failure * failure_cost + prob_of_success)\n #e_i = e_i * (prob_of_failure < failure_cost)\n best_test_X_index = np.argmax(e_i)\n # TODO: support expected improvement per time by dividing e_i by runtime\n suggested_X = test_X[best_test_X_index]\n suggested_X_prob_of_improvement = prob_of_improve[best_test_X_index]\n suggested_X_predicted_y = y_pred[best_test_X_index] * y_stddev + y_mean\n unnorm_y_pred = y_pred * y_stddev + y_mean\n unnorm_y_pred_std = y_pred_std * y_stddev\n unnorm_e_i = e_i * y_stddev\n suggested_X_prob_of_failure = prob_of_failure[best_test_X_index]\n return (\n suggested_X,\n suggested_X_prob_of_improvement,\n suggested_X_predicted_y,\n test_X,\n unnorm_y_pred,\n unnorm_y_pred_std,\n unnorm_e_i,\n prob_of_improve,\n prob_of_failure,\n suggested_X_prob_of_failure,\n expected_runtime,\n )\n\n\ndef target(x):\n return np.exp(-(x - 2) ** 2) + np.exp(-(x - 6) ** 2 / 10) + 1 / (x ** 2 + 1)\n\n\ndef test():\n import matplotlib\n import matplotlib.pyplot as plt\n from matplotlib import cm\n from mpl_toolkits.mplot3d import Axes3D\n from time import sleep\n \n def function(X):\n X = X.copy()\n X[0] = 1 - X[0]\n if np.sum(X) <= 1: #np.dot(X, X) <= 1:\n return -np.dot(X,X) #-np.sum(X).item()\n else:\n return float(\"nan\")\n \n X_bounds = [(0.0,1.0), (0.0,1.0)]\n sample_X = []\n sample_y = []\n failures = []\n failure_cost = .5\n \n # generate samples\n print(\"Generating random samples... 
\", end='')\n samples = np.zeros((1000,2))\n for i in range(1000):\n print(f\"{i:4d}\\b\\b\\b\\b\", end='')\n X = np.random.random(size=2)\n while np.isnan(function(X)):\n X = np.random.random(size=2)\n samples[i] = X\n print(\"Done.\")\n \n \n n_x0 = 40\n n_x1 = 40\n X_grid_0, X_grid_1 = np.meshgrid(np.linspace(0,1,n_x0), np.linspace(0,1,n_x1))\n X_grid = np.stack((X_grid_0, X_grid_1), axis=-1)\n X_grid_flat = X_grid.reshape(-1,2)\n \n # plotting\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.show(block = False)\n \n #for i in range(50):\n cost = 0\n while True:\n sample_X_array = np.array(sample_X) if len(sample_X) > 0 else np.zeros((0,0))\n sample = next_sample(\n sample_X = sample_X_array,\n sample_y = sample_y,\n #X_bounds = X_bounds,\n test_X = samples,\n failures = failures,\n #failure_cost = failure_cost,\n opt_func = \"probability_of_improvement\"\n )\n next_X = sample[0]\n next_prob_fail = sample[9]\n del sample\n #next_X = np.random.random(size=2)\n next_y = function(next_X)\n\n ax.clear()\n ax.scatter(samples[...,0], samples[...,1], color='black')\n \n if len(failures) - sum(failures) >= 2:\n grid = next_sample(\n sample_X = sample_X_array,\n sample_y = sample_y,\n failures = failures,\n failure_cost = failure_cost,\n test_X = X_grid_flat \n )\n y_pred = grid[4].reshape(n_x0, n_x1)\n prob_fail = grid[8].reshape(n_x0, n_x1)\n del grid\n ax.plot_surface(X_grid_0, X_grid_1, -y_pred, facecolors=cm.coolwarm(prob_fail), alpha=.5)\n #ax.plot_surface(X_grid_0, X_grid_1, prob_fail, facecolors=cm.coolwarm(-y_pred), alpha=.5) \n \n sample_X.append(next_X)\n sample_y.append(next_y)\n failures.append(np.isnan(next_y))\n min_y = np.nanmin(sample_y)\n cost = cost + (failure_cost if np.isnan(next_y) else 1)\n \n #print(next_y, next_prob_fail, min_y)\n #print(sample_y)\n print(f\"[{cost:.1f}]: X = {tuple(next_X)}, y = {next_y if next_y else 0:.4f}, prob_fail = {next_prob_fail if next_prob_fail else 0:.4f}, min_y = {min_y if min_y else 0:.4f}\")\n\n ax.scatter(np.array(sample_X)[...,0], np.array(sample_X)[...,1], -np.array(sample_y), color='red')\n plt.show(block = False)\n if cost >= 40:\n break \n plt.pause(1)\n \n #y_func = np.zeros((n_x0, n_x1))\n #for i in range(n_x0):\n # for j in range(n_x1):\n # y_func[i,j] = function(X_grid[i,j])\n #ax.plot_surface(X_grid_0, X_grid_1, y_pred)#y_pred)#, color=prob_fail)\n #ax.scatter(np.array(sample_X)[...,0], np.array(sample_X)[...,1], np.array(sample_y))\n #plt.show()\n \n input(\"Press Enter to Exit...\")\n\n\nif __name__ == '__main__':\n test()", "id": "1826083", "language": "Python", "matching_score": 1.9591144323349, "max_stars_count": 0, "path": "train/bayes_search.py" }, { "content": "from collections import deque\nfrom collections.abc import Sequence\nfrom functools import partial\nimport gc\n\nimport numpy as np\nimport torch\nimport os\nimport sys\nimport traceback\nimport shutil\n\nfrom resizable import H5Array as Array\nimport h5py\n\nfrom variable_networks import VariableParityNetwork\nfrom training_utils import train_batch\nfrom pipeline import Pipeline\nfrom training_config import Config\nimport exploration_policy\nfrom exploration_policy import random_parameters_and_seed, generate_parameters\n\n\n\nclass TrainableSampleGenerator:\n \n def __init__(self, filename=\"runs/samples.dat\", overwrite=False, use_only=False, configs=['exploration.ini'], num_batches=2, stub=None, model_failure=True):\n self.filename=filename\n self.use_only = use_only\n self.seed_length = len(random_parameters_and_seed()[1])\n 
self.__passes = None\n \n if overwrite and os.path.isfile(filename):\n os.remove(filename)\n self.file = h5py.File(filename, 'a')\n \n file_exists = \"seeds\" in self.file\n if not file_exists:\n if use_only:\n raise ValueError(\"File doesn't exist but trying to run in use-only mode.\")\n self.seeds = Array(self.file, \"seeds\", (0,self.seed_length), dtype=float)\n else:\n self.seeds = Array(self.file, \"seeds\")\n self.passed = Array(self.file, \"passed\", dtype=bool)\n self.used = Array(self.file, \"used\", dtype=bool)\n \n self.model_failure = model_failure\n self._failure_model = None\n self.failure_model_count = 0\n self.failure_model_step = 10\n #self.failure_train_split = failure_train_split\n \n self.num_batches = num_batches\n self.batches = None\n \n if not use_only:\n if not file_exists:\n self.file.attrs[\"configs\"] = repr(configs)\n self.file.attrs[\"stub\"] = stub\n #else:\n self.config_files = eval(self.file.attrs[\"configs\"])\n \n self.stub = self.file.attrs[\"stub\"]\n if self.stub:\n return\n\n self.config = Config(*self.config_files, track_sources=True, use_command_args=False)\n if self.config.training.jit_recompile < float('inf'):\n torch._C._jit_set_bailout_depth(self.config.training.jit_recompile)\n exploration_policy.mul_coeff = self.config.exploration.mul_coeff * self.config.exploration.sample_too_small\n\n def load_batches(self):\n num_examples = self.config.training.batch_size * self.num_batches\n print(\"Initializing data pipeline... \", end='')\n from shuffle import Shuffle\n shuffle = Shuffle.get_shuffle(self.config)\n pipeline = Pipeline(self.config)\n pipeline.set_indices(shuffle.train_set[:num_examples])\n pipeline.start_reading(num_examples, batch_size=self.config.training.batch_size)\n print(\"Done.\")\n print(\"Reading trial batches... \", end='')\n self.batches = []\n for _ in range(self.num_batches):\n self.batches.append(pipeline.get_batch())\n print(\"Done.\")\n print(\"Closing pipeline... 
\", end='')\n pipeline.close()\n del pipeline\n del shuffle\n print(\"Done.\")\n \n def sample(self, num_samples=None, num_passes=None, save_interval=20, verbose=True, verbose_test=False):\n if self.use_only:\n raise RuntimeError(\"Tried to generate samples, but sample generator is in use-only mode!\")\n \n current_samples = len(self.seeds)\n current_passes = self.num_passes\n self.__passes = None\n \n #abort early\n if ((num_samples and current_samples >=num_samples) or\n (num_passes and current_passes >= num_passes)):\n return\n \n distribution = partial(np.random.random_sample, self.seed_length)\n \n while True:\n seed = distribution()\n #print(f\"seed = {seed}\")\n self.seeds.append(seed)\n self.passed.append(False)\n self.used.append(False)\n #if len(self.seeds) < 10:\n #print(seed)\n #print(np.array(self.seeds))\n current_samples += 1\n failed = self.test_sample(seed, verbose=verbose_test)\n self.passed[-1] = not failed\n\n \n if current_samples % save_interval == 0:\n self.save()\n if self.config.exploration.backup_samples:\n shutil.copy(self.filename,self.filename + \".bak\")\n \n if not failed:\n current_passes += 1\n pct = 100 * (current_passes / current_samples)\n if verbose: print(f\"*********** {current_passes:5d} *********** {pct:3.2f}%\")\n if num_passes is not None and current_passes >= num_passes:\n break\n else:\n pct = 100 * (current_passes / current_samples)\n if verbose: print(f\"· · · · · · · · · · · · {pct:3.2f}%\")\n if num_samples is not None and current_samples >= num_samples:\n break\n \n self.save()\n\n def test_sample(self, seed, verbose=True):\n if self.use_only:\n raise RuntimeError(\"Tried to test sample, but sample generator is in use-only mode!\")\n \n failed=0\n settings = generate_parameters(seed)\n if self.stub:\n return self.test_stub(settings)\n \n if self.batches is None:\n self.load_batches()\n #if verbose>1:\n # from exploration_policy import show_model\n # show_model(settings)\n #if verbose>2:\n # print(f\" -- {traceback.print_exc()}\")\n \n if verbose: print(\"Building and compiling model. \", end='')\n model_kwargs = self.config.model.kwargs.copy()\n model_kwargs.update(settings['model'])\n try:\n model = VariableParityNetwork(**model_kwargs)\n except Exception as e:\n print(f\"\\nError while building and optimizing model on CPU:\")\n if verbose>2:\n print(f\" -- {traceback.print_exc()}\")\n return 4\n print(\" Done.\")\n try: \n model.to(self.config.device)\n except Exception as e:\n print(f\"Failure moving model to GPU\")\n if verbose>2:\n print(f\" -- {traceback.print_exc()}\")\n if verbose:\n print(f\"\\n!!!! Failed: {e}\")\n return 4\n \n if verbose: print(\"Building optimizer... \", end='')\n try:\n optimizer = torch.optim.Adam(model.parameters(), self.config.training.learning_rate)\n except Exception as e:\n if verbose: print(f\"\\n!!!! Failed: {e}\")\n return 5\n \n b=0\n data_queue = deque(maxlen=self.config.data.batch_preload)\n losses = None\n \n if verbose: print(\"Training Batch\", end='')\n try:\n for i in range(self.num_batches):\n while len(data_queue) < self.config.data.batch_preload and b < len(self.batches):\n data_queue.appendleft(self.batches[b].to(self.config.device))\n b += 1\n \n if verbose: print(f\" {i+1}\", end='')\n losses = train_batch(data_queue, model, optimizer)\n \n except RuntimeError as e:\n if 'CUDA' in e.args[0]:\n failed = 3\n else:\n failed = 6\n \n del losses\n del model\n del optimizer\n del data_queue\n gc.collect()\n #torch.cuda.empty_cache()\n \n if failed:\n if verbose: print(\"\\n!!!! 
Failed !!!!\")\n else:\n if verbose: print(\"\\n**** Passed ****\")\n return failed\n \n def test_stub(self, settings):\n ls = [list(range(lmax + 1)) for lmax in settings['model']['lmaxes']]\n muls = settings['model']['muls']\n lmuls = [list(zip(li, mi)) for li,mi in list(zip(ls,muls))]\n \n memory = sum(sum(l**2 * m for l,m in layer) for layer in lmuls)\n \n if memory > 100:\n return 3\n else:\n return 0\n \n @property \n def passes(self):\n if self.__passes is None:\n self.__passes = np.array(self.seeds)[self.passed]\n return self.__passes\n \n @property\n def num_passes(self):\n if self.__passes is not None:\n return len(self.__passes)\n else:\n return np.sum(self.passed)\n \n @property\n def failure_model(self):\n if len(self.seeds) < self.failure_model_step:\n return None\n if self._failure_model is None or \\\n len(self.seeds) >= self.failure_model_count + self.failure_model_step:\n self._failure_model = train_rbf_svm(self.seeds, self.passed)\n return self._failure_model\n \n \n \n @staticmethod\n def load(filename, **kwargs):\n if os.path.isfile(filename):\n return TrainableSampleGenerator(filename, **kwargs)\n else:\n raise ValueError(f\"Tried to load {filename}, but file does not exist.\")\n \n def save(self, verbose=False):\n if verbose: print(\"Saving samples... \", end='')\n self.file.flush()\n if verbose: print(\"Done.\")\n \n def close(self, verbose=True):\n if verbose: print(f\"Closing {self.filename}... \", end='')\n self.file.close()\n self.file = None\n self.passed = None\n self.seeds = None\n self.used = None\n if verbose: print(\"Done.\")\n \n def reopen(self, verbose=True):\n if self.file is not None:\n self.close()\n if verbose: print(f\"Reopening {self.filename}... \", end='')\n self.file = h5py.File(self.filename, 'r+')\n self.seeds = Array(self.file, \"seeds\")\n self.passed = Array(self.file, \"passed\")\n self.used = Array(self.file, \"used\")\n if verbose: print(\"Done.\")\n \n def index_of(self, seed):\n return np.nonzero(np.all(np.array(self.seeds)==np.reshape(seed, (1,-1)), axis=1))[0]\n \n def set_used(self, arg=None, seed=None):\n self.__passes = None\n if arg is None:\n if seed is None:\n raise ValueError(\"One of arg or seed must be given.\")\n indices = self.index_of(seed)\n else:\n indices = (arg,)\n for index in indices:\n #print(f\" index = {index}\")\n self.passed[index] = False\n self.used[index] = True\n \ndef test_failure_model(args, stub=False, verbose=2):\n import matplotlib.pyplot as plt\n from sklearn.model_selection import train_test_split\n figsize=(12,8)\n block=False\n pause=1\n \n sampler = TrainableSampleGenerator(stub=stub)\n stepsize = 10\n total = 1000\n \n train_frac = .67\n test_frac = .33 \n \n axes = None\n start_step = (int(len(sampler.seeds) / stepsize) + 1) * stepsize\n for s in range(start_step, total+1, stepsize):\n # get more samples if needed\n sampler.sample(num_samples=s, verbose_test=verbose)\n if sampler.num_passes == len(sampler.seeds) or sampler.num_passes == 0:\n continue\n \n # start collecting data for model\n seeds = np.array(sampler.seeds)\n passed = np.array(sampler.passed)\n failed = np.logical_not(passed)\n \n print(f\"{seeds.shape}, {passed.shape}, {failed.shape}\")\n train_seeds, test_seeds, train_passed, test_passed, train_failed, test_failed \\\n = train_test_split(seeds, passed, failed, train_size=train_frac)\n train_data = \\\n (train_seeds,'train_seeds'), \\\n (train_passed,'trained_passed'), \\\n (train_failed, 'train_failed')\n\n test_data = \\\n (test_seeds,'test_seeds'), \\\n 
(test_passed,'test_passed'), \\\n (test_failed, 'test_failed')\n\n\n print(\"--- Train/test split ---\")\n for (tn_var, tn_name), (ts_var, ts_name) in zip(train_data, test_data):\n print(f\"{tn_name:<15} : {tn_var.shape[0]:<5},\")\n \n print(\" Fun 1\") \n print(\"Fitting failure model... \", end='')\n model = train_rbf_svm(train_seeds, train_passed)\n print(\"Done\")\n \n # test calibration\n pred = (10 * model(test_seeds)).astype(int) / 10\n bins = np.arange(0, 1, .1)\n to_bin = np.equal(pred[:,np.newaxis], bins[np.newaxis,:])\n bin_passes = np.sum(np.logical_and(test_passed[:,np.newaxis], to_bin), axis=0)\n bin_fails = np.sum(np.logical_and(test_failed[:,np.newaxis], to_bin), axis=0)\n bin_cum = bin_passes + bin_fails\n frac = np.array([(bin_passes[i] / bin_cum[i] if bin_cum[i] > 0 else 0.) for i in range(10)])\n \n if axes is None:\n plt.figure(figsize=figsize)\n axes = plt.gca()\n else:\n plt.cla()\n \n #ax1 = axes.subplots\n plt.bar(bins, frac, width=.08, color='r', label=\"Passes\")\n\n plt.legend(loc=\"best\")\n plt.show(block=block)\n if block==False:\n plt.pause(pause)\n \ndef train_rbf_svm(X, y):\n import sklearn.svm as svm\n svc = svm.SVC(probability=True)\n svc.fit(X, y.astype(int))\n return lambda X : svc.predict_proba(X)[...,1]\n\n\nif __name__ == '__main__':\n if sys.argv[1] == \"fail\":\n test_failure_model(sys.argv[2:])\n elif len(sys.argv) > 1:\n print(f\"=== Running diagnostics on {sys.argv[1]} ===\")\n sampler = TrainableSampleGenerator(filename=sys.argv[1], overwrite=False, use_only=True)\n print(f\" Total samples tried : {len(sampler.seeds)}\")\n print(f\" Total passes : {sampler.num_passes}\")\n print(f\" Total failures : {len(sampler.seeds)-sampler.num_passes}\")\n print(f\" Total used : {np.sum(sampler.used)}\")\n print(f\" Percent passed : {(100 * sampler.num_passes / len(sampler.seeds)):.2f}\") \n else:\n sampler = TrainableSampleGenerator(stub=True)\n sampler.sample(num_passes=1000, verbose_test=False)\n\n\n", "id": "11848364", "language": "Python", "matching_score": 2.4154796600341797, "max_stars_count": 0, "path": "train/sample_hyperparameters.py" }, { "content": "import os.path\nimport torch\nimport numpy as np\n\nclass Shuffle:\n def __init__(self,indices=None,size=None,filename=None,split=None,split_names=[]):\n self.data_size=size\n self.filename=filename\n self.indices=indices\n self.has_split = False\n\n if indices is not None and split is not None:\n self.set_split(split, split_names)\n else:\n self.split = split\n self.split_names = split_names\n \n def set_split(self, split, split_names=[]):\n if self.has_split:\n raise RuntimeError(\"Can only set a split once on a Shuffle object.\")\n self.has_split = True\n split = list(split)\n s = 0\n for i in range(len(split)):\n s += split[i]\n split[i] = s\n if split[0] < 1:\n split = [round(s * self.data_size) for s in split]\n split = [0] + list(split) + [self.data_size]\n self.split = tuple(self.indices[a:b] for a,b in zip(split[:-1],split[1:]))\n for i, n in enumerate(split_names):\n setattr(self, n, self.split[i])\n \n def save(self, filename=None):\n if filename is None:\n filename = self.config.data.test_train_shuffle\n torch.save(self.indices, filename)\n \n\n @staticmethod \n def load(filename,split=None,split_names=[],size=None,verbose=True):\n \"\"\"\n Tries to load shuffled indices from filename\n Stores the resulting indices in self.indices\n If file cannot be found, or if the number of indices\n in the file doesn't match 'size' (if not 'None') or\n then raises a ValueError\n \n Args:\n filename : 
File to load from\n size : \n split : sequence of either integers (>=1) or fractions (of total size)\n giving divisions between the parts of the split\n split_names : sequence of strings, one more than numbers in split\n giving the names of the split pieces\n \"\"\"\n if os.path.isfile(filename):\n try:\n indices = torch.load(filename)\n except Exception as e:\n if verbose: print(e)\n raise RuntimeError(f\"Failure when loading {filename}\",\"\")\n if size is not None and len(indices) != size:\n raise ValueError(f\"Saved shuffle has size {len(self.indices)}, but config specifies size {data_size}!\", 'SIZE MISMATCH')\n if verbose: print(f\"Loading shuffle indices from {filename}...\")\n shuffle = Shuffle(indices=indices,size=size,filename=filename,split=split,split_names=split_names)\n\n return shuffle \n else:\n raise RuntimeError(f\"Shuffle file {filename} does not exist.\",\"\")\n\n @staticmethod \n def generate(size, connect_params, table, status, sql_version=1, filename=None, split=None, split_names=[], rng=None, seed=None, verbose=True, **kwargs):\n \"\"\"\n Generates a new shuffle of indices for valid database entries,\n and stores it in self.indices.\n If self.filename is not None, we also attempt to save the shuffle\n to a file by that name (unless arg save=False)\n \n The total size of the new shuffle will be self.data_size\n \n connect_params are used as database connection\n parameters (including DB name, username, password)\n \n It selects only rows from table 'table'\n whose status equals 'status'\n \"\"\"\n if verbose: print(f\"Generating new test/train shuffle of {size} examples... \", end=\"\")\n if sql_version == 0:\n from mysql_df import MysqlDB\n db = MysqlDB(connect_params)\n indices = np.array(db.get_finished_idxs(size, **kwargs), dtype=np.int32)\n elif sql_version == 1:\n from sql_df import Database\n db = Database(**connect_params, status=status, **kwargs)\n indices = np.array(db.fetch_ids(size, status, verbose=verbose))\n if rng is None:\n rng = np.random.default_rng(seed)\n rng.shuffle(indices)\n shuffle = Shuffle(indices=indices,size=size,filename=filename,split=split,split_names=split_names)\n if verbose: print(\"Done.\")\n if filename:\n if verbose: print(f\"Saving test/train shuffle indices to {filename}...\")\n shuffle.save(filename)\n return shuffle\n \n def size_mismatch(self, data_size=None):\n \"\"\"\n Returns True if there's a size mismatch between\n data_size (defaults to self.data_size)\n and the number of stored self.indices.\n \"\"\"\n if data_size is None: data_size = self.data_size\n if data_size is None:\n return False # no mismatch of sizes, if no size provided\n elif not self.has_indices:\n return False # no mismatch of sizes, if no data to compare\n elif len(self.indices) == self.data_size:\n return False # mismatches of sizes, in straightforward sense\n else:\n return False\n \n @staticmethod\n def get_shuffle(config, new=None, save=True, verbose=True):\n \"\"\"\n Gets a shuffle one way or another:\n from a file (if new=False, or preferred if new=None) or randomly\n generated from the database.\n Uses data size, test/train/other split, connnect params etc.\n from config\n If save==True, saves if it generates a new shuffle\n \"\"\"\n if isinstance(config, str):\n from training_config import Config\n config = Config(config)\n split = (config.data.train_size, config.data.test_size)\n split_names = ('train_set','test_set','held_back')\n if config.data.test_train_shuffle: # if shuffle filename is specified\n if not new:\n try:\n shuffle = 
Shuffle.load(config.data.test_train_shuffle,\n split, split_names,\n size=config.data.data_size,\n verbose=verbose)\n return shuffle\n except Exception as e:\n if len(e.args) > 1 and e.args[1] == 'SIZE MISMATCH':\n print(e.args[0])\n if (not config.interactive) or \\\n input(\"Proceed by generating config-size shuffle from DB, and overwriting old shuffle? (y/n)\").strip().lower() != 'y':\n pass\n else:\n raise\n if new is None:\n new = True\n if new:\n filename = config.data.test_train_shuffle if save else None\n return Shuffle.generate(config.data.data_size,\n config.data.connect_params,\n config.data.table,\n config.data.data_status,\n filename=filename,\n split=split,\n split_names=split_names,\n verbose=verbose)\n else:\n raise ValueError(f\"Can't find shuffle at {config.data.test_train_shuffle}\")\n\nif __name__ == '__main__':\n from sys import argv\n argv = argv[1:]\n \n if '-new' in argv:\n argv.remove('-new')\n new = True\n else:\n new = False\n \n if len(argv) > 0:\n config = argv[0]\n else:\n config = 'training.ini'\n \n shuffle = Shuffle.get_shuffle(config, new=new, save=True)\n if new:\n print(f\"Generated new shuffle at {shuffle.config.data.test_train_shuffle}\")\n else:\n print(f\"Shuffle exists at {shuffle.config.data.test_train_shuffle}\")\n \n \n", "id": "901266", "language": "Python", "matching_score": 2.749589681625366, "max_stars_count": 0, "path": "train/shuffle.py" }, { "content": "from training_config import Config\nif __name__ != '__main__':\n print(\"spawning process...\")\nif __name__ == '__main__': print(\"loading standard modules...\")\nimport time\nfrom glob import glob\nimport os\nimport math\nimport sys\nif __name__ == '__main__': print(\"loading torch...\")\nimport torch\ntorch.set_default_dtype(torch.float64)\nif __name__ == '__main__': print(\"loading training-specific libraries...\")\nfrom pipeline import Pipeline, Molecule, test_data_neighbors, generate_index_shuffle, generate_multi_jiggles_set\nfrom training_utils import train_batch, batch_examples, save_checkpoint, cull_checkpoints\nif __name__ == '__main__': print(\"done loading modules.\")\n\ndef main():\n config = Config()\n\n if config.data.source == 'hdf5':\n print(f\"Will use training data from {len(config.data.hdf5_filenames)} files:\")\n for filename in config.data.hdf5_filenames[:4]:\n print(f\" {filename}\")\n print(\" Etc...\")\n elif config.data.source == 'SQL':\n print(f\"Using training data from database:\")\n print(f\" {config.connect_params.db}: {config.connect_params.user}@{config.connect_params.host}\")\n #if 'passwd' not in config.connect_params:\n # self.connect_params['passwd'] = getpass(prompt=\"Please enter password: \")\n \n ### load or generate test/train shuffle\n\n testing_size = config.data.testing_size\n training_size = config.data.training_size\n if config.data.randomize and config.data.test_train_shuffle and os.path.isfile(config.data.test_train_shuffle):\n print(f\"Loading test/train shuffle indices from {config.data.test_train_shuffle}...\")\n test_train_shuffle = torch.load(config.data.test_train_shuffle)\n if len(test_train_shuffle) != testing_size + training_size:\n print(f\"Saved test/train shuffle has size {len(test_train_shuffle)}, but config specifies size {testing_size + training_size}!\")\n generate_shuffle = True\n if input(\"Will generate new shuffle. Overwrite old shuffle file? (y/n) \").strip().lower() == \"y\":\n print(\"Ok.\")\n else:\n config.data.test_train_shuffle = None\n print(\"Ok. 
Will discard new shuffle after this run.\")\n else:\n generate_shuffle = False \n else:\n generate_shuffle = True\n \n if generate_shuffle:\n if config.data.randomize:\n print(f\"Generating new test indices from {testing_size} examples... \", end=\"\")\n else:\n print(\"Using non-randomized (in-order) test/train indices\")\n if not config.data.multi_jiggle_data: # usual database of distinct molecules\n test_train_shuffle = generate_index_shuffle(testing_size, config.data.connect_params, randomize=config.data.randomize)\n else: # select on smiles string to get specified number of jiggle \n test_train_shuffle = generate_multi_jiggles_set(\n math.ceil((testing_size + training_size) / config.data.jiggles_per_molecule), # of molecules\n config.data.jiggles_per_molecule, config.data.connect_params, config.data.randomize)[\n :testing_size + training_size]\n print(\"Done.\")\n if config.data.test_train_shuffle and config.data.randomize:\n print(f\"Saving test/train shuffle indices to {config.data.test_train_shuffle}...\")\n torch.save(test_train_shuffle, config.data.test_train_shuffle)\n\n test_set_indices = test_train_shuffle[:testing_size] \n\n #print(\"Test set indices:\")\n #print(test_set_indices[:100], \"...\")\n\n ### set up molecule pipeline ###\n\n print(\"\\n=== Starting molecule pipeline ===\\n\")\n print(\"Working...\", end='\\r', flush=True)\n pipeline = Pipeline(config, new_process=False)\n #testing_molecules_dict = pipeline.testing_molecules_dict\n\n print(\"\\n=== Processing test data ===\\n\")\n print(\"Setting test indices...\")\n time1 = time.time()\n pipeline.set_indices(test_set_indices)\n print(\"calling dataset_reader.run()...\")\n pipeline.dataset_reader.run()\n print(\"Resetting database pointer...\")\n pipeline.dataset_reader.run()\n\n print(\"\\n=== Reading into pipeline ===\\n\")\n pipeline.start_reading(testing_size, batch_size=1)\n\n pipeline.dataset_reader.run()\n\n # read in and process testing data directly to memory\n testing_examples = []\n\n print(\"Reading test examples...\")\n while pipeline.any_coming():\n try:\n example = pipeline.get_batch(20)\n except Exception as e:\n print(\"Failed to get batch!\")\n print(e)\n exit()\n testing_examples.append(example)\n #if len(testing_examples) <= 5:\n # test_data_neighbors(example, Rs_in, Rs_out, max_radius, testing_molecules_dict)\n assert len(testing_examples) == testing_size, \\\n f\">>>>> expected {testing_size} testing examples but got {len(testing_examples)}\"\n \n batch_size = config.training.batch_size\n print(\"Batching test examples...\")\n testing_batches = batch_examples(testing_examples, batch_size)\n\n time2 = time.time()\n print(f\"Done preprocessing testing data! 
That took {time2-time1:.3f} s.\\n\")\n #testing_molecules_dict = dict(testing_molecules_dict)\n\n\n\nif __name__ == '__main__':\n main()\n", "id": "9407303", "language": "Python", "matching_score": 3.635441780090332, "max_stars_count": 0, "path": "train/pipeline_test.py" }, { "content": "from molecule_pipeline import MoleculePipeline\n#import e3nn.point.data_helpers as dh\nimport re\nimport math\nimport os\nfrom torch.multiprocessing import Process, Lock, Semaphore, Value, Queue, Manager, Pool\nimport time\nimport numpy as np\nimport h5py\nimport pandas as pd\nimport torch\ntorch.set_default_dtype(torch.float64)\n\n### Code to Generate Molecules ###\n\nclass Molecule():\n def __init__(self, ID, smiles,\n perturbed_geometries,\n perturbed_shieldings,\n atomic_numbers,\n symmetrical_atoms=None, # list of lists of 0-indexed atom numbers\n weights=None):\n self.ID = ID # database id for molecule\n self.smiles = smiles\n # vector of strings of length n_atoms\n self.atomic_numbers = atomic_numbers\n # number of atoms\n self.n_atoms = len(atomic_numbers)\n # (n_examples, n_atoms, 3)\n self.perturbed_geometries = perturbed_geometries\n self.perturbed_shieldings = perturbed_shieldings\n #print(self.perturbed_shieldings.shape) \n\n self.features = Molecule.get_one_hots(atomic_numbers)\n if weights is None:\n self.weights = Molecule.get_weights(\n atomic_numbers, symmetrical_atoms, None) # (n_atoms,)\n else:\n self.weights = weights\n\n one_hot_table = np.zeros((0,0))\n\n # initialize one-hot table\n # also initalizes revers look-up for atomic numbers\n @staticmethod\n def initialize_one_hot_table(all_elements):\n max_element = max(all_elements)\n Molecule.one_hot_table = np.zeros((max_element+1, len(all_elements)), dtype=np.float64)\n Molecule.atomic_number_index = np.zeros(len(all_elements), dtype=np.int32)\n for i, e in enumerate(all_elements):\n Molecule.one_hot_table[e][i] = 1.0\n Molecule.atomic_number_index[i] = e\n\n # generates one-hots for a list of atomic_numbers\n @staticmethod\n def get_one_hots(atomic_numbers):\n return Molecule.one_hot_table[atomic_numbers]\n\n # get atomic number(s) from one-hots\n @staticmethod\n def get_atomic_numbers(one_hots):\n return one_hots @ Molecule.atomic_number_index\n\n # compute weights for loss function\n @staticmethod\n def get_weights(atomic_symbols, symmetrical_atoms, relevant_elements):\n weights = [\n 1.0 if symbol in relevant_elements else 0.0 for symbol in atomic_symbols]\n weights = np.array(weights)\n for l in symmetrical_atoms:\n weight = 1.0/len(l)\n for i in l:\n weights[i] = weight\n return weights\n \nclass BatchTuple(tuple):\n def __new__(cls, batches):\n #print(f\"Making new BatchTuple{batches}\")\n return tuple.__new__(BatchTuple, batches)\n \n def to(self, device, non_blocking=True):\n #print(\"Sending to device!\")\n return BatchTuple((batch.to(device, non_blocking) for batch in self))\n \n def share_memory_(self):\n #print(\"Sharing memory!\")\n return BatchTuple((batch.share_memory_() for batch in self))\n \n @property\n def n_examples(self):\n return self[0].n_examples\n \n @property\n def x(self):\n return self[0].x\n \n @property\n def weights(self):\n return self[0].weights\n \n @property\n def ID(self):\n return self[0].ID\n \n\n### Parallel Preprocessing Code ###\n\n# polls a lock/semaphore without acquiring it\n# returns: True if it was positive, False otherwise\n\n\ndef check_semaphore(s):\n if s.acquire(False):\n s.release()\n return True\n return False\n\n\ndef set_semaphore(s, x):\n if x and not check_semaphore(s):\n 
s.release()\n elif (not x) and check_semaphore(s):\n s.acquire()\n\n\ndef wait_semaphore(s):\n s.acquire()\n s.release()\n\n# returns total width (in floating point numbers) of Rs data\n\n\ndef Rs_size(Rs):\n size = 0\n for mul, l, _ in Rs:\n size += mul * (2 * l + 1)\n #print(f\"Computed Rs_size = {size}\")\n return size\n\n\nclass Pipeline():\n def __init__(self, config, share_batches=True, manager=None, new_process=True):\n if new_process == True and manager is None:\n manager = Manager()\n self.knows = Semaphore(0) # > 0 if we know if any are coming\n # == 0 if DatasetReader is processing a command\n self.working = Semaphore(1 if new_process else 100)\n self.finished_reading = Lock() # locked if we're still reading from file\n # number of molecules that have been sent to the pipe:\n self.in_pipe = Value('i', 0)\n \n # Tracking what's already been sent through the pipe:\n self._example_number = Value('i', 0)\n \n # The final kill switch:\n self._close = Value('i', 0)\n\n self.command_queue = manager.Queue(10)\n self.molecule_pipeline = None\n self.batch_queue = Queue(config.data.batch_queue_cap) #manager.Queue(config.data.batch_queue_cap)\n self.share_batches = share_batches\n\n self.dataset_reader = DatasetReader(\"dataset_reader\", self, config, new_process=new_process)\n if new_process:\n self.dataset_reader.start()\n\n def __getstate__(self):\n self_dict = self.__dict__.copy()\n self_dict['dataset_reader'] = None\n return self_dict\n\n # methods for pipeline user/consumer:\n def start_reading(self, examples_to_read, make_molecules=True, batch_size=None, wait=False):\n #print(\"Start reading...\")\n assert check_semaphore(\n self.finished_reading), \"Tried to start reading file, but already reading!\"\n with self.in_pipe.get_lock():\n assert self.in_pipe.value == 0, \"Tried to start reading, but examples already in pipe!\"\n set_semaphore(self.finished_reading, False)\n set_semaphore(self.knows, False)\n self.working.acquire()\n self.command_queue.put(StartReading(\n examples_to_read, make_molecules, batch_size))\n if wait:\n self.wait_till_done()\n\n def wait_till_done(self):\n # wait_semaphore(self.knows)\n # wait_semaphore(self.finished_reading)\n self.working.acquire()\n self.working.release()\n if self.any_coming():\n with self.in_pipe.get_lock():\n ip = self.in_pipe.value\n raise Exception(f\"Waiting with {ip} examples in pipe!\")\n\n def scan_to(self, index):\n assert check_semaphore(\n self.knows), \"Tried to scan to index, but don't know if finished!\"\n assert check_semaphore(\n self.finished_reading), \"Tried to scan to index, but not finished reading!\"\n assert not self.any_coming(), \"Tried to scan to index, but pipeline not empty!\"\n self.working.acquire()\n self.command_queue.put(ScanTo(index))\n with self._example_number.get_lock():\n self._example_number.value = index\n # What to do if things are still in the pipe???\n\n def set_indices(self, test_set_indices):\n self.working.acquire()\n self.command_queue.put(SetIndices(torch.tensor(test_set_indices)))\n self.working.acquire()\n self.command_queue.put(ScanTo(0))\n\n def set_shuffle(self, shuffle):\n self.command_queue.put(SetShuffle(shuffle))\n\n def any_coming(self): # returns True if at least one example is coming\n wait_semaphore(self.knows)\n with self.in_pipe.get_lock():\n return self.in_pipe.value > 0\n\n def get_batch(self, timeout=None):\n #assert self.any_coming(verbose=verbose), \"Tried to get data from an empty pipeline!\"\n x = self.batch_queue.get(True, timeout)\n #print(f\"{type(x)} : {x}\")\n #for b 
in x:\n # print(f\" --{type(b)} : {b}\")\n \n with self.in_pipe.get_lock():\n self.in_pipe.value -= x.n_examples\n if self.in_pipe.value == 0 and not check_semaphore(self.finished_reading):\n set_semaphore(self.knows, False)\n with self._example_number.get_lock():\n self._example_number.value += x.n_examples\n return x\n\n @property\n def example_number(self):\n with self._example_number.get_lock():\n return self._example_number.value\n\n def close(self):\n self.command_queue.put(CloseReader())\n with self._close.get_lock():\n self._close.value = True\n self.dataset_reader.join(4)\n self.dataset_reader.kill()\n\n # methods for DatasetReader:\n def get_command(self):\n return self.command_queue.get()\n \n def put_molecule_to_ext(self, m, block=True):\n r = self.molecule_pipeline.put_molecule(m, block)\n if not r:\n return False\n with self.in_pipe.get_lock():\n if self.in_pipe.value == 0:\n set_semaphore(self.knows, True)\n self.in_pipe.value += 1\n return True\n\n def put_molecule_data(self, data, atomic_numbers, weights, ID, block=True):\n r = self.molecule_pipeline.put_molecule_data(\n data, atomic_numbers, weights, ID, block)\n if not r:\n return False\n with self.in_pipe.get_lock():\n if self.in_pipe.value == 0:\n set_semaphore(self.knows, True)\n if data.ndim == 3:\n self.in_pipe.value += data.shape[0]\n else:\n self.in_pipe.value += 1\n return True\n\n def get_batch_from_ext(self, block=True):\n return self.molecule_pipeline.get_next_batch(block)\n\n def ext_batch_ready(self):\n return self.molecule_pipeline.batch_ready()\n\n # !!! Call only after you've put the molecules !!!\n def set_finished_reading(self):\n set_semaphore(self.finished_reading, True)\n set_semaphore(self.knows, True)\n self.molecule_pipeline.notify_finished()\n\n def put_batch(self, x):\n if False: #self.share_batches:\n print(\"[P] Sharing memory... 
\")\n try:\n x.share_memory_()\n except Exception as e:\n print(\"[P] Failed when moving tensor to shared memory\")\n print(e)\n print(\"[P] Done sharing memory\")\n self.batch_queue.put(x)\n \n def time_to_close(self):\n with self._close.get_lock():\n return self._close.value\n\nclass DatasetSignal():\n def __str__(self):\n return \"DatasetSignal\"\n\n\nclass ScanTo(DatasetSignal):\n def __init__(self, index=0):\n self.index=0\n\n def __str__(self):\n return f\"ScanTo({self.index})\"\n \nclass CloseReader(DatasetSignal):\n def __init__(self, aggressive=False):\n self.aggressive = aggressive\n\n\nclass StartReading(DatasetSignal):\n def __init__(self, examples_to_read, make_molecules=True, batch_size=None):\n self.examples_to_read = examples_to_read\n self.make_molecules = make_molecules\n #self.record_in_dict = record_in_dict\n self.batch_size = batch_size\n\n def __str__(self):\n r = f\"StartReading(examples_to_read={self.examples_to_read}, make_molecules={self.make_molecules}\"\n if self.batch_size is not None:\n r += f\", batch_size={self.batch_size}\"\n return r + \")\"\n\n\nclass SetIndices(DatasetSignal):\n def __init__(self, indices):\n \"\"\"\n indices should be sorted!!!\n \"\"\"\n self.indices = indices\n\n def __str__(self):\n return f\"SetIndices({len(self.indices)})\"\n \nclass SetShuffle(DatasetSignal):\n def __init__(self, shuffle=True):\n self.shuffle_incoming = shuffle\n \n def __str__(self):\n return f\"SetShuffle({self.shuffle_incoming})\"\n\nfirst_batch = True \n\nclass DatasetReader(Process):\n def __init__(self, name, pipeline, config, shuffle_incoming=False, new_process=True, requested_jiggles=1):\n if new_process:\n super().__init__(group=None, target=None, name=name)\n self.new_process = new_process\n self.pipeline = pipeline\n self.config = config\n\n self.all_elements = config.all_elements\n self.train_dynamic = config.training.train_dynamic\n self.structure = config.data.structure if not self.train_dynamic \\\n else slice(1,3) if config.data.table == \"dft8k\" \\\n else slice(0,2)\n self.molecule_pipeline = None\n self.use_tensor_constraint = config.training.use_tensor_constraint\n feature_size = len(config.all_elements)\n output_size = 10 if self.use_tensor_constraint else 1\n self.molecule_pipeline_args = (config.training.batch_size, config.max_radius, feature_size,\n output_size, config.data.n_molecule_processors,\n config.data.molecule_queue_cap, config.data.example_queue_cap,\n config.data.batch_queue_cap, config.affine_correction)\n #print(f\"molecule_pipeline_args = {self.molecule_pipeline_args}\")\n #self.molecule_number = 0\n self.index_pos = 0\n self.shuffle_incoming = shuffle_incoming\n\n self.data_source = config.data.source\n if self.data_source == 'hdf5':\n self.hdf5_file_list_index = 0 # which hdf5 file\n self.hdf5_file_index = 0 # which example within the hdf5 file\n self.hdf5_filenames = config.data.hdf5_filenames # hdf5 files to process\n self.read_examples = self.read_examples_from_file\n if config.data.file_format == 0:\n self.read_hdf5 = self.read_hdf5_format_0\n elif config.data.file_format == 1:\n self.read_hdf5 = self.read_hdf5_format_1\n elif self.data_source == 'SQL':\n self.connect_params = config.data.connect_params\n self.SQL_fetch_size = config.data.SQL_fetch_size\n if config.data.SQL_version == 0:\n self.empty_buffer = []\n self.read_examples = self.read_examples_from_SQL_0\n from mysql_df import MysqlDB\n self.database = MysqlDB(self.connect_params)\n else:\n from sql_df import Database\n self.empty_buffer = pd.DataFrame(\n 
columns=['atomic_numbers',\n 'geometries_and_shieldings',\n 'compound_type', 'weights'],\n dtype=np.float64)\n self.molecule_buffer = self.empty_buffer.copy()\n self.read_examples = self.read_examples_from_SQL_1\n self.database = Database(**self.connect_params, table=config.data.table)\n self.molecule_buffer = self.empty_buffer.copy()\n\n # command loop for reader:\n def run(self):\n if self.molecule_pipeline is None:\n self.molecule_pipeline = MoleculePipeline(*self.molecule_pipeline_args)\n self.pipeline.molecule_pipeline = self.molecule_pipeline\n self.indices = np.array([])\n if len(Molecule.one_hot_table) == 0:\n Molecule.initialize_one_hot_table(self.all_elements)\n \n while True:\n command = self.pipeline.get_command()\n #print(f\"Command: {command}\")\n if isinstance(command, ScanTo):\n # move the reader head to command.index\n if self.data_source == 'hdf5':\n self.hdf5_file_list_index = 0\n self.hdf5_file_index = 0\n # hdf5 can only scan by reading through the files\n if command.index > 0:\n self.read_examples(command.index, False, False)\n self.pipeline.set_finished_reading()\n elif self.data_source == 'SQL':\n self.index_pos = command.index\n self.molecule_buffer = self.empty_buffer.copy()\n #self.molecule_number = 0\n elif isinstance(command, StartReading):\n self.molecule_pipeline.notify_starting(command.batch_size)\n self.read_examples(command.examples_to_read, command.make_molecules)\n self.pipeline.set_finished_reading()\n self.forward_batches()\n elif isinstance(command, SetIndices):\n self.indices = command.indices\n elif isinstance(command, SetShuffle):\n self.shuffle_incoming = command.shuffle_incoming\n elif isinstance(command, CloseReader):\n return\n else:\n raise ValueError(\"unexpected work type\")\n self.pipeline.working.release()\n if not self.new_process:\n break\n\n # iterate through hdf5 filenames, picking up where we left off\n # returns: number of examples processed\n def read_examples_from_file(self, examples_to_read, make_molecules):\n examples_read = 0 # how many examples have been processed this round\n assert self.hdf5_file_list_index < len(self.hdf5_filenames), \\\n \"request to read examples, but files are finished!\"\n while examples_read < examples_to_read:\n hdf5_filename = self.hdf5_filenames[self.hdf5_file_list_index]\n #print(f\"{self.name}: filename={hdf5_filename} file_list_index={self.hdf5_file_list_index} file_index={self.hdf5_file_index}\")\n examples_read += self.read_hdf5(\n hdf5_filename, examples_to_read - examples_read, make_molecules)\n if self.hdf5_file_list_index >= len(self.hdf5_filenames):\n break\n return examples_read\n\n # I've removed the original hdf5 reader, since we don't use that format any more\n # you can find it on the github\n def read_hdf5_format_0(self, filename, examples_to_read, make_molecules):\n raise Exception(\"Old hdf5 format not supported!\")\n\n def read_hdf5_format_1(self, filename, examples_to_read, make_molecules):\n import itertools\n with h5py.File(filename, \"r\") as h5:\n if make_molecules:\n examples_read = 0\n for key, dataset in itertools.islice(h5.items(), self.hdf5_file_index, None):\n molecule = Molecule(int(key), str(dataset.attrs[\"smiles\"]), dataset[..., :3],\n dataset[..., 3], dataset.attrs[\"atomic_numbers\"],\n weights=dataset.attrs[\"weights\"])\n self.pipeline.put_molecule_to_ext(molecule)\n\n # ABORT if we are exiting training\n if self.pipeline.time_to_close():\n return examples_to_read\n\n while self.pipeline.ext_batch_ready():\n self.pipeline.put_batch(\n 
self.pipeline.get_batch_from_ext())\n\n # update counters\n examples_read += 1\n self.hdf5_file_index += 1\n\n if examples_read == examples_to_read:\n # read enough examples, stopped partway through file\n self.pipeline.set_finished_reading()\n return examples_read\n\n # reached end of file without enough examples\n self.hdf5_file_list_index += 1\n self.hdf5_file_index = 0\n return examples_read\n else:\n file_length = len(h5.keys())\n if self.hdf5_file_index + examples_to_read >= file_length:\n self.hdf5_file_list_index += 1\n self.hdf5_file_index = 0\n return file_length - self.hdf5_file_index\n else:\n self.hdf5_file_index += examples_to_read\n return examples_to_read\n\n def read_examples_from_SQL_0(self, examples_to_read, make_molecules):\n examples_read = 0\n while examples_read < examples_to_read:\n i=0\n for i, (ID, data, weights, smiles) in enumerate(self.molecule_buffer):\n if make_molecules:\n if examples_read == examples_to_read:\n break\n molecule = Molecule(ID, str(smiles), data[:, 1:4], data[:, 4:],\n data[:, 0].astype(np.int32), weights=weights)\n #print(f\"# ID: {molecule.ID}\")\n self.pipeline.put_molecule_to_ext(molecule)\n #if record_in_dict:\n # self.testing_molecules_dict[molecule.ID] = molecule\n examples_read += 1\n \n # ABORT if we are exiting training\n if self.pipeline.time_to_close():\n return examples_to_read\n \n while self.pipeline.ext_batch_ready():\n bad_call = self.pipeline.get_batch_from_ext()\n self.pipeline.put_batch(bad_call)\n self.molecule_buffer = self.molecule_buffer[i:]\n if len(self.molecule_buffer) < self.SQL_fetch_size and self.index_pos < len(self.indices):\n self.molecule_buffer += self.database.read_rows(np.nditer(\n self.indices[self.index_pos : self.index_pos + self.SQL_fetch_size]),\n randomize = self.shuffle_incoming) #, get_tensors=self.use_tensor_constraint)\n self.index_pos += self.SQL_fetch_size\n\n return examples_read\n \n # Molecule(\n # ID, smiles,\n # perturbed_geometries,\n # perturbed_shieldings,\n # atomic_numbers,\n # symmetrical_atoms=None,\n # weights=None\n \n def read_examples_from_SQL_1(self, examples_to_read, make_molecules=True):\n #from clean_database import row_problem\n examples_read = 0\n while examples_read < examples_to_read:\n # iterate through molecule_buffer, which in this case is a pandas dataframe\n i = 0\n for i, (ID, row) in enumerate(self.molecule_buffer.iterrows()):\n if examples_read == examples_to_read:\n break\n #if row_problem(row):\n # continue\n \n #global first_batch\n #if first_batch:\n #print(f\"database.structure = {self.structure}\")\n #print(f\"Molecule {ID}: shape = {tuple(row.geometries_and_shieldings.shape)}\")\n # #print(row.geometries_and_shieldings)\n \n \n # encapsulate these data into a Molecule object and send to the C++ preprocessing pipeline\n geometries_and_shieldings = row.geometries_and_shieldings[self.structure, ...]\n molecule = Molecule(ID, None,\n geometries_and_shieldings[..., :3],\n geometries_and_shieldings[..., 3],\n row.atomic_numbers, weights=row.weights)\n self.pipeline.put_molecule_to_ext(molecule)\n examples_read += 1\n \n #if first_batch:\n # first_batch = False\n # print(\"Processed first batch.\")\n \n # ABORT if we are exiting training\n if self.pipeline.time_to_close():\n return examples_to_read\n \n # this loop does double duty of also grabbing any finished batches from the C++ pipeline\n # and sending them for training:\n self.forward_batches() \n \n \n self.molecule_buffer = self.molecule_buffer.iloc[i:]\n \n # if molecule buffer is running low (and we're 
not at the end of the train/test set)...\n if len(self.molecule_buffer) < self.SQL_fetch_size and self.index_pos < len(self.indices):\n # fetch from database and concatenate to molecule_buffer\n self.molecule_buffer = pd.concat([self.molecule_buffer,\n self.database.read_rows(\n self.indices[self.index_pos : self.index_pos + self.SQL_fetch_size],\n randomize = self.shuffle_incoming)])\n self.index_pos += self.SQL_fetch_size\n\n return examples_read\n \n def forward_batches(self):\n while self.pipeline.ext_batch_ready():\n batch = self.pipeline.get_batch_from_ext()\n batch = BatchTuple(batch) if self.train_dynamic else batch[0]\n self.pipeline.put_batch(batch)\n \n\n\n# returns a random shuffle of the available indices, for test/train split and random training\ndef generate_index_shuffle(size, connect_params, rng=None, seed=None, status=1, sql_version=1, **kwargs):\n if sql_version == 0:\n from mysql_df import MysqlDB\n db = MysqlDB(connect_params)\n indices = np.array(db.get_finished_idxs(size, ordered=get_from_start, **kwargs), dtype=np.int32)\n elif sql_version == 1:\n from sql_df import Database\n db = Database(**connect_params, status=status, **kwargs)\n indices = np.array(db.fetch_ids(size, status))\n if rng is None:\n rng = np.random.default_rng(seed)\n rng.shuffle(indices)\n\n return indices\n\n# allow ordering within jiggle group??\ndef generate_multi_jiggles_set(n_molecules, n_jiggles, connect_params, randomize=True,\n get_from_start=False, rng=None, seed=None):\n from mysql_df import MysqlDB\n db = MysqlDB(connect_params)\n\n indices = np.array(db.get_columns_with_cond('id', f'mod(id, 1000) < {n_jiggles}', n_molecules * n_jiggles))\n assert len(indices) == n_molecules * n_jiggles, \"Couldn't get requested number of jiggles!\"\n\n if randomize:\n if rng is None:\n rng = np.random.default_rng(seed)\n rng.shuffle(indices)\n else:\n indices.sort()\n return indices\n\n\n\n# method that takes Molecules from a queue (molecule_queue) and places\n# DataNeighbors into another queue (data_neighbors_queue)\ndef process_molecule(pipeline, max_radius, Rs_in, Rs_out):\n while True:\n molecule = pipeline.get_molecule()\n #print(f\"> got molecule {molecule.name}, n_examples={len(molecule.perturbed_geometries)}\")\n assert isinstance(molecule, Molecule), \\\n f\"expected Molecule but got {type(molecule)} instead!\"\n\n features = torch.tensor(molecule.features, dtype=torch.float64)\n weights = torch.tensor(molecule.weights, dtype=torch.float64)\n n_examples = len(molecule.perturbed_geometries)\n for j in range(n_examples):\n g = torch.tensor(\n molecule.perturbed_geometries[j, :, :], dtype=torch.float64)\n s = torch.tensor(\n molecule.perturbed_shieldings[j], dtype=torch.float64).unsqueeze(-1) # [1,N]\n dn = dh.DataNeighbors(x=features, Rs_in=Rs_in, pos=g, r_max=max_radius,\n self_interaction=True, ID=molecule.ID,\n weights=weights, y=s, Rs_out=Rs_out)\n pipeline.put_data_neighbor(dn)\n\n\n# compares two different data neighbors structures (presumably generated in python and c++)\n# confirmed: C++ pipeline produces equivalent results to DataNeighbors\nposition_tolerance = .00001\nshielding_tolerance = .000001\n\n\ndef compare_data_neighbors(dn1, dn2):\n print(\"Comparing pair of Data Neighbors structures...\")\n if dn1.pos.shape[0] != dn2.pos.shape[0]:\n raise ValueError(\n f\"Different numbers of atoms! 
{dn1.pos.shape[0]} vs {dn2.pos.shape[0]}\")\n n_atoms = dn1.pos.shape[0]\n print(f\"Comparing {n_atoms} atoms...\")\n atom_map = [0] * n_atoms\n atom_taken = [False] * n_atoms\n for i in range(n_atoms):\n for j in range(n_atoms):\n if (not atom_taken[j]) and (torch.norm(dn1.pos[i, :] - dn2.pos[j, :]) <= position_tolerance):\n atom_map[i] = j\n atom_taken[j] = True\n if not torch.equal(dn1.x[i], dn2.x[j]):\n print(f\"1-hots don't match for atom {i}!\")\n raise ValueError()\n if abs(dn1.y[i] - dn2.y[j]) > shielding_tolerance:\n print(\n f\"Shieldings don't match for atom {j}! {dn1.y[i]} vs {dn2.y[j]}\")\n raise ValueError()\n break\n else:\n print(f\"Could not match atom {i}!\")\n raise ValueError()\n print(f\"Matched {n_atoms} atoms. atom_map: \", atom_map)\n\n if dn1.edge_attr.shape[0] != dn2.edge_attr.shape[0]:\n raise ValueError(\n f\"Different numbers of edges! {dn1.edge_attr.shape[0]} vs {dn2.edge_attr.shape[0]}\")\n n_edges = dn1.edge_attr.shape[0]\n print(f\"Comparing {n_edges} edges...\")\n edge_taken = [False] * n_edges\n for a in range(n_edges):\n e1 = torch.tensor([atom_map[dn1.edge_index[0, a]],\n atom_map[dn1.edge_index[1, a]]])\n for b in range(n_edges):\n if edge_taken[b]:\n continue\n e2 = dn2.edge_index[:, b]\n if torch.equal(e1, e2):\n if torch.norm(dn1.edge_attr[a, :] - dn2.edge_attr[b, :]) > position_tolerance:\n print(\n f\"Vectors don't match for edges {a} and {b} : ({dn1.edge_index[0,a]}) -> ({dn1.edge_index[1,a]})\")\n print(f\"{dn1.edge_attr[a,:]} vs { dn2.edge_attr[b,:]}\")\n raise ValueError()\n edge_taken[b] = True\n break\n else:\n print(f\"Could not match edge {a}!\")\n raise ValueError()\n print(f\"Matched {n_edges} edges.\")\n\n print(\"Data Neighbors matched!\")\n\n\ndef test_data_neighbors(example, Rs_in, Rs_out, max_radius, molecule_dict):\n dn1 = example\n molecule = molecule_dict[dn1.ID]\n features = torch.tensor(molecule.features, dtype=torch.float64)\n weights = torch.tensor(molecule.weights, dtype=torch.float64)\n g = torch.tensor(molecule.perturbed_geometries, dtype=torch.float64)\n if g.ndim == 3:\n g = g[0, ...]\n s = torch.tensor(molecule.perturbed_shieldings, dtype=torch.float64)\n if s.ndim == 3:\n print(\"Hello!\")\n s = s[..., 0]\n if s.ndim == 2:\n s = s[0, ...]\n dn2 = dh.DataNeighbors(x=features, Rs_in=Rs_in, pos=g, r_max=max_radius,\n self_interaction=True, ID=molecule.ID, weights=weights, y=s, Rs_out=Rs_out)\n compare_data_neighbors(dn1, dn2)\n", "id": "8931542", "language": "Python", "matching_score": 3.7977139949798584, "max_stars_count": 0, "path": "train/pipeline.py" }, { "content": "import numpy as np\nimport molecule_pipeline_ext\nfrom torch import as_tensor, transpose\nimport sys\n\n#as_tensor = None\n\nclass ExampleBatch():\n def __init__(self, pos, x, y, weights, edge_index, edge_attr, ID=-1, n_examples=1):\n self.pos = pos\n self.x = x\n self.y = y\n self.weights = weights\n self.edge_index = edge_index\n self.edge_attr = edge_attr\n self.ID = ID\n self.n_examples = n_examples\n\n # this is in-place!\n def to(self, device, non_blocking=True):\n self.pos = self.pos.to(device, non_blocking=non_blocking)\n self.x = self.x.to(device, non_blocking=non_blocking)\n self.y = self.y.to(device, non_blocking=non_blocking)\n self.weights = self.weights.to(device, non_blocking=non_blocking)\n self.edge_index = self.edge_index.to(device, non_blocking=non_blocking)\n self.edge_attr = self.edge_attr.to(device, non_blocking=non_blocking)\n return self\n\n # this is in-place!\n def share_memory_(self):\n self.pos = self.pos.share_memory_()\n 
self.x = self.x.share_memory_()\n self.y = self.y.share_memory_()\n self.weights = self.weights.share_memory_()\n self.edge_index = self.edge_index.share_memory_()\n self.edge_attr = self.edge_attr.share_memory_()\n return self\n\nclass MoleculePipeline():\n def __init__(self, batch_size, max_radius, feature_size, output_size, num_threads = 2,\n molecule_cap = 10000, example_cap = 10000, batch_cap = 100, affine_dict = None):\n if isinstance(feature_size, int):\n self.capsule = molecule_pipeline_ext.newBatchGenerator(batch_size, max_radius, feature_size,\n output_size, num_threads, molecule_cap, example_cap, batch_cap)\n else:\n all_elements = feature_size\n relevant_elements = output_size\n self.capsule = molecule_pipeline_ext.newBatchGeneratorElements(batch_size, max_radius, all_elements,\n relevant_elements, num_threads, molecule_cap, example_cap, batch_cap, affine_dict)\n feature_size = len(feature_size)\n output_size = 1\n\n self.batch_size = batch_size\n self.max_radius = max_radius\n self.feature_size = feature_size\n self.output_size = output_size\n self.num_threads = num_threads\n self.molecule_cap = molecule_cap\n self.example_cap = example_cap\n self.batch_cap = batch_cap\n\n def notify_starting(self, batch_size = None):\n if batch_size is None:\n batch_size = self.batch_size\n molecule_pipeline_ext.notifyStarting(self.capsule, batch_size)\n\n def notify_finished(self):\n molecule_pipeline_ext.notifyFinished(self.capsule)\n \n def put_molecule(self, m, block=True):\n return molecule_pipeline_ext.putMolecule(self.capsule, m.perturbed_geometries, m.features,\n m.perturbed_shieldings, m.weights, m.ID, block)\n\n # send data without first building a Molecule object.\n # Send atomic numbers, and 1-hots will be computed in C++\n def put_molecule_data(self, data, atomic_numbers, weights, ID, block=True):\n return molecule_pipeline_ext.putMoleculeData(self.capsule, data[...,:3], atomic_numbers, data[...,3], weights, ID, block)\n\n def batch_ready(self):\n return molecule_pipeline_ext.batchReady(self.capsule)\n\n def any_batch_coming(self):\n return molecule_pipeline_ext.anyBatchComing(self.capsule)\n\n def molecule_queue_size(self):\n return molecule_pipeline_ext.moleculeQueueSize(self.capsule)\n\n def example_queue_size(self):\n return molecule_pipeline_ext.exampleQueueSize(self.capsule)\n\n def batch_queue_size(self):\n return molecule_pipeline_ext.batchQueueSize(self.capsule)\n\n def num_example(self):\n return molecule_pipeline_ext.numExample(self.capsule)\n\n def num_batch(self):\n return molecule_pipeline_ext.numBatch(self.capsule)\n\n def get_next_batch(self, block = True):\n batch = molecule_pipeline_ext.getNextBatch(self.capsule, block)\n #print(\"Got batch from ext\")\n if batch is None:\n print(\"Batch is None!\")\n return None\n jiggles = np.empty(len(batch), dtype=object)\n for i, example in enumerate(batch):\n pos, x, y, weights, edge_indexT, edge_attr, ID, n_examples = example\n (pos, x, y, weights, edge_index, edge_attr) = (as_tensor(pos), as_tensor(x), as_tensor(y),\n as_tensor(weights), transpose(as_tensor(edge_indexT),0,1), as_tensor(edge_attr))\n jiggles[i] = ExampleBatch(pos, x, y, weights, edge_index, edge_attr, ID, n_examples)\n return jiggles\n\n \n#if __name__ != '__main__':\n# import torch\n# from torch import as_tensor\n# from torch import transpose\n\nif __name__ == '__main__':\n #as_tensor = lambda x: x\n #from numpy import transpose\n import os\n import sys\n def pause():\n if sys.platform.startswith('win'):\n os.system('pause')\n else:\n os.system('read -s 
-n 1 -p \"Press any key to continue...\"')\n \n print(\"Imported molecule_pipeline\")\n pause()\n print(\"Creating MoleculePipeline(batch_size = 100, max_radius = 5, feature_size = 40, output_size = 8,\"\n \"num_threads = 4, molecule_cap = 1000, example_cap = 1000, batch_cap = 10)...\")\n mp = MoleculePipeline(batch_size = 100, max_radius = 5, feature_size = 40,\n output_size = 8, num_threads = 4, molecule_cap = 1000, example_cap = 1000, batch_cap = 10)\n print(\"Done.\")\n pause()\n print(\"Exiting...\")\n", "id": "234207", "language": "Python", "matching_score": 1.8138258457183838, "max_stars_count": 0, "path": "molecule_pipeline/molecule_pipeline.py" }, { "content": "from enum import Enum\nimport re\nimport math\nimport os\nfrom glob import glob\nimport time\nimport numpy as np\nimport h5py\nimport torch\ntorch.set_default_dtype(torch.float64)\nimport torch_geometric as tg\nimport e3nn\nimport training_config\nfrom molecule_pipeline import ExampleBatch\n\n### Code to Generate Molecules ###\n\n# so we can normalize training data for the nuclei to be predicted\n#relevant_elements = training_config.relevant_elements\n\n# cpu or gpu\n#device = training_config.device\n\n# other parameters\n#training_size = training_config.training_size\n#testing_size = training_config.testing_size\n#batch_size = training_config.batch_size\n\n\n# mean-squared loss\ndef loss_function(predictions, data):\n \"\"\"\n mean-squared loss\n \"\"\"\n residuals = predictions - data.y\n return loss_from_residuals(residuals, data.weights), residuals\n \ndef loss_from_residuals(residuals, weights):\n normalization = weights.sum()\n return (weights.t() @ residuals.square()) / normalization\n \ndef dynamic_loss_from_residuals(res0, res1, weights):\n return loss_from_residuals(res1 - res0, weights)\n\n### Training Code ###\ndef train_batch(data_queue, model, optimizer):\n \"\"\"\n Train a single batch.\n Loss function is square error averaged per (revelant) atom\n Returns RMSE\n \n Args:\n data_queue : a queue of batches preloaded on gpu\n model : the model to train\n optimizer : the model's optimizer\n \"\"\"\n # set model to training mode (for batchnorm)\n model.train()\n\n data = data_queue.pop()\n \n output = model(data.x, data.edge_index, data.edge_attr)\n loss, _ = loss_function(output, data)\n \n # backward pass\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # return RMSE\n return np.sqrt(loss.item())\n\ndef train_dynamic_batch(data_queue, model, optimizer, ratio=1.):\n \"\"\"\n ratio - how many times as important the dynamic loss is than the absolute loss\n \"\"\"\n model.train()\n data0, data1 = data_queue.pop()\n \n w_abs = .5 / (1 + ratio) # half because we have two rounds of absolute loss\n w_dyn = ratio / (1 + ratio)\n weights = data0.weights\n \n # first jiggle\n out0 = model(data0.x, data0.edge_index, data0.edge_attr)\n res0 = out0 - data0.y # residuals for the first jiggle\n loss0 = loss_from_residuals(res0, weights)\n \n # first backward pass\n optimizer.zero_grad()\n (w_abs * loss0).backward()\n optimizer.step()\n \n # free up the computation graph from the first jiggle\n res0 = res0.detach()\n loss0 = loss0.detach()\n del out0\n del data0\n \n # second jiggle\n out1 = model(data1.x, data1.edge_index, data1.edge_attr)\n res1 = out1 - data1.y # residuals for the second jiggle\n loss1 = loss_from_residuals(res1, weights)\n \n # include dynamic loss\n dyn_loss = dynamic_loss_from_residuals(res1, res0, weights)\n loss = w_abs * loss1 + w_dyn * dyn_loss\n \n # second backward pass\n 
optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n return np.sqrt((loss0.item() + loss1.item())/2), np.sqrt(dyn_loss.item())\n \n \n \n \n \ndef train_half_dynamic_batch():\n pass\n\n\n# Collect list of examples into batches (slow, so only use for testing dataset)\n# returns a list of batches, where the returned batches each have an extra field: example_list\ndef batch_examples(example_list, batch_size):\n batch_list = []\n for n in range(0,len(example_list),batch_size):\n sub_list = example_list[n:n+batch_size]\n pos = torch.cat([e.pos for e in sub_list])\n x = torch.cat([e.x for e in sub_list])\n y = torch.cat([e.y for e in sub_list])\n weights = torch.cat([e.weights for e in sub_list])\n atom_tally = 0\n sub_list_edges = []\n for e in sub_list:\n sub_list_edges.append(e.edge_index + atom_tally)\n atom_tally += e.pos.shape[0]\n edge_index = torch.cat(sub_list_edges, axis=1)\n edge_attr = torch.cat([e.edge_attr for e in sub_list])\n\n batch = ExampleBatch(pos, x, y, weights, edge_index, edge_attr, n_examples=len(sub_list))\n batch.example_list = sub_list\n batch_list.append(batch)\n return batch_list\n\n#from training_config import Config\n#config = Config()\n#symbol_to_number = config.symbol_to_number\n#number_to_symbol = config.number_to_symbol\n\ndef compare_models(model1, model2, data, tolerance=.01, copy_parameters=True):\n print(\"Comparing 2 models....\")\n if copy_parameters:\n model2.load_state_dict(model1.state_dict())\n model1.eval()\n model2.eval()\n output1 = model1(data.x, data.edge_index, data.edge_attr)\n output2 = model2(data.x, data.edge_index, data.edge_attr)\n #print(torch.abs(output2 - output1) > tolerance)\n print(torch.cat((output1,output2),dim=1))\n print()\n\n\n", "id": "10083776", "language": "Python", "matching_score": 2.694753408432007, "max_stars_count": 0, "path": "train/training_utils.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring, arguments-differ\nimport torch\nfrom torch_geometric.data import Batch\nfrom torch_scatter import scatter_add\n\nimport e3nn.point.data_helpers as dh\nfrom e3nn.networks import GatedConvNetwork\nfrom e3nn.o3 import rand_rot\nfrom e3nn.point.message_passing import Convolution\n\n\ndef get_dataset():\n tetris = [[(0, 0, 0), (0, 0, 1), (1, 0, 0), (1, 1, 0)], # chiral_shape_1\n [(0, 0, 0), (0, 0, 1), (1, 0, 0), (1, -1, 0)], # chiral_shape_2\n [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0)], # square\n [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3)], # line\n [(0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0)], # corner\n [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0)], # T\n [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 1)], # zigzag\n [(0, 0, 0), (1, 0, 0), (1, 1, 0), (2, 1, 0)]] # L\n tetris = torch.tensor(tetris, dtype=torch.get_default_dtype())\n labels = torch.arange(len(tetris))\n\n # apply random rotation\n tetris = torch.einsum('ij,zaj->zai', rand_rot(), tetris)\n\n return tetris, labels\n\n\nclass SumNetwork(torch.nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.network = GatedConvNetwork(*args, **kwargs)\n\n def forward(self, *args, batch=None, **kwargs):\n output = self.network(*args, **kwargs)\n return scatter_add(output, batch, dim=0)\n\n\ndef main():\n torch.set_default_dtype(torch.float64)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n tetris, labels = get_dataset()\n\n x = torch.ones(4, 1)\n Rs_in = [(1, 0)]\n r_max = 1.1\n tetris_dataset = []\n for shape, label in zip(tetris, 
labels):\n data = dh.DataNeighbors(x, Rs_in, shape, r_max, y=torch.tensor([label]))\n tetris_dataset.append(data)\n\n Rs_hidden = [(16, 0), (16, 1), (16, 2)]\n Rs_out = [(len(tetris), 0)]\n lmax = 3\n\n f = SumNetwork(Rs_in, Rs_hidden, Rs_out, lmax, convolution=Convolution)\n f = f.to(device)\n\n batch = Batch.from_data_list(tetris_dataset)\n batch = batch.to(device)\n\n optimizer = torch.optim.Adam(f.parameters(), lr=3e-3)\n\n for step in range(50):\n N, _ = batch.x.shape\n out = f(batch.x, batch.edge_index, batch.edge_attr, size=N, batch=batch.batch)\n loss = torch.nn.functional.cross_entropy(out, batch.y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n acc = out.cpu().argmax(1).eq(labels).double().mean().item()\n\n out = f(batch.x, batch.edge_index, batch.edge_attr, size=N, batch=batch.batch)\n\n r_tetris, _ = get_dataset()\n\n r_tetris_dataset = []\n for shape, label in zip(r_tetris, labels):\n data = dh.DataNeighbors(x, Rs_in, shape, r_max, y=torch.tensor([label]))\n r_tetris_dataset.append(data)\n\n r_batch = Batch.from_data_list(r_tetris_dataset)\n r_batch = r_batch.to(device)\n\n r_out = f(r_batch.x, r_batch.edge_index, r_batch.edge_attr, size=N, batch=r_batch.batch)\n\n print(\"step={} loss={:.2e} accuracy={:.2f} equivariance error={:.1e}\".format(step, loss.item(), acc, (out - r_out).pow(2).mean().sqrt().item()))\n\n\nif __name__ == '__main__':\n main()\n", "id": "8571017", "language": "Python", "matching_score": 2.3515965938568115, "max_stars_count": 0, "path": "examples/point/tetris_torch_geo.py" }, { "content": "# pylint: disable=arguments-differ, redefined-builtin, missing-docstring, no-member, invalid-name, line-too-long, not-callable\nimport torch\nimport torch_geometric as tg\n\n\nclass Convolution(tg.nn.MessagePassing):\n def __init__(self, kernel):\n super(Convolution, self).__init__(aggr='add', flow='target_to_source')\n self.kernel = kernel\n\n def forward(self, features, edge_index, edge_r, size=None, n_norm=1, groups=1):\n \"\"\"\n :param features: Tensor of shape [n_target, dim(Rs_in)]\n :param edge_index: LongTensor of shape [2, num_messages]\n edge_index[0] = sources (convolution centers)\n edge_index[1] = targets (neighbors)\n :param edge_r: Tensor of shape [num_messages, 3]\n edge_r = position_target - position_source\n :param size: (n_source, n_target) or None\n :param n_norm: typical number of targets per source\n\n :return: Tensor of shape [n_source, dim(Rs_out)]\n \"\"\"\n k = self.kernel(edge_r)\n k.div_(n_norm ** 0.5)\n return self.propagate(edge_index, size=size, x=features, k=k, groups=groups)\n\n def message(self, x_j, k, groups):\n N = x_j.shape[0]\n cout, cin = k.shape[-2:]\n x_j = x_j.view(N, groups, cin) # Rs_tp1\n if k.shape[0] == 0: # https://github.com/pytorch/pytorch/issues/37628\n return torch.zeros(0, groups * cout)\n if k.dim() == 4 and k.shape[1] == groups: # kernel has group dimension\n return torch.einsum('egij,egj->egi', k, x_j).reshape(N, groups * cout)\n return torch.einsum('eij,egj->egi', k, x_j).reshape(N, groups * cout)\n", "id": "11525809", "language": "Python", "matching_score": 1.8901444673538208, "max_stars_count": 0, "path": "e3nn/point/message_passing.py" }, { "content": "import torch\nfrom e3nn.tensor.irrep_tensor import IrrepTensor\nfrom e3nn import rs, o3\n\n\nclass CartesianTensor():\n def __init__(self, tensor, formula=None):\n if tuple(tensor.shape) != tuple(3 for i in range(tensor.dim())):\n raise ValueError(f\"all dimensions of tensor should have shape 3 but tensor has shape {tensor.shape}\")\n if 
formula is None:\n formula = \"abcdefghijklmnopqrstuvxyz\"[:tensor.dim()]\n self.formula = formula\n self.tensor = tensor\n\n def to_irrep_transformation(self):\n dim = self.tensor.dim()\n change = o3.kron(*[o3.xyz_to_irreducible_basis()] * dim)\n Rs = [(1, 1)] # vectors\n old_indices = self.formula.split(\"=\")[0]\n Rs_out, Q = rs.reduce_tensor(self.formula, **{i: Rs for i in old_indices})\n return Rs_out, torch.einsum('ab,bc->ac', Q, change.reshape(3 ** dim, 3 ** dim))\n\n def to_irrep_tensor(self):\n Rs_out, Q = self.to_irrep_transformation()\n tensor = torch.einsum('ab,b->a', Q, self.tensor.reshape(-1))\n return IrrepTensor(tensor, Rs_out)\n", "id": "9233103", "language": "Python", "matching_score": 0.5122633576393127, "max_stars_count": 0, "path": "e3nn/tensor/cartesian_tensor.py" }, { "content": "# pylint: disable=arguments-differ, redefined-builtin, missing-docstring, no-member, invalid-name, line-too-long, not-callable\nimport torch\nimport torch_geometric as tg\nfrom ase import Atoms, neighborlist\nfrom pymatgen.core.structure import Structure\n\n\ndef neighbor_list_and_relative_vec(pos, r_max, self_interaction=True):\n \"\"\"\n Create neighbor list (edge_index) and relative vectors (edge_attr)\n based on radial cutoff.\n\n :param pos: torch.tensor of coordinates with shape (N, 3)\n :param r_max: float of radial cutoff\n :param self_interaction: whether or not to include self edge\n\n :return: list of edges [(2, num_edges)], Tensor of relative vectors [num_edges, 3]\n\n edges are given by the convention\n edge_list[0] = source (convolution center)\n edge_list[1] = target (neighbor)\n\n Thus, the edge_list has the same convention vector notation for relative vectors\n \\vec{r}_{source, target}\n \"\"\"\n N, _ = pos.shape\n atoms = Atoms(symbols=['H'] * N, positions=pos)\n nl = neighborlist.NeighborList(\n [r_max / 2.] 
* N, # NeighborList looks for intersecting spheres\n self_interaction=self_interaction,\n bothways=True,\n skin=0.0,\n )\n nl.update(atoms)\n\n nei_list = []\n geo_list = []\n\n for i, p in enumerate(pos):\n indices = nl.get_neighbors(i)[0]\n if self_interaction:\n indices = indices[:-1] # Remove extra self edge\n cart = pos[indices]\n indices = torch.LongTensor([[i, target] for target in indices])\n dist = cart - p\n nei_list.append(indices)\n geo_list.append(dist)\n return torch.cat(nei_list, dim=0).transpose(1, 0), torch.cat(geo_list, dim=0)\n\n\ndef neighbor_list_and_relative_vec_lattice(pos, lattice, r_max, self_interaction=True, r_min=1e-8):\n \"\"\"\n Create neighbor list (edge_index) and relative vectors (edge_attr)\n based on radial cutoff and periodic lattice.\n\n :param pos: torch.tensor of coordinates with shape (N, 3)\n :param r_max: float of radial cutoff\n :param self_interaction: whether or not to include self edge\n\n :return: list of edges [(2, num_edges)], Tensor of relative vectors [num_edges, 3]\n\n edges are given by the convention\n edge_list[0] = source (convolution center)\n edge_list[1] = target (neighbor index)\n\n Thus, the edge_list has the same convention vector notation for relative vectors\n \\vec{r}_{source, target}\n\n Relative vectors are given for the different images of the neighbor atom within r_max.\n \"\"\"\n N, _ = pos.shape\n structure = Structure(lattice, ['H'] * N, pos, coords_are_cartesian=True)\n\n nei_list = []\n geo_list = []\n\n neighbors = structure.get_all_neighbors(\n r_max,\n include_index=True,\n include_image=True,\n numerical_tol=r_min\n )\n for i, (site, neis) in enumerate(zip(structure, neighbors)):\n indices, cart = zip(*[(n.index, n.coords) for n in neis])\n cart = torch.tensor(cart)\n indices = torch.LongTensor([[i, target] for target in indices])\n dist = cart - torch.tensor(site.coords)\n if self_interaction:\n self_index = torch.LongTensor([[i, i]])\n indices = torch.cat([self_index, indices], dim=0)\n self_dist = torch.zeros(1, 3, dtype=dist.dtype)\n dist = torch.cat([self_dist, dist], dim=0)\n nei_list.append(indices)\n geo_list.append(dist)\n return torch.cat(nei_list, dim=0).transpose(1, 0), torch.cat(geo_list, dim=0)\n\n\nclass DataNeighbors(tg.data.Data):\n def __init__(self, x, Rs_in, pos, r_max, self_interaction=True, **kwargs):\n edge_index, edge_attr = neighbor_list_and_relative_vec(\n pos, r_max, self_interaction)\n super(DataNeighbors, self).__init__(\n x=x, edge_index=edge_index, edge_attr=edge_attr, pos=pos, Rs_in=Rs_in, **kwargs)\n\n\nclass DataPeriodicNeighbors(tg.data.Data):\n def __init__(self, x, Rs_in, pos, lattice, r_max, self_interaction=True, **kwargs):\n edge_index, edge_attr = neighbor_list_and_relative_vec_lattice(\n pos, lattice, r_max, self_interaction)\n super(DataPeriodicNeighbors, self).__init__(\n x=x, edge_index=edge_index, edge_attr=edge_attr, pos=pos, lattice=lattice, Rs_in=Rs_in, **kwargs)\n", "id": "10274947", "language": "Python", "matching_score": 3.8907876014709473, "max_stars_count": 0, "path": "e3nn/point/data_helpers.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring\nimport torch\nimport e3nn.point.data_helpers as dh\nfrom e3nn import rs\n\n\ndef test_data_helpers():\n N = 7\n lattice = torch.randn(3, 3)\n pos = torch.randn(N, 3)\n Rs_in = [(3, 0), (1, 1)]\n x = torch.randn(N, rs.dim(Rs_in))\n r_max = 1\n dh.neighbor_list_and_relative_vec_lattice(pos, lattice, r_max)\n 
dh.DataPeriodicNeighbors(x, Rs_in, pos, lattice, r_max)\n dh.neighbor_list_and_relative_vec(pos, r_max)\n dh.DataNeighbors(x, Rs_in, pos, r_max)\n\n\ndef test_silicon_neighbors():\n lattice = torch.tensor([\n [3.34939851, 0. , 1.93377613],\n [1.11646617, 3.1578432 , 1.93377613],\n [0. , 0. , 3.86755226]\n ])\n coords = torch.tensor([\n [0. , 0. , 0. ],\n [1.11646617, 0.7894608 , 1.93377613]\n ])\n r_max = 2.5\n edge_index, edge_attr = dh.neighbor_list_and_relative_vec_lattice(coords, lattice, r_max=r_max)\n edge_index_true = torch.LongTensor([\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n ])\n torch.allclose(edge_index, edge_index_true)\n", "id": "4518152", "language": "Python", "matching_score": 0.32368671894073486, "max_stars_count": 0, "path": "tests/point/data_helpers_test.py" }, { "content": "# pylint: disable=no-member, arguments-differ, redefined-builtin, missing-docstring, line-too-long, invalid-name\nimport torch\n\nfrom e3nn import o3\nfrom e3nn import rs\nfrom e3nn.networks import (\n GatedConvParityNetwork,\n GatedConvNetwork,\n ImageS2Network,\n S2ConvNetwork,\n S2ParityNetwork,\n)\n\n\ndef test_parity_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l, 1) for l in range(lmax + 1)]\n model = GatedConvParityNetwork(Rs, 4, Rs, lmax, feature_product=True)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, *angles, 1)\n R = -o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l) for l in range(lmax + 1)]\n model = GatedConvNetwork(Rs, 4 * Rs, Rs, lmax, feature_product=True)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, *angles)\n R = o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_image_network():\n torch.set_default_dtype(torch.float64)\n\n Rs = [0, 0, 3]\n\n model = ImageS2Network(\n Rs_in=Rs,\n mul=4,\n lmax=6,\n Rs_out=Rs,\n size=5,\n layers=3\n )\n\n image = rs.randn(1, 16, 16, 16, Rs)\n model(image)\n\n\ndef test_s2conv_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l, 1) for l in range(lmax + 1)]\n model = S2ConvNetwork(Rs, 4, Rs, lmax)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, *angles, 1)\n R = -o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_equivariance_s2parity_network():\n torch.set_default_dtype(torch.float64)\n mul = 3\n Rs_in = [(mul, l, -1) for l in range(3 + 1)]\n Rs_out = [(mul, l, 1) for l in range(3 + 1)]\n\n net = S2ParityNetwork(Rs_in, mul, lmax=3, Rs_out=Rs_out)\n\n abc = o3.rand_angles()\n D_in = rs.rep(Rs_in, *abc, 1)\n D_out = rs.rep(Rs_out, *abc, 1)\n\n fea = rs.randn(10, Rs_in)\n\n x1 = torch.einsum(\"ij,zj->zi\", D_out, net(fea))\n x2 = 
net(torch.einsum(\"ij,zj->zi\", D_in, fea))\n assert (x1 - x2).norm() < 1e-3 * x1.norm()\n", "id": "8683797", "language": "Python", "matching_score": 3.9793474674224854, "max_stars_count": 0, "path": "tests/networks_test.py" }, { "content": "# pylint: disable=invalid-name, missing-docstring, no-member, line-too-long\nimport unittest\nfrom functools import partial\n\nimport torch\n\nfrom e3nn import o3, rs, rsh\nfrom e3nn.kernel import Kernel as Kernel1\nfrom e3nn.kernel_mod import Kernel as KernelMod\nfrom e3nn.linear import Linear as Linear1\nfrom e3nn.linear_mod import Linear as LinearMod\nfrom e3nn.networks import GatedConvNetwork, S2Network\nfrom e3nn.non_linearities.gated_block import GatedBlock\nfrom e3nn.non_linearities.gated_block_parity import GatedBlockParity\nfrom e3nn.non_linearities.rescaled_act import absolute, relu, sigmoid, tanh\nfrom e3nn.point.operations import Convolution\nfrom e3nn.radial import ConstantRadialModel\nfrom e3nn.util.default_dtype import torch_default_dtype\n\n\nclass Tests(unittest.TestCase):\n def test_irr_repr_wigner_3j(self):\n \"\"\"Test irr_repr and wigner_3j equivariance.\"\"\"\n with torch_default_dtype(torch.float64):\n l_in = 3\n l_out = 2\n\n for l_f in range(abs(l_in - l_out), l_in + l_out + 1):\n r = torch.randn(100, 3)\n Q = o3.wigner_3j(l_out, l_in, l_f)\n\n abc = torch.randn(3)\n D_in = o3.irr_repr(l_in, *abc)\n D_out = o3.irr_repr(l_out, *abc)\n\n Y = rsh.spherical_harmonics_xyz([l_f], r @ o3.rot(*abc).t())\n W = torch.einsum(\"ijk,zk->zij\", (Q, Y))\n W1 = torch.einsum(\"zij,jk->zik\", (W, D_in))\n\n Y = rsh.spherical_harmonics_xyz([l_f], r)\n W = torch.einsum(\"ijk,zk->zij\", (Q, Y))\n W2 = torch.einsum(\"ij,zjk->zik\", (D_out, W))\n\n self.assertLess((W1 - W2).norm(), 1e-5 * W.norm(), l_f)\n\n def rotation_kernel(self, K):\n \"\"\"Test rotation equivariance on Kernel.\"\"\"\n with torch_default_dtype(torch.float64):\n Rs_in = [(2, 0), (0, 1), (2, 2)]\n Rs_out = [(2, 0), (2, 1), (2, 2)]\n\n k = K(Rs_in, Rs_out, ConstantRadialModel)\n r = torch.randn(3)\n\n abc = torch.randn(3)\n D_in = rs.rep(Rs_in, *abc)\n D_out = rs.rep(Rs_out, *abc)\n\n W1 = D_out @ k(r) # [i, j]\n W2 = k(o3.rot(*abc) @ r) @ D_in # [i, j]\n self.assertLess((W1 - W2).norm(), 10e-5 * W1.norm())\n\n def test_rotation_kernel(self):\n self.rotation_kernel(Kernel1)\n\n def test_rotation_kernel_mod(self):\n self.rotation_kernel(KernelMod)\n\n def rotation_gated_block(self, K):\n \"\"\"Test rotation equivariance on GatedBlock and dependencies.\"\"\"\n with torch_default_dtype(torch.float64):\n Rs_in = [(2, 0), (0, 1), (2, 2)]\n Rs_out = [(2, 0), (2, 1), (2, 2)]\n\n K = partial(K, RadialModel=ConstantRadialModel)\n\n act = GatedBlock(Rs_out, scalar_activation=sigmoid, gate_activation=sigmoid)\n conv = Convolution(K(Rs_in, act.Rs_in))\n\n abc = torch.randn(3)\n rot_geo = o3.rot(*abc)\n D_in = rs.rep(Rs_in, *abc)\n D_out = rs.rep(Rs_out, *abc)\n\n fea = torch.randn(1, 4, rs.dim(Rs_in))\n geo = torch.randn(1, 4, 3)\n\n x1 = torch.einsum(\"ij,zaj->zai\", (D_out, act(conv(fea, geo))))\n x2 = act(conv(torch.einsum(\"ij,zaj->zai\", (D_in, fea)), torch.einsum(\"ij,zaj->zai\", rot_geo, geo)))\n self.assertLess((x1 - x2).norm(), 10e-5 * x1.norm())\n\n def test_rotation_gated_block(self):\n self.rotation_gated_block(Kernel1)\n\n def test_rotation_gated_block_mod(self):\n self.rotation_gated_block(KernelMod)\n\n def parity_kernel(self, K):\n \"\"\"Test parity equivariance on Kernel.\"\"\"\n with torch_default_dtype(torch.float64):\n Rs_in = [(2, 0, 1), (2, 1, 1), (2, 2, -1)]\n Rs_out = 
[(2, 0, -1), (2, 1, 1), (2, 2, 1)]\n\n k = K(Rs_in, Rs_out, ConstantRadialModel)\n r = torch.randn(3)\n\n D_in = rs.rep(Rs_in, 0, 0, 0, 1)\n D_out = rs.rep(Rs_out, 0, 0, 0, 1)\n\n W1 = D_out @ k(r) # [i, j]\n W2 = k(-r) @ D_in # [i, j]\n self.assertLess((W1 - W2).norm(), 10e-5 * W1.norm())\n\n def test_parity_kernel(self):\n self.parity_kernel(Kernel1)\n\n def test_parity_kernel_mod(self):\n self.parity_kernel(KernelMod)\n\n def parity_gated_block_parity(self, K):\n \"\"\"Test parity equivariance on GatedBlockParity and dependencies.\"\"\"\n with torch_default_dtype(torch.float64):\n mul = 2\n Rs_in = [(mul, l, p) for l in range(3 + 1) for p in [-1, 1]]\n\n K = partial(K, RadialModel=ConstantRadialModel)\n\n scalars = [(mul, 0, +1), (mul, 0, -1)], [(mul, relu), (mul, absolute)]\n rs_nonscalars = [(mul, 1, +1), (mul, 1, -1), (mul, 2, +1), (mul, 2, -1), (mul, 3, +1), (mul, 3, -1)]\n n = 3 * mul\n gates = [(n, 0, +1), (n, 0, -1)], [(n, sigmoid), (n, tanh)]\n\n act = GatedBlockParity(*scalars, *gates, rs_nonscalars)\n conv = Convolution(K(Rs_in, act.Rs_in))\n\n D_in = rs.rep(Rs_in, 0, 0, 0, 1)\n D_out = rs.rep(act.Rs_out, 0, 0, 0, 1)\n\n fea = rs.randn(1, 3, Rs_in)\n geo = torch.randn(1, 3, 3)\n\n x1 = torch.einsum(\"ij,zaj->zai\", (D_out, act(conv(fea, geo))))\n x2 = act(conv(torch.einsum(\"ij,zaj->zai\", (D_in, fea)), -geo))\n self.assertLess((x1 - x2).norm(), 10e-5 * x1.norm())\n\n def test_parity_gated_block_parity(self):\n self.parity_gated_block_parity(Kernel1)\n\n def test_parity_gated_block_parity_mod(self):\n self.parity_gated_block_parity(KernelMod)\n\n def parity_rotation_gated_block_parity(self, K):\n \"\"\"Test parity and rotation equivariance on GatedBlockParity and dependencies.\"\"\"\n with torch_default_dtype(torch.float64):\n mul = 2\n Rs_in = [(mul, l, p) for l in range(3 + 1) for p in [-1, 1]]\n\n K = partial(K, RadialModel=ConstantRadialModel)\n\n scalars = [(mul, 0, +1), (mul, 0, -1)], [(mul, relu), (mul, absolute)]\n rs_nonscalars = [(mul, 1, +1), (mul, 1, -1), (mul, 2, +1), (mul, 2, -1), (mul, 3, +1), (mul, 3, -1)]\n n = 3 * mul\n gates = [(n, 0, +1), (n, 0, -1)], [(n, sigmoid), (n, tanh)]\n\n act = GatedBlockParity(*scalars, *gates, rs_nonscalars)\n conv = Convolution(K(Rs_in, act.Rs_in))\n\n abc = torch.randn(3)\n rot_geo = -o3.rot(*abc)\n D_in = rs.rep(Rs_in, *abc, 1)\n D_out = rs.rep(act.Rs_out, *abc, 1)\n\n fea = torch.randn(1, 4, rs.dim(Rs_in))\n geo = torch.randn(1, 4, 3)\n\n x1 = torch.einsum(\"ij,zaj->zai\", (D_out, act(conv(fea, geo))))\n x2 = act(conv(torch.einsum(\"ij,zaj->zai\", (D_in, fea)), torch.einsum(\"ij,zaj->zai\", rot_geo, geo)))\n self.assertLess((x1 - x2).norm(), 10e-5 * x1.norm())\n\n def test_parity_rotation_gated_block_parity(self):\n self.parity_rotation_gated_block_parity(Kernel1)\n\n def test_parity_rotation_gated_block_parity_mod(self):\n self.parity_rotation_gated_block_parity(KernelMod)\n\n def parity_rotation_linear(self, L):\n \"\"\"Test parity and rotation equivariance on Linear.\"\"\"\n with torch_default_dtype(torch.float64):\n mul = 2\n Rs_in = [(mul, l, p) for l in range(3 + 1) for p in [-1, 1]]\n Rs_out = [(mul, l, p) for l in range(3 + 1) for p in [-1, 1]]\n\n lin = L(Rs_in, Rs_out)\n\n abc = torch.randn(3)\n D_in = rs.rep(lin.Rs_in, *abc, 1)\n D_out = rs.rep(lin.Rs_out, *abc, 1)\n\n fea = torch.randn(rs.dim(Rs_in))\n\n x1 = torch.einsum(\"ij,j->i\", D_out, lin(fea))\n x2 = lin(torch.einsum(\"ij,j->i\", D_in, fea))\n self.assertLess((x1 - x2).norm(), 10e-5 * x1.norm())\n\n def test_parity_rotation_linear(self):\n 
self.parity_rotation_linear(Linear1)\n\n def test_parity_rotation_linear_mod(self):\n self.parity_rotation_linear(LinearMod)\n\n def test_equivariance_gatedconvnetwork(self):\n with torch_default_dtype(torch.float64):\n mul = 3\n Rs_in = [(mul, l) for l in range(3 + 1)]\n Rs_out = [(mul, l) for l in range(3 + 1)]\n\n net = GatedConvNetwork(Rs_in, [(10, 0), (1, 1), (1, 2), (1, 3)], Rs_out, lmax=3, feature_product=True)\n\n abc = torch.randn(3)\n rot_geo = o3.rot(*abc)\n D_in = rs.rep(Rs_in, *abc)\n D_out = rs.rep(Rs_out, *abc)\n\n fea = torch.randn(1, 10, rs.dim(Rs_in))\n geo = torch.randn(1, 10, 3)\n\n x1 = torch.einsum(\"ij,zaj->zai\", D_out, net(fea, geo))\n x2 = net(torch.einsum(\"ij,zaj->zai\", D_in, fea), torch.einsum(\"ij,zaj->zai\", rot_geo, geo))\n self.assertLess((x1 - x2).norm(), 10e-5 * x1.norm())\n\n def test_equivariance_s2network(self):\n with torch_default_dtype(torch.float64):\n mul = 3\n Rs_in = [(mul, l) for l in range(3 + 1)]\n Rs_out = [(mul, l) for l in range(3 + 1)]\n\n net = S2Network(Rs_in, mul, lmax=4, Rs_out=Rs_out)\n\n abc = o3.rand_angles()\n D_in = rs.rep(Rs_in, *abc)\n D_out = rs.rep(Rs_out, *abc)\n\n fea = torch.randn(10, rs.dim(Rs_in))\n\n x1 = torch.einsum(\"ij,zj->zi\", D_out, net(fea))\n x2 = net(torch.einsum(\"ij,zj->zi\", D_in, fea))\n self.assertLess((x1 - x2).norm(), 1e-3 * x1.norm())\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "6176519", "language": "Python", "matching_score": 2.13480806350708, "max_stars_count": 0, "path": "tests/equivariance_test.py" }, { "content": "# pylint: disable=no-member, arguments-differ, redefined-builtin, missing-docstring, line-too-long, invalid-name\nfrom functools import partial\n\nimport torch\n\nfrom e3nn import o3, rs\nfrom e3nn.kernel import Kernel\nfrom e3nn.non_linearities import GatedBlock, GatedBlockParity\nfrom e3nn.non_linearities.rescaled_act import sigmoid, swish, tanh\nfrom e3nn.non_linearities.s2 import S2Activation\nfrom e3nn.point.operations import Convolution\nfrom e3nn.image.convolution import Convolution as ImageConvolution\nfrom e3nn.image.filter import LowPassFilter\nfrom e3nn.radial import GaussianRadialModel\nfrom e3nn.tensor_product import LearnableTensorSquare\n\n\nclass GatedConvNetwork(torch.nn.Module):\n def __init__(self, Rs_in, Rs_hidden, Rs_out, lmax, layers=3,\n max_radius=1.0, number_of_basis=3, radial_layers=3,\n feature_product=False, kernel=Kernel, convolution=Convolution,\n min_radius=0.0):\n super().__init__()\n\n representations = [Rs_in]\n representations += [Rs_hidden] * layers\n representations += [Rs_out]\n\n RadialModel = partial(GaussianRadialModel, max_radius=max_radius,\n min_radius=min_radius,\n number_of_basis=number_of_basis, h=100,\n L=radial_layers, act=swish)\n\n K = partial(kernel, RadialModel=RadialModel, selection_rule=partial(o3.selection_rule_in_out_sh, lmax=lmax))\n\n def make_layer(Rs_in, Rs_out):\n if feature_product:\n tr1 = rs.TransposeToMulL(Rs_in)\n lts = LearnableTensorSquare(tr1.Rs_out, list(range(lmax + 1)), allow_change_output=True)\n tr2 = torch.nn.Flatten(2)\n Rs = tr1.mul * lts.Rs_out\n act = GatedBlock(Rs_out, swish, sigmoid)\n conv = convolution(K(Rs, act.Rs_in))\n return torch.nn.ModuleList([torch.nn.Sequential(tr1, lts, tr2), conv, act])\n else:\n act = GatedBlock(Rs_out, swish, sigmoid)\n conv = convolution(K(Rs_in, act.Rs_in))\n return torch.nn.ModuleList([conv, act])\n\n self.layers = torch.nn.ModuleList([\n make_layer(Rs_layer_in, Rs_layer_out)\n for Rs_layer_in, Rs_layer_out in zip(representations[:-2], 
representations[1:-1])\n ])\n\n self.layers.append(convolution(K(representations[-2], representations[-1])))\n self.feature_product = feature_product\n\n def forward(self, input, *args, **kwargs):\n output = input\n N = args[0].shape[-2]\n if 'n_norm' not in kwargs:\n kwargs['n_norm'] = N\n\n if self.feature_product:\n for ts, conv, act in self.layers[:-1]:\n output = ts(output)\n output = conv(output, *args, **kwargs)\n output = act(output)\n else:\n for conv, act in self.layers[:-1]:\n output = conv(output, *args, **kwargs)\n output = act(output)\n\n layer = self.layers[-1]\n output = layer(output, *args, **kwargs)\n return output\n\n\nclass GatedConvParityNetwork(torch.nn.Module):\n def __init__(self, Rs_in, mul, Rs_out, lmax, layers=3,\n max_radius=1.0, number_of_basis=3, radial_layers=3,\n feature_product=False, kernel=Kernel, convolution=Convolution,\n min_radius=0.0):\n super().__init__()\n\n R = partial(GaussianRadialModel, max_radius=max_radius,\n number_of_basis=number_of_basis, h=100,\n L=radial_layers, act=swish, min_radius=min_radius)\n K = partial(kernel, RadialModel=R, selection_rule=partial(o3.selection_rule_in_out_sh, lmax=lmax))\n\n modules = []\n\n Rs = Rs_in\n for _ in range(layers):\n scalars = [(mul, l, p) for mul, l, p in [(mul, 0, +1), (mul, 0, -1)] if rs.haslinearpath(Rs, l, p)]\n act_scalars = [(mul, swish if p == 1 else tanh) for mul, l, p in scalars]\n\n nonscalars = [(mul, l, p) for l in range(1, lmax + 1) for p in [+1, -1] if rs.haslinearpath(Rs, l, p)]\n gates = [(rs.mul_dim(nonscalars), 0, +1)]\n act_gates = [(-1, sigmoid)]\n\n act = GatedBlockParity(scalars, act_scalars, gates, act_gates, nonscalars)\n conv = convolution(K(Rs, act.Rs_in))\n\n if feature_product:\n tr1 = rs.TransposeToMulL(act.Rs_out)\n lts = LearnableTensorSquare(tr1.Rs_out, [(1, l, p) for l in range(lmax + 1) for p in [-1, 1]], allow_change_output=True)\n tr2 = torch.nn.Flatten(2)\n act = torch.nn.Sequential(act, tr1, lts, tr2)\n Rs = tr1.mul * lts.Rs_out\n else:\n Rs = act.Rs_out\n\n block = torch.nn.ModuleList([conv, act])\n modules.append(block)\n\n self.layers = torch.nn.ModuleList(modules)\n\n K = partial(K, allow_unused_inputs=True)\n self.layers.append(convolution(K(Rs, Rs_out)))\n self.feature_product = feature_product\n\n def forward(self, input, *args, **kwargs):\n output = input\n N = args[0].shape[-2]\n if 'n_norm' not in kwargs:\n kwargs['n_norm'] = N\n\n for conv, act in self.layers[:-1]:\n output = conv(output, *args, **kwargs)\n output = act(output)\n\n layer = self.layers[-1]\n output = layer(output, *args, **kwargs)\n return output\n\n\nclass S2ConvNetwork(torch.nn.Module):\n def __init__(self, Rs_in, mul, Rs_out, lmax, layers=3,\n max_radius=1.0, number_of_basis=3, radial_layers=3,\n kernel=Kernel, convolution=Convolution):\n super().__init__()\n\n Rs_hidden = [(1, l, (-1)**l) for i in range(mul) for l in range(lmax + 1)]\n representations = [Rs_in]\n representations += [Rs_hidden] * layers\n representations += [Rs_out]\n\n RadialModel = partial(GaussianRadialModel, max_radius=max_radius,\n number_of_basis=number_of_basis, h=100,\n L=radial_layers, act=swish)\n\n K = partial(kernel, RadialModel=RadialModel, selection_rule=partial(o3.selection_rule_in_out_sh, lmax=lmax))\n\n def make_layer(Rs_in, Rs_out):\n act = S2Activation([(1, l, (-1)**l) for l in range(lmax + 1)], sigmoid, lmax_out=lmax, res=20 * (lmax + 1))\n conv = convolution(K(Rs_in, Rs_out))\n return torch.nn.ModuleList([conv, act])\n\n self.layers = torch.nn.ModuleList([\n make_layer(Rs_layer_in, 
Rs_layer_out)\n for Rs_layer_in, Rs_layer_out in zip(representations[:-2], representations[1:-1])\n ])\n\n self.layers.append(convolution(K(representations[-2], representations[-1])))\n self.mul = mul\n self.lmax = lmax\n\n def forward(self, input, *args, **kwargs):\n output = input\n N = args[0].shape[-2]\n if 'n_norm' not in kwargs:\n kwargs['n_norm'] = N\n\n for conv, act in self.layers[:-1]:\n output = conv(output, *args, **kwargs)\n shape = list(output.shape)\n # Split multiplicities into new batch\n output = output.reshape(shape[:-1] + [self.mul, (self.lmax + 1) ** 2])\n output = act(output)\n output = output.reshape(shape)\n\n layer = self.layers[-1]\n output = layer(output, *args, **kwargs)\n return output\n\n\nclass S2Network(torch.nn.Module):\n def __init__(self, Rs_in, mul, lmax, Rs_out, layers=3):\n super().__init__()\n\n Rs = self.Rs_in = rs.simplify(Rs_in)\n self.Rs_out = rs.simplify(Rs_out)\n self.act = S2Activation(list(range(lmax + 1)), swish, res=20 * (lmax + 1))\n\n self.layers = []\n\n for _ in range(layers):\n lin = LearnableTensorSquare(Rs, mul * self.act.Rs_in, linear=True, allow_zero_outputs=True)\n\n # s2 nonlinearity\n Rs = mul * self.act.Rs_out\n\n self.layers += [lin]\n\n self.layers = torch.nn.ModuleList(self.layers)\n\n self.tail = LearnableTensorSquare(Rs, self.Rs_out)\n\n def forward(self, x):\n for lin in self.layers:\n x = lin(x)\n\n x = x.reshape(*x.shape[:-1], -1, rs.dim(self.act.Rs_in)) # put multiplicity into batch\n x = self.act(x)\n x = x.reshape(*x.shape[:-2], -1) # put back into representation\n\n x = self.tail(x)\n return x\n\n\nclass S2ParityNetwork(torch.nn.Module):\n def __init__(self, Rs_in, mul, lmax, Rs_out, layers=3):\n super().__init__()\n\n Rs = self.Rs_in = rs.simplify(Rs_in)\n self.Rs_out = rs.simplify(Rs_out)\n\n def make_act(p_val, p_arg, act):\n Rs = [(1, l, p_val * p_arg**l) for l in range(lmax + 1)]\n return S2Activation(Rs, act, res=20 * (lmax + 1))\n\n self.act1, self.act2 = make_act(1, -1, swish), make_act(-1, -1, tanh)\n self.mul = mul\n\n self.layers = []\n\n for _ in range(layers):\n Rs_out = mul * (self.act1.Rs_in + self.act2.Rs_in)\n lin = LearnableTensorSquare(Rs, Rs_out, linear=True, allow_zero_outputs=True)\n\n # s2 nonlinearity\n Rs = mul * (self.act1.Rs_out + self.act2.Rs_out)\n\n self.layers += [lin]\n\n self.layers = torch.nn.ModuleList(self.layers)\n\n self.tail = LearnableTensorSquare(Rs, self.Rs_out)\n\n def forward(self, x):\n for lin in self.layers:\n x = lin(x)\n\n x = x.reshape(*x.shape[:-1], self.mul, -1) # put multiplicity into batch\n x1 = x.narrow(-1, 0, rs.dim(self.act1.Rs_in))\n x2 = x.narrow(-1, rs.dim(self.act1.Rs_in), rs.dim(self.act2.Rs_in))\n x1 = self.act1(x1)\n x2 = self.act2(x2)\n x = torch.cat([x1, x2], dim=-1)\n x = x.reshape(*x.shape[:-2], -1) # put back into representation\n\n x = self.tail(x)\n return x\n\n\nclass ImageS2Network(torch.nn.Module):\n def __init__(self, Rs_in, mul, lmax, Rs_out, size=5, layers=3):\n super().__init__()\n\n Rs = rs.simplify(Rs_in)\n Rs_out = rs.simplify(Rs_out)\n Rs_act = list(range(lmax + 1))\n\n self.mul = mul\n self.layers = []\n\n for _ in range(layers):\n conv = ImageConvolution(Rs, mul * Rs_act, size, lmax=lmax, fuzzy_pixels=True, padding=size // 2)\n\n # s2 nonlinearity\n act = S2Activation(Rs_act, swish, res=60)\n Rs = mul * act.Rs_out\n\n pool = LowPassFilter(scale=2.0, stride=2)\n\n self.layers += [torch.nn.ModuleList([conv, act, pool])]\n\n self.layers = torch.nn.ModuleList(self.layers)\n self.tail = LearnableTensorSquare(Rs, Rs_out)\n\n def 
forward(self, x):\n \"\"\"\n :param x: [batch, x, y, z, channel_in]\n :return: [batch, x, y, z, channel_out]\n \"\"\"\n for conv, act, pool in self.layers:\n x = conv(x)\n\n x = x.reshape(*x.shape[:-1], self.mul, rs.dim(act.Rs_in)) # put multiplicity into batch\n x = act(x)\n x = x.reshape(*x.shape[:-2], self.mul * rs.dim(act.Rs_out)) # put back into representation\n\n x = pool(x)\n\n x = self.tail(x)\n return x\n", "id": "9757995", "language": "Python", "matching_score": 2.984557628631592, "max_stars_count": 0, "path": "e3nn/networks.py" }, { "content": "# pylint: disable=arguments-differ, redefined-builtin, missing-docstring, no-member, invalid-name, line-too-long, not-callable\nimport torch\n\nfrom e3nn import rs\nfrom e3nn.non_linearities import GatedBlock\nfrom e3nn.non_linearities.rescaled_act import swish, sigmoid\nfrom e3nn.linear import Linear\n\n\nclass DepthwiseConvolution(torch.nn.Module):\n def __init__(self, Rs_in, Rs_out, Rs_mid1, Rs_mid2, groups, convolution, linear=Linear, scalar_activation=swish, gate_activation=sigmoid):\n super().__init__()\n\n act_in = GatedBlock(groups * Rs_mid1, scalar_activation, gate_activation)\n self.lin_in = linear(Rs_in, act_in.Rs_in)\n self.act_in = act_in\n\n act_mid = GatedBlock(Rs_mid2, scalar_activation, gate_activation)\n self.conv = convolution(Rs_mid1, act_mid.Rs_in)\n self.act_mid = act_mid\n\n act_out = GatedBlock(Rs_out, scalar_activation, gate_activation)\n self.lin_out = linear(groups * Rs_mid2, act_out.Rs_in)\n self.act_out = act_out\n\n self.groups = groups\n\n def forward(self, features, *args, **kwargs):\n \"\"\"\n :param features: tensor [..., point, channel]\n :return: tensor [..., point, channel]\n \"\"\"\n features = self.lin_in(features)\n features = self.act_in(features)\n\n features = self.conv(features, *args, **kwargs, groups=self.groups)\n features = self.act_mid(features.reshape(-1, rs.dim(self.act_mid.Rs_in))).reshape(*features.shape[:-1], -1)\n\n features = self.lin_out(features)\n features = self.act_out(features)\n\n return features\n", "id": "6398519", "language": "Python", "matching_score": 0.14377444982528687, "max_stars_count": 0, "path": "e3nn/point/depthwise.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, arguments-differ\n\"\"\"\nFourier Transform : sphere (grid) <--> spherical tensor (Rs=[(1, l) for l in range(lmax + 1)])\n\nWe use the Fast Fourier Transform for specific\n\"\"\"\nimport math\n\nimport lie_learn.spaces.S3 as S3\nimport torch\nfrom e3nn import rsh, o3\nfrom e3nn.util.default_dtype import torch_default_dtype\n\n\ndef s2_grid(res_beta, res_alpha):\n \"\"\"\n grid on the sphere\n \"\"\"\n i = torch.arange(res_beta).to(dtype=torch.get_default_dtype())\n betas = (i + 0.5) / res_beta * math.pi\n\n i = torch.arange(res_alpha).to(dtype=torch.get_default_dtype())\n alphas = i / res_alpha * 2 * math.pi\n return betas, alphas\n\n\ndef spherical_harmonics_s2_grid(lmax, res_beta, res_alpha, _version=1):\n \"\"\"\n computes the spherical harmonics on the grid on the sphere\n \"\"\"\n with torch_default_dtype(torch.float64):\n betas, alphas = s2_grid(res_beta, res_alpha)\n sha = rsh.spherical_harmonics_alpha(lmax, alphas) # [a, m]\n shb = rsh.spherical_harmonics_z(list(range(lmax + 1)), betas.cos(), betas.sin().abs()) # [b, l * m]\n return betas, alphas, shb, sha\n\n\ndef complete_lmax_res(lmax, res_beta, res_alpha):\n \"\"\"\n try to use FFT\n i.e. 2 * lmax + 1 == res_alpha\n \"\"\"\n if res_beta is None:\n res_beta = 2 * (lmax + 1) # minimum req. 
to go on sphere and back\n\n if res_alpha is None:\n if lmax is not None:\n if res_beta is not None:\n res_alpha = max(2 * lmax + 1, res_beta - 1)\n else:\n res_alpha = 2 * lmax + 1 # minimum req. to go on sphere and back\n elif res_beta is not None:\n res_alpha = res_beta - 1\n\n if lmax is None:\n lmax = min(res_beta // 2 - 1, res_alpha // 2) # maximum possible to go on sphere and back\n\n assert res_beta % 2 == 0\n assert lmax + 1 <= res_beta // 2\n\n return lmax, res_beta, res_alpha\n\n\ndef irfft(x, res):\n \"\"\"\n :param x: tensor of shape [..., m]\n :return: tensor of shape [..., alpha]\n \"\"\"\n assert res % 2 == 1\n *size, sm = x.shape\n x = x.reshape(-1, sm)\n x = torch.cat([\n x.new_zeros(x.shape[0], (res - sm) // 2),\n x,\n x.new_zeros(x.shape[0], (res - sm) // 2),\n ], dim=-1)\n assert x.shape[1] == res\n l = res // 2\n x = torch.stack([\n torch.cat([\n x[:, l:l + 1],\n x[:, l + 1:].div(math.sqrt(2))\n ], dim=1),\n torch.cat([\n torch.zeros_like(x[:, :1]),\n x[:, :l].flip(-1).div(-math.sqrt(2)),\n ], dim=1),\n ], dim=-1)\n x = torch.irfft(x, 1) * res\n return x.reshape(*size, res)\n\n\ndef rfft(x, l):\n \"\"\"\n :param x: tensor of shape [..., alpha]\n :return: tensor of shape [..., m]\n \"\"\"\n *size, res = x.shape\n x = x.reshape(-1, res)\n x = torch.rfft(x, 1)\n x = torch.cat([\n x[:, 1:l + 1, 1].flip(1).mul(-math.sqrt(2)),\n x[:, :1, 0],\n x[:, 1:l + 1, 0].mul(math.sqrt(2)),\n ], dim=1)\n return x.reshape(*size, 2 * l + 1)\n\n\nclass ToS2Grid(torch.nn.Module):\n \"\"\"\n Transform spherical tensor into signal on the sphere\n\n The inverse transformation of FromS2Grid\n \"\"\"\n\n def __init__(self, lmax=None, res=None, normalization='component'):\n \"\"\"\n :param lmax: lmax of the input signal\n :param res: resolution of the output as a tuple (beta resolution, alpha resolution)\n :param normalization: either 'norm' or 'component'\n \"\"\"\n super().__init__()\n\n assert normalization in ['norm', 'component', 'none'], \"normalization needs to be 'norm', 'component' or 'none'\"\n\n if isinstance(res, int) or res is None:\n lmax, res_beta, res_alpha = complete_lmax_res(lmax, res, None)\n else:\n lmax, res_beta, res_alpha = complete_lmax_res(lmax, *res)\n\n betas, alphas, shb, sha = spherical_harmonics_s2_grid(lmax, res_beta, res_alpha)\n\n with torch_default_dtype(torch.float64):\n # normalize such that all l has the same variance on the sphere\n if normalization == 'component':\n n = math.sqrt(4 * math.pi) * torch.tensor([\n 1 / math.sqrt(2 * l + 1)\n for l in range(lmax + 1)\n ]) / math.sqrt(lmax + 1)\n if normalization == 'norm':\n n = math.sqrt(4 * math.pi) * torch.ones(lmax + 1) / math.sqrt(lmax + 1)\n if normalization == 'none':\n n = torch.ones(lmax + 1)\n m = rsh.spherical_harmonics_expand_matrix(range(lmax + 1)) # [l, m, i]\n shb = torch.einsum('lmj,bj,lmi,l->mbi', m, shb, m, n) # [m, b, i]\n\n self.register_buffer('alphas', alphas)\n self.register_buffer('betas', betas)\n self.register_buffer('sha', sha)\n self.register_buffer('shb', shb)\n self.to(torch.get_default_dtype())\n\n @property\n def grid(self):\n \"\"\"\n grid of positions\n \"\"\"\n beta, alpha = torch.meshgrid(self.betas, self.alphas)\n return o3.angles_to_xyz(alpha, beta)\n\n def forward(self, x):\n \"\"\"\n :param x: tensor [..., i=l * m]\n :return: tensor [..., beta, alpha]\n \"\"\"\n size = x.shape[:-1]\n lmax = round(x.shape[-1] ** 0.5) - 1\n x = x.reshape(-1, (lmax + 1) ** 2)\n\n x = torch.einsum('mbi,zi->zbm', self.shb, x) # [batch, beta, m]\n\n sa, sm = self.sha.shape\n if sa >= sm and sa % 
2 == 1:\n x = irfft(x, sa)\n else:\n x = torch.einsum('am,zbm->zba', self.sha, x)\n return x.reshape(*size, *x.shape[1:])\n\n\nclass FromS2Grid(torch.nn.Module):\n \"\"\"\n Transform signal on the sphere into spherical tensor\n\n The inverse transformation of ToS2Grid\n \"\"\"\n\n def __init__(self, res=None, lmax=None, normalization='component', lmax_in=None):\n \"\"\"\n :param res: resolution of the input as a tuple (beta resolution, alpha resolution)\n :param lmax: maximum l of the output\n :param normalization: either 'norm' or 'component'\n :param lmax_in: maximum l of the input of ToS2Grid in order to be the inverse\n \"\"\"\n super().__init__()\n\n assert normalization in ['norm', 'component', 'none'], \"normalization needs to be 'norm', 'component' or 'none'\"\n\n if isinstance(res, int) or res is None:\n lmax, res_beta, res_alpha = complete_lmax_res(lmax, res, None)\n else:\n lmax, res_beta, res_alpha = complete_lmax_res(lmax, *res)\n\n if lmax_in is None:\n lmax_in = lmax\n\n betas, alphas, shb, sha = spherical_harmonics_s2_grid(lmax, res_beta, res_alpha)\n\n with torch_default_dtype(torch.float64):\n # normalize such that it is the inverse of ToS2Grid\n if normalization == 'component':\n n = math.sqrt(4 * math.pi) * torch.tensor([\n math.sqrt(2 * l + 1)\n for l in range(lmax + 1)\n ]) * math.sqrt(lmax_in + 1)\n if normalization == 'norm':\n n = math.sqrt(4 * math.pi) * torch.ones(lmax + 1) * math.sqrt(lmax_in + 1)\n if normalization == 'none':\n n = 4 * math.pi * torch.ones(lmax + 1)\n m = rsh.spherical_harmonics_expand_matrix(range(lmax + 1)) # [l, m, i]\n assert res_beta % 2 == 0\n qw = torch.tensor(S3.quadrature_weights(res_beta // 2)) * res_beta**2 / res_alpha # [b]\n shb = torch.einsum('lmj,bj,lmi,l,b->mbi', m, shb, m, n, qw) # [m, b, i]\n\n self.register_buffer('alphas', alphas)\n self.register_buffer('betas', betas)\n self.register_buffer('sha', sha)\n self.register_buffer('shb', shb)\n self.to(torch.get_default_dtype())\n\n @property\n def grid(self):\n \"\"\"\n grid of positions\n \"\"\"\n beta, alpha = torch.meshgrid(self.betas, self.alphas)\n return o3.angles_to_xyz(alpha, beta)\n\n def forward(self, x):\n \"\"\"\n :param x: tensor [..., beta, alpha]\n :return: tensor [..., i=l * m]\n \"\"\"\n size = x.shape[:-2]\n res_beta, res_alpha = x.shape[-2:]\n x = x.reshape(-1, res_beta, res_alpha)\n\n sa, sm = self.sha.shape\n if sm <= sa and sa % 2 == 1:\n x = rfft(x, sm // 2)\n else:\n x = torch.einsum('am,zba->zbm', self.sha, x)\n x = torch.einsum('mbi,zbm->zi', self.shb, x)\n return x.reshape(*size, x.shape[1])\n", "id": "5505074", "language": "Python", "matching_score": 2.4671332836151123, "max_stars_count": 0, "path": "e3nn/s2grid.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring\nimport torch\n\nfrom e3nn import o3, rs\nfrom e3nn.tensor.spherical_tensor import spherical_harmonics_dirac, SphericalTensor, projection, adjusted_projection\nfrom e3nn.tensor.fourier_tensor import FourierTensor\n\n\ndef test_sh_dirac():\n with o3.torch_default_dtype(torch.float64):\n for l in range(5):\n r = torch.randn(3)\n a = spherical_harmonics_dirac(r, l)\n v = SphericalTensor(a).signal_xyz(r)\n assert v.sub(1).abs() < 1e-10\n\n\ndef test_projection():\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) > 0]\n projection(coords, lmax)\n\n\ndef test_adjusted_projection():\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) 
> 0]\n adjusted_projection(coords, lmax)\n\n\ndef test_SphericalTensor():\n torch.set_default_dtype(torch.float64)\n lmax = 6\n SphericalTensor(torch.randn((lmax + 1) ** 2))\n mul = 3\n FourierTensor(torch.randn(mul * (lmax + 1) ** 2), mul, lmax)\n\n\ndef test_from_geometry():\n torch.set_default_dtype(torch.float64)\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) > 0]\n SphericalTensor.from_geometry(coords, lmax)\n\n\ndef test_from_samples():\n torch.set_default_dtype(torch.float64)\n lmax = 2\n signal1 = torch.randn((lmax + 1)**2)\n r, v = SphericalTensor(signal1).signal_on_grid(60)\n signal2 = SphericalTensor.from_samples(r, v, res=200, lmax=lmax).signal\n assert (signal1 - signal2).abs().max() < 0.01\n\n\ndef test_from_geometry_with_radial():\n torch.set_default_dtype(torch.float64)\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) > 0]\n\n def radial_model(x):\n return torch.ones_like(x).unsqueeze(-1)\n\n FourierTensor.from_geometry(coords, radial_model, lmax)\n\n\ndef test_sph_norm():\n torch.set_default_dtype(torch.float64)\n lmax = 6\n sph = SphericalTensor(torch.randn((lmax + 1) ** 2))\n sph.sph_norm()\n\n\ndef test_plot():\n torch.set_default_dtype(torch.float64)\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) > 0]\n sph = SphericalTensor.from_geometry(coords, lmax)\n\n n = 16\n r, f = sph.plot(res=n)\n assert r.shape[2] == 3\n assert f.shape[:2] == r.shape[:2]\n\n\ndef test_plot_with_radial():\n torch.set_default_dtype(torch.float64)\n N = 4\n lmax = 6\n coords = torch.randn(N, 3)\n coords = coords[coords.norm(2, -1) > 0]\n\n def radial_model(x):\n return torch.ones_like(x).unsqueeze(-1)\n\n sph = FourierTensor.from_geometry(coords, radial_model, lmax)\n\n n = 16\n center = torch.ones(3)\n r, f = sph.plot(box_length=3.0, n=n, center=center)\n assert list(r.shape) == [n ** 3, 3]\n assert list(f.shape) == [n ** 3]\n\n\ndef test_signal_on_sphere():\n torch.set_default_dtype(torch.float64)\n lmax = 4\n sph = SphericalTensor(torch.randn((lmax + 1)**2))\n\n r, val1 = sph.signal_on_grid(2 * (lmax + 1))\n val2 = sph.signal_xyz(r)\n assert (val1 - val2).abs().max() < 1e-10\n\n\ndef test_change_lmax():\n sph = SphericalTensor(torch.zeros(1))\n sph_new = sph.change_lmax(5)\n assert sph_new.signal.shape[0] == rs.dim(sph_new.Rs)\n\n\ndef test_add():\n lmax = 4\n signal1 = torch.zeros((lmax + 1) ** 2)\n signal2 = signal1.clone()\n signal1[0] = 1.\n signal2[3] = 1.\n sph1 = SphericalTensor(signal1)\n sph2 = SphericalTensor(signal2)\n\n new_sph = sph1 + sph2\n assert new_sph.lmax == max(sph1.lmax, sph2.lmax)\n\n\ndef test_mul_and_dot():\n lmax = 4\n signal1 = torch.zeros((lmax + 1) ** 2)\n signal2 = signal1.clone()\n signal1[0] = 1.\n signal2[3] = 1.\n sph1 = SphericalTensor(signal1)\n sph2 = SphericalTensor(signal2)\n\n new_sph = sph1 * sph2\n assert rs.are_equal(new_sph.Rs, [(rs.mul_dim(sph1.Rs), 0, 0)])\n\n sph1.dot(sph2)\n", "id": "12268194", "language": "Python", "matching_score": 3.0592892169952393, "max_stars_count": 0, "path": "tests/tensor/spherical_tensor_test.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, missing-docstring, arguments-differ\nimport numpy as np\nimport torch\n\nfrom e3nn import rs\nfrom e3nn.kernel_mod import FrozenKernel\nfrom e3nn.tensor.spherical_tensor import projection\n\n\nclass FourierTensor:\n def __init__(self, signal, mul, lmax, p_val=0, p_arg=0):\n \"\"\"\n f: s2 x r -> R^N\n\n Rotations\n [D(g) f](x) = 
f(g^{-1} x)\n\n Parity\n [P f](x) = p_val f(p_arg x)\n\n f(x) = sum F^l . Y^l(x)\n\n This class contains the F^l\n\n Rotations\n [D(g) f](x) = sum [D^l(g) F^l] . Y^l(x) (using equiv. of Y and orthogonality of D)\n\n Parity\n [P f](x) = sum [p_val p_arg^l F^l] . Y^l(x) (using parity of Y)\n \"\"\"\n if signal.shape[-1] != mul * (lmax + 1)**2:\n raise ValueError(\n \"Last tensor dimension and Rs do not have same dimension.\")\n\n self.signal = signal\n self.lmax = lmax\n self.mul = mul\n self.Rs = rs.convention([(mul, l, p_val * p_arg**l)\n for l in range(lmax + 1)])\n self.radial_model = None\n\n @classmethod\n def from_geometry(cls, vectors, radial_model, lmax, sum_points=True):\n \"\"\"\n :param vectors: tensor of shape [..., xyz]\n :param radial_model: function of signature R+ -> R^mul\n :param lmax: maximal order of the signal\n \"\"\"\n size = vectors.shape[:-1]\n vectors = vectors.reshape(-1, 3) # [N, 3]\n radii = vectors.norm(2, -1)\n radial_functions = radial_model(radii)\n *_size, R = radial_functions.shape\n Rs = [(R, L) for L in range(lmax + 1)]\n mul_map = rs.map_mul_to_Rs(Rs)\n radial_functions = torch.einsum('nr,dr->nd',\n radial_functions.repeat(1, lmax + 1),\n mul_map) # [N, signal]\n\n Ys = projection(vectors / radii.unsqueeze(-1), lmax) # [N, l * m]\n irrep_map = rs.map_irrep_to_Rs(Rs)\n Ys = torch.einsum('nc,dc->nd', Ys, irrep_map) # [N, l * mul * m]\n\n signal = Ys * radial_functions # [N, l * mul * m]\n\n if sum_points:\n signal = signal.sum(0)\n else:\n signal = signal.reshape(*size, -1)\n\n new_cls = cls(signal, R, lmax)\n new_cls.radial_model = radial_model\n return new_cls\n\n def plot(self, box_length, center=None, n=30,\n radial_model=None, relu=True):\n muls, _ls, _ps = zip(*self.Rs)\n # We assume radial functions are repeated across L's\n assert len(set(muls)) == 1\n num_L = len(self.Rs)\n if radial_model is None:\n radial_model = self.radial_model\n\n def new_radial(x):\n return radial_model(x).repeat(1, num_L) # Repeat along filter dim\n r, f = plot_on_grid(box_length, new_radial, self.Rs, n=n)\n # Multiply coefficients\n f = torch.einsum('xd,d->x', f, self.signal)\n f = f.relu() if relu else f\n\n if center is not None:\n r += center.unsqueeze(0)\n\n return r, f\n\n def change_lmax(self, lmax):\n new_Rs = [(self.mul, l) for l in range(lmax + 1)]\n if self.lmax == lmax:\n return self\n elif self.lmax > lmax:\n new_signal = self.signal[:rs.dim(new_Rs)]\n return FourierTensor(new_signal, self.mul, lmax)\n elif self.lmax < lmax:\n new_signal = torch.zeros(rs.dim(new_Rs))\n new_signal[:rs.dim(self.Rs)] = self.signal\n return FourierTensor(new_signal, self.mul, lmax)\n\n def __add__(self, other):\n if self.mul != other.mul:\n raise ValueError(\"Multiplicities do not match.\")\n lmax = max(self.lmax, other.lmax)\n new_self = self.change_lmax(lmax)\n new_other = other.change_lmax(lmax)\n return FourierTensor(new_self.signal + new_other.signal, self.mul, self.lmax)\n\n\ndef plot_on_grid(box_length, radial_model, Rs, n=30):\n l_to_index = {}\n set_of_l = set([l for mul, l, p in Rs])\n start = 0\n for l in set_of_l:\n l_to_index[l] = [start, start + 2 * l + 1]\n start += 2 * l + 1\n\n r = np.mgrid[-1:1:n * 1j, -1:1:n * 1j, -1:1:n * 1j].reshape(3, -1)\n r = r.transpose(1, 0)\n r *= box_length / 2.\n r = torch.from_numpy(r)\n\n Rs_in = [(1, 0)]\n Rs_out = Rs\n\n def radial_lambda(_ignored):\n return radial_model\n\n grid = FrozenKernel(Rs_in, Rs_out, radial_lambda, r)\n f = grid()\n f = f[..., 0]\n return r, f\n", "id": "12844808", "language": "Python", "matching_score": 
3.840954065322876, "max_stars_count": 0, "path": "e3nn/tensor/fourier_tensor.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, missing-docstring, arguments-differ\nimport math\n\nimport torch\n\nfrom e3nn import o3, rs, rsh\nfrom e3nn.s2grid import ToS2Grid, FromS2Grid\nfrom e3nn.tensor.irrep_tensor import IrrepTensor\n\n\ndef spherical_harmonics_dirac(vectors, lmax):\n \"\"\"\n approximation of a signal that is 0 everywhere except on the angle (alpha, beta) where it is one.\n the higher is lmax the better is the approximation\n \"\"\"\n return 4 * math.pi / (lmax + 1)**2 * rsh.spherical_harmonics_xyz(list(range(lmax + 1)), vectors)\n\n\ndef projection(vectors, lmax):\n \"\"\"\n :param vectors: tensor of shape [..., xyz]\n :return: tensor of shape [..., l * m]\n \"\"\"\n coeff = spherical_harmonics_dirac(vectors, lmax) # [..., l * m]\n radii = vectors.norm(2, dim=-1, keepdim=True) # [...]\n return coeff * radii\n\n\ndef adjusted_projection(vectors, lmax):\n \"\"\"\n :param vectors: tensor of shape [..., xyz]\n :return: tensor of shape [l * m]\n \"\"\"\n vectors = vectors.reshape(-1, 3)\n radii = vectors.norm(2, -1) # [batch]\n vectors = vectors[radii > 0] # [batch, 3]\n\n coeff = rsh.spherical_harmonics_xyz(list(range(lmax + 1)), vectors) # [batch, l * m]\n A = torch.einsum(\n \"ai,bi->ab\",\n coeff,\n coeff\n )\n # Y(v_a) . Y(v_b) solution_b = radii_a\n solution = torch.lstsq(radii, A).solution.reshape(-1) # [b]\n assert (radii - A @ solution).abs().max() < 1e-5 * radii.abs().max()\n\n return solution @ coeff\n\n\nclass SphericalTensor:\n def __init__(self, signal: torch.Tensor, p_val: int = 0, p_arg: int = 0):\n \"\"\"\n f: s2 -> R\n\n Rotations\n [D(g) f](x) = f(g^{-1} x)\n\n Parity\n [P f](x) = p_val f(p_arg x)\n\n f(x) = sum F^l . Y^l(x)\n\n This class contains the F^l\n\n Rotations\n [D(g) f](x) = sum [D^l(g) F^l] . Y^l(x) (using equiv. of Y and orthogonality of D)\n\n Parity\n [P f](x) = sum [p_val p_arg^l F^l] . Y^l(x) (using parity of Y)\n \"\"\"\n lmax = round(math.sqrt(signal.shape[-1]) - 1)\n\n if signal.shape[-1] != (lmax + 1)**2:\n raise ValueError(\n \"Last tensor dimension and Rs do not have same dimension.\")\n\n self.signal = signal\n self.lmax = lmax\n self.Rs = rs.convention([(1, l, p_val * p_arg**l) for l in range(lmax + 1)])\n self.p_val = p_val\n self.p_arg = p_arg\n\n @classmethod\n def from_geometry(cls, vectors, lmax, p=0, adjusted=True):\n \"\"\"\n :param vectors: tensor of vectors (p=-1) or pseudovectors (p=1) of shape [..., 3=xyz]\n \"\"\"\n if adjusted:\n signal = adjusted_projection(vectors, lmax)\n else:\n vectors = vectors.reshape(-1, 3)\n r = vectors.norm(dim=1)\n sh = rsh.spherical_harmonics_xyz(list(range(lmax + 1)), vectors)\n # 0.5 * sum_a ( Y(v_a) . 
sum_b r_b Y(v_b) s - r_a )^2\n A = torch.einsum('ai,b,bi->a', sh, r, sh)\n # 0.5 * sum_a ( A_a s - r_a )^2\n # sum_a A_a^2 s = sum_a A_a r_a\n s = torch.dot(A, r) / A.norm().pow(2)\n signal = s * torch.einsum('a,ai->i', r, sh)\n return cls(signal, p_val=1, p_arg=p)\n\n @classmethod\n def from_samples(cls, positions, values, lmax, res=100, p_val=0, p_arg=0):\n \"\"\"\n :param positions: tensor of shape [num_points, 3=xyz]\n :param values: tensor of shape [num_points]\n \"\"\"\n positions = positions.reshape(-1, 3)\n values = values.reshape(-1)\n positions /= positions.norm(p=2, dim=1, keepdim=True)\n assert positions.shape[0] == values.shape[0], \"positions and values must have the same number of points\"\n\n s2 = FromS2Grid(res=res, lmax=lmax, normalization='none')\n pos = s2.grid\n\n cd = torch.cdist(pos, positions, p=2)\n val = values[cd.argmin(2)]\n\n return cls(s2(val), p_val=p_val, p_arg=p_arg)\n\n @classmethod\n def spherical_harmonic(cls, l, m, lmax=None):\n if lmax is None:\n lmax = l\n signal = torch.zeros((lmax + 1)**2)\n signal[l**2 + l + m] = 1\n return cls(signal)\n\n def __repr__(self):\n p_str = \"\"\n if self.p_arg != 0 and self.p_val != 0:\n p_str = f\", [Parity f](x) = {'-' if self.p_val == -1 else ''}f({'-' if self.p_arg == -1 else ''}x)\"\n return f\"{self.__class__.__name__}(lmax={self.lmax}{p_str})\"\n\n def sph_norm(self):\n i = 0\n norms = []\n for l in range(self.lmax + 1):\n n = self.signal[..., i: i + 2 * l + 1].norm(p=2, dim=-1)\n norms.append(n)\n i += 2 * l + 1\n return torch.stack(norms, dim=-1)\n\n def signal_xyz(self, r):\n \"\"\"\n Evaluate the signal on the sphere\n \"\"\"\n sh = rsh.spherical_harmonics_xyz(list(range(self.lmax + 1)), r)\n dim = (self.lmax + 1)**2\n output = torch.einsum(\n 'ai,zi->za', sh.reshape(-1, dim), self.signal.reshape(-1, dim))\n return output.reshape((*self.signal.shape[:-1], *r.shape[:-1]))\n\n def signal_alpha_beta(self, alpha, beta):\n \"\"\"\n Evaluate the signal on the sphere\n \"\"\"\n sh = rsh.spherical_harmonics_alpha_beta(\n list(range(self.lmax + 1)), alpha, beta)\n dim = (self.lmax + 1)**2\n output = torch.einsum(\n 'ai,zi->za', sh.reshape(-1, dim), self.signal.reshape(-1, dim))\n return output.reshape((*self.signal.shape[:-1], *alpha.shape))\n\n def signal_on_grid(self, res=100):\n \"\"\"\n :return: [..., beta, alpha]\n Evaluate the signal on the sphere\n \"\"\"\n s2 = ToS2Grid(self.lmax, res=res, normalization='none')\n return s2.grid, s2(self.signal)\n\n def plot(self, res=100, radius=True, center=None, relu=False):\n \"\"\"\n r, f = self.plot()\n\n import plotly.graph_objects as go\n surface = go.Surface(\n x=r[:, :, 0].numpy(),\n y=r[:, :, 1].numpy(),\n z=r[:, :, 2].numpy(),\n surfacecolor=f.numpy(),\n )\n fig = go.Figure(data=[surface])\n fig.show()\n \"\"\"\n assert self.signal.dim() == 1\n\n r, f = self.signal_on_grid(res)\n f = f.relu() if relu else f\n\n # beta: [0, pi]\n r[0] = r.new_tensor([0, 0, 1])\n r[-1] = r.new_tensor([0, 0, -1])\n f[0] = f[0].mean()\n f[-1] = f[-1].mean()\n\n # alpha: [0, 2pi]\n r = torch.cat([r, r[:, :1]], dim=1) # [beta, alpha, 3]\n f = torch.cat([f, f[:, :1]], dim=1) # [beta, alpha]\n\n if radius:\n r *= f.abs().unsqueeze(-1)\n\n if center is not None:\n r += center\n\n return r, f\n\n def change_lmax(self, lmax):\n new_Rs = [(1, l) for l in range(lmax + 1)]\n if self.lmax == lmax:\n return self\n elif self.lmax > lmax:\n new_signal = self.signal[..., :rs.dim(new_Rs)]\n return SphericalTensor(new_signal, self.p_val, self.p_arg)\n elif self.lmax < lmax:\n new_signal = 
torch.zeros(*self.signal.shape[:-1], rs.dim(new_Rs))\n new_signal[..., :rs.dim(self.Rs)] = self.signal\n return SphericalTensor(new_signal, self.p_val, self.p_arg)\n\n def __add__(self, other):\n lmax = max(self.lmax, other.lmax)\n new_self = self.change_lmax(lmax)\n new_other = other.change_lmax(lmax)\n\n return SphericalTensor(new_self.signal + new_other.signal, self.p_val, self.p_arg)\n\n def __mul__(self, other):\n # Dot product if Rs of both objects match\n lmax = max(self.lmax, other.lmax)\n new_self = self.change_lmax(lmax)\n new_other = other.change_lmax(lmax)\n\n mult = new_self.signal * new_other.signal\n mapping_matrix = rs.map_mul_to_Rs(new_self.Rs)\n scalars = torch.einsum('rm,...r->...m', mapping_matrix, mult)\n Rs = [(1, 0, p1 * p2) for (_, l1, p1), (_, l2, p2) in zip(new_self.Rs, new_other.Rs)]\n return IrrepTensor(scalars, Rs)\n\n def dot(self, other):\n return (self * other).tensor.sum(-1)\n\n def __matmul__(self, other):\n # Tensor product\n # Better handle mismatch of features indices\n tp = rs.TensorProduct(self.Rs, other.Rs, o3.selection_rule)\n return IrrepTensor(tp(self.signal, other.signal), tp.Rs_out)\n", "id": "12172802", "language": "Python", "matching_score": 2.068225860595703, "max_stars_count": 0, "path": "e3nn/tensor/spherical_tensor.py" }, { "content": "# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring\nimport torch\nfrom e3nn import o3, rs\nfrom e3nn.tensor_product import (LearnableTensorProduct, LearnableTensorSquare,\n WeightedTensorProduct)\n\n\ndef test_learnable_tensor_square_normalization():\n Rs_in = [1, 2, 3, 4]\n Rs_out = [0, 2, 4, 5]\n\n m = LearnableTensorSquare(Rs_in, Rs_out)\n y = m(rs.randn(1000, Rs_in))\n\n assert y.var().log10().abs() < 1.5, y.var().item()\n\n\ndef test_learnable_tensor_product_normalization():\n Rs_in1 = [2, 0, 4]\n Rs_in2 = [2, 3]\n Rs_out = [0, 2, 4, 5]\n\n m = LearnableTensorProduct(Rs_in1, Rs_in2, Rs_out)\n\n x1 = rs.randn(1000, Rs_in1)\n x2 = rs.randn(1000, Rs_in2)\n y = m(x1, x2)\n\n assert y.var().log10().abs() < 1.5, y.var().item()\n\n\ndef test_weighted_tensor_product():\n torch.set_default_dtype(torch.float64)\n\n Rs_in1 = rs.simplify([1] * 20 + [2] * 4)\n Rs_in2 = rs.simplify([0] * 10 + [1] * 10 + [2] * 5)\n Rs_out = rs.simplify([0] * 3 + [1] * 4)\n\n tp = WeightedTensorProduct(Rs_in1, Rs_in2, Rs_out, groups=2)\n\n x1 = rs.randn(20, Rs_in1)\n x2 = rs.randn(20, Rs_in2)\n w = torch.randn(20, tp.nweight, requires_grad=True)\n\n angles = o3.rand_angles()\n\n z1 = tp(x1, x2, w) @ rs.rep(Rs_out, *angles).T\n z2 = tp(x1 @ rs.rep(Rs_in1, *angles).T, x2 @ rs.rep(Rs_in2, *angles).T, w)\n\n z1.sum().backward()\n\n assert torch.allclose(z1, z2)\n", "id": "10430351", "language": "Python", "matching_score": 2.6265335083007812, "max_stars_count": 0, "path": "tests/tensor_product_test.py" }, { "content": "# pylint: disable=missing-docstring\nfrom .kernel import Kernel\nfrom .rs import TensorProduct, TensorSquare\n\n__version__ = \"0.0.0\"\n__all__ = ['Kernel', 'TensorProduct', 'TensorSquare']\n", "id": "12017112", "language": "Python", "matching_score": 1.439024806022644, "max_stars_count": 0, "path": "e3nn/__init__.py" } ]
2.383538